2 * Copyright (c) 2016-2020 Netflix, Inc.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
31 #include "opt_inet6.h"
32 #include "opt_ipsec.h"
33 #include "opt_tcpdebug.h"
34 #include "opt_ratelimit.h"
35 #include <sys/param.h>
37 #include <sys/module.h>
38 #include <sys/kernel.h>
40 #include <sys/hhook.h>
43 #include <sys/malloc.h>
45 #include <sys/mutex.h>
47 #include <sys/proc.h> /* for proc0 declaration */
48 #include <sys/socket.h>
49 #include <sys/socketvar.h>
50 #include <sys/sysctl.h>
51 #include <sys/systm.h>
53 #include <sys/qmath.h>
55 #include <sys/stats.h> /* Must come after qmath.h and tree.h */
59 #include <sys/refcount.h>
60 #include <sys/queue.h>
61 #include <sys/tim_filter.h>
63 #include <sys/kthread.h>
64 #include <sys/kern_prefetch.h>
65 #include <sys/protosw.h>
67 #include <sys/sched.h>
68 #include <machine/cpu.h>
72 #include <net/route.h>
73 #include <net/route/nhop.h>
76 #define TCPSTATES /* for logging */
78 #include <netinet/in.h>
79 #include <netinet/in_kdtrace.h>
80 #include <netinet/in_pcb.h>
81 #include <netinet/ip.h>
82 #include <netinet/ip_icmp.h> /* required for icmp_var.h */
83 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
84 #include <netinet/ip_var.h>
85 #include <netinet/ip6.h>
86 #include <netinet6/in6_pcb.h>
87 #include <netinet6/ip6_var.h>
88 #include <netinet/tcp.h>
90 #include <netinet/tcp_fsm.h>
91 #include <netinet/tcp_log_buf.h>
92 #include <netinet/tcp_seq.h>
93 #include <netinet/tcp_timer.h>
94 #include <netinet/tcp_var.h>
95 #include <netinet/tcp_hpts.h>
96 #include <netinet/tcp_ratelimit.h>
97 #include <netinet/tcp_accounting.h>
98 #include <netinet/tcpip.h>
99 #include <netinet/cc/cc.h>
100 #include <netinet/cc/cc_newreno.h>
101 #include <netinet/tcp_fastopen.h>
102 #include <netinet/tcp_lro.h>
103 #ifdef NETFLIX_SHARED_CWND
104 #include <netinet/tcp_shared_cwnd.h>
107 #include <netinet/tcp_debug.h>
108 #endif /* TCPDEBUG */
110 #include <netinet/tcp_offload.h>
113 #include <netinet6/tcp6_var.h>
116 #include <netipsec/ipsec_support.h>
118 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
119 #include <netipsec/ipsec.h>
120 #include <netipsec/ipsec6.h>
123 #include <netinet/udp.h>
124 #include <netinet/udp_var.h>
125 #include <machine/in_cksum.h>
128 #include <security/mac/mac_framework.h>
130 #include "sack_filter.h"
131 #include "tcp_rack.h"
132 #include "rack_bbr_common.h"
134 uma_zone_t rack_zone;
135 uma_zone_t rack_pcb_zone;
138 #define TICKS2SBT(__t) (tick_sbt * ((sbintime_t)(__t)))
141 VNET_DECLARE(uint32_t, newreno_beta);
142 VNET_DECLARE(uint32_t, newreno_beta_ecn);
143 #define V_newreno_beta VNET(newreno_beta)
144 #define V_newreno_beta_ecn VNET(newreno_beta_ecn)
147 MALLOC_DEFINE(M_TCPFSB, "tcp_fsb", "TCP fast send block");
148 MALLOC_DEFINE(M_TCPDO, "tcp_do", "TCP deferred options");
150 struct sysctl_ctx_list rack_sysctl_ctx;
151 struct sysctl_oid *rack_sysctl_root;
/*
157 * The RACK module incorporates a number of
158 * TCP ideas that have been put out into the IETF
159 * over the last few years:
160 * - Matt Mathis's Rate Halving which slowly drops
161 * the congestion window so that the ack clock can
162 * be maintained during a recovery.
163 * - Yuchung Cheng's RACK TCP (for which it's named) that
164 * will stop us using the number of dup acks and instead
165 * use time as the gauge of when we retransmit.
166 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
167 * of Dukkipati et al.
168 * RACK depends on SACK, so if an endpoint arrives that
169 * cannot do SACK the state machine below will shuttle the
170 * connection back to using the "default" TCP stack that is
173 * To implement RACK the original TCP stack was first decomposed
174 * into a functional state machine with individual states
175 * for each of the possible TCP connection states. The do_segment
176 * function's role in life is to mandate that the connection supports SACK
177 * initially and then ensure that the RACK state matches the connection
178 * state before calling the state's do_segment function. Each
179 * state is simplified due to the fact that the original do_segment
180 * has been decomposed and we *know* what state we are in (no
181 * switches on the state) and all tests for SACK are gone. This
182 * greatly simplifies what each state does.
184 * TCP output is also over-written with a new version since it
185 * must maintain the new rack scoreboard.
 */
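/*
 * A minimal sketch of that decomposition (illustrative only, using the
 * per-state handlers and the rack_set_state()/rack_do_segment() names
 * declared later in this file; the function-pointer field shown here is
 * a simplified stand-in for however the stack records the current
 * handler):
 *
 *	switch (tp->t_state) {
 *	case TCPS_SYN_SENT:
 *		rack->r_substate = rack_do_syn_sent;
 *		break;
 *	case TCPS_ESTABLISHED:
 *		rack->r_substate = rack_do_established;
 *		break;
 *	...
 *	}
 *
 * Each inbound segment is then handed straight to that handler, with no
 * per-segment switch on t_state and no SACK checks:
 *
 *	retval = (*rack->r_substate)(m, th, so, tp, &to, drop_hdrlen,
 *	    tlen, tiwin, thflags, nxt_pkt, iptos);
 */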
188 static int32_t rack_tlp_thresh = 1;
189 static int32_t rack_tlp_limit = 2; /* No more than 2 TLPs w-out new data */
190 static int32_t rack_tlp_use_greater = 1;
191 static int32_t rack_reorder_thresh = 2;
192 static int32_t rack_reorder_fade = 60000000; /* 0 - never fade, def 60,000,000 (60 seconds) */
194 static uint8_t rack_req_measurements = 1;
195 /* Attack threshold detections */
196 static uint32_t rack_highest_sack_thresh_seen = 0;
197 static uint32_t rack_highest_move_thresh_seen = 0;
198 static int32_t rack_enable_hw_pacing = 0; /* Due to CCSP keep it off by default */
199 static int32_t rack_hw_pace_extra_slots = 2; /* 2 extra MSS time betweens */
200 static int32_t rack_hw_rate_caps = 1; /* 1; */
201 static int32_t rack_hw_rate_min = 0; /* 1500000;*/
202 static int32_t rack_hw_rate_to_low = 0; /* 1200000; */
203 static int32_t rack_hw_up_only = 1;
204 static int32_t rack_stats_gets_ms_rtt = 1;
205 static int32_t rack_prr_addbackmax = 2;
207 static int32_t rack_pkt_delay = 1000;
208 static int32_t rack_send_a_lot_in_prr = 1;
209 static int32_t rack_min_to = 1000; /* Minimum timeout in microseconds */
210 static int32_t rack_verbose_logging = 0;
211 static int32_t rack_ignore_data_after_close = 1;
212 static int32_t rack_enable_shared_cwnd = 1;
213 static int32_t rack_use_cmp_acks = 1;
214 static int32_t rack_use_fsb = 1;
215 static int32_t rack_use_rfo = 1;
216 static int32_t rack_use_rsm_rfo = 1;
217 static int32_t rack_max_abc_post_recovery = 2;
218 static int32_t rack_client_low_buf = 0;
219 #ifdef TCP_ACCOUNTING
220 static int32_t rack_tcp_accounting = 0;
222 static int32_t rack_limits_scwnd = 1;
223 static int32_t rack_enable_mqueue_for_nonpaced = 0;
224 static int32_t rack_disable_prr = 0;
225 static int32_t use_rack_rr = 1;
226 static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/ca)? */
227 static int32_t rack_persist_min = 250000; /* 250usec */
228 static int32_t rack_persist_max = 2000000; /* 2 Second in usec's */
229 static int32_t rack_sack_not_required = 1; /* set to one to allow non-sack to use rack */
230 static int32_t rack_default_init_window = 0; /* Use system default */
231 static int32_t rack_limit_time_with_srtt = 0;
232 static int32_t rack_autosndbuf_inc = 20; /* In percentage form */
233 static int32_t rack_enobuf_hw_boost_mult = 2; /* How many times the hw rate we boost slot using time_between */
234 static int32_t rack_enobuf_hw_max = 12000; /* 12 ms in usecs */
235 static int32_t rack_enobuf_hw_min = 10000; /* 10 ms in usecs */
236 static int32_t rack_hw_rwnd_factor = 2; /* How many max_segs the rwnd must be before we hold off sending */
/*
238 * Currently regular tcp has a rto_min of 30ms,
239 * the backoff goes 12 times, so that ends up
240 * being a total of 122.850 seconds before a
241 * connection is killed.
 */
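/*
 * A worked reading of that figure (assuming pure doubling of the 30ms
 * minimum with no cap across the 12 backoffs):
 *
 *	30ms * (2^0 + 2^1 + ... + 2^11) = 30ms * 4095 = 122.850 seconds
 */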
243 static uint32_t rack_def_data_window = 20;
244 static uint32_t rack_goal_bdp = 2;
245 static uint32_t rack_min_srtts = 1;
246 static uint32_t rack_min_measure_usec = 0;
247 static int32_t rack_tlp_min = 10000; /* 10ms */
248 static int32_t rack_rto_min = 30000; /* 30,000 usec same as main freebsd */
249 static int32_t rack_rto_max = 4000000; /* 4 seconds in usec's */
250 static const int32_t rack_free_cache = 2;
251 static int32_t rack_hptsi_segments = 40;
252 static int32_t rack_rate_sample_method = USE_RTT_LOW;
253 static int32_t rack_pace_every_seg = 0;
254 static int32_t rack_delayed_ack_time = 40000; /* 40ms in usecs */
255 static int32_t rack_slot_reduction = 4;
256 static int32_t rack_wma_divisor = 8; /* For WMA calculation */
257 static int32_t rack_cwnd_block_ends_measure = 0;
258 static int32_t rack_rwnd_block_ends_measure = 0;
259 static int32_t rack_def_profile = 0;
261 static int32_t rack_lower_cwnd_at_tlp = 0;
262 static int32_t rack_limited_retran = 0;
263 static int32_t rack_always_send_oldest = 0;
264 static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;
266 static uint16_t rack_per_of_gp_ss = 250; /* 250 % slow-start */
267 static uint16_t rack_per_of_gp_ca = 200; /* 200 % congestion-avoidance */
268 static uint16_t rack_per_of_gp_rec = 200; /* 200 % of bw */
271 static uint16_t rack_per_of_gp_probertt = 60; /* 60% of bw */
272 static uint16_t rack_per_of_gp_lowthresh = 40; /* 40% is bottom */
273 static uint16_t rack_per_of_gp_probertt_reduce = 10; /* 10% reduction */
274 static uint16_t rack_atexit_prtt_hbp = 130; /* Clamp to 130% on exit prtt if highly buffered path */
275 static uint16_t rack_atexit_prtt = 130; /* Clamp to 130% on exit prtt if non highly buffered path */
277 static uint32_t rack_max_drain_wait = 2; /* How many gp srtt's before we give up draining */
278 static uint32_t rack_must_drain = 1; /* How many GP srtt's we *must* wait */
279 static uint32_t rack_probertt_use_min_rtt_entry = 1; /* Use the min to calculate the goal else gp_srtt */
280 static uint32_t rack_probertt_use_min_rtt_exit = 0;
281 static uint32_t rack_probe_rtt_sets_cwnd = 0;
282 static uint32_t rack_probe_rtt_safety_val = 2000000; /* No more than 2 sec in probe-rtt */
283 static uint32_t rack_time_between_probertt = 9600000; /* 9.6 sec in usecs */
284 static uint32_t rack_probertt_gpsrtt_cnt_mul = 0; /* How many srtt periods does probe-rtt last top fraction */
285 static uint32_t rack_probertt_gpsrtt_cnt_div = 0; /* How many srtt periods does probe-rtt last bottom fraction */
286 static uint32_t rack_min_probertt_hold = 40000; /* Equal to delayed ack time */
287 static uint32_t rack_probertt_filter_life = 10000000;
288 static uint32_t rack_probertt_lower_within = 10;
289 static uint32_t rack_min_rtt_movement = 250000; /* Must move at least 250ms (in microseconds) to count as a lowering */
290 static int32_t rack_pace_one_seg = 0; /* Shall we pace for less than 1.4Meg 1MSS at a time */
291 static int32_t rack_probertt_clear_is = 1;
292 static int32_t rack_max_drain_hbp = 1; /* Extra drain times gpsrtt for highly buffered paths */
293 static int32_t rack_hbp_thresh = 3; /* what is the divisor max_rtt/min_rtt to decide a hbp */
296 static int32_t rack_max_per_above = 30; /* When we go to increment stop if above 100+this% */
298 /* Timely information */
299 /* Combining these two gives the range of 'no change' to bw */
300 /* ie the up/down provide the upper and lower bound */
301 static int32_t rack_gp_per_bw_mul_up = 2; /* 2% */
302 static int32_t rack_gp_per_bw_mul_down = 4; /* 4% */
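/*
 * Illustrative reading of the two defaults above (not of the exact
 * comparison code): with mul_up = 2 and mul_down = 4, a new goodput
 * estimate landing between 96% and 102% of the previous estimate falls
 * in the 'no change' band, so timely neither raises nor lowers the
 * pacing multipliers for that measurement.
 */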
303 static int32_t rack_gp_rtt_maxmul = 3; /* 3 x maxmin */
304 static int32_t rack_gp_rtt_minmul = 1; /* minrtt + (minrtt/mindiv) is lower rtt */
305 static int32_t rack_gp_rtt_mindiv = 4; /* minrtt + (minrtt * minmul/mindiv) is lower rtt */
306 static int32_t rack_gp_decrease_per = 20; /* 20% decrease in multiplier */
307 static int32_t rack_gp_increase_per = 2; /* 2% increase in multiplier */
308 static int32_t rack_per_lower_bound = 50; /* Don't allow to drop below this multiplier */
309 static int32_t rack_per_upper_bound_ss = 0; /* Don't allow SS to grow above this */
310 static int32_t rack_per_upper_bound_ca = 0; /* Don't allow CA to grow above this */
311 static int32_t rack_do_dyn_mul = 0; /* Are the rack gp multipliers dynamic */
312 static int32_t rack_gp_no_rec_chg = 1; /* Prohibit recovery from reducing its multiplier */
313 static int32_t rack_timely_dec_clear = 6; /* Do we clear decrement count at a value (6)? */
314 static int32_t rack_timely_max_push_rise = 3; /* One round of pushing */
315 static int32_t rack_timely_max_push_drop = 3; /* Three rounds of pushing */
316 static int32_t rack_timely_min_segs = 4; /* 4 segment minimum */
317 static int32_t rack_use_max_for_nobackoff = 0;
318 static int32_t rack_timely_int_timely_only = 0; /* do interim timely's only use the timely algo (no b/w changes)? */
319 static int32_t rack_timely_no_stopping = 0;
320 static int32_t rack_down_raise_thresh = 100;
321 static int32_t rack_req_segs = 1;
322 static uint64_t rack_bw_rate_cap = 0;
324 /* Weird delayed ack mode */
325 static int32_t rack_use_imac_dack = 0;
326 /* Rack specific counters */
327 counter_u64_t rack_badfr;
328 counter_u64_t rack_badfr_bytes;
329 counter_u64_t rack_rtm_prr_retran;
330 counter_u64_t rack_rtm_prr_newdata;
331 counter_u64_t rack_timestamp_mismatch;
332 counter_u64_t rack_reorder_seen;
333 counter_u64_t rack_paced_segments;
334 counter_u64_t rack_unpaced_segments;
335 counter_u64_t rack_calc_zero;
336 counter_u64_t rack_calc_nonzero;
337 counter_u64_t rack_saw_enobuf;
338 counter_u64_t rack_saw_enobuf_hw;
339 counter_u64_t rack_saw_enetunreach;
340 counter_u64_t rack_per_timer_hole;
341 counter_u64_t rack_large_ackcmp;
342 counter_u64_t rack_small_ackcmp;
344 counter_u64_t rack_adjust_map_bw;
346 /* Tail loss probe counters */
347 counter_u64_t rack_tlp_tot;
348 counter_u64_t rack_tlp_newdata;
349 counter_u64_t rack_tlp_retran;
350 counter_u64_t rack_tlp_retran_bytes;
351 counter_u64_t rack_tlp_retran_fail;
352 counter_u64_t rack_to_tot;
353 counter_u64_t rack_to_arm_rack;
354 counter_u64_t rack_to_arm_tlp;
355 counter_u64_t rack_hot_alloc;
356 counter_u64_t rack_to_alloc;
357 counter_u64_t rack_to_alloc_hard;
358 counter_u64_t rack_to_alloc_emerg;
359 counter_u64_t rack_to_alloc_limited;
360 counter_u64_t rack_alloc_limited_conns;
361 counter_u64_t rack_split_limited;
363 #define MAX_NUM_OF_CNTS 13
364 counter_u64_t rack_proc_comp_ack[MAX_NUM_OF_CNTS];
365 counter_u64_t rack_multi_single_eq;
366 counter_u64_t rack_proc_non_comp_ack;
368 counter_u64_t rack_fto_send;
369 counter_u64_t rack_fto_rsm_send;
370 counter_u64_t rack_nfto_resend;
371 counter_u64_t rack_non_fto_send;
372 counter_u64_t rack_extended_rfo;
374 counter_u64_t rack_sack_proc_all;
375 counter_u64_t rack_sack_proc_short;
376 counter_u64_t rack_sack_proc_restart;
377 counter_u64_t rack_sack_attacks_detected;
378 counter_u64_t rack_sack_attacks_reversed;
379 counter_u64_t rack_sack_used_next_merge;
380 counter_u64_t rack_sack_splits;
381 counter_u64_t rack_sack_used_prev_merge;
382 counter_u64_t rack_sack_skipped_acked;
383 counter_u64_t rack_ack_total;
384 counter_u64_t rack_express_sack;
385 counter_u64_t rack_sack_total;
386 counter_u64_t rack_move_none;
387 counter_u64_t rack_move_some;
389 counter_u64_t rack_used_tlpmethod;
390 counter_u64_t rack_used_tlpmethod2;
391 counter_u64_t rack_enter_tlp_calc;
392 counter_u64_t rack_input_idle_reduces;
393 counter_u64_t rack_collapsed_win;
394 counter_u64_t rack_tlp_does_nada;
395 counter_u64_t rack_try_scwnd;
396 counter_u64_t rack_hw_pace_init_fail;
397 counter_u64_t rack_hw_pace_lost;
398 counter_u64_t rack_sbsndptr_right;
399 counter_u64_t rack_sbsndptr_wrong;
401 /* Temp CPU counters */
402 counter_u64_t rack_find_high;
404 counter_u64_t rack_progress_drops;
405 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE];
406 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE];
409 #define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2)))
411 #define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax) do { \
412 (tv) = (value) + TICKS_2_USEC(tcp_rexmit_slop); \
413 if ((u_long)(tv) < (u_long)(tvmin)) \
	(tv) = (tvmin); \
415 if ((u_long)(tv) > (u_long)(tvmax)) \
	(tv) = (tvmax); \
} while (0)
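/*
 * Minimal usage sketch for the two macros above (the local variable
 * "rto" is hypothetical): RACK_REXMTVAL() yields srtt + 4 * rttvar
 * floored at rack_rto_min, and RACK_TCPT_RANGESET() then adds the
 * retransmit slop and clamps the result into [tvmin, tvmax].
 *
 *	uint32_t rto;
 *
 *	rto = RACK_REXMTVAL(tp);
 *	RACK_TCPT_RANGESET(rto, rto, rack_rto_min, rack_rto_max);
 */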
420 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line);
423 rack_process_ack(struct mbuf *m, struct tcphdr *th,
424 struct socket *so, struct tcpcb *tp, struct tcpopt *to,
425 uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val);
427 rack_process_data(struct mbuf *m, struct tcphdr *th,
428 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
429 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
431 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack,
432 uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery);
433 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack);
434 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack,
436 static struct rack_sendmap *
437 rack_check_recovery_mode(struct tcpcb *tp,
440 rack_cong_signal(struct tcpcb *tp,
441 uint32_t type, uint32_t ack);
442 static void rack_counter_destroy(void);
444 rack_ctloutput(struct socket *so, struct sockopt *sopt,
445 struct inpcb *inp, struct tcpcb *tp);
446 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how);
448 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override);
450 rack_do_segment(struct mbuf *m, struct tcphdr *th,
451 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
453 static void rack_dtor(void *mem, int32_t size, void *arg);
455 rack_log_alt_to_to_cancel(struct tcp_rack *rack,
456 uint32_t flex1, uint32_t flex2,
457 uint32_t flex3, uint32_t flex4,
458 uint32_t flex5, uint32_t flex6,
459 uint16_t flex7, uint8_t mod);
461 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
462 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line, struct rack_sendmap *rsm);
463 static struct rack_sendmap *
464 rack_find_high_nonack(struct tcp_rack *rack,
465 struct rack_sendmap *rsm);
466 static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack);
467 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm);
468 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged);
470 rack_get_sockopt(struct socket *so, struct sockopt *sopt,
471 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack);
473 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
474 tcp_seq th_ack, int line);
476 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss);
477 static int32_t rack_handoff_ok(struct tcpcb *tp);
478 static int32_t rack_init(struct tcpcb *tp);
479 static void rack_init_sysctls(void);
481 rack_log_ack(struct tcpcb *tp, struct tcpopt *to,
482 struct tcphdr *th, int entered_rec, int dup_ack_struck);
484 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
485 uint32_t seq_out, uint8_t th_flags, int32_t err, uint64_t ts,
486 struct rack_sendmap *hintrsm, uint16_t add_flags, struct mbuf *s_mb, uint32_t s_moff);
489 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack,
490 struct rack_sendmap *rsm);
491 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm);
492 static int32_t rack_output(struct tcpcb *tp);
495 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack,
496 struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm,
497 uint32_t cts, int *moved_two);
498 static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq);
499 static void rack_remxt_tmr(struct tcpcb *tp);
501 rack_set_sockopt(struct socket *so, struct sockopt *sopt,
502 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack);
503 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack);
504 static int32_t rack_stopall(struct tcpcb *tp);
506 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type,
508 static int32_t rack_timer_active(struct tcpcb *tp, uint32_t timer_type);
509 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line);
510 static void rack_timer_stop(struct tcpcb *tp, uint32_t timer_type);
512 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
513 struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint16_t add_flag);
515 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
516 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag);
518 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
519 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack);
520 static int32_t tcp_addrack(module_t mod, int32_t type, void *data);
522 rack_do_close_wait(struct mbuf *m, struct tcphdr *th,
523 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
524 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
526 rack_do_closing(struct mbuf *m, struct tcphdr *th,
527 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
528 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
530 rack_do_established(struct mbuf *m, struct tcphdr *th,
531 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
532 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
534 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th,
535 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
536 int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos);
538 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th,
539 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
540 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
542 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th,
543 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
544 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
546 rack_do_lastack(struct mbuf *m, struct tcphdr *th,
547 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
548 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
550 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th,
551 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
552 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
554 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th,
555 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
556 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
557 struct rack_sendmap *
558 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack,
560 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt,
561 uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt);
563 tcp_rack_partialack(struct tcpcb *tp);
565 rack_set_profile(struct tcp_rack *rack, int prof);
567 rack_apply_deferred_options(struct tcp_rack *rack);
569 int32_t rack_clear_counter=0;
572 rack_set_cc_pacing(struct tcp_rack *rack)
575 struct cc_newreno_opts opt;
576 struct newreno old, *ptr;
580 if (rack->rc_pacing_cc_set)
584 if (tp->cc_algo == NULL) {
586 printf("No cc algorithm?\n");
589 rack->rc_pacing_cc_set = 1;
590 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) {
591 /* Not new-reno we can't play games with beta! */
592 printf("cc_algo:%s is not NEWRENO:%s\n",
593 tp->cc_algo->name, CCALGONAME_NEWRENO);
596 ptr = ((struct newreno *)tp->ccv->cc_data);
597 if (CC_ALGO(tp)->ctl_output == NULL) {
598 /* Huh, why does new_reno no longer have a set function? */
599 printf("no ctl_output for algo:%s\n", tp->cc_algo->name);
603 /* Just the default values */
604 old.beta = V_newreno_beta;
605 old.beta_ecn = V_newreno_beta_ecn;
606 old.newreno_flags = 0;
608 old.beta = ptr->beta;
609 old.beta_ecn = ptr->beta_ecn;
610 old.newreno_flags = ptr->newreno_flags;
612 sopt.sopt_valsize = sizeof(struct cc_newreno_opts);
613 sopt.sopt_dir = SOPT_SET;
614 opt.name = CC_NEWRENO_BETA;
615 opt.val = rack->r_ctl.rc_saved_beta.beta;
616 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
618 printf("Error returned by ctl_output %d\n", error);
622 * Hack alert: we need to set in our newreno_flags
623 * so that ABE behavior is also applied.
625 ((struct newreno *)tp->ccv->cc_data)->newreno_flags = CC_NEWRENO_BETA_ECN;
626 opt.name = CC_NEWRENO_BETA_ECN;
627 opt.val = rack->r_ctl.rc_saved_beta.beta_ecn;
628 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
630 printf("Error returned by ctl_output %d\n", error);
633 /* Save off the original values for restoral */
634 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno));
636 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
637 union tcp_log_stackspecific log;
640 ptr = ((struct newreno *)tp->ccv->cc_data);
641 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
642 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
644 log.u_bbr.flex1 = ptr->beta;
645 log.u_bbr.flex2 = ptr->beta_ecn;
646 log.u_bbr.flex3 = ptr->newreno_flags;
648 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta;
649 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn;
650 log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags;
651 log.u_bbr.flex7 = rack->gp_ready;
652 log.u_bbr.flex7 <<= 1;
653 log.u_bbr.flex7 |= rack->use_fixed_rate;
654 log.u_bbr.flex7 <<= 1;
655 log.u_bbr.flex7 |= rack->rc_pacing_cc_set;
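/*
 * A reading of the three shift/or steps above: flex7 now carries
 * gp_ready in bit 2, use_fixed_rate in bit 1 and rc_pacing_cc_set
 * in bit 0.
 */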
656 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
658 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, error,
659 0, &log, false, NULL, NULL, 0, &tv);
664 rack_undo_cc_pacing(struct tcp_rack *rack)
666 struct newreno old, *ptr;
669 if (rack->rc_pacing_cc_set == 0)
672 rack->rc_pacing_cc_set = 0;
673 if (tp->cc_algo == NULL)
676 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) {
677 /* Not new-reno nothing to do! */
680 ptr = ((struct newreno *)tp->ccv->cc_data);
683 * This happens at rack_fini() if the
684 * cc module gets freed on us. In that
685 * case we lose our "new" settings but
686 * that's ok, since the tcb is going away anyway.
690 /* Grab out our set values */
691 memcpy(&old, ptr, sizeof(struct newreno));
692 /* Copy back in the original values */
693 memcpy(ptr, &rack->r_ctl.rc_saved_beta, sizeof(struct newreno));
694 /* Now save back the values we had set in (for when pacing is restored) */
695 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno));
696 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
697 union tcp_log_stackspecific log;
700 ptr = ((struct newreno *)tp->ccv->cc_data);
701 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
702 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
703 log.u_bbr.flex1 = ptr->beta;
704 log.u_bbr.flex2 = ptr->beta_ecn;
705 log.u_bbr.flex3 = ptr->newreno_flags;
706 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta;
707 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn;
708 log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags;
709 log.u_bbr.flex7 = rack->gp_ready;
710 log.u_bbr.flex7 <<= 1;
711 log.u_bbr.flex7 |= rack->use_fixed_rate;
712 log.u_bbr.flex7 <<= 1;
713 log.u_bbr.flex7 |= rack->rc_pacing_cc_set;
714 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
716 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
717 0, &log, false, NULL, NULL, 0, &tv);
721 #ifdef NETFLIX_PEAKRATE
723 rack_update_peakrate_thr(struct tcpcb *tp)
725 /* Keep in mind that t_maxpeakrate is in B/s. */
727 peak = uqmax((tp->t_maxseg * 2),
728 (((uint64_t)tp->t_maxpeakrate * (uint64_t)(tp->t_srtt)) / (uint64_t)HPTS_USEC_IN_SEC));
729 tp->t_peakrate_thr = (uint32_t)uqmin(peak, UINT32_MAX);
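/*
 * Worked example of the computation above (illustrative numbers,
 * assuming t_srtt is expressed in microseconds as the division by
 * HPTS_USEC_IN_SEC implies): with t_maxpeakrate = 12,500,000 B/s
 * (~100 Mb/s) and a 20ms srtt, peak = 12,500,000 * 20,000 / 1,000,000
 * = 250,000 bytes per srtt, but never less than 2 * t_maxseg.
 */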
734 sysctl_rack_clear(SYSCTL_HANDLER_ARGS)
740 error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t));
741 if (error || req->newptr == NULL)
744 error = SYSCTL_IN(req, &stat, sizeof(uint32_t));
749 printf("Clearing RACK counters\n");
751 counter_u64_zero(rack_badfr);
752 counter_u64_zero(rack_badfr_bytes);
753 counter_u64_zero(rack_rtm_prr_retran);
754 counter_u64_zero(rack_rtm_prr_newdata);
755 counter_u64_zero(rack_timestamp_mismatch);
756 counter_u64_zero(rack_reorder_seen);
757 counter_u64_zero(rack_tlp_tot);
758 counter_u64_zero(rack_tlp_newdata);
759 counter_u64_zero(rack_tlp_retran);
760 counter_u64_zero(rack_tlp_retran_bytes);
761 counter_u64_zero(rack_tlp_retran_fail);
762 counter_u64_zero(rack_to_tot);
763 counter_u64_zero(rack_to_arm_rack);
764 counter_u64_zero(rack_to_arm_tlp);
765 counter_u64_zero(rack_paced_segments);
766 counter_u64_zero(rack_calc_zero);
767 counter_u64_zero(rack_calc_nonzero);
768 counter_u64_zero(rack_unpaced_segments);
769 counter_u64_zero(rack_saw_enobuf);
770 counter_u64_zero(rack_saw_enobuf_hw);
771 counter_u64_zero(rack_saw_enetunreach);
772 counter_u64_zero(rack_per_timer_hole);
773 counter_u64_zero(rack_large_ackcmp);
774 counter_u64_zero(rack_small_ackcmp);
776 counter_u64_zero(rack_adjust_map_bw);
778 counter_u64_zero(rack_to_alloc_hard);
779 counter_u64_zero(rack_to_alloc_emerg);
780 counter_u64_zero(rack_sack_proc_all);
781 counter_u64_zero(rack_fto_send);
782 counter_u64_zero(rack_fto_rsm_send);
783 counter_u64_zero(rack_extended_rfo);
784 counter_u64_zero(rack_hw_pace_init_fail);
785 counter_u64_zero(rack_hw_pace_lost);
786 counter_u64_zero(rack_sbsndptr_wrong);
787 counter_u64_zero(rack_sbsndptr_right);
788 counter_u64_zero(rack_non_fto_send);
789 counter_u64_zero(rack_nfto_resend);
790 counter_u64_zero(rack_sack_proc_short);
791 counter_u64_zero(rack_sack_proc_restart);
792 counter_u64_zero(rack_to_alloc);
793 counter_u64_zero(rack_to_alloc_limited);
794 counter_u64_zero(rack_alloc_limited_conns);
795 counter_u64_zero(rack_split_limited);
796 for (i = 0; i < MAX_NUM_OF_CNTS; i++) {
797 counter_u64_zero(rack_proc_comp_ack[i]);
799 counter_u64_zero(rack_multi_single_eq);
800 counter_u64_zero(rack_proc_non_comp_ack);
801 counter_u64_zero(rack_find_high);
802 counter_u64_zero(rack_sack_attacks_detected);
803 counter_u64_zero(rack_sack_attacks_reversed);
804 counter_u64_zero(rack_sack_used_next_merge);
805 counter_u64_zero(rack_sack_used_prev_merge);
806 counter_u64_zero(rack_sack_splits);
807 counter_u64_zero(rack_sack_skipped_acked);
808 counter_u64_zero(rack_ack_total);
809 counter_u64_zero(rack_express_sack);
810 counter_u64_zero(rack_sack_total);
811 counter_u64_zero(rack_move_none);
812 counter_u64_zero(rack_move_some);
813 counter_u64_zero(rack_used_tlpmethod);
814 counter_u64_zero(rack_used_tlpmethod2);
815 counter_u64_zero(rack_enter_tlp_calc);
816 counter_u64_zero(rack_progress_drops);
817 counter_u64_zero(rack_tlp_does_nada);
818 counter_u64_zero(rack_try_scwnd);
819 counter_u64_zero(rack_collapsed_win);
821 rack_clear_counter = 0;
826 rack_init_sysctls(void)
829 struct sysctl_oid *rack_counters;
830 struct sysctl_oid *rack_attack;
831 struct sysctl_oid *rack_pacing;
832 struct sysctl_oid *rack_timely;
833 struct sysctl_oid *rack_timers;
834 struct sysctl_oid *rack_tlp;
835 struct sysctl_oid *rack_misc;
836 struct sysctl_oid *rack_measure;
837 struct sysctl_oid *rack_probertt;
838 struct sysctl_oid *rack_hw_pacing;
840 rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
841 SYSCTL_CHILDREN(rack_sysctl_root),
844 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
845 "Rack Sack Attack Counters and Controls");
846 rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
847 SYSCTL_CHILDREN(rack_sysctl_root),
850 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
852 SYSCTL_ADD_S32(&rack_sysctl_ctx,
853 SYSCTL_CHILDREN(rack_sysctl_root),
854 OID_AUTO, "rate_sample_method", CTLFLAG_RW,
855 &rack_rate_sample_method , USE_RTT_LOW,
856 "What method should we use for rate sampling 0=high, 1=low ");
857 /* Probe rtt related controls */
858 rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
859 SYSCTL_CHILDREN(rack_sysctl_root),
862 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
863 "ProbeRTT related Controls");
864 SYSCTL_ADD_U16(&rack_sysctl_ctx,
865 SYSCTL_CHILDREN(rack_probertt),
866 OID_AUTO, "exit_per_hpb", CTLFLAG_RW,
867 &rack_atexit_prtt_hbp, 130,
868 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%");
869 SYSCTL_ADD_U16(&rack_sysctl_ctx,
870 SYSCTL_CHILDREN(rack_probertt),
871 OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW,
872 &rack_atexit_prtt, 130,
873 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%");
874 SYSCTL_ADD_U16(&rack_sysctl_ctx,
875 SYSCTL_CHILDREN(rack_probertt),
876 OID_AUTO, "gp_per_mul", CTLFLAG_RW,
877 &rack_per_of_gp_probertt, 60,
878 "What percentage of goodput do we pace at in probertt");
879 SYSCTL_ADD_U16(&rack_sysctl_ctx,
880 SYSCTL_CHILDREN(rack_probertt),
881 OID_AUTO, "gp_per_reduce", CTLFLAG_RW,
882 &rack_per_of_gp_probertt_reduce, 10,
883 "What percentage of goodput do we reduce every gp_srtt");
884 SYSCTL_ADD_U16(&rack_sysctl_ctx,
885 SYSCTL_CHILDREN(rack_probertt),
886 OID_AUTO, "gp_per_low", CTLFLAG_RW,
887 &rack_per_of_gp_lowthresh, 40,
888 "What percentage of goodput do we allow the multiplier to fall to");
889 SYSCTL_ADD_U32(&rack_sysctl_ctx,
890 SYSCTL_CHILDREN(rack_probertt),
891 OID_AUTO, "time_between", CTLFLAG_RW,
892 & rack_time_between_probertt, 96000000,
893 "How many useconds between the lowest rtt falling must past before we enter probertt");
894 SYSCTL_ADD_U32(&rack_sysctl_ctx,
895 SYSCTL_CHILDREN(rack_probertt),
896 OID_AUTO, "safety", CTLFLAG_RW,
897 &rack_probe_rtt_safety_val, 2000000,
898 "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)");
899 SYSCTL_ADD_U32(&rack_sysctl_ctx,
900 SYSCTL_CHILDREN(rack_probertt),
901 OID_AUTO, "sets_cwnd", CTLFLAG_RW,
902 &rack_probe_rtt_sets_cwnd, 0,
903 "Do we set the cwnd too (if always_lower is on)");
904 SYSCTL_ADD_U32(&rack_sysctl_ctx,
905 SYSCTL_CHILDREN(rack_probertt),
906 OID_AUTO, "maxdrainsrtts", CTLFLAG_RW,
907 &rack_max_drain_wait, 2,
908 "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal");
909 SYSCTL_ADD_U32(&rack_sysctl_ctx,
910 SYSCTL_CHILDREN(rack_probertt),
911 OID_AUTO, "mustdrainsrtts", CTLFLAG_RW,
913 "We must drain this many gp_srtt's waiting for flight to reach goal");
914 SYSCTL_ADD_U32(&rack_sysctl_ctx,
915 SYSCTL_CHILDREN(rack_probertt),
916 OID_AUTO, "goal_use_min_entry", CTLFLAG_RW,
917 &rack_probertt_use_min_rtt_entry, 1,
918 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry");
919 SYSCTL_ADD_U32(&rack_sysctl_ctx,
920 SYSCTL_CHILDREN(rack_probertt),
921 OID_AUTO, "goal_use_min_exit", CTLFLAG_RW,
922 &rack_probertt_use_min_rtt_exit, 0,
923 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt");
924 SYSCTL_ADD_U32(&rack_sysctl_ctx,
925 SYSCTL_CHILDREN(rack_probertt),
926 OID_AUTO, "length_div", CTLFLAG_RW,
927 &rack_probertt_gpsrtt_cnt_div, 0,
928 "How many recent goodput srtt periods plus hold tim does probertt last (bottom of fraction)");
929 SYSCTL_ADD_U32(&rack_sysctl_ctx,
930 SYSCTL_CHILDREN(rack_probertt),
931 OID_AUTO, "length_mul", CTLFLAG_RW,
932 &rack_probertt_gpsrtt_cnt_mul, 0,
933 "How many recent goodput srtt periods plus hold tim does probertt last (top of fraction)");
934 SYSCTL_ADD_U32(&rack_sysctl_ctx,
935 SYSCTL_CHILDREN(rack_probertt),
936 OID_AUTO, "holdtim_at_target", CTLFLAG_RW,
937 &rack_min_probertt_hold, 200000,
938 "What is the minimum time we hold probertt at target");
939 SYSCTL_ADD_U32(&rack_sysctl_ctx,
940 SYSCTL_CHILDREN(rack_probertt),
941 OID_AUTO, "filter_life", CTLFLAG_RW,
942 &rack_probertt_filter_life, 10000000,
943 "What is the time for the filters life in useconds");
944 SYSCTL_ADD_U32(&rack_sysctl_ctx,
945 SYSCTL_CHILDREN(rack_probertt),
946 OID_AUTO, "lower_within", CTLFLAG_RW,
947 &rack_probertt_lower_within, 10,
948 "If the rtt goes lower within this percentage of the time, go into probe-rtt");
949 SYSCTL_ADD_U32(&rack_sysctl_ctx,
950 SYSCTL_CHILDREN(rack_probertt),
951 OID_AUTO, "must_move", CTLFLAG_RW,
952 &rack_min_rtt_movement, 250,
953 "How much is the minimum movement in rtt to count as a drop for probertt purposes");
954 SYSCTL_ADD_U32(&rack_sysctl_ctx,
955 SYSCTL_CHILDREN(rack_probertt),
956 OID_AUTO, "clear_is_cnts", CTLFLAG_RW,
957 &rack_probertt_clear_is, 1,
958 "Do we clear I/S counts on exiting probe-rtt");
959 SYSCTL_ADD_S32(&rack_sysctl_ctx,
960 SYSCTL_CHILDREN(rack_probertt),
961 OID_AUTO, "hbp_extra_drain", CTLFLAG_RW,
962 &rack_max_drain_hbp, 1,
963 "How many extra drain gpsrtt's do we get in highly buffered paths");
964 SYSCTL_ADD_S32(&rack_sysctl_ctx,
965 SYSCTL_CHILDREN(rack_probertt),
966 OID_AUTO, "hbp_threshold", CTLFLAG_RW,
968 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold");
969 /* Pacing related sysctls */
970 rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
971 SYSCTL_CHILDREN(rack_sysctl_root),
974 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
975 "Pacing related Controls");
976 SYSCTL_ADD_S32(&rack_sysctl_ctx,
977 SYSCTL_CHILDREN(rack_pacing),
978 OID_AUTO, "max_pace_over", CTLFLAG_RW,
979 &rack_max_per_above, 30,
980 "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)");
981 SYSCTL_ADD_S32(&rack_sysctl_ctx,
982 SYSCTL_CHILDREN(rack_pacing),
983 OID_AUTO, "pace_to_one", CTLFLAG_RW,
984 &rack_pace_one_seg, 0,
985 "Do we allow low b/w pacing of 1MSS instead of two");
986 SYSCTL_ADD_S32(&rack_sysctl_ctx,
987 SYSCTL_CHILDREN(rack_pacing),
988 OID_AUTO, "limit_wsrtt", CTLFLAG_RW,
989 &rack_limit_time_with_srtt, 0,
990 "Do we limit pacing time based on srtt");
991 SYSCTL_ADD_S32(&rack_sysctl_ctx,
992 SYSCTL_CHILDREN(rack_pacing),
993 OID_AUTO, "init_win", CTLFLAG_RW,
994 &rack_default_init_window, 0,
995 "Do we have a rack initial window 0 = system default");
996 SYSCTL_ADD_U16(&rack_sysctl_ctx,
997 SYSCTL_CHILDREN(rack_pacing),
998 OID_AUTO, "gp_per_ss", CTLFLAG_RW,
999 &rack_per_of_gp_ss, 250,
1000 "If non zero, what percentage of goodput to pace at in slow start");
1001 SYSCTL_ADD_U16(&rack_sysctl_ctx,
1002 SYSCTL_CHILDREN(rack_pacing),
1003 OID_AUTO, "gp_per_ca", CTLFLAG_RW,
1004 &rack_per_of_gp_ca, 150,
1005 "If non zero, what percentage of goodput to pace at in congestion avoidance");
1006 SYSCTL_ADD_U16(&rack_sysctl_ctx,
1007 SYSCTL_CHILDREN(rack_pacing),
1008 OID_AUTO, "gp_per_rec", CTLFLAG_RW,
1009 &rack_per_of_gp_rec, 200,
1010 "If non zero, what percentage of goodput to pace at in recovery");
1011 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1012 SYSCTL_CHILDREN(rack_pacing),
1013 OID_AUTO, "pace_max_seg", CTLFLAG_RW,
1014 &rack_hptsi_segments, 40,
1015 "What size is the max for TSO segments in pacing and burst mitigation");
1016 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1017 SYSCTL_CHILDREN(rack_pacing),
1018 OID_AUTO, "burst_reduces", CTLFLAG_RW,
1019 &rack_slot_reduction, 4,
1020 "When doing only burst mitigation what is the reduce divisor");
1021 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1022 SYSCTL_CHILDREN(rack_sysctl_root),
1023 OID_AUTO, "use_pacing", CTLFLAG_RW,
1024 &rack_pace_every_seg, 0,
1025 "If set we use pacing, if clear we use only the original burst mitigation");
1026 SYSCTL_ADD_U64(&rack_sysctl_ctx,
1027 SYSCTL_CHILDREN(rack_pacing),
1028 OID_AUTO, "rate_cap", CTLFLAG_RW,
1029 &rack_bw_rate_cap, 0,
1030 "If set we apply this value to the absolute rate cap used by pacing");
1031 SYSCTL_ADD_U8(&rack_sysctl_ctx,
1032 SYSCTL_CHILDREN(rack_sysctl_root),
1033 OID_AUTO, "req_measure_cnt", CTLFLAG_RW,
1034 &rack_req_measurements, 1,
1035 "If doing dynamic pacing, how many measurements must be in before we start pacing?");
1036 /* Hardware pacing */
1037 rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1038 SYSCTL_CHILDREN(rack_sysctl_root),
1041 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1042 "Pacing related Controls");
1043 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1044 SYSCTL_CHILDREN(rack_hw_pacing),
1045 OID_AUTO, "rwnd_factor", CTLFLAG_RW,
1046 &rack_hw_rwnd_factor, 2,
1047 "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?");
1048 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1049 SYSCTL_CHILDREN(rack_hw_pacing),
1050 OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW,
1051 &rack_enobuf_hw_boost_mult, 2,
1052 "By how many time_betweens should we boost the pacing time if we see a ENOBUFS?");
1053 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1054 SYSCTL_CHILDREN(rack_hw_pacing),
1055 OID_AUTO, "pace_enobuf_max", CTLFLAG_RW,
1056 &rack_enobuf_hw_max, 2,
1057 "What is the max boost the pacing time if we see a ENOBUFS?");
1058 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1059 SYSCTL_CHILDREN(rack_hw_pacing),
1060 OID_AUTO, "pace_enobuf_min", CTLFLAG_RW,
1061 &rack_enobuf_hw_min, 2,
1062 "What is the min boost the pacing time if we see a ENOBUFS?");
1063 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1064 SYSCTL_CHILDREN(rack_hw_pacing),
1065 OID_AUTO, "enable", CTLFLAG_RW,
1066 &rack_enable_hw_pacing, 0,
1067 "Should RACK attempt to use hw pacing?");
1068 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1069 SYSCTL_CHILDREN(rack_hw_pacing),
1070 OID_AUTO, "rate_cap", CTLFLAG_RW,
1071 &rack_hw_rate_caps, 1,
1072 "Does the highest hardware pacing rate cap the rate we will send at??");
1073 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1074 SYSCTL_CHILDREN(rack_hw_pacing),
1075 OID_AUTO, "rate_min", CTLFLAG_RW,
1076 &rack_hw_rate_min, 0,
1077 "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?");
1078 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1079 SYSCTL_CHILDREN(rack_hw_pacing),
1080 OID_AUTO, "rate_to_low", CTLFLAG_RW,
1081 &rack_hw_rate_to_low, 0,
1082 "If we fall below this rate, dis-engage hw pacing?");
1083 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1084 SYSCTL_CHILDREN(rack_hw_pacing),
1085 OID_AUTO, "up_only", CTLFLAG_RW,
1086 &rack_hw_up_only, 1,
1087 "Do we allow hw pacing to lower the rate selected?");
1088 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1089 SYSCTL_CHILDREN(rack_hw_pacing),
1090 OID_AUTO, "extra_mss_precise", CTLFLAG_RW,
1091 &rack_hw_pace_extra_slots, 2,
1092 "If the rates between software and hardware match precisely how many extra time_betweens do we get?");
1093 rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1094 SYSCTL_CHILDREN(rack_sysctl_root),
1097 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1098 "Rack Timely RTT Controls");
1099 /* Timely based GP dynamics */
1100 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1101 SYSCTL_CHILDREN(rack_timely),
1102 OID_AUTO, "upper", CTLFLAG_RW,
1103 &rack_gp_per_bw_mul_up, 2,
1104 "Rack timely upper range for equal b/w (in percentage)");
1105 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1106 SYSCTL_CHILDREN(rack_timely),
1107 OID_AUTO, "lower", CTLFLAG_RW,
1108 &rack_gp_per_bw_mul_down, 4,
1109 "Rack timely lower range for equal b/w (in percentage)");
1110 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1111 SYSCTL_CHILDREN(rack_timely),
1112 OID_AUTO, "rtt_max_mul", CTLFLAG_RW,
1113 &rack_gp_rtt_maxmul, 3,
1114 "Rack timely multipler of lowest rtt for rtt_max");
1115 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1116 SYSCTL_CHILDREN(rack_timely),
1117 OID_AUTO, "rtt_min_div", CTLFLAG_RW,
1118 &rack_gp_rtt_mindiv, 4,
1119 "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt");
1120 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1121 SYSCTL_CHILDREN(rack_timely),
1122 OID_AUTO, "rtt_min_mul", CTLFLAG_RW,
1123 &rack_gp_rtt_minmul, 1,
1124 "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt");
1125 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1126 SYSCTL_CHILDREN(rack_timely),
1127 OID_AUTO, "decrease", CTLFLAG_RW,
1128 &rack_gp_decrease_per, 20,
1129 "Rack timely decrease percentage of our GP multiplication factor");
1130 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1131 SYSCTL_CHILDREN(rack_timely),
1132 OID_AUTO, "increase", CTLFLAG_RW,
1133 &rack_gp_increase_per, 2,
1134 "Rack timely increase perentage of our GP multiplication factor");
1135 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1136 SYSCTL_CHILDREN(rack_timely),
1137 OID_AUTO, "lowerbound", CTLFLAG_RW,
1138 &rack_per_lower_bound, 50,
1139 "Rack timely lowest percentage we allow GP multiplier to fall to");
1140 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1141 SYSCTL_CHILDREN(rack_timely),
1142 OID_AUTO, "upperboundss", CTLFLAG_RW,
1143 &rack_per_upper_bound_ss, 0,
1144 "Rack timely higest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)");
1145 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1146 SYSCTL_CHILDREN(rack_timely),
1147 OID_AUTO, "upperboundca", CTLFLAG_RW,
1148 &rack_per_upper_bound_ca, 0,
1149 "Rack timely higest percentage we allow GP multiplier to CA raise to (0 is no upperbound)");
1150 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1151 SYSCTL_CHILDREN(rack_timely),
1152 OID_AUTO, "dynamicgp", CTLFLAG_RW,
1153 &rack_do_dyn_mul, 0,
1154 "Rack timely do we enable dynmaic timely goodput by default");
1155 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1156 SYSCTL_CHILDREN(rack_timely),
1157 OID_AUTO, "no_rec_red", CTLFLAG_RW,
1158 &rack_gp_no_rec_chg, 1,
1159 "Rack timely do we prohibit the recovery multiplier from being lowered");
1160 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1161 SYSCTL_CHILDREN(rack_timely),
1162 OID_AUTO, "red_clear_cnt", CTLFLAG_RW,
1163 &rack_timely_dec_clear, 6,
1164 "Rack timely what threshold do we count to before another boost during b/w decent");
1165 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1166 SYSCTL_CHILDREN(rack_timely),
1167 OID_AUTO, "max_push_rise", CTLFLAG_RW,
1168 &rack_timely_max_push_rise, 3,
1169 "Rack timely how many times do we push up with b/w increase");
1170 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1171 SYSCTL_CHILDREN(rack_timely),
1172 OID_AUTO, "max_push_drop", CTLFLAG_RW,
1173 &rack_timely_max_push_drop, 3,
1174 "Rack timely how many times do we push back on b/w decent");
1175 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1176 SYSCTL_CHILDREN(rack_timely),
1177 OID_AUTO, "min_segs", CTLFLAG_RW,
1178 &rack_timely_min_segs, 4,
1179 "Rack timely when setting the cwnd what is the min num segments");
1180 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1181 SYSCTL_CHILDREN(rack_timely),
1182 OID_AUTO, "noback_max", CTLFLAG_RW,
1183 &rack_use_max_for_nobackoff, 0,
1184 "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min");
1185 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1186 SYSCTL_CHILDREN(rack_timely),
1187 OID_AUTO, "interim_timely_only", CTLFLAG_RW,
1188 &rack_timely_int_timely_only, 0,
1189 "Rack timely when doing interim timely's do we only do timely (no b/w consideration)");
1190 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1191 SYSCTL_CHILDREN(rack_timely),
1192 OID_AUTO, "nonstop", CTLFLAG_RW,
1193 &rack_timely_no_stopping, 0,
1194 "Rack timely don't stop increase");
1195 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1196 SYSCTL_CHILDREN(rack_timely),
1197 OID_AUTO, "dec_raise_thresh", CTLFLAG_RW,
1198 &rack_down_raise_thresh, 100,
1199 "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)");
1200 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1201 SYSCTL_CHILDREN(rack_timely),
1202 OID_AUTO, "bottom_drag_segs", CTLFLAG_RW,
1204 "Bottom dragging if not these many segments outstanding and room");
1206 /* TLP and Rack related parameters */
1207 rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1208 SYSCTL_CHILDREN(rack_sysctl_root),
1211 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1212 "TLP and Rack related Controls");
1213 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1214 SYSCTL_CHILDREN(rack_tlp),
1215 OID_AUTO, "use_rrr", CTLFLAG_RW,
1217 "Do we use Rack Rapid Recovery");
1218 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1219 SYSCTL_CHILDREN(rack_tlp),
1220 OID_AUTO, "post_rec_labc", CTLFLAG_RW,
1221 &rack_max_abc_post_recovery, 2,
1222 "Since we do early recovery, do we override the l_abc to a value, if so what?");
1223 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1224 SYSCTL_CHILDREN(rack_tlp),
1225 OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW,
1226 &rack_non_rxt_use_cr, 0,
1227 "Do we use ss/ca rate if in recovery we are transmitting a new data chunk");
1228 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1229 SYSCTL_CHILDREN(rack_tlp),
1230 OID_AUTO, "tlpmethod", CTLFLAG_RW,
1231 &rack_tlp_threshold_use, TLP_USE_TWO_ONE,
1232 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2");
1233 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1234 SYSCTL_CHILDREN(rack_tlp),
1235 OID_AUTO, "limit", CTLFLAG_RW,
1237 "How many TLP's can be sent without sending new data");
1238 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1239 SYSCTL_CHILDREN(rack_tlp),
1240 OID_AUTO, "use_greater", CTLFLAG_RW,
1241 &rack_tlp_use_greater, 1,
1242 "Should we use the rack_rtt time if its greater than srtt");
1243 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1244 SYSCTL_CHILDREN(rack_tlp),
1245 OID_AUTO, "tlpminto", CTLFLAG_RW,
1246 &rack_tlp_min, 10000,
1247 "TLP minimum timeout per the specification (in microseconds)");
1248 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1249 SYSCTL_CHILDREN(rack_tlp),
1250 OID_AUTO, "send_oldest", CTLFLAG_RW,
1251 &rack_always_send_oldest, 0,
1252 "Should we always send the oldest TLP and RACK-TLP");
1253 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1254 SYSCTL_CHILDREN(rack_tlp),
1255 OID_AUTO, "rack_tlimit", CTLFLAG_RW,
1256 &rack_limited_retran, 0,
1257 "How many times can a rack timeout drive out sends");
1258 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1259 SYSCTL_CHILDREN(rack_tlp),
1260 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW,
1261 &rack_lower_cwnd_at_tlp, 0,
1262 "When a TLP completes a retran should we enter recovery");
1263 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1264 SYSCTL_CHILDREN(rack_tlp),
1265 OID_AUTO, "reorder_thresh", CTLFLAG_RW,
1266 &rack_reorder_thresh, 2,
1267 "What factor for rack will be added when seeing reordering (shift right)");
1268 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1269 SYSCTL_CHILDREN(rack_tlp),
1270 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW,
1271 &rack_tlp_thresh, 1,
1272 "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)");
1273 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1274 SYSCTL_CHILDREN(rack_tlp),
1275 OID_AUTO, "reorder_fade", CTLFLAG_RW,
1276 &rack_reorder_fade, 60000000,
1277 "Does reorder detection fade, if so how many microseconds (0 means never)");
1278 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1279 SYSCTL_CHILDREN(rack_tlp),
1280 OID_AUTO, "pktdelay", CTLFLAG_RW,
1281 &rack_pkt_delay, 1000,
1282 "Extra RACK time (in microseconds) besides reordering thresh");
1284 /* Timer related controls */
1285 rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1286 SYSCTL_CHILDREN(rack_sysctl_root),
1289 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1290 "Timer related controls");
1291 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1292 SYSCTL_CHILDREN(rack_timers),
1293 OID_AUTO, "persmin", CTLFLAG_RW,
1294 &rack_persist_min, 250000,
1295 "What is the minimum time in microseconds between persists");
1296 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1297 SYSCTL_CHILDREN(rack_timers),
1298 OID_AUTO, "persmax", CTLFLAG_RW,
1299 &rack_persist_max, 2000000,
1300 "What is the largest delay in microseconds between persists");
1301 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1302 SYSCTL_CHILDREN(rack_timers),
1303 OID_AUTO, "delayed_ack", CTLFLAG_RW,
1304 &rack_delayed_ack_time, 40000,
1305 "Delayed ack time (40ms in microseconds)");
1306 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1307 SYSCTL_CHILDREN(rack_timers),
1308 OID_AUTO, "minrto", CTLFLAG_RW,
1309 &rack_rto_min, 30000,
1310 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP");
1311 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1312 SYSCTL_CHILDREN(rack_timers),
1313 OID_AUTO, "maxrto", CTLFLAG_RW,
1314 &rack_rto_max, 4000000,
1315 "Maxiumum RTO in microseconds -- should be at least as large as min_rto");
1316 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1317 SYSCTL_CHILDREN(rack_timers),
1318 OID_AUTO, "minto", CTLFLAG_RW,
1320 "Minimum rack timeout in microseconds");
1321 /* Measure controls */
1322 rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1323 SYSCTL_CHILDREN(rack_sysctl_root),
1326 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1327 "Measure related controls");
1328 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1329 SYSCTL_CHILDREN(rack_measure),
1330 OID_AUTO, "wma_divisor", CTLFLAG_RW,
1331 &rack_wma_divisor, 8,
1332 "When doing b/w calculation what is the divisor for the WMA");
1333 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1334 SYSCTL_CHILDREN(rack_measure),
1335 OID_AUTO, "end_cwnd", CTLFLAG_RW,
1336 &rack_cwnd_block_ends_measure, 0,
1337 "Does a cwnd just-return end the measurement window (app limited)");
1338 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1339 SYSCTL_CHILDREN(rack_measure),
1340 OID_AUTO, "end_rwnd", CTLFLAG_RW,
1341 &rack_rwnd_block_ends_measure, 0,
1342 "Does an rwnd just-return end the measurement window (app limited -- not persists)");
1343 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1344 SYSCTL_CHILDREN(rack_measure),
1345 OID_AUTO, "min_target", CTLFLAG_RW,
1346 &rack_def_data_window, 20,
1347 "What is the minimum target window (in mss) for a GP measurements");
1348 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1349 SYSCTL_CHILDREN(rack_measure),
1350 OID_AUTO, "goal_bdp", CTLFLAG_RW,
1352 "What is the goal BDP to measure");
1353 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1354 SYSCTL_CHILDREN(rack_measure),
1355 OID_AUTO, "min_srtts", CTLFLAG_RW,
1357 "What is the goal BDP to measure");
1358 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1359 SYSCTL_CHILDREN(rack_measure),
1360 OID_AUTO, "min_measure_tim", CTLFLAG_RW,
1361 &rack_min_measure_usec, 0,
1362 "What is the Minimum time time for a measurement if 0, this is off");
1363 /* Misc rack controls */
1364 rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
1365 SYSCTL_CHILDREN(rack_sysctl_root),
1368 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1369 "Misc related controls");
1370 #ifdef TCP_ACCOUNTING
1371 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1372 SYSCTL_CHILDREN(rack_misc),
1373 OID_AUTO, "tcp_acct", CTLFLAG_RW,
1374 &rack_tcp_accounting, 0,
1375 "Should we turn on TCP accounting for all rack sessions?");
1377 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1378 SYSCTL_CHILDREN(rack_misc),
1379 OID_AUTO, "prr_addback_max", CTLFLAG_RW,
1380 &rack_prr_addbackmax, 2,
1381 "What is the maximum number of MSS we allow to be added back if prr can't send all its data?");
1382 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1383 SYSCTL_CHILDREN(rack_misc),
1384 OID_AUTO, "stats_gets_ms", CTLFLAG_RW,
1385 &rack_stats_gets_ms_rtt, 1,
1386 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?");
1387 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1388 SYSCTL_CHILDREN(rack_misc),
1389 OID_AUTO, "clientlowbuf", CTLFLAG_RW,
1390 &rack_client_low_buf, 0,
1391 "Client low buffer level (below this we are more aggressive in DGP exiting recovery (0 = off)?");
1392 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1393 SYSCTL_CHILDREN(rack_misc),
1394 OID_AUTO, "defprofile", CTLFLAG_RW,
1395 &rack_def_profile, 0,
1396 "Should RACK use a default profile (0=no, num == profile num)?");
1397 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1398 SYSCTL_CHILDREN(rack_misc),
1399 OID_AUTO, "cmpack", CTLFLAG_RW,
1400 &rack_use_cmp_acks, 1,
1401 "Should RACK have LRO send compressed acks");
1402 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1403 SYSCTL_CHILDREN(rack_misc),
1404 OID_AUTO, "fsb", CTLFLAG_RW,
1406 "Should RACK use the fast send block?");
1407 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1408 SYSCTL_CHILDREN(rack_misc),
1409 OID_AUTO, "rfo", CTLFLAG_RW,
1411 "Should RACK use rack_fast_output()?");
1412 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1413 SYSCTL_CHILDREN(rack_misc),
1414 OID_AUTO, "rsmrfo", CTLFLAG_RW,
1415 &rack_use_rsm_rfo, 1,
1416 "Should RACK use rack_fast_rsm_output()?");
1417 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1418 SYSCTL_CHILDREN(rack_misc),
1419 OID_AUTO, "shared_cwnd", CTLFLAG_RW,
1420 &rack_enable_shared_cwnd, 1,
1421 "Should RACK try to use the shared cwnd on connections where allowed");
1422 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1423 SYSCTL_CHILDREN(rack_misc),
1424 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW,
1425 &rack_limits_scwnd, 1,
1426 "Should RACK place low end time limits on the shared cwnd feature");
1427 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1428 SYSCTL_CHILDREN(rack_misc),
1429 OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW,
1430 &rack_enable_mqueue_for_nonpaced, 0,
1431 "Should RACK use mbuf queuing for non-paced connections");
1432 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1433 SYSCTL_CHILDREN(rack_misc),
1434 OID_AUTO, "iMac_dack", CTLFLAG_RW,
1435 &rack_use_imac_dack, 0,
1436 "Should RACK try to emulate iMac delayed ack");
1437 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1438 SYSCTL_CHILDREN(rack_misc),
1439 OID_AUTO, "no_prr", CTLFLAG_RW,
1440 &rack_disable_prr, 0,
1441 "Should RACK not use prr and only pace (must have pacing on)");
1442 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1443 SYSCTL_CHILDREN(rack_misc),
1444 OID_AUTO, "bb_verbose", CTLFLAG_RW,
1445 &rack_verbose_logging, 0,
1446 "Should RACK black box logging be verbose");
1447 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1448 SYSCTL_CHILDREN(rack_misc),
1449 OID_AUTO, "data_after_close", CTLFLAG_RW,
1450 &rack_ignore_data_after_close, 1,
1451 "Do we hold off sending a RST until all pending data is ack'd");
1452 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1453 SYSCTL_CHILDREN(rack_misc),
1454 OID_AUTO, "no_sack_needed", CTLFLAG_RW,
1455 &rack_sack_not_required, 1,
1456 "Do we allow rack to run on connections not supporting SACK");
1457 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1458 SYSCTL_CHILDREN(rack_misc),
1459 OID_AUTO, "prr_sendalot", CTLFLAG_RW,
1460 &rack_send_a_lot_in_prr, 1,
1461 "Send a lot in prr");
1462 SYSCTL_ADD_S32(&rack_sysctl_ctx,
1463 SYSCTL_CHILDREN(rack_misc),
1464 OID_AUTO, "autoscale", CTLFLAG_RW,
1465 &rack_autosndbuf_inc, 20,
1466 "What percentage should rack scale up its snd buffer by?");
1467 /* Sack Attacker detection stuff */
1468 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1469 SYSCTL_CHILDREN(rack_attack),
1470 OID_AUTO, "detect_highsackratio", CTLFLAG_RW,
1471 &rack_highest_sack_thresh_seen, 0,
1472 "Highest sack to ack ratio seen");
1473 SYSCTL_ADD_U32(&rack_sysctl_ctx,
1474 SYSCTL_CHILDREN(rack_attack),
1475 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW,
1476 &rack_highest_move_thresh_seen, 0,
1477 "Highest move to non-move ratio seen");
1478 rack_ack_total = counter_u64_alloc(M_WAITOK);
1479 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1480 SYSCTL_CHILDREN(rack_attack),
1481 OID_AUTO, "acktotal", CTLFLAG_RD,
1483 "Total number of Ack's");
1484 rack_express_sack = counter_u64_alloc(M_WAITOK);
1485 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1486 SYSCTL_CHILDREN(rack_attack),
1487 OID_AUTO, "exp_sacktotal", CTLFLAG_RD,
1489 "Total expresss number of Sack's");
1490 rack_sack_total = counter_u64_alloc(M_WAITOK);
1491 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1492 SYSCTL_CHILDREN(rack_attack),
1493 OID_AUTO, "sacktotal", CTLFLAG_RD,
1495 "Total number of SACKs");
1496 rack_move_none = counter_u64_alloc(M_WAITOK);
1497 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1498 SYSCTL_CHILDREN(rack_attack),
1499 OID_AUTO, "move_none", CTLFLAG_RD,
1501 "Total number of SACK index reuse of postions under threshold");
1502 rack_move_some = counter_u64_alloc(M_WAITOK);
1503 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1504 SYSCTL_CHILDREN(rack_attack),
1505 OID_AUTO, "move_some", CTLFLAG_RD,
1507 "Total number of SACK index reuse of postions over threshold");
1508 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK);
1509 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1510 SYSCTL_CHILDREN(rack_attack),
1511 OID_AUTO, "attacks", CTLFLAG_RD,
1512 &rack_sack_attacks_detected,
1513 "Total number of SACK attackers that had sack disabled");
1514 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK);
1515 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1516 SYSCTL_CHILDREN(rack_attack),
1517 OID_AUTO, "reversed", CTLFLAG_RD,
1518 &rack_sack_attacks_reversed,
1519 "Total number of SACK attackers that were later determined false positive");
1520 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK);
1521 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1522 SYSCTL_CHILDREN(rack_attack),
1523 OID_AUTO, "nextmerge", CTLFLAG_RD,
1524 &rack_sack_used_next_merge,
1525 "Total number of times we used the next merge");
1526 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK);
1527 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1528 SYSCTL_CHILDREN(rack_attack),
1529 OID_AUTO, "prevmerge", CTLFLAG_RD,
1530 &rack_sack_used_prev_merge,
1531 "Total number of times we used the prev merge");
1533 rack_fto_send = counter_u64_alloc(M_WAITOK);
1534 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1535 SYSCTL_CHILDREN(rack_counters),
1536 OID_AUTO, "fto_send", CTLFLAG_RD,
1537 &rack_fto_send, "Total number of rack_fast_output sends");
1538 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK);
1539 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1540 SYSCTL_CHILDREN(rack_counters),
1541 OID_AUTO, "fto_rsm_send", CTLFLAG_RD,
1542 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends");
1543 rack_nfto_resend = counter_u64_alloc(M_WAITOK);
1544 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1545 SYSCTL_CHILDREN(rack_counters),
1546 OID_AUTO, "nfto_resend", CTLFLAG_RD,
1547 &rack_nfto_resend, "Total number of rack_output retransmissions");
1548 rack_non_fto_send = counter_u64_alloc(M_WAITOK);
1549 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1550 SYSCTL_CHILDREN(rack_counters),
1551 OID_AUTO, "nfto_send", CTLFLAG_RD,
1552 &rack_non_fto_send, "Total number of rack_output first sends");
1553 rack_extended_rfo = counter_u64_alloc(M_WAITOK);
1554 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1555 SYSCTL_CHILDREN(rack_counters),
1556 OID_AUTO, "rfo_extended", CTLFLAG_RD,
1557 &rack_extended_rfo, "Total number of times we extended rfo");
1559 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK);
1560 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1561 SYSCTL_CHILDREN(rack_counters),
1562 OID_AUTO, "hwpace_init_fail", CTLFLAG_RD,
1563 &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing");
1564 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK);
1566 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1567 SYSCTL_CHILDREN(rack_counters),
1568 OID_AUTO, "hwpace_lost", CTLFLAG_RD,
1569 &rack_hw_pace_lost, "Total number of times we failed to initialize hw pacing");
1573 rack_badfr = counter_u64_alloc(M_WAITOK);
1574 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1575 SYSCTL_CHILDREN(rack_counters),
1576 OID_AUTO, "badfr", CTLFLAG_RD,
1577 &rack_badfr, "Total number of bad FRs");
1578 rack_badfr_bytes = counter_u64_alloc(M_WAITOK);
1579 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1580 SYSCTL_CHILDREN(rack_counters),
1581 OID_AUTO, "badfr_bytes", CTLFLAG_RD,
1582 &rack_badfr_bytes, "Total number of bad FRs");
1583 rack_rtm_prr_retran = counter_u64_alloc(M_WAITOK);
1584 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1585 SYSCTL_CHILDREN(rack_counters),
1586 OID_AUTO, "prrsndret", CTLFLAG_RD,
1587 &rack_rtm_prr_retran,
1588 "Total number of prr based retransmits");
1589 rack_rtm_prr_newdata = counter_u64_alloc(M_WAITOK);
1590 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1591 SYSCTL_CHILDREN(rack_counters),
1592 OID_AUTO, "prrsndnew", CTLFLAG_RD,
1593 &rack_rtm_prr_newdata,
1594 "Total number of prr based new transmits");
1595 rack_timestamp_mismatch = counter_u64_alloc(M_WAITOK);
1596 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1597 SYSCTL_CHILDREN(rack_counters),
1598 OID_AUTO, "tsnf", CTLFLAG_RD,
1599 &rack_timestamp_mismatch,
1600 "Total number of timestamps that we could not find the reported ts");
1601 rack_find_high = counter_u64_alloc(M_WAITOK);
1602 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1603 SYSCTL_CHILDREN(rack_counters),
1604 OID_AUTO, "findhigh", CTLFLAG_RD,
1606 "Total number of FIN causing find-high");
1607 rack_reorder_seen = counter_u64_alloc(M_WAITOK);
1608 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1609 SYSCTL_CHILDREN(rack_counters),
1610 OID_AUTO, "reordering", CTLFLAG_RD,
1612 "Total number of times we added delay due to reordering");
1613 rack_tlp_tot = counter_u64_alloc(M_WAITOK);
1614 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1615 SYSCTL_CHILDREN(rack_counters),
1616 OID_AUTO, "tlp_to_total", CTLFLAG_RD,
1618 "Total number of tail loss probe expirations");
1619 rack_tlp_newdata = counter_u64_alloc(M_WAITOK);
1620 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1621 SYSCTL_CHILDREN(rack_counters),
1622 OID_AUTO, "tlp_new", CTLFLAG_RD,
1624 "Total number of tail loss probe sending new data");
1625 rack_tlp_retran = counter_u64_alloc(M_WAITOK);
1626 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1627 SYSCTL_CHILDREN(rack_counters),
1628 OID_AUTO, "tlp_retran", CTLFLAG_RD,
1630 "Total number of tail loss probe sending retransmitted data");
1631 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK);
1632 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1633 SYSCTL_CHILDREN(rack_counters),
1634 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD,
1635 &rack_tlp_retran_bytes,
1636 "Total bytes of tail loss probe sending retransmitted data");
1637 rack_tlp_retran_fail = counter_u64_alloc(M_WAITOK);
1638 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1639 SYSCTL_CHILDREN(rack_counters),
1640 OID_AUTO, "tlp_retran_fail", CTLFLAG_RD,
1641 &rack_tlp_retran_fail,
1642 "Total number of tail loss probe sending retransmitted data that failed (wait for t3)");
1643 rack_to_tot = counter_u64_alloc(M_WAITOK);
1644 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1645 SYSCTL_CHILDREN(rack_counters),
1646 OID_AUTO, "rack_to_tot", CTLFLAG_RD,
1648 "Total number of times the rack to expired");
1649 rack_to_arm_rack = counter_u64_alloc(M_WAITOK);
1650 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1651 SYSCTL_CHILDREN(rack_counters),
1652 OID_AUTO, "arm_rack", CTLFLAG_RD,
1654 "Total number of times the rack timer armed");
1655 rack_to_arm_tlp = counter_u64_alloc(M_WAITOK);
1656 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1657 SYSCTL_CHILDREN(rack_counters),
1658 OID_AUTO, "arm_tlp", CTLFLAG_RD,
1660 "Total number of times the tlp timer armed");
1661 rack_calc_zero = counter_u64_alloc(M_WAITOK);
1662 rack_calc_nonzero = counter_u64_alloc(M_WAITOK);
1663 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1664 SYSCTL_CHILDREN(rack_counters),
1665 OID_AUTO, "calc_zero", CTLFLAG_RD,
1667 "Total number of times pacing time worked out to zero");
1668 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1669 SYSCTL_CHILDREN(rack_counters),
1670 OID_AUTO, "calc_nonzero", CTLFLAG_RD,
1672 "Total number of times pacing time worked out to non-zero");
1673 rack_paced_segments = counter_u64_alloc(M_WAITOK);
1674 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1675 SYSCTL_CHILDREN(rack_counters),
1676 OID_AUTO, "paced", CTLFLAG_RD,
1677 &rack_paced_segments,
1678 "Total number of times a segment send caused hptsi");
1679 rack_unpaced_segments = counter_u64_alloc(M_WAITOK);
1680 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1681 SYSCTL_CHILDREN(rack_counters),
1682 OID_AUTO, "unpaced", CTLFLAG_RD,
1683 &rack_unpaced_segments,
1684 "Total number of times a segment did not cause hptsi");
1685 rack_saw_enobuf = counter_u64_alloc(M_WAITOK);
1686 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1687 SYSCTL_CHILDREN(rack_counters),
1688 OID_AUTO, "saw_enobufs", CTLFLAG_RD,
1690 "Total number of times a sends returned enobuf for non-hdwr paced connections");
1691 rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK);
1692 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1693 SYSCTL_CHILDREN(rack_counters),
1694 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD,
1695 &rack_saw_enobuf_hw,
1696 "Total number of times a send returned enobuf for hdwr paced connections");
1697 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK);
1698 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1699 SYSCTL_CHILDREN(rack_counters),
1700 OID_AUTO, "saw_enetunreach", CTLFLAG_RD,
1701 &rack_saw_enetunreach,
1702 "Total number of times a send received a enetunreachable");
1703 rack_hot_alloc = counter_u64_alloc(M_WAITOK);
1704 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1705 SYSCTL_CHILDREN(rack_counters),
1706 OID_AUTO, "alloc_hot", CTLFLAG_RD,
1708 "Total allocations from the top of our list");
1709 rack_to_alloc = counter_u64_alloc(M_WAITOK);
1710 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1711 SYSCTL_CHILDREN(rack_counters),
1712 OID_AUTO, "allocs", CTLFLAG_RD,
1714 "Total allocations of tracking structures");
1715 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK);
1716 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1717 SYSCTL_CHILDREN(rack_counters),
1718 OID_AUTO, "allochard", CTLFLAG_RD,
1719 &rack_to_alloc_hard,
1720 "Total allocations done with sleeping the hard way");
1721 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK);
1722 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1723 SYSCTL_CHILDREN(rack_counters),
1724 OID_AUTO, "allocemerg", CTLFLAG_RD,
1725 &rack_to_alloc_emerg,
1726 "Total allocations done from emergency cache");
1727 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK);
1728 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1729 SYSCTL_CHILDREN(rack_counters),
1730 OID_AUTO, "alloc_limited", CTLFLAG_RD,
1731 &rack_to_alloc_limited,
1732 "Total allocations dropped due to limit");
1733 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK);
1734 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1735 SYSCTL_CHILDREN(rack_counters),
1736 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD,
1737 &rack_alloc_limited_conns,
1738 "Connections with allocations dropped due to limit");
1739 rack_split_limited = counter_u64_alloc(M_WAITOK);
1740 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1741 SYSCTL_CHILDREN(rack_counters),
1742 OID_AUTO, "split_limited", CTLFLAG_RD,
1743 &rack_split_limited,
1744 "Split allocations dropped due to limit");
1746 for (i = 0; i < MAX_NUM_OF_CNTS; i++) {
1748 sprintf(name, "cmp_ack_cnt_%d", i);
1749 rack_proc_comp_ack[i] = counter_u64_alloc(M_WAITOK);
1750 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1751 SYSCTL_CHILDREN(rack_counters),
1752 OID_AUTO, name, CTLFLAG_RD,
1753 &rack_proc_comp_ack[i],
1754 "Number of compressed acks we processed");
1756 rack_large_ackcmp = counter_u64_alloc(M_WAITOK);
1757 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1758 SYSCTL_CHILDREN(rack_counters),
1759 OID_AUTO, "cmp_large_mbufs", CTLFLAG_RD,
1761 "Number of TCP connections with large mbuf's for compressed acks");
1762 rack_small_ackcmp = counter_u64_alloc(M_WAITOK);
1763 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1764 SYSCTL_CHILDREN(rack_counters),
1765 OID_AUTO, "cmp_small_mbufs", CTLFLAG_RD,
1767 "Number of TCP connections with small mbuf's for compressed acks");
1769 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK);
1770 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1771 SYSCTL_CHILDREN(rack_counters),
1772 OID_AUTO, "map_adjust_req", CTLFLAG_RD,
1773 &rack_adjust_map_bw,
1774 "Number of times we hit the case where the sb went up and down on a sendmap entry");
1776 rack_multi_single_eq = counter_u64_alloc(M_WAITOK);
1777 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1778 SYSCTL_CHILDREN(rack_counters),
1779 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD,
1780 &rack_multi_single_eq,
1781 "Number of compressed acks total represented");
1782 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK);
1783 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1784 SYSCTL_CHILDREN(rack_counters),
1785 OID_AUTO, "cmp_ack_not", CTLFLAG_RD,
1786 &rack_proc_non_comp_ack,
1787 "Number of non compresseds acks that we processed");
1790 rack_sack_proc_all = counter_u64_alloc(M_WAITOK);
1791 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1792 SYSCTL_CHILDREN(rack_counters),
1793 OID_AUTO, "sack_long", CTLFLAG_RD,
1794 &rack_sack_proc_all,
1795 "Total times we had to walk whole list for sack processing");
1796 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK);
1797 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1798 SYSCTL_CHILDREN(rack_counters),
1799 OID_AUTO, "sack_restart", CTLFLAG_RD,
1800 &rack_sack_proc_restart,
1801 "Total times we had to walk whole list due to a restart");
1802 rack_sack_proc_short = counter_u64_alloc(M_WAITOK);
1803 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1804 SYSCTL_CHILDREN(rack_counters),
1805 OID_AUTO, "sack_short", CTLFLAG_RD,
1806 &rack_sack_proc_short,
1807 "Total times we took shortcut for sack processing");
1808 rack_enter_tlp_calc = counter_u64_alloc(M_WAITOK);
1809 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1810 SYSCTL_CHILDREN(rack_counters),
1811 OID_AUTO, "tlp_calc_entered", CTLFLAG_RD,
1812 &rack_enter_tlp_calc,
1813 "Total times we called calc-tlp");
1814 rack_used_tlpmethod = counter_u64_alloc(M_WAITOK);
1815 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1816 SYSCTL_CHILDREN(rack_counters),
1817 OID_AUTO, "hit_tlp_method", CTLFLAG_RD,
1818 &rack_used_tlpmethod,
1819 "Total number of runt sacks");
1820 rack_used_tlpmethod2 = counter_u64_alloc(M_WAITOK);
1821 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1822 SYSCTL_CHILDREN(rack_counters),
1823 OID_AUTO, "hit_tlp_method2", CTLFLAG_RD,
1824 &rack_used_tlpmethod2,
1825 "Total number of times we hit TLP method 2");
1826 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK);
1827 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1828 SYSCTL_CHILDREN(rack_attack),
1829 OID_AUTO, "skipacked", CTLFLAG_RD,
1830 &rack_sack_skipped_acked,
1831 "Total number of times we skipped previously sacked");
1832 rack_sack_splits = counter_u64_alloc(M_WAITOK);
1833 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1834 SYSCTL_CHILDREN(rack_attack),
1835 OID_AUTO, "ofsplit", CTLFLAG_RD,
1837 "Total number of times we did the old fashion tree split");
1838 rack_progress_drops = counter_u64_alloc(M_WAITOK);
1839 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1840 SYSCTL_CHILDREN(rack_counters),
1841 OID_AUTO, "prog_drops", CTLFLAG_RD,
1842 &rack_progress_drops,
1843 "Total number of progress drops");
1844 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK);
1845 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1846 SYSCTL_CHILDREN(rack_counters),
1847 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD,
1848 &rack_input_idle_reduces,
1849 "Total number of idle reductions on input");
1850 rack_collapsed_win = counter_u64_alloc(M_WAITOK);
1851 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1852 SYSCTL_CHILDREN(rack_counters),
1853 OID_AUTO, "collapsed_win", CTLFLAG_RD,
1854 &rack_collapsed_win,
1855 "Total number of collapsed windows");
1856 rack_tlp_does_nada = counter_u64_alloc(M_WAITOK);
1857 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1858 SYSCTL_CHILDREN(rack_counters),
1859 OID_AUTO, "tlp_nada", CTLFLAG_RD,
1860 &rack_tlp_does_nada,
1861 "Total number of nada tlp calls");
1862 rack_try_scwnd = counter_u64_alloc(M_WAITOK);
1863 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1864 SYSCTL_CHILDREN(rack_counters),
1865 OID_AUTO, "tried_scwnd", CTLFLAG_RD,
1867 "Total number of scwnd attempts");
1869 rack_per_timer_hole = counter_u64_alloc(M_WAITOK);
1870 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1871 SYSCTL_CHILDREN(rack_counters),
1872 OID_AUTO, "timer_hole", CTLFLAG_RD,
1873 &rack_per_timer_hole,
1874 "Total persists start in timer hole");
1876 rack_sbsndptr_wrong = counter_u64_alloc(M_WAITOK);
1877 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1878 SYSCTL_CHILDREN(rack_counters),
1879 OID_AUTO, "sndptr_wrong", CTLFLAG_RD,
1880 &rack_sbsndptr_wrong, "Total number of times the saved sbsndptr was incorret");
1881 rack_sbsndptr_right = counter_u64_alloc(M_WAITOK);
1882 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
1883 SYSCTL_CHILDREN(rack_counters),
1884 OID_AUTO, "sndptr_right", CTLFLAG_RD,
1885 &rack_sbsndptr_right, "Total number of times the saved sbsndptr was corret");
1887 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK);
1888 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
1889 OID_AUTO, "outsize", CTLFLAG_RD,
1890 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes");
1891 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK);
1892 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
1893 OID_AUTO, "opts", CTLFLAG_RD,
1894 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats");
1895 SYSCTL_ADD_PROC(&rack_sysctl_ctx,
1896 SYSCTL_CHILDREN(rack_sysctl_root),
1897 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
1898 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters");
1902 rb_map_cmp(struct rack_sendmap *b, struct rack_sendmap *a)
1904 if (SEQ_GEQ(b->r_start, a->r_start) &&
1905 SEQ_LT(b->r_start, a->r_end)) {
1907 * The entry b is within the
1909 * a -- |-------------|
1914 * b -- |-----------|
1917 } else if (SEQ_GEQ(b->r_start, a->r_end)) {
1919 * b falls as either the next
1920 * sequence block after a so a
1921 * is said to be smaller than b.
1931 * What's left is where a is
1932 * larger than b. i.e:
1936 * b -- |--------------|
1941 RB_PROTOTYPE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp);
1942 RB_GENERATE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp);
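/*
 * Return the initial window in bytes: the stack's computed default
 * (tcp_compute_initwnd) when the user has not set rc_init_win, otherwise
 * rc_init_win fixed-size segments.
 */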
1945 rc_init_window(struct tcp_rack *rack)
1949 if (rack->rc_init_win == 0) {
1951 * Nothing set by the user, use the system stack
1954 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)));
1956 win = ctf_fixed_maxseg(rack->rc_tp) * rack->rc_init_win;
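/*
 * Pick which fixed pacing rate applies right now: the recovery rate in
 * fast recovery, the slow-start rate while cwnd is below ssthresh, and
 * the congestion-avoidance rate otherwise.
 */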
1961 rack_get_fixed_pacing_bw(struct tcp_rack *rack)
1963 if (IN_FASTRECOVERY(rack->rc_tp->t_flags))
1964 return (rack->r_ctl.rc_fixed_pacing_rate_rec);
1965 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh)
1966 return (rack->r_ctl.rc_fixed_pacing_rate_ss);
1968 return (rack->r_ctl.rc_fixed_pacing_rate_ca);
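/*
 * Return the bandwidth estimate used for pacing. Fixed-rate connections
 * get their configured rate. Otherwise use the goodput estimate (gp_bw);
 * when no measurement exists yet, fall back to a user supplied init_rate
 * or an initial-window/SRTT guess, and honor bw_rate_cap/t_maxpeakrate.
 */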
1972 rack_get_bw(struct tcp_rack *rack)
1974 if (rack->use_fixed_rate) {
1975 /* Return the fixed pacing rate */
1976 return (rack_get_fixed_pacing_bw(rack));
1978 if (rack->r_ctl.gp_bw == 0) {
1980 * We have yet no b/w measurement,
1981 * if we have a user set initial bw
1982 * return it. If we don't have that and
1983 * we have an srtt, use the tcp IW (10) to
1984 * calculate a fictional b/w over the SRTT
1985 * which is more or less a guess. Note
1986 * we don't use our IW from rack on purpose
1987 * so if we have like IW=30, we are not
1988 * calculating a "huge" b/w.
1991 if (rack->r_ctl.init_rate)
1992 return (rack->r_ctl.init_rate);
1994 /* Has the user set a max peak rate? */
1995 #ifdef NETFLIX_PEAKRATE
1996 if (rack->rc_tp->t_maxpeakrate)
1997 return (rack->rc_tp->t_maxpeakrate);
1999 /* Ok lets come up with the IW guess, if we have a srtt */
2000 if (rack->rc_tp->t_srtt == 0) {
2002 * Go with old pacing method
2003 * i.e. burst mitigation only.
2007 /* Ok lets get the initial TCP win (not racks) */
2008 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp));
2009 srtt = (uint64_t)rack->rc_tp->t_srtt;
2010 bw *= (uint64_t)USECS_IN_SECOND;
2012 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap))
2013 bw = rack->r_ctl.bw_rate_cap;
2018 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) {
2019 /* Averaging is done, we can return the value */
2020 bw = rack->r_ctl.gp_bw;
2022 /* Still doing initial average must calculate */
2023 bw = rack->r_ctl.gp_bw / rack->r_ctl.num_measurements;
2025 #ifdef NETFLIX_PEAKRATE
2026 if ((rack->rc_tp->t_maxpeakrate) &&
2027 (bw > rack->rc_tp->t_maxpeakrate)) {
2028 /* The user has set a peak rate to pace at
2029 * don't allow us to pace faster than that.
2031 return (rack->rc_tp->t_maxpeakrate);
2034 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap))
2035 bw = rack->r_ctl.bw_rate_cap;
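/*
 * Return the pacing gain (percent of the estimated b/w) for this send:
 * the neutral gain for fixed-rate connections, the probertt gain while
 * probing RTT, the recovery gain for retransmissions, and otherwise the
 * ss or ca gain depending on cwnd versus ssthresh.
 */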
2041 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm)
2043 if (rack->use_fixed_rate) {
2045 } else if (rack->in_probe_rtt && (rsm == NULL))
2046 return (rack->r_ctl.rack_per_of_gp_probertt);
2047 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) &&
2048 rack->r_ctl.rack_per_of_gp_rec)) {
2050 /* a retransmission always use the recovery rate */
2051 return (rack->r_ctl.rack_per_of_gp_rec);
2052 } else if (rack->rack_rec_nonrxt_use_cr) {
2053 /* Directed to use the configured rate */
2054 goto configured_rate;
2055 } else if (rack->rack_no_prr &&
2056 (rack->r_ctl.rack_per_of_gp_rec > 100)) {
2057 /* No PRR, lets just use the b/w estimate only */
2061 * Here we may have a non-retransmit but we
2062 * have no overrides, so just use the recovery
2063 * rate (prr is in effect).
2065 return (rack->r_ctl.rack_per_of_gp_rec);
2069 /* For the configured rate we look at our cwnd vs the ssthresh */
2070 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh)
2071 return (rack->r_ctl.rack_per_of_gp_ss);
2073 return (rack->r_ctl.rack_per_of_gp_ca);
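/*
 * BB-log a hardware pacing event: the requested and granted rates, the
 * interface involved, and the error/mod codes describing what happened.
 */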
2077 rack_log_hdwr_pacing(struct tcp_rack *rack,
2078 uint64_t rate, uint64_t hw_rate, int line,
2079 int error, uint16_t mod)
2081 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2082 union tcp_log_stackspecific log;
2084 const struct ifnet *ifp;
2086 memset(&log, 0, sizeof(log));
2087 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff);
2088 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff);
2089 if (rack->r_ctl.crte) {
2090 ifp = rack->r_ctl.crte->ptbl->rs_ifp;
2091 } else if (rack->rc_inp->inp_route.ro_nh &&
2092 rack->rc_inp->inp_route.ro_nh->nh_ifp) {
2093 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp;
2097 log.u_bbr.flex3 = (((uint64_t)ifp >> 32) & 0x00000000ffffffff);
2098 log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff);
2100 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2101 log.u_bbr.bw_inuse = rate;
2102 log.u_bbr.flex5 = line;
2103 log.u_bbr.flex6 = error;
2104 log.u_bbr.flex7 = mod;
2105 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs;
2106 log.u_bbr.flex8 = rack->use_fixed_rate;
2107 log.u_bbr.flex8 <<= 1;
2108 log.u_bbr.flex8 |= rack->rack_hdrw_pacing;
2109 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
2110 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate;
2111 if (rack->r_ctl.crte)
2112 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate;
2114 log.u_bbr.cur_del_rate = 0;
2115 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req;
2116 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2117 &rack->rc_inp->inp_socket->so_rcv,
2118 &rack->rc_inp->inp_socket->so_snd,
2119 BBR_LOG_HDWR_PACE, 0,
2120 0, &log, false, &tv);
2125 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped)
2128 * We allow rack_per_of_gp_xx to dictate the b/w rate we want.
2130 uint64_t bw_est, high_rate;
2133 gain = (uint64_t)rack_get_output_gain(rack, rsm);
2135 bw_est /= (uint64_t)100;
2136 /* Never fall below the minimum (def 64kbps) */
2137 if (bw_est < RACK_MIN_BW)
2138 bw_est = RACK_MIN_BW;
2139 if (rack->r_rack_hw_rate_caps) {
2140 /* Rate caps are in place */
2141 if (rack->r_ctl.crte != NULL) {
2142 /* We have a hdwr rate already */
2143 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte);
2144 if (bw_est >= high_rate) {
2145 /* We are capping bw at the highest rate table entry */
2146 rack_log_hdwr_pacing(rack,
2147 bw_est, high_rate, __LINE__,
2153 } else if ((rack->rack_hdrw_pacing == 0) &&
2154 (rack->rack_hdw_pace_ena) &&
2155 (rack->rack_attempt_hdwr_pace == 0) &&
2156 (rack->rc_inp->inp_route.ro_nh != NULL) &&
2157 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
2159 * Special case, we have not yet attempted hardware
2160 * pacing, and yet we may, when we do, find out if we are
2161 * above the highest rate. We need to know the maxbw for the interface
2162 * in question (if it supports ratelimiting). We get back
2163 * a 0, if the interface is not found in the RL lists.
2165 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp);
2167 /* Yep, we have a rate; is it above this rate? */
2168 if (bw_est > high_rate) {
2180 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod)
2182 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2183 union tcp_log_stackspecific log;
2186 if ((mod != 1) && (rack_verbose_logging == 0)) {
2188 * We get 3 values currently for mod
2189 * 1 - We are retransmitting and this tells the reason.
2190 * 2 - We are clearing a dup-ack count.
2191 * 3 - We are incrementing a dup-ack count.
2193 * The clear/increment are only logged
2194 * if you have BBverbose on.
2198 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2199 log.u_bbr.flex1 = tsused;
2200 log.u_bbr.flex2 = thresh;
2201 log.u_bbr.flex3 = rsm->r_flags;
2202 log.u_bbr.flex4 = rsm->r_dupack;
2203 log.u_bbr.flex5 = rsm->r_start;
2204 log.u_bbr.flex6 = rsm->r_end;
2205 log.u_bbr.flex8 = mod;
2206 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2207 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2208 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2209 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2210 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2211 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2212 log.u_bbr.pacing_gain = rack->r_must_retran;
2213 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2214 &rack->rc_inp->inp_socket->so_rcv,
2215 &rack->rc_inp->inp_socket->so_snd,
2216 BBR_LOG_SETTINGS_CHG, 0,
2217 0, &log, false, &tv);
2222 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which)
2224 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2225 union tcp_log_stackspecific log;
2228 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2229 log.u_bbr.flex1 = rack->rc_tp->t_srtt;
2230 log.u_bbr.flex2 = to;
2231 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags;
2232 log.u_bbr.flex4 = slot;
2233 log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot;
2234 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
2235 log.u_bbr.flex7 = rack->rc_in_persist;
2236 log.u_bbr.flex8 = which;
2237 if (rack->rack_no_prr)
2238 log.u_bbr.pkts_out = 0;
2240 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
2241 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2242 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2243 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2244 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2245 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2246 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2247 log.u_bbr.pacing_gain = rack->r_must_retran;
2248 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift;
2249 log.u_bbr.lost = rack_rto_min;
2250 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2251 &rack->rc_inp->inp_socket->so_rcv,
2252 &rack->rc_inp->inp_socket->so_snd,
2253 BBR_LOG_TIMERSTAR, 0,
2254 0, &log, false, &tv);
2259 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm)
2261 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2262 union tcp_log_stackspecific log;
2265 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2266 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2267 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2268 log.u_bbr.flex8 = to_num;
2269 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt;
2270 log.u_bbr.flex2 = rack->rc_rack_rtt;
2272 log.u_bbr.flex3 = 0;
2274 log.u_bbr.flex3 = rsm->r_end - rsm->r_start;
2275 if (rack->rack_no_prr)
2276 log.u_bbr.flex5 = 0;
2278 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
2279 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2280 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2281 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2282 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2283 log.u_bbr.pacing_gain = rack->r_must_retran;
2284 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2285 &rack->rc_inp->inp_socket->so_rcv,
2286 &rack->rc_inp->inp_socket->so_snd,
2288 0, &log, false, &tv);
2293 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack,
2294 struct rack_sendmap *prev,
2295 struct rack_sendmap *rsm,
2296 struct rack_sendmap *next,
2297 int flag, uint32_t th_ack, int line)
2299 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) {
2300 union tcp_log_stackspecific log;
2303 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2304 log.u_bbr.flex8 = flag;
2305 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2306 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2307 log.u_bbr.cur_del_rate = (uint64_t)prev;
2308 log.u_bbr.delRate = (uint64_t)rsm;
2309 log.u_bbr.rttProp = (uint64_t)next;
2310 log.u_bbr.flex7 = 0;
2312 log.u_bbr.flex1 = prev->r_start;
2313 log.u_bbr.flex2 = prev->r_end;
2314 log.u_bbr.flex7 |= 0x4;
2317 log.u_bbr.flex3 = rsm->r_start;
2318 log.u_bbr.flex4 = rsm->r_end;
2319 log.u_bbr.flex7 |= 0x2;
2322 log.u_bbr.flex5 = next->r_start;
2323 log.u_bbr.flex6 = next->r_end;
2324 log.u_bbr.flex7 |= 0x1;
2326 log.u_bbr.applimited = line;
2327 log.u_bbr.pkts_out = th_ack;
2328 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2329 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2330 if (rack->rack_no_prr)
2333 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt;
2334 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2335 &rack->rc_inp->inp_socket->so_rcv,
2336 &rack->rc_inp->inp_socket->so_snd,
2338 0, &log, false, &tv);
2343 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len,
2344 struct rack_sendmap *rsm, int conf)
2346 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
2347 union tcp_log_stackspecific log;
2349 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2350 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2351 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2352 log.u_bbr.flex1 = t;
2353 log.u_bbr.flex2 = len;
2354 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt;
2355 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest;
2356 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest;
2357 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt;
2358 log.u_bbr.flex7 = conf;
2359 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot;
2360 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method;
2361 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2362 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt;
2363 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags;
2364 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2366 log.u_bbr.pkt_epoch = rsm->r_start;
2367 log.u_bbr.lost = rsm->r_end;
2368 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt;
2369 log.u_bbr.pacing_gain = rsm->r_flags;
2372 log.u_bbr.pkt_epoch = rack->rc_tp->iss;
2374 log.u_bbr.cwnd_gain = 0;
2375 log.u_bbr.pacing_gain = 0;
2377 /* Write out general bits of interest rrs here */
2378 log.u_bbr.use_lt_bw = rack->rc_highly_buffered;
2379 log.u_bbr.use_lt_bw <<= 1;
2380 log.u_bbr.use_lt_bw |= rack->forced_ack;
2381 log.u_bbr.use_lt_bw <<= 1;
2382 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul;
2383 log.u_bbr.use_lt_bw <<= 1;
2384 log.u_bbr.use_lt_bw |= rack->in_probe_rtt;
2385 log.u_bbr.use_lt_bw <<= 1;
2386 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt;
2387 log.u_bbr.use_lt_bw <<= 1;
2388 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set;
2389 log.u_bbr.use_lt_bw <<= 1;
2390 log.u_bbr.use_lt_bw |= rack->rc_gp_filled;
2391 log.u_bbr.use_lt_bw <<= 1;
2392 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom;
2393 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight;
2394 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts;
2395 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered;
2396 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts;
2397 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt;
2398 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
2399 log.u_bbr.bw_inuse <<= 32;
2401 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]);
2402 TCP_LOG_EVENTP(tp, NULL,
2403 &rack->rc_inp->inp_socket->so_rcv,
2404 &rack->rc_inp->inp_socket->so_snd,
2406 0, &log, false, &tv);
2413 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt)
2416 * Log the rtt sample we are
2417 * applying to the srtt algorithm in
2420 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2421 union tcp_log_stackspecific log;
2424 /* Convert our ms to a microsecond */
2425 memset(&log, 0, sizeof(log));
2426 log.u_bbr.flex1 = rtt;
2427 log.u_bbr.flex2 = rack->r_ctl.ack_count;
2428 log.u_bbr.flex3 = rack->r_ctl.sack_count;
2429 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move;
2430 log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra;
2431 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
2432 log.u_bbr.flex7 = 1;
2433 log.u_bbr.flex8 = rack->sack_attack_disable;
2434 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2435 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2436 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2437 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2438 log.u_bbr.pacing_gain = rack->r_must_retran;
2440 * We capture in delRate the upper 32 bits as
2441 * the confidence level we had declared, and the
2442 * lower 32 bits as the actual RTT using the arrival
2445 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence;
2446 log.u_bbr.delRate <<= 32;
2447 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt;
2448 /* Lets capture all the things that make up t_rtxcur */
2449 log.u_bbr.applimited = rack_rto_min;
2450 log.u_bbr.epoch = rack_rto_max;
2451 log.u_bbr.lt_epoch = rtt;
2452 log.u_bbr.lost = rack_rto_min;
2453 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop);
2454 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp);
2455 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec;
2456 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC;
2457 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec;
2458 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2459 &rack->rc_inp->inp_socket->so_rcv,
2460 &rack->rc_inp->inp_socket->so_snd,
2462 0, &log, false, &tv);
2467 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where)
2469 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
2470 union tcp_log_stackspecific log;
2473 /* Convert our ms to a microsecond */
2474 memset(&log, 0, sizeof(log));
2475 log.u_bbr.flex1 = rtt;
2476 log.u_bbr.flex2 = send_time;
2477 log.u_bbr.flex3 = ack_time;
2478 log.u_bbr.flex4 = where;
2479 log.u_bbr.flex7 = 2;
2480 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2481 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2482 &rack->rc_inp->inp_socket->so_rcv,
2483 &rack->rc_inp->inp_socket->so_snd,
2485 0, &log, false, &tv);
2492 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line)
2494 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) {
2495 union tcp_log_stackspecific log;
2498 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2499 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2500 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2501 log.u_bbr.flex1 = line;
2502 log.u_bbr.flex2 = tick;
2503 log.u_bbr.flex3 = tp->t_maxunacktime;
2504 log.u_bbr.flex4 = tp->t_acktime;
2505 log.u_bbr.flex8 = event;
2506 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2507 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2508 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2509 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2510 log.u_bbr.pacing_gain = rack->r_must_retran;
2511 TCP_LOG_EVENTP(tp, NULL,
2512 &rack->rc_inp->inp_socket->so_rcv,
2513 &rack->rc_inp->inp_socket->so_snd,
2514 BBR_LOG_PROGRESS, 0,
2515 0, &log, false, &tv);
2520 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv)
2522 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2523 union tcp_log_stackspecific log;
2525 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2526 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2527 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2528 log.u_bbr.flex1 = slot;
2529 if (rack->rack_no_prr)
2530 log.u_bbr.flex2 = 0;
2532 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt;
2533 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags);
2534 log.u_bbr.flex8 = rack->rc_in_persist;
2535 log.u_bbr.timeStamp = cts;
2536 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2537 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2538 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2539 log.u_bbr.pacing_gain = rack->r_must_retran;
2540 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2541 &rack->rc_inp->inp_socket->so_rcv,
2542 &rack->rc_inp->inp_socket->so_snd,
2544 0, &log, false, tv);
2549 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs)
2551 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2552 union tcp_log_stackspecific log;
2555 memset(&log, 0, sizeof(log));
2556 log.u_bbr.flex1 = did_out;
2557 log.u_bbr.flex2 = nxt_pkt;
2558 log.u_bbr.flex3 = way_out;
2559 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
2560 if (rack->rack_no_prr)
2561 log.u_bbr.flex5 = 0;
2563 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
2564 log.u_bbr.flex6 = nsegs;
2565 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs;
2566 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */
2567 log.u_bbr.flex7 <<= 1;
2568 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */
2569 log.u_bbr.flex7 <<= 1;
2570 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */
2571 log.u_bbr.flex8 = rack->rc_in_persist;
2572 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2573 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2574 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2575 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
2576 log.u_bbr.use_lt_bw <<= 1;
2577 log.u_bbr.use_lt_bw |= rack->r_might_revert;
2578 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2579 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2580 log.u_bbr.pacing_gain = rack->r_must_retran;
2581 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2582 &rack->rc_inp->inp_socket->so_rcv,
2583 &rack->rc_inp->inp_socket->so_snd,
2584 BBR_LOG_DOSEG_DONE, 0,
2585 0, &log, false, &tv);
2590 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm)
2592 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
2593 union tcp_log_stackspecific log;
2597 memset(&log, 0, sizeof(log));
2598 cts = tcp_get_usecs(&tv);
2599 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs;
2600 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
2601 log.u_bbr.flex4 = arg1;
2602 log.u_bbr.flex5 = arg2;
2603 log.u_bbr.flex6 = arg3;
2604 log.u_bbr.flex8 = frm;
2605 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2606 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2607 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2608 log.u_bbr.applimited = rack->r_ctl.rc_sacked;
2609 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2610 log.u_bbr.pacing_gain = rack->r_must_retran;
2611 TCP_LOG_EVENTP(tp, NULL,
2612 &tp->t_inpcb->inp_socket->so_rcv,
2613 &tp->t_inpcb->inp_socket->so_snd,
2614 TCP_HDWR_PACE_SIZE, 0,
2615 0, &log, false, &tv);
2620 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot,
2621 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use)
2623 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2624 union tcp_log_stackspecific log;
2627 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2628 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2629 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2630 log.u_bbr.flex1 = slot;
2631 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags;
2632 log.u_bbr.flex4 = reason;
2633 if (rack->rack_no_prr)
2634 log.u_bbr.flex5 = 0;
2636 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
2637 log.u_bbr.flex7 = hpts_calling;
2638 log.u_bbr.flex8 = rack->rc_in_persist;
2639 log.u_bbr.lt_epoch = cwnd_to_use;
2640 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2641 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2642 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2643 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2644 log.u_bbr.pacing_gain = rack->r_must_retran;
2645 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2646 &rack->rc_inp->inp_socket->so_rcv,
2647 &rack->rc_inp->inp_socket->so_snd,
2649 tlen, &log, false, &tv);
2654 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts,
2655 struct timeval *tv, uint32_t flags_on_entry)
2657 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2658 union tcp_log_stackspecific log;
2660 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2661 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
2662 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
2663 log.u_bbr.flex1 = line;
2664 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to;
2665 log.u_bbr.flex3 = flags_on_entry;
2666 log.u_bbr.flex4 = us_cts;
2667 if (rack->rack_no_prr)
2668 log.u_bbr.flex5 = 0;
2670 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
2671 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
2672 log.u_bbr.flex7 = hpts_removed;
2673 log.u_bbr.flex8 = 1;
2674 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags;
2675 log.u_bbr.timeStamp = us_cts;
2676 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2677 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2678 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2679 log.u_bbr.pacing_gain = rack->r_must_retran;
2680 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2681 &rack->rc_inp->inp_socket->so_rcv,
2682 &rack->rc_inp->inp_socket->so_snd,
2683 BBR_LOG_TIMERCANC, 0,
2684 0, &log, false, tv);
2689 rack_log_alt_to_to_cancel(struct tcp_rack *rack,
2690 uint32_t flex1, uint32_t flex2,
2691 uint32_t flex3, uint32_t flex4,
2692 uint32_t flex5, uint32_t flex6,
2693 uint16_t flex7, uint8_t mod)
2695 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2696 union tcp_log_stackspecific log;
2700 /* No, you can't use 1, it's reserved for the real timer cancel */
2703 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2704 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2705 log.u_bbr.flex1 = flex1;
2706 log.u_bbr.flex2 = flex2;
2707 log.u_bbr.flex3 = flex3;
2708 log.u_bbr.flex4 = flex4;
2709 log.u_bbr.flex5 = flex5;
2710 log.u_bbr.flex6 = flex6;
2711 log.u_bbr.flex7 = flex7;
2712 log.u_bbr.flex8 = mod;
2713 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2714 &rack->rc_inp->inp_socket->so_rcv,
2715 &rack->rc_inp->inp_socket->so_snd,
2716 BBR_LOG_TIMERCANC, 0,
2717 0, &log, false, &tv);
2722 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers)
2724 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2725 union tcp_log_stackspecific log;
2728 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2729 log.u_bbr.flex1 = timers;
2730 log.u_bbr.flex2 = ret;
2731 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp;
2732 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
2733 log.u_bbr.flex5 = cts;
2734 if (rack->rack_no_prr)
2735 log.u_bbr.flex6 = 0;
2737 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt;
2738 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
2739 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
2740 log.u_bbr.pacing_gain = rack->r_must_retran;
2741 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2742 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2743 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2744 &rack->rc_inp->inp_socket->so_rcv,
2745 &rack->rc_inp->inp_socket->so_snd,
2746 BBR_LOG_TO_PROCESS, 0,
2747 0, &log, false, &tv);
2752 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd)
2754 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2755 union tcp_log_stackspecific log;
2758 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2759 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out;
2760 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs;
2761 if (rack->rack_no_prr)
2762 log.u_bbr.flex3 = 0;
2764 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt;
2765 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered;
2766 log.u_bbr.flex5 = rack->r_ctl.rc_sacked;
2767 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt;
2768 log.u_bbr.flex8 = frm;
2769 log.u_bbr.pkts_out = orig_cwnd;
2770 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2771 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2772 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
2773 log.u_bbr.use_lt_bw <<= 1;
2774 log.u_bbr.use_lt_bw |= rack->r_might_revert;
2775 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2776 &rack->rc_inp->inp_socket->so_rcv,
2777 &rack->rc_inp->inp_socket->so_snd,
2779 0, &log, false, &tv);
2783 #ifdef NETFLIX_EXP_DETECTION
2785 rack_log_sad(struct tcp_rack *rack, int event)
2787 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
2788 union tcp_log_stackspecific log;
2791 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
2792 log.u_bbr.flex1 = rack->r_ctl.sack_count;
2793 log.u_bbr.flex2 = rack->r_ctl.ack_count;
2794 log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra;
2795 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move;
2796 log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced;
2797 log.u_bbr.flex6 = tcp_sack_to_ack_thresh;
2798 log.u_bbr.pkts_out = tcp_sack_to_move_thresh;
2799 log.u_bbr.lt_epoch = (tcp_force_detection << 8);
2800 log.u_bbr.lt_epoch |= rack->do_detection;
2801 log.u_bbr.applimited = tcp_map_minimum;
2802 log.u_bbr.flex7 = rack->sack_attack_disable;
2803 log.u_bbr.flex8 = event;
2804 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
2805 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
2806 log.u_bbr.delivered = tcp_sad_decay_val;
2807 TCP_LOG_EVENTP(rack->rc_tp, NULL,
2808 &rack->rc_inp->inp_socket->so_rcv,
2809 &rack->rc_inp->inp_socket->so_snd,
2810 TCP_SAD_DETECTION, 0,
2811 0, &log, false, &tv);
2817 rack_counter_destroy(void)
2821 counter_u64_free(rack_fto_send);
2822 counter_u64_free(rack_fto_rsm_send);
2823 counter_u64_free(rack_nfto_resend);
2824 counter_u64_free(rack_hw_pace_init_fail);
2825 counter_u64_free(rack_hw_pace_lost);
2826 counter_u64_free(rack_non_fto_send);
2827 counter_u64_free(rack_extended_rfo);
2828 counter_u64_free(rack_ack_total);
2829 counter_u64_free(rack_express_sack);
2830 counter_u64_free(rack_sack_total);
2831 counter_u64_free(rack_move_none);
2832 counter_u64_free(rack_move_some);
2833 counter_u64_free(rack_sack_attacks_detected);
2834 counter_u64_free(rack_sack_attacks_reversed);
2835 counter_u64_free(rack_sack_used_next_merge);
2836 counter_u64_free(rack_sack_used_prev_merge);
2837 counter_u64_free(rack_badfr);
2838 counter_u64_free(rack_badfr_bytes);
2839 counter_u64_free(rack_rtm_prr_retran);
2840 counter_u64_free(rack_rtm_prr_newdata);
2841 counter_u64_free(rack_timestamp_mismatch);
2842 counter_u64_free(rack_find_high);
2843 counter_u64_free(rack_reorder_seen);
2844 counter_u64_free(rack_tlp_tot);
2845 counter_u64_free(rack_tlp_newdata);
2846 counter_u64_free(rack_tlp_retran);
2847 counter_u64_free(rack_tlp_retran_bytes);
2848 counter_u64_free(rack_tlp_retran_fail);
2849 counter_u64_free(rack_to_tot);
2850 counter_u64_free(rack_to_arm_rack);
2851 counter_u64_free(rack_to_arm_tlp);
2852 counter_u64_free(rack_calc_zero);
2853 counter_u64_free(rack_calc_nonzero);
2854 counter_u64_free(rack_paced_segments);
2855 counter_u64_free(rack_unpaced_segments);
2856 counter_u64_free(rack_saw_enobuf);
2857 counter_u64_free(rack_saw_enobuf_hw);
2858 counter_u64_free(rack_saw_enetunreach);
2859 counter_u64_free(rack_hot_alloc);
2860 counter_u64_free(rack_to_alloc);
2861 counter_u64_free(rack_to_alloc_hard);
2862 counter_u64_free(rack_to_alloc_emerg);
2863 counter_u64_free(rack_to_alloc_limited);
2864 counter_u64_free(rack_alloc_limited_conns);
2865 counter_u64_free(rack_split_limited);
2866 for (i = 0; i < MAX_NUM_OF_CNTS; i++) {
2867 counter_u64_free(rack_proc_comp_ack[i]);
2869 counter_u64_free(rack_multi_single_eq);
2870 counter_u64_free(rack_proc_non_comp_ack);
2871 counter_u64_free(rack_sack_proc_all);
2872 counter_u64_free(rack_sack_proc_restart);
2873 counter_u64_free(rack_sack_proc_short);
2874 counter_u64_free(rack_enter_tlp_calc);
2875 counter_u64_free(rack_used_tlpmethod);
2876 counter_u64_free(rack_used_tlpmethod2);
2877 counter_u64_free(rack_sack_skipped_acked);
2878 counter_u64_free(rack_sack_splits);
2879 counter_u64_free(rack_progress_drops);
2880 counter_u64_free(rack_input_idle_reduces);
2881 counter_u64_free(rack_collapsed_win);
2882 counter_u64_free(rack_tlp_does_nada);
2883 counter_u64_free(rack_try_scwnd);
2884 counter_u64_free(rack_per_timer_hole);
2885 counter_u64_free(rack_large_ackcmp);
2886 counter_u64_free(rack_small_ackcmp);
2888 counter_u64_free(rack_adjust_map_bw);
2890 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE);
2891 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE);
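/*
 * Allocate a send-map entry: prefer the connection's free list (the
 * "hot" cache), then the UMA zone, and finally fall back to the last
 * reserved entries on the free list if the zone allocation fails.
 */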
2894 static struct rack_sendmap *
2895 rack_alloc(struct tcp_rack *rack)
2897 struct rack_sendmap *rsm;
2900 * First get the top of the list; in theory
2901 * it is the "hottest" rsm we have, possibly
2902 * just freed by ack processing.
2904 if (rack->rc_free_cnt > rack_free_cache) {
2905 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
2906 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
2907 counter_u64_add(rack_hot_alloc, 1);
2908 rack->rc_free_cnt--;
2912 * Once we get under our free cache we probably
2913 * no longer have a "hot" one available. Lets
2916 rsm = uma_zalloc(rack_zone, M_NOWAIT);
2918 rack->r_ctl.rc_num_maps_alloced++;
2919 counter_u64_add(rack_to_alloc, 1);
2923 * Dig in to our aux rsm's (the last two) since
2924 * UMA failed to get us one.
2926 if (rack->rc_free_cnt) {
2927 counter_u64_add(rack_to_alloc_emerg, 1);
2928 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
2929 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
2930 rack->rc_free_cnt--;
2936 static struct rack_sendmap *
2937 rack_alloc_full_limit(struct tcp_rack *rack)
2939 if ((V_tcp_map_entries_limit > 0) &&
2940 (rack->do_detection == 0) &&
2941 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) {
2942 counter_u64_add(rack_to_alloc_limited, 1);
2943 if (!rack->alloc_limit_reported) {
2944 rack->alloc_limit_reported = 1;
2945 counter_u64_add(rack_alloc_limited_conns, 1);
2949 return (rack_alloc(rack));
2952 /* wrapper to allocate a sendmap entry, subject to a specific limit */
2953 static struct rack_sendmap *
2954 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type)
2956 struct rack_sendmap *rsm;
2959 /* currently there is only one limit type */
2960 if (V_tcp_map_split_limit > 0 &&
2961 (rack->do_detection == 0) &&
2962 rack->r_ctl.rc_num_split_allocs >= V_tcp_map_split_limit) {
2963 counter_u64_add(rack_split_limited, 1);
2964 if (!rack->alloc_limit_reported) {
2965 rack->alloc_limit_reported = 1;
2966 counter_u64_add(rack_alloc_limited_conns, 1);
2972 /* allocate and mark in the limit type, if set */
2973 rsm = rack_alloc(rack);
2974 if (rsm != NULL && limit_type) {
2975 rsm->r_limit_type = limit_type;
2976 rack->r_ctl.rc_num_split_allocs++;
2982 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm)
2984 if (rsm->r_flags & RACK_APP_LIMITED) {
2985 if (rack->r_ctl.rc_app_limited_cnt > 0) {
2986 rack->r_ctl.rc_app_limited_cnt--;
2989 if (rsm->r_limit_type) {
2990 /* currently there is only one limit type */
2991 rack->r_ctl.rc_num_split_allocs--;
2993 if (rsm == rack->r_ctl.rc_first_appl) {
2994 if (rack->r_ctl.rc_app_limited_cnt == 0)
2995 rack->r_ctl.rc_first_appl = NULL;
2997 /* Follow the next one out */
2998 struct rack_sendmap fe;
3000 fe.r_start = rsm->r_nseq_appl;
3001 rack->r_ctl.rc_first_appl = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
3004 if (rsm == rack->r_ctl.rc_resend)
3005 rack->r_ctl.rc_resend = NULL;
3006 if (rsm == rack->r_ctl.rc_rsm_at_retran)
3007 rack->r_ctl.rc_rsm_at_retran = NULL;
3008 if (rsm == rack->r_ctl.rc_end_appl)
3009 rack->r_ctl.rc_end_appl = NULL;
3010 if (rack->r_ctl.rc_tlpsend == rsm)
3011 rack->r_ctl.rc_tlpsend = NULL;
3012 if (rack->r_ctl.rc_sacklast == rsm)
3013 rack->r_ctl.rc_sacklast = NULL;
3014 memset(rsm, 0, sizeof(struct rack_sendmap));
3015 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext);
3016 rack->rc_free_cnt++;
3020 rack_free_trim(struct tcp_rack *rack)
3022 struct rack_sendmap *rsm;
3025 * Free up all the tail entries until
3026 * we get our list down to the limit.
3028 while (rack->rc_free_cnt > rack_free_cache) {
3029 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head);
3030 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
3031 rack->rc_free_cnt--;
3032 uma_zfree(rack_zone, rsm);
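/*
 * Compute how much data a goodput measurement should cover: the default
 * data window when we have no b/w estimate yet, otherwise the larger of
 * that default, the goal-BDP amount, and the amount needed to span
 * rack_min_measure_usec at the current estimated b/w.
 */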
3038 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack)
3040 uint64_t srtt, bw, len, tim;
3041 uint32_t segsiz, def_len, minl;
3043 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
3044 def_len = rack_def_data_window * segsiz;
3045 if (rack->rc_gp_filled == 0) {
3047 * We have no measurement (IW is in flight?) so
3048 * we can only guess using our data_window sysctl
3049 * value (usually 100MSS).
3054 * Now we have a number of factors to consider.
3056 * 1) We have a desired BDP which is usually
3058 * 2) We have a minimum number of rtt's usually 1 SRTT
3059 * but we allow it to be more.
3060 * 3) We want to make sure a measurement lasts at least N useconds (if
3061 * we have set rack_min_measure_usec).
3063 * We handle the first concern here by trying to create a data
3064 * window of max(rack_def_data_window, DesiredBDP). The
3065 * second concern we handle in not letting the measurement
3066 * window end normally until at least the required SRTT's
3067 * have gone by which is done further below in
3068 * rack_enough_for_measurement(). Finally the third concern
3069 * we also handle here by calculating how long that time
3070 * would take at the current BW and then return the
3071 * max of our first calculation and that length. Note
3072 * that if rack_min_measure_usec is 0, we don't deal
3073 * with concern 3. Also for both Concern 1 and 3 an
3074 * application limited period could end the measurement
3077 * So lets calculate the BDP with the "known" b/w using
3078 * the SRTT as our rtt and then multiply it by the
3081 bw = rack_get_bw(rack);
3082 srtt = (uint64_t)tp->t_srtt;
3084 len /= (uint64_t)HPTS_USEC_IN_SEC;
3085 len *= max(1, rack_goal_bdp);
3086 /* Now we need to round up to the nearest MSS */
3087 len = roundup(len, segsiz);
3088 if (rack_min_measure_usec) {
3089 /* Now calculate our min length for this b/w */
3090 tim = rack_min_measure_usec;
3091 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC;
3094 minl = roundup(minl, segsiz);
3099 * Now if we have a very small window we want
3100 * to attempt to get the window that is
3101 * as small as possible. This happens on
3102 * low b/w connections and we don't want to
3103 * span huge numbers of rtt's between measurements.
3105 * We basically include 2 over our "MIN window" so
3106 * that the measurement can be shortened (possibly) by
3110 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz)));
3112 return (max((uint32_t)len, def_len));
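/*
 * Decide whether the in-progress goodput measurement is usable: enough
 * bytes past gput_seq must be acked and, unless we hit the app-limited
 * point, at least rack_min_srtts worth of SRTT must have elapsed.
 */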
3117 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack)
3119 uint32_t tim, srtts, segsiz;
3122 * Has enough time passed for the GP measurement to be valid?
3124 if ((tp->snd_max == tp->snd_una) ||
3125 (th_ack == tp->snd_max)){
3129 if (SEQ_LT(th_ack, tp->gput_seq)) {
3130 /* Not enough bytes yet */
3133 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
3134 if (SEQ_LT(th_ack, tp->gput_ack) &&
3135 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) {
3136 /* Not enough bytes yet */
3139 if (rack->r_ctl.rc_first_appl &&
3140 (rack->r_ctl.rc_first_appl->r_start == th_ack)) {
3142 * We are up to the app limited point
3143 * so we have to measure irrespective of the time.
3147 /* Now what about time? */
3148 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts);
3149 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts;
3153 /* Nope not even a full SRTT has passed */
3158 rack_log_timely(struct tcp_rack *rack,
3159 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd,
3160 uint64_t up_bnd, int line, uint8_t method)
3162 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
3163 union tcp_log_stackspecific log;
3166 memset(&log, 0, sizeof(log));
3167 log.u_bbr.flex1 = logged;
3168 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt;
3169 log.u_bbr.flex2 <<= 4;
3170 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt;
3171 log.u_bbr.flex2 <<= 4;
3172 log.u_bbr.flex2 |= rack->rc_gp_incr;
3173 log.u_bbr.flex2 <<= 4;
3174 log.u_bbr.flex2 |= rack->rc_gp_bwred;
3175 log.u_bbr.flex3 = rack->rc_gp_incr;
3176 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss;
3177 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca;
3178 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec;
3179 log.u_bbr.flex7 = rack->rc_gp_bwred;
3180 log.u_bbr.flex8 = method;
3181 log.u_bbr.cur_del_rate = cur_bw;
3182 log.u_bbr.delRate = low_bnd;
3183 log.u_bbr.bw_inuse = up_bnd;
3184 log.u_bbr.rttProp = rack_get_bw(rack);
3185 log.u_bbr.pkt_epoch = line;
3186 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff;
3187 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3188 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3189 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt;
3190 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt;
3191 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom;
3192 log.u_bbr.cwnd_gain <<= 1;
3193 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec;
3194 log.u_bbr.cwnd_gain <<= 1;
3195 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss;
3196 log.u_bbr.cwnd_gain <<= 1;
3197 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca;
3198 log.u_bbr.lost = rack->r_ctl.rc_loss_count;
3199 TCP_LOG_EVENTP(rack->rc_tp, NULL,
3200 &rack->rc_inp->inp_socket->so_rcv,
3201 &rack->rc_inp->inp_socket->so_snd,
3203 0, &log, false, &tv);
3208 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult)
3211 * Before we increase we need to know if
3212 * the estimate just made was less than
3213 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est)
3215 * If we already are pacing at a fast enough
3216 * rate to push us faster there is no sense in
3219 * We first calculate our actual pacing rate (ss or ca multiplier
3220 * times our cur_bw).
3222 * Then we take the last measured rate and multiply by our
3223 * maximum pacing overage to give us a max allowable rate.
3225 * If our act_rate is smaller than our max_allowable rate
3226 * then we should increase. Else we should hold steady.
3229 uint64_t act_rate, max_allow_rate;
3231 if (rack_timely_no_stopping)
3234 if ((cur_bw == 0) || (last_bw_est == 0)) {
3236 * Initial startup case or
3237 * everything is acked case.
3239 rack_log_timely(rack, mult, cur_bw, 0, 0,
3245 * We can always pace at or slightly above our rate.
3247 rack_log_timely(rack, mult, cur_bw, 0, 0,
3251 act_rate = cur_bw * (uint64_t)mult;
3253 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100);
3254 max_allow_rate /= 100;
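/*
 * Worked example (assumed values): cur_bw = 1,000,000 B/s at a 150%
 * multiplier is an actual pacing rate of about 1,500,000 B/s. With
 * last_bw_est = 1,400,000 B/s and rack_max_per_above = 10,
 * max_allow_rate = 1,540,000 B/s, so we would still allow a raise.
 */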
3255 if (act_rate < max_allow_rate) {
3257 * Here the rate we are actually pacing at
3258 * is smaller than 10% above our last measurement.
3259 * This means we are pacing below what we would
3260 * like to try to achieve (plus some wiggle room).
3262 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate,
3267 * Here we are already pacing at least rack_max_per_above(10%)
3268 * what we are getting back. This indicates most likely
3269 * that we are being limited (cwnd/rwnd/app) and can't
3270 * get any more b/w. There is no sense in trying to
3271 * raise the pacing rate; it's not speeding us up
3272 * and we already are pacing faster than we are getting.
3274 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate,
3281 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack)
3284 * When we drag bottom, we want to ensure
3285 * that no multiplier is below 1.0 (100); if one is,
3286 * we want to restore it to at least that.
3288 if (rack->r_ctl.rack_per_of_gp_rec < 100) {
3289 /* This is unlikely; we usually do not touch recovery */
3290 rack->r_ctl.rack_per_of_gp_rec = 100;
3292 if (rack->r_ctl.rack_per_of_gp_ca < 100) {
3293 rack->r_ctl.rack_per_of_gp_ca = 100;
3295 if (rack->r_ctl.rack_per_of_gp_ss < 100) {
3296 rack->r_ctl.rack_per_of_gp_ss = 100;
3301 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack)
3303 if (rack->r_ctl.rack_per_of_gp_ca > 100) {
3304 rack->r_ctl.rack_per_of_gp_ca = 100;
3306 if (rack->r_ctl.rack_per_of_gp_ss > 100) {
3307 rack->r_ctl.rack_per_of_gp_ss = 100;
3312 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override)
3314 int32_t calc, logged, plus;
3320 * override is passed when we are
3321 * losing b/w and making one last
3322 * gasp at trying to not lose out
3323 * to a new-reno flow.
3327 /* In classic timely we boost by 5x if we have 5 increases in a row, lets not */
3328 if (rack->rc_gp_incr &&
3329 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) {
3331 * Reset and get 5 strokes more before the boost. Note
3332 * that the count is 0 based so we have to add one.
3335 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST;
3336 rack->rc_gp_timely_inc_cnt = 0;
3338 plus = (uint32_t)rack_gp_increase_per;
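/*
 * Illustrative (assuming rack_gp_increase_per = 2): each qualifying
 * increase normally adds 2 to the multiplier; once we have had
 * RACK_TIMELY_CNT_BOOST increases in a row, a single boosted step of
 * 2 * RACK_TIMELY_CNT_BOOST is applied above and the counter resets.
 */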
3339 /* Must be at least 1% increase for true timely increases */
3341 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0)))
3343 if (rack->rc_gp_saw_rec &&
3344 (rack->rc_gp_no_rec_chg == 0) &&
3345 rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
3346 rack->r_ctl.rack_per_of_gp_rec)) {
3347 /* We have been in recovery ding it too */
3348 calc = rack->r_ctl.rack_per_of_gp_rec + plus;
3352 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc;
3353 if (rack_per_upper_bound_ss &&
3354 (rack->rc_dragged_bottom == 0) &&
3355 (rack->r_ctl.rack_per_of_gp_rec > rack_per_upper_bound_ss))
3356 rack->r_ctl.rack_per_of_gp_rec = rack_per_upper_bound_ss;
3358 if (rack->rc_gp_saw_ca &&
3359 (rack->rc_gp_saw_ss == 0) &&
3360 rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
3361 rack->r_ctl.rack_per_of_gp_ca)) {
3363 calc = rack->r_ctl.rack_per_of_gp_ca + plus;
3367 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc;
3368 if (rack_per_upper_bound_ca &&
3369 (rack->rc_dragged_bottom == 0) &&
3370 (rack->r_ctl.rack_per_of_gp_ca > rack_per_upper_bound_ca))
3371 rack->r_ctl.rack_per_of_gp_ca = rack_per_upper_bound_ca;
3373 if (rack->rc_gp_saw_ss &&
3374 rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
3375 rack->r_ctl.rack_per_of_gp_ss)) {
3377 calc = rack->r_ctl.rack_per_of_gp_ss + plus;
3380 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc;
3381 if (rack_per_upper_bound_ss &&
3382 (rack->rc_dragged_bottom == 0) &&
3383 (rack->r_ctl.rack_per_of_gp_ss > rack_per_upper_bound_ss))
3384 rack->r_ctl.rack_per_of_gp_ss = rack_per_upper_bound_ss;
3388 (rack->rc_gp_incr == 0)){
3389 /* Go into increment mode */
3390 rack->rc_gp_incr = 1;
3391 rack->rc_gp_timely_inc_cnt = 0;
3393 if (rack->rc_gp_incr &&
3395 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) {
3396 rack->rc_gp_timely_inc_cnt++;
3398 rack_log_timely(rack, logged, plus, 0, 0,
3403 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff)
3406 * norm_grad = rtt_diff / minrtt;
3407 * new_per = curper * (1 - B * norm_grad)
3409 * B = rack_gp_decrease_per (default 10%)
3410 * rtt_diff = input var current rtt-diff
3411 * curper = input var current percentage
3412 * minrtt = from rack filter
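* Worked example (assumed values): curper = 200, B = 10%,
* rtt_diff = 5,000 usec and minrtt = 25,000 usec give
* norm_grad = 0.2, so new_per = 200 * (1 - 0.10 * 0.2) = 196.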
3417 perf = (((uint64_t)curper * ((uint64_t)1000000 -
3418 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 *
3419 (((uint64_t)rtt_diff * (uint64_t)1000000)/
3420 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/
3421 (uint64_t)1000000)) /
3423 if (perf > curper) {
3427 return ((uint32_t)perf);
3431 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt)
3435 * result = curper * (1 - B * (1 - highrttthresh / rtt))
3438 * B = rack_gp_decrease_per (default 10%)
3439 * highrttthresh = filter_min * rack_gp_rtt_maxmul
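* Worked example (assumed values): minrtt = 10,000 usec and
* rack_gp_rtt_maxmul = 3 give highrttthresh = 30,000 usec; with
* rtt = 60,000 usec and B = 10% the result is curper * 0.95
* (e.g. 200 becomes 190).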
3442 uint32_t highrttthresh;
3444 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul;
3446 perf = (((uint64_t)curper * ((uint64_t)1000000 -
3447 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 -
3448 ((uint64_t)highrttthresh * (uint64_t)1000000) /
3449 (uint64_t)rtt)) / 100)) /(uint64_t)1000000);
3454 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff)
3456 uint64_t logvar, logvar2, logvar3;
3457 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val;
3459 if (rack->rc_gp_incr) {
3460 /* Turn off increment counting */
3461 rack->rc_gp_incr = 0;
3462 rack->rc_gp_timely_inc_cnt = 0;
3464 ss_red = ca_red = rec_red = 0;
3466 /* Calculate the reduction value */
3470 /* Must be at least 1% reduction */
3471 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) {
3472 /* We have been in recovery ding it too */
3473 if (timely_says == 2) {
3474 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt);
3475 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff);
3481 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff);
3482 if (rack->r_ctl.rack_per_of_gp_rec > val) {
3483 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val);
3484 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val;
3486 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound;
3489 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec)
3490 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound;
3493 if (rack->rc_gp_saw_ss) {
3495 if (timely_says == 2) {
3496 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt);
3497 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff);
3503 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff);
3504 if (rack->r_ctl.rack_per_of_gp_ss > new_per) {
3505 ss_red = rack->r_ctl.rack_per_of_gp_ss - val;
3506 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val;
3509 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound;
3513 logvar2 = (uint32_t)rtt;
3515 logvar2 |= (uint32_t)rtt_diff;
3516 logvar3 = rack_gp_rtt_maxmul;
3518 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
3519 rack_log_timely(rack, timely_says,
3521 logvar, __LINE__, 10);
3523 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss)
3524 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound;
3526 } else if (rack->rc_gp_saw_ca) {
3528 if (timely_says == 2) {
3529 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt);
3530 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff);
3536 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff);
3537 if (rack->r_ctl.rack_per_of_gp_ca > val) {
3538 ca_red = rack->r_ctl.rack_per_of_gp_ca - val;
3539 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val;
3541 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound;
3546 logvar2 = (uint32_t)rtt;
3548 logvar2 |= (uint32_t)rtt_diff;
3549 logvar3 = rack_gp_rtt_maxmul;
3551 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
3552 rack_log_timely(rack, timely_says,
3554 logvar, __LINE__, 10);
3556 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca)
3557 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound;
3560 if (rack->rc_gp_timely_dec_cnt < 0x7) {
3561 rack->rc_gp_timely_dec_cnt++;
3562 if (rack_timely_dec_clear &&
3563 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear))
3564 rack->rc_gp_timely_dec_cnt = 0;
3569 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar,
3574 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts,
3575 uint32_t rtt, uint32_t line, uint8_t reas)
3577 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
3578 union tcp_log_stackspecific log;
3581 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
3582 log.u_bbr.flex1 = line;
3583 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts;
3584 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts;
3585 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss;
3586 log.u_bbr.flex5 = rtt;
3587 log.u_bbr.flex6 = rack->rc_highly_buffered;
3588 log.u_bbr.flex6 <<= 1;
3589 log.u_bbr.flex6 |= rack->forced_ack;
3590 log.u_bbr.flex6 <<= 1;
3591 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul;
3592 log.u_bbr.flex6 <<= 1;
3593 log.u_bbr.flex6 |= rack->in_probe_rtt;
3594 log.u_bbr.flex6 <<= 1;
3595 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt;
3596 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt;
3597 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca;
3598 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec;
3599 log.u_bbr.flex8 = reas;
3600 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
3601 log.u_bbr.delRate = rack_get_bw(rack);
3602 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt;
3603 log.u_bbr.cur_del_rate <<= 32;
3604 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt;
3605 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered;
3606 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff;
3607 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
3608 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt;
3609 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt;
3610 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts;
3611 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight;
3612 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
3613 log.u_bbr.rttProp = us_cts;
3614 log.u_bbr.rttProp <<= 32;
3615 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt;
3616 TCP_LOG_EVENTP(rack->rc_tp, NULL,
3617 &rack->rc_inp->inp_socket->so_rcv,
3618 &rack->rc_inp->inp_socket->so_snd,
3619 BBR_LOG_RTT_SHRINKS, 0,
3620 0, &log, false, &rack->r_ctl.act_rcv_time);
3625 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt)
3629 bwdp = rack_get_bw(rack);
3630 bwdp *= (uint64_t)rtt;
3631 bwdp /= (uint64_t)HPTS_USEC_IN_SEC;
3632 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz);
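/*
 * Example with assumed values: bw = 1,448,000 B/s and rtt = 10,000
 * usec give a bwdp of 14,480 bytes, i.e. a probe-rtt target flight
 * of 10 segments at segsiz = 1448, subject to the
 * rack_timely_min_segs floor below.
 */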
3633 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) {
3635 * A window protocol must be able to have 4 packets
3636 * outstanding as the floor in order to function
3637 * (especially considering delayed ack :D).
3639 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs);
3644 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts)
3647 * ProbeRTT is a bit different in rack_pacing than in
3648 * BBR. It is like BBR in that it uses the lowering of
3649 * the RTT as a signal that we saw something new and
3650 * counts from there for how long to wait between probes. But it is
3651 * different in that it's quite simple. It does not
3652 * play with the cwnd and wait until we get down
3653 * to N segments outstanding and hold that for
3654 * 200ms. Instead it just sets the pacing reduction
3655 * rate to a set percentage (70 by default) and holds
3656 * that for a number of recent GP Srtt's.
3660 if (rack->rc_gp_dyn_mul == 0)
3663 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) {
3667 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) &&
3668 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) {
3670 * Stop the goodput now; the idea here is
3671 * that future measurements with in_probe_rtt
3672 * won't register if they are not greater, so
3673 * we want to get what info (if any) is available
3676 rack_do_goodput_measurement(rack->rc_tp, rack,
3677 rack->rc_tp->snd_una, __LINE__);
3679 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
3680 rack->r_ctl.rc_time_probertt_entered = us_cts;
3681 segsiz = min(ctf_fixed_maxseg(rack->rc_tp),
3682 rack->r_ctl.rc_pace_min_segs);
3683 rack->in_probe_rtt = 1;
3684 rack->measure_saw_probe_rtt = 1;
3685 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
3686 rack->r_ctl.rc_time_probertt_starts = 0;
3687 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt;
3688 if (rack_probertt_use_min_rtt_entry)
3689 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt));
3691 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt);
3692 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3693 __LINE__, RACK_RTTS_ENTERPROBE);
3697 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts)
3699 struct rack_sendmap *rsm;
3702 segsiz = min(ctf_fixed_maxseg(rack->rc_tp),
3703 rack->r_ctl.rc_pace_min_segs);
3704 rack->in_probe_rtt = 0;
3705 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) &&
3706 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) {
3708 * Stop the goodput now; the idea here is
3709 * that future measurements with in_probe_rtt
3710 * won't register if they are not greater, so
3711 * we want to get what info (if any) is available
3714 rack_do_goodput_measurement(rack->rc_tp, rack,
3715 rack->rc_tp->snd_una, __LINE__);
3716 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
3718 * We don't have enough data to make a measurement.
3719 * So lets just stop and start here after exiting
3720 * probe-rtt. We probably are not interested in
3721 * the results anyway.
3723 rack->rc_tp->t_flags &= ~TF_GPUTINPROG;
3726 * Measurements through the current snd_max are going
3727 * to be limited by the slower pacing rate.
3729 * We need to mark these as app-limited so we
3730 * don't collapse the b/w.
3732 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
3733 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
3734 if (rack->r_ctl.rc_app_limited_cnt == 0)
3735 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
3738 * Go out to the end app limited and mark
3739 * this new one as next and move the end_appl up
3742 if (rack->r_ctl.rc_end_appl)
3743 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start;
3744 rack->r_ctl.rc_end_appl = rsm;
3746 rsm->r_flags |= RACK_APP_LIMITED;
3747 rack->r_ctl.rc_app_limited_cnt++;
3750 * Now, we need to examine our pacing rate multipliers.
3751 * If it's under 100%, we need to kick it back up to
3752 * 100%. We also don't let it be over our "max" above
3753 * the actual rate i.e. 100% + rack_clamp_atexit_prtt.
3754 * Note setting clamp_atexit_prtt to 0 has the effect
3755 * of setting CA/SS to 100% always at exit (which is
3756 * the default behavior).
3758 if (rack_probertt_clear_is) {
3759 rack->rc_gp_incr = 0;
3760 rack->rc_gp_bwred = 0;
3761 rack->rc_gp_timely_inc_cnt = 0;
3762 rack->rc_gp_timely_dec_cnt = 0;
3764 /* Do we do any clamping at exit? */
3765 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) {
3766 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp;
3767 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp;
3769 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) {
3770 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt;
3771 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt;
3774 * Lets set rtt_diff to 0, so that we will get a "boost"
3777 rack->r_ctl.rc_rtt_diff = 0;
3779 /* Clear all flags so we start fresh */
3780 rack->rc_tp->t_bytes_acked = 0;
3781 rack->rc_tp->ccv->flags &= ~CCF_ABC_SENTAWND;
3783 * If configured to, set the cwnd and ssthresh to
3786 if (rack_probe_rtt_sets_cwnd) {
3790 /* Set ssthresh so we get into CA once we hit our target */
3791 if (rack_probertt_use_min_rtt_exit == 1) {
3792 /* Set to min rtt */
3793 rack_set_prtt_target(rack, segsiz,
3794 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt));
3795 } else if (rack_probertt_use_min_rtt_exit == 2) {
3796 /* Set to current gp rtt */
3797 rack_set_prtt_target(rack, segsiz,
3798 rack->r_ctl.rc_gp_srtt);
3799 } else if (rack_probertt_use_min_rtt_exit == 3) {
3800 /* Set to entry gp rtt */
3801 rack_set_prtt_target(rack, segsiz,
3802 rack->r_ctl.rc_entry_gp_rtt);
3807 sum = rack->r_ctl.rc_entry_gp_rtt;
3809 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt));
3812 * A highly buffered path needs
3813 * cwnd space for timely to work.
3814 * Lets set things up as if
3815 * we are heading back here again.
3817 setval = rack->r_ctl.rc_entry_gp_rtt;
3818 } else if (sum >= 15) {
3820 * Lets take the smaller of the
3821 * two since we are just somewhat
3824 setval = rack->r_ctl.rc_gp_srtt;
3825 if (setval > rack->r_ctl.rc_entry_gp_rtt)
3826 setval = rack->r_ctl.rc_entry_gp_rtt;
3829 * Here we are not highly buffered
3830 * and should pick the min we can to
3831 * keep from causing loss.
3833 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
3835 rack_set_prtt_target(rack, segsiz,
3838 if (rack_probe_rtt_sets_cwnd > 1) {
3839 /* There is a percentage here to boost */
3840 ebdp = rack->r_ctl.rc_target_probertt_flight;
3841 ebdp *= rack_probe_rtt_sets_cwnd;
3843 setto = rack->r_ctl.rc_target_probertt_flight + ebdp;
3845 setto = rack->r_ctl.rc_target_probertt_flight;
3846 rack->rc_tp->snd_cwnd = roundup(setto, segsiz);
3847 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) {
3849 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs;
3851 /* If we set in the cwnd also set the ssthresh point so we are in CA */
3852 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1);
3854 rack_log_rtt_shrinks(rack, us_cts,
3855 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3856 __LINE__, RACK_RTTS_EXITPROBE);
3857 /* Clear times last so log has all the info */
3858 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max;
3859 rack->r_ctl.rc_time_probertt_entered = us_cts;
3860 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
3861 rack->r_ctl.rc_time_of_last_probertt = us_cts;
3865 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts)
3867 /* Check in on probe-rtt */
3868 if (rack->rc_gp_filled == 0) {
3869 /* We do not do p-rtt unless we have gp measurements */
3872 if (rack->in_probe_rtt) {
3873 uint64_t no_overflow;
3874 uint32_t endtime, must_stay;
3876 if (rack->r_ctl.rc_went_idle_time &&
3877 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) {
3879 * We went idle during prtt, just exit now.
3881 rack_exit_probertt(rack, us_cts);
3882 } else if (rack_probe_rtt_safety_val &&
3883 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) &&
3884 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) {
3886 * Probe RTT safety value triggered!
3888 rack_log_rtt_shrinks(rack, us_cts,
3889 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3890 __LINE__, RACK_RTTS_SAFETY);
3891 rack_exit_probertt(rack, us_cts);
3893 /* Calculate the max we will wait */
3894 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait);
3895 if (rack->rc_highly_buffered)
3896 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp);
3897 /* Calculate the min we must wait */
3898 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain);
3899 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) &&
3900 TSTMP_LT(us_cts, endtime)) {
3902 /* Do we lower more? */
3904 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered))
3905 calc = us_cts - rack->r_ctl.rc_time_probertt_entered;
3908 calc /= max(rack->r_ctl.rc_gp_srtt, 1);
3911 calc *= rack_per_of_gp_probertt_reduce;
3912 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc;
3914 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh)
3915 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh;
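/*
 * Illustrative numbers (assumed settings): if rack_per_of_gp_probertt
 * is 75, the reduce step is 5 per gp_srtt and 3 gp_srtt's have
 * elapsed, the pacing percentage drops to 60; it is never allowed
 * below rack_per_of_gp_lowthresh.
 */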
3917 /* We must reach target or the time set */
3920 if (rack->r_ctl.rc_time_probertt_starts == 0) {
3921 if ((TSTMP_LT(us_cts, must_stay) &&
3922 rack->rc_highly_buffered) ||
3923 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) >
3924 rack->r_ctl.rc_target_probertt_flight)) {
3925 /* We are not past the must_stay time */
3928 rack_log_rtt_shrinks(rack, us_cts,
3929 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
3930 __LINE__, RACK_RTTS_REACHTARGET);
3931 rack->r_ctl.rc_time_probertt_starts = us_cts;
3932 if (rack->r_ctl.rc_time_probertt_starts == 0)
3933 rack->r_ctl.rc_time_probertt_starts = 1;
3934 /* Restore back to our rate we want to pace at in prtt */
3935 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
3938 * Setup our end time, some number of gp_srtts plus 200ms.
3940 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt *
3941 (uint64_t)rack_probertt_gpsrtt_cnt_mul);
3942 if (rack_probertt_gpsrtt_cnt_div)
3943 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div);
3946 endtime += rack_min_probertt_hold;
3947 endtime += rack->r_ctl.rc_time_probertt_starts;
3948 if (TSTMP_GEQ(us_cts, endtime)) {
3949 /* yes, exit probertt */
3950 rack_exit_probertt(rack, us_cts);
3953 } else if ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt) {
3954 /* Go into probertt, it's been too long since we went lower */
3955 rack_enter_probertt(rack, us_cts);
3960 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est,
3961 uint32_t rtt, int32_t rtt_diff)
3963 uint64_t cur_bw, up_bnd, low_bnd, subfr;
3966 if ((rack->rc_gp_dyn_mul == 0) ||
3967 (rack->use_fixed_rate) ||
3968 (rack->in_probe_rtt) ||
3969 (rack->rc_always_pace == 0)) {
3970 /* No dynamic GP multiplier in play */
3973 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start;
3974 cur_bw = rack_get_bw(rack);
3975 /* Calculate our up and down range */
3976 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up;
3978 up_bnd += rack->r_ctl.last_gp_comp_bw;
3980 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down;
3982 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr;
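/*
 * Example (assumed values): last_gp_comp_bw = 1,000,000 B/s with
 * rack_gp_per_bw_mul_up = 2 (%) and rack_gp_per_bw_mul_down = 4 (%)
 * gives up_bnd = 1,020,000 B/s and low_bnd = 960,000 B/s; estimates
 * inside that band are judged purely by timely below.
 */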
3983 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) {
3985 * This is the case where our RTT is above
3986 * the max target and we have been configured
3987 * to just do timely (no bonus-up stuff) in that case.
3989 * There are two configurations: set to 1, and we
3990 * just do timely if we are over our max. If it's
3991 * set above 1 then we slam the multipliers down
3992 * to 100 and then decrement per timely.
3994 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
3996 if (rack->r_ctl.rc_no_push_at_mrtt > 1)
3997 rack_validate_multipliers_at_or_below_100(rack);
3998 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
3999 } else if ((last_bw_est < low_bnd) && !losses) {
4001 * We are decreasing; this is a bit complicated. This
4002 * means we are losing ground. This could be
4003 * because another flow entered and we are competing
4004 * for b/w with it. This will push the RTT up which
4005 * makes timely unusable unless we want to get shoved
4006 * into a corner and just be backed off (the age
4007 * old problem with delay based CC).
4009 * On the other hand if it was a route change we
4010 * would like to stay somewhat contained and not
4011 * blow out the buffers.
4013 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
4015 rack->r_ctl.last_gp_comp_bw = cur_bw;
4016 if (rack->rc_gp_bwred == 0) {
4017 /* Go into reduction counting */
4018 rack->rc_gp_bwred = 1;
4019 rack->rc_gp_timely_dec_cnt = 0;
4021 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) ||
4022 (timely_says == 0)) {
4024 * Push another time with a faster pacing
4025 * to try to gain back (we include override to
4026 * get a full raise factor).
4028 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) ||
4029 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) ||
4030 (timely_says == 0) ||
4031 (rack_down_raise_thresh == 0)) {
4033 * Do an override up in b/w if we were
4034 * below the threshold or if the threshold
4035 * is zero we always do the raise.
4037 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1);
4039 /* Log it stays the same */
4040 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0,
4043 rack->rc_gp_timely_dec_cnt++;
4044 /* We are not incrementing really no-count */
4045 rack->rc_gp_incr = 0;
4046 rack->rc_gp_timely_inc_cnt = 0;
4049 * Lets just use the RTT
4050 * information and give up
4055 } else if ((timely_says != 2) &&
4057 (last_bw_est > up_bnd)) {
4059 * We are increasing b/w lets keep going, updating
4060 * our b/w and ignoring any timely input, unless
4061 * of course we are at our max raise (if there is one).
4064 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
4066 rack->r_ctl.last_gp_comp_bw = cur_bw;
4067 if (rack->rc_gp_saw_ss &&
4068 rack_per_upper_bound_ss &&
4069 (rack->r_ctl.rack_per_of_gp_ss == rack_per_upper_bound_ss)) {
4071 * In cases where we can't go higher
4072 * we should just use timely.
4076 if (rack->rc_gp_saw_ca &&
4077 rack_per_upper_bound_ca &&
4078 (rack->r_ctl.rack_per_of_gp_ca == rack_per_upper_bound_ca)) {
4080 * In cases where we can't go higher
4081 * we should just use timely.
4085 rack->rc_gp_bwred = 0;
4086 rack->rc_gp_timely_dec_cnt = 0;
4087 /* You get a set number of pushes if timely is trying to reduce */
4088 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) {
4089 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
4091 /* Log it stays the same */
4092 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0,
4098 * We are staying between the lower and upper range bounds
4099 * so use timely to decide.
4101 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
4105 rack->rc_gp_incr = 0;
4106 rack->rc_gp_timely_inc_cnt = 0;
4107 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) &&
4109 (last_bw_est < low_bnd)) {
4110 /* We are losing ground */
4111 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
4112 rack->rc_gp_timely_dec_cnt++;
4113 /* We are not incrementing really no-count */
4114 rack->rc_gp_incr = 0;
4115 rack->rc_gp_timely_inc_cnt = 0;
4117 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
4119 rack->rc_gp_bwred = 0;
4120 rack->rc_gp_timely_dec_cnt = 0;
4121 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
4127 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt)
4129 int32_t timely_says;
4130 uint64_t log_mult, log_rtt_a_diff;
4132 log_rtt_a_diff = rtt;
4133 log_rtt_a_diff <<= 32;
4134 log_rtt_a_diff |= (uint32_t)rtt_diff;
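/*
 * Sketch of the decision below (assumed defaults): with a filtered
 * min rtt of 10,000 usec, rack_gp_rtt_maxmul = 3 means we cut the
 * multipliers once the gp srtt reaches 30,000 usec; with
 * rack_gp_rtt_minmul = 1 and rack_gp_rtt_mindiv = 4 we raise them
 * while it stays at or below 12,500 usec; in between, the sign of
 * rc_rtt_diff decides.
 */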
4135 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) *
4136 rack_gp_rtt_maxmul)) {
4137 /* Reduce the b/w multiplier */
4139 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul;
4141 log_mult |= prev_rtt;
4142 rack_log_timely(rack, timely_says, log_mult,
4143 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
4144 log_rtt_a_diff, __LINE__, 4);
4145 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
4146 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
4147 max(rack_gp_rtt_mindiv , 1)))) {
4148 /* Increase the b/w multiplier */
4149 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
4150 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
4151 max(rack_gp_rtt_mindiv , 1));
4153 log_mult |= prev_rtt;
4155 rack_log_timely(rack, timely_says, log_mult ,
4156 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
4157 log_rtt_a_diff, __LINE__, 5);
4160 * Use a gradient to decide; the timely gradient
4162 * grad = rc_rtt_diff / min_rtt;
4164 * anything below or equal to 0 will be
4165 * an increase indication. Anything above
4166 * zero is a decrease. Note we take care
4167 * of the actual gradient calculation
4168 * in the reduction (it's not needed for
4171 log_mult = prev_rtt;
4172 if (rtt_diff <= 0) {
4174 * Rttdiff is less than zero, increase the
4175 * b/w multiplier (it's 0 or negative)
4178 rack_log_timely(rack, timely_says, log_mult,
4179 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6);
4181 /* Reduce the b/w multiplier */
4183 rack_log_timely(rack, timely_says, log_mult,
4184 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7);
4187 return (timely_says);
4191 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
4192 tcp_seq th_ack, int line)
4194 uint64_t tim, bytes_ps, ltim, stim, utim;
4195 uint32_t segsiz, bytes, reqbytes, us_cts;
4196 int32_t gput, new_rtt_diff, timely_says;
4197 uint64_t resid_bw, subpart = 0, addpart = 0, srtt;
4200 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
4201 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
4202 if (TSTMP_GEQ(us_cts, tp->gput_ts))
4203 tim = us_cts - tp->gput_ts;
4207 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts)
4208 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts;
4212 * Use the larger of the send time or ack time. This prevents us
4213 * from being influenced by ack artifacts to come up with too
4214 * high of a measurement. Note that since we are spanning over many more
4215 * bytes in most of our measurements hopefully that is less likely to
4221 utim = max(stim, 1);
4222 /* Lets get a msec time ltim too for the old stuff */
4223 ltim = max(1, (utim / HPTS_USEC_IN_MSEC));
4224 gput = (((uint64_t) (th_ack - tp->gput_seq)) << 3) / ltim;
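/* With ltim in msec this gput works out to bits per msec; it is only used for the legacy stats path below. */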
4225 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz));
4226 if ((tim == 0) && (stim == 0)) {
4228 * Invalid measurement time, maybe
4229 * all on one ack/one send?
4233 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4234 0, 0, 0, 10, __LINE__, NULL);
4235 goto skip_measurement;
4237 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) {
4238 /* We never made a us_rtt measurement? */
4241 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4242 0, 0, 0, 10, __LINE__, NULL);
4243 goto skip_measurement;
4246 * Calculate the maximum possible b/w this connection
4247 * could have. We base our calculation on the lowest
4248 * rtt we have seen during the measurement and the
4249 * largest rwnd the client has given us in that time. This
4250 * forms a BDP that is the maximum that we could ever
4251 * get to the client. Anything larger is not valid.
4253 * I originally had code here that rejected measurements
4254 * where the time was less than 1/2 the latest us_rtt.
4255 * But after thinking on that I realized it's wrong since
4256 * say you had a 150Mbps or even 1Gbps link, and you
4257 * were a long way away.. example I am in Europe (100ms rtt)
4258 * talking to my 1Gbps link in S.C. Now measuring say 150,000
4259 * bytes my time would be 1.2ms, and yet that check would say
4260 * the measurement was invalid since the time was < 50ms. The
4261 * same thing is true for 150Mb (8ms of time).
4263 * A better way I realized is to look at what the maximum
4264 * the connection could possibly do. This is gated on
4265 * the lowest RTT we have seen and the highest rwnd.
4266 * We should in theory never exceed that; if we are,
4267 * then something on the path is storing up packets
4268 * and then feeding them all at once to our endpoint
4269 * messing up our measurement.
4271 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd;
4272 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC;
4273 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt;
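/*
 * For example (assumed values): a largest observed rwnd of 1,000,000
 * bytes and a lowest measured rtt of 20,000 usec cap any accepted
 * measurement at 50,000,000 bytes/sec (about 400 Mbps).
 */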
4274 if (SEQ_LT(th_ack, tp->gput_seq)) {
4275 /* No measurement can be made */
4278 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4279 0, 0, 0, 10, __LINE__, NULL);
4280 goto skip_measurement;
4282 bytes = (th_ack - tp->gput_seq);
4283 bytes_ps = (uint64_t)bytes;
4285 * Don't measure a b/w for pacing unless we have gotten at least
4286 * an initial window's worth of data in this measurement interval.
4288 * Small numbers of bytes get badly influenced by delayed ack and
4289 * other artifacts. Note we take the initial window or our
4290 * defined minimum GP (defaulting to 10 which hopefully is the
4293 if (rack->rc_gp_filled == 0) {
4295 * The initial estimate is special. We
4296 * have blasted out an IW worth of packets
4297 * without a real valid ack ts result. We
4298 * then setup the app_limited_needs_set flag,
4299 * this should get the first ack in (probably 2
4300 * MSS worth) to be recorded as the timestamp.
4301 * We thus allow a smaller number of bytes i.e.
4304 reqbytes -= (2 * segsiz);
4305 /* Also lets fill previous for our first measurement to be neutral */
4306 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt;
4308 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) {
4309 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4310 rack->r_ctl.rc_app_limited_cnt,
4311 0, 0, 10, __LINE__, NULL);
4312 goto skip_measurement;
4315 * We now need to calculate the Timely like status so
4316 * we can update (possibly) the b/w multipliers.
4318 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt;
4319 if (rack->rc_gp_filled == 0) {
4320 /* No previous reading */
4321 rack->r_ctl.rc_rtt_diff = new_rtt_diff;
4323 if (rack->measure_saw_probe_rtt == 0) {
4325 * We don't want a probertt to be counted
4326 * since it will incorrectly be negative. We
4327 * expect to be reducing the RTT when we
4328 * pace at a slower rate.
4330 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8);
4331 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8);
4334 timely_says = rack_make_timely_judgement(rack,
4335 rack->r_ctl.rc_gp_srtt,
4336 rack->r_ctl.rc_rtt_diff,
4337 rack->r_ctl.rc_prev_gp_srtt
4339 bytes_ps *= HPTS_USEC_IN_SEC;
4341 if (bytes_ps > rack->r_ctl.last_max_bw) {
4343 * Something on the path is playing games,
4344 * since this b/w is not possible based
4345 * on our BDP (highest rwnd and lowest rtt
4346 * we saw in the measurement window).
4348 * Another option here would be to
4349 * instead skip the measurement.
4351 rack_log_pacing_delay_calc(rack, bytes, reqbytes,
4352 bytes_ps, rack->r_ctl.last_max_bw, 0,
4353 11, __LINE__, NULL);
4354 bytes_ps = rack->r_ctl.last_max_bw;
4356 /* We store gp for b/w in bytes per second */
4357 if (rack->rc_gp_filled == 0) {
4358 /* Initial measurement */
4360 rack->r_ctl.gp_bw = bytes_ps;
4361 rack->rc_gp_filled = 1;
4362 rack->r_ctl.num_measurements = 1;
4363 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
4365 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
4366 rack->r_ctl.rc_app_limited_cnt,
4367 0, 0, 10, __LINE__, NULL);
4369 if (rack->rc_inp->inp_in_hpts &&
4370 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
4372 * Ok we can't trust the pacer in this case
4373 * where we transition from un-paced to paced.
4374 * Or for that matter when the burst mitigation
4375 * was making a wild guess and got it wrong.
4376 * Stop the pacer and clear up all the aggregate
4379 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
4380 rack->r_ctl.rc_hpts_flags = 0;
4381 rack->r_ctl.rc_last_output_to = 0;
4384 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) {
4385 /* Still a small number; run an average */
4386 rack->r_ctl.gp_bw += bytes_ps;
4387 addpart = rack->r_ctl.num_measurements;
4388 rack->r_ctl.num_measurements++;
4389 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) {
4390 /* We have collected enough to move forward */
4391 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements;
4396 * We want to take 1/wma of the goodput and add it to 7/8th
4397 * of the old value weighted by the srtt. So if your measurement
4398 * period is say 2 SRTT's long you would get 1/4 as the
4399 * value, if it was like 1/2 SRTT then you would get 1/16th.
4401 * But we must be careful not to take too much i.e. if the
4402 * srtt is say 20ms and the measurement is taken over
4403 * 400ms our weight would be 400/20 i.e. 20. On the
4404 * other hand if we get a measurement over 1ms with a
4405 * 10ms rtt we only want to take a much smaller portion.
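* Worked example (assuming the default 1/8 weight): gp_bw of
* 8,000,000 B/s and a new 10,000,000 B/s measurement spanning exactly
* one srtt subtracts 1,000,000 and adds 1,250,000, leaving gp_bw at
* 8,250,000 B/s.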
4407 if (rack->r_ctl.num_measurements < 0xff) {
4408 rack->r_ctl.num_measurements++;
4410 srtt = (uint64_t)tp->t_srtt;
4413 * Strange why did t_srtt go back to zero?
4415 if (rack->r_ctl.rc_rack_min_rtt)
4416 srtt = rack->r_ctl.rc_rack_min_rtt;
4418 srtt = HPTS_USEC_IN_MSEC;
4421 * XXXrrs: Note for reviewers, in playing with
4422 * dynamic pacing I discovered this GP calculation
4423 * as done originally leads to some undesired results.
4424 * Basically you can get longer measurements contributing
4425 * too much to the WMA. Thus I changed it so that, if you are doing
4426 * dynamic adjustments, we only do the apportioned adjustment
4427 * if we have a very small (time wise) measurement. Longer
4428 * measurements just get their weight (defaulting to 1/8)
4429 * added to the WMA. We may want to think about changing
4430 * this to always do that for both sides i.e. dynamic
4431 * and non-dynamic... but considering lots of folks
4432 * were playing with this I did not want to change the
4433 * calculation per se without your thoughts.. Lawerence?
4436 if (rack->rc_gp_dyn_mul == 0) {
4437 subpart = rack->r_ctl.gp_bw * utim;
4438 subpart /= (srtt * 8);
4439 if (subpart < (rack->r_ctl.gp_bw / 2)) {
4441 * The b/w update takes no more
4442 * away than 1/2 of our running total
4445 addpart = bytes_ps * utim;
4446 addpart /= (srtt * 8);
4449 * Don't allow a single measurement
4450 * to account for more than 1/2 of the
4451 * WMA. This could happen on a retransmission
4452 * where utim becomes huge compared to
4453 * srtt (multiple retransmissions when using
4454 * the sending rate which factors in all the
4455 * transmissions from the first one).
4457 subpart = rack->r_ctl.gp_bw / 2;
4458 addpart = bytes_ps / 2;
4460 resid_bw = rack->r_ctl.gp_bw - subpart;
4461 rack->r_ctl.gp_bw = resid_bw + addpart;
4464 if ((utim / srtt) <= 1) {
4466 * The b/w update was over a small period
4467 * of time. The idea here is to prevent a small
4468 * measurement time period from counting
4469 * too much. So we scale it based on the
4470 * time so it attributes less than 1/rack_wma_divisor
4471 * of its measurement.
4473 subpart = rack->r_ctl.gp_bw * utim;
4474 subpart /= (srtt * rack_wma_divisor);
4475 addpart = bytes_ps * utim;
4476 addpart /= (srtt * rack_wma_divisor);
4479 * The scaled measurement was long
4480 * enough so lets just add in the
4481 * portion of the measurement i.e. 1/rack_wma_divisor
4483 subpart = rack->r_ctl.gp_bw / rack_wma_divisor;
4484 addpart = bytes_ps / rack_wma_divisor;
4486 if ((rack->measure_saw_probe_rtt == 0) ||
4487 (bytes_ps > rack->r_ctl.gp_bw)) {
4489 * For probe-rtt we only add it in
4490 * if it's larger; all others we just
4494 resid_bw = rack->r_ctl.gp_bw - subpart;
4495 rack->r_ctl.gp_bw = resid_bw + addpart;
4499 if ((rack->gp_ready == 0) &&
4500 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) {
4501 /* We have enough measurements now */
4503 rack_set_cc_pacing(rack);
4504 if (rack->defer_options)
4505 rack_apply_deferred_options(rack);
4507 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim,
4508 rack_get_bw(rack), 22, did_add, NULL);
4509 /* We do not update any multipliers if we are in or have seen a probe-rtt */
4510 if ((rack->measure_saw_probe_rtt == 0) && rack->rc_gp_rtt_set)
4511 rack_update_multiplier(rack, timely_says, bytes_ps,
4512 rack->r_ctl.rc_gp_srtt,
4513 rack->r_ctl.rc_rtt_diff);
4514 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim,
4515 rack_get_bw(rack), 3, line, NULL);
4516 /* reset the gp srtt and setup the new prev */
4517 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt;
4518 /* Record the lost count for the next measurement */
4519 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count;
4521 * We restart our diffs based on the gpsrtt in the
4522 * measurement window.
4524 rack->rc_gp_rtt_set = 0;
4525 rack->rc_gp_saw_rec = 0;
4526 rack->rc_gp_saw_ca = 0;
4527 rack->rc_gp_saw_ss = 0;
4528 rack->rc_dragged_bottom = 0;
4532 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT,
4535 * XXXLAS: This is a temporary hack, and should be
4536 * chained off VOI_TCP_GPUT when stats(9) grows an
4537 * API to deal with chained VOIs.
4539 if (tp->t_stats_gput_prev > 0)
4540 stats_voi_update_abs_s32(tp->t_stats,
4542 ((gput - tp->t_stats_gput_prev) * 100) /
4543 tp->t_stats_gput_prev);
4545 tp->t_flags &= ~TF_GPUTINPROG;
4546 tp->t_stats_gput_prev = gput;
4548 * Now, are we app limited and is there space from where we
4549 * were to where we want to go?
4551 * We don't do the other case i.e. non-applimited here since
4552 * the next send will trigger us picking up the missing data.
4554 if (rack->r_ctl.rc_first_appl &&
4555 TCPS_HAVEESTABLISHED(tp->t_state) &&
4556 rack->r_ctl.rc_app_limited_cnt &&
4557 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) &&
4558 ((rack->r_ctl.rc_first_appl->r_start - th_ack) >
4559 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) {
4561 * Yep there is enough outstanding to make a measurement here.
4563 struct rack_sendmap *rsm, fe;
4565 tp->t_flags |= TF_GPUTINPROG;
4566 rack->r_ctl.rc_gp_lowrtt = 0xffffffff;
4567 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
4568 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
4569 rack->app_limited_needs_set = 0;
4570 tp->gput_seq = th_ack;
4571 if (rack->in_probe_rtt)
4572 rack->measure_saw_probe_rtt = 1;
4573 else if ((rack->measure_saw_probe_rtt) &&
4574 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
4575 rack->measure_saw_probe_rtt = 0;
4576 if ((rack->r_ctl.rc_first_appl->r_start - th_ack) >= rack_get_measure_window(tp, rack)) {
4577 /* There is a full window to gain info from */
4578 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
4580 /* We can only measure up to the applimited point */
4581 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_start - th_ack);
4584 * Now we need to find the timestamp of the send at tp->gput_seq
4585 * for the send based measurement.
4587 fe.r_start = tp->gput_seq;
4588 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
4590 /* Ok send-based limit is set */
4591 if (SEQ_LT(rsm->r_start, tp->gput_seq)) {
4593 * Move back to include the earlier part
4594 * so our ack time lines up right (this may
4595 * make an overlapping measurement but that's
4598 tp->gput_seq = rsm->r_start;
4600 if (rsm->r_flags & RACK_ACKED)
4601 tp->gput_ts = (uint32_t)rsm->r_ack_arrival;
4603 rack->app_limited_needs_set = 1;
4604 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
4607 * If we don't find the rsm due to some
4608 * send-limit, set the current time, which
4609 * basically disables the send-limit.
4614 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
4616 rack_log_pacing_delay_calc(rack,
4621 rack->r_ctl.rc_app_limited_cnt,
4628 * CC wrapper hook functions
4631 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs,
4632 uint16_t type, int32_t recovery)
4634 uint32_t prior_cwnd, acked;
4635 struct tcp_log_buffer *lgb = NULL;
4636 uint8_t labc_to_use;
4638 INP_WLOCK_ASSERT(tp->t_inpcb);
4639 tp->ccv->nsegs = nsegs;
4640 acked = tp->ccv->bytes_this_ack = (th_ack - tp->snd_una);
4641 if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) {
4644 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp);
4645 if (tp->ccv->bytes_this_ack > max) {
4646 tp->ccv->bytes_this_ack = max;
4650 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF,
4651 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd);
4653 if ((tp->t_flags & TF_GPUTINPROG) &&
4654 rack_enough_for_measurement(tp, rack, th_ack)) {
4655 /* Measure the Goodput */
4656 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__);
4657 #ifdef NETFLIX_PEAKRATE
4658 if ((type == CC_ACK) &&
4659 (tp->t_maxpeakrate)) {
4661 * We update t_peakrate_thr. This gives us roughly
4662 * one update per round trip time. Note
4663 * it will only be used if pace_always is off i.e
4664 * we don't do this for paced flows.
4666 rack_update_peakrate_thr(tp);
4670 /* Which way are we limited; if not cwnd limited, no advance in CA */
4671 if (tp->snd_cwnd <= tp->snd_wnd)
4672 tp->ccv->flags |= CCF_CWND_LIMITED;
4674 tp->ccv->flags &= ~CCF_CWND_LIMITED;
4675 if (tp->snd_cwnd > tp->snd_ssthresh) {
4676 tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
4677 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp));
4678 /* For the setting of a window past use the actual scwnd we are using */
4679 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) {
4680 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use;
4681 tp->ccv->flags |= CCF_ABC_SENTAWND;
4684 tp->ccv->flags &= ~CCF_ABC_SENTAWND;
4685 tp->t_bytes_acked = 0;
4687 prior_cwnd = tp->snd_cwnd;
4688 if ((recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec ||
4689 (rack_client_low_buf && (rack->client_bufferlvl < rack_client_low_buf)))
4690 labc_to_use = rack->rc_labc;
4692 labc_to_use = rack_max_abc_post_recovery;
4693 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
4694 union tcp_log_stackspecific log;
4697 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
4698 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
4699 log.u_bbr.flex1 = th_ack;
4700 log.u_bbr.flex2 = tp->ccv->flags;
4701 log.u_bbr.flex3 = tp->ccv->bytes_this_ack;
4702 log.u_bbr.flex4 = tp->ccv->nsegs;
4703 log.u_bbr.flex5 = labc_to_use;
4704 log.u_bbr.flex6 = prior_cwnd;
4705 log.u_bbr.flex7 = V_tcp_do_newsack;
4706 log.u_bbr.flex8 = 1;
4707 lgb = tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
4708 0, &log, false, NULL, NULL, 0, &tv);
4710 if (CC_ALGO(tp)->ack_received != NULL) {
4711 /* XXXLAS: Find a way to live without this */
4712 tp->ccv->curack = th_ack;
4713 tp->ccv->labc = labc_to_use;
4714 tp->ccv->flags |= CCF_USE_LOCAL_ABC;
4715 CC_ALGO(tp)->ack_received(tp->ccv, type);
4718 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd;
4720 if (rack->r_must_retran) {
4721 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) {
4723 * We now are beyond the rxt point so lets disable
4726 rack->r_ctl.rc_out_at_rto = 0;
4727 rack->r_must_retran = 0;
4728 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) {
4730 * Only decrement the rc_out_at_rto if the cwnd advances
4731 * at least a whole segment. Otherwise next time the peer
4732 * acks, we won't be able to send. This generally happens
4733 * when we are in Congestion Avoidance.
4735 if (acked <= rack->r_ctl.rc_out_at_rto){
4736 rack->r_ctl.rc_out_at_rto -= acked;
4738 rack->r_ctl.rc_out_at_rto = 0;
4743 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use);
4745 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) {
4746 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use;
4748 #ifdef NETFLIX_PEAKRATE
4749 /* we enforce max peak rate if it is set and we are not pacing */
4750 if ((rack->rc_always_pace == 0) &&
4751 tp->t_peakrate_thr &&
4752 (tp->snd_cwnd > tp->t_peakrate_thr)) {
4753 tp->snd_cwnd = tp->t_peakrate_thr;
4759 tcp_rack_partialack(struct tcpcb *tp)
4761 struct tcp_rack *rack;
4763 rack = (struct tcp_rack *)tp->t_fb_ptr;
4764 INP_WLOCK_ASSERT(tp->t_inpcb);
4766 * If we are doing PRR and have enough
4767 * room to send <or> we are pacing and prr
4768 * is disabled we will want to see if we
4769 * can send data (by setting r_wanted_output to
4772 if ((rack->r_ctl.rc_prr_sndcnt > 0) ||
4774 rack->r_wanted_output = 1;
4778 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack)
4780 struct tcp_rack *rack;
4783 orig_cwnd = tp->snd_cwnd;
4784 INP_WLOCK_ASSERT(tp->t_inpcb);
4785 rack = (struct tcp_rack *)tp->t_fb_ptr;
4786 /* only alert CC if we alerted when we entered */
4787 if (CC_ALGO(tp)->post_recovery != NULL) {
4788 tp->ccv->curack = th_ack;
4789 CC_ALGO(tp)->post_recovery(tp->ccv);
4790 if (tp->snd_cwnd < tp->snd_ssthresh) {
4792 * Rack has burst control and pacing
4793 * so lets not set this any lower than
4794 * snd_ssthresh per RFC-6582 (option 2).
4796 tp->snd_cwnd = tp->snd_ssthresh;
4799 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
4800 union tcp_log_stackspecific log;
4803 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
4804 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
4805 log.u_bbr.flex1 = th_ack;
4806 log.u_bbr.flex2 = tp->ccv->flags;
4807 log.u_bbr.flex3 = tp->ccv->bytes_this_ack;
4808 log.u_bbr.flex4 = tp->ccv->nsegs;
4809 log.u_bbr.flex5 = V_tcp_abc_l_var;
4810 log.u_bbr.flex6 = orig_cwnd;
4811 log.u_bbr.flex7 = V_tcp_do_newsack;
4812 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
4813 log.u_bbr.flex8 = 2;
4814 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
4815 0, &log, false, NULL, NULL, 0, &tv);
4817 if ((rack->rack_no_prr == 0) &&
4818 (rack->no_prr_addback == 0) &&
4819 (rack->r_ctl.rc_prr_sndcnt > 0)) {
4821 * Suck the next prr cnt back into cwnd, but
4822 * only do that if we are not application limited.
4824 if (ctf_outstanding(tp) <= sbavail(&(tp->t_inpcb->inp_socket->so_snd))) {
4826 * We are allowed to add back to the cwnd the amount we did
4828 * a) no_prr_addback is off.
4829 * b) we are not app limited
4830 * c) we are doing prr
4832 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none).
4834 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax),
4835 rack->r_ctl.rc_prr_sndcnt);
4837 rack->r_ctl.rc_prr_sndcnt = 0;
4838 rack_log_to_prr(rack, 1, 0);
4840 rack_log_to_prr(rack, 14, orig_cwnd);
4841 tp->snd_recover = tp->snd_una;
4842 EXIT_RECOVERY(tp->t_flags);
4846 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack)
4848 struct tcp_rack *rack;
4849 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd;
4851 INP_WLOCK_ASSERT(tp->t_inpcb);
4853 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type);
4855 if (IN_RECOVERY(tp->t_flags) == 0) {
4856 in_rec_at_entry = 0;
4857 ssthresh_enter = tp->snd_ssthresh;
4858 cwnd_enter = tp->snd_cwnd;
4860 in_rec_at_entry = 1;
4861 rack = (struct tcp_rack *)tp->t_fb_ptr;
4864 tp->t_flags &= ~TF_WASFRECOVERY;
4865 tp->t_flags &= ~TF_WASCRECOVERY;
4866 if (!IN_FASTRECOVERY(tp->t_flags)) {
4867 rack->r_ctl.rc_prr_delivered = 0;
4868 rack->r_ctl.rc_prr_out = 0;
4869 if (rack->rack_no_prr == 0) {
4870 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
4871 rack_log_to_prr(rack, 2, in_rec_at_entry);
4873 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una;
4874 tp->snd_recover = tp->snd_max;
4875 if (tp->t_flags2 & TF2_ECN_PERMIT)
4876 tp->t_flags2 |= TF2_ECN_SND_CWR;
4880 if (!IN_CONGRECOVERY(tp->t_flags) ||
4882 * Allow ECN reaction on ACK to CWR, if
4883 * that data segment was also CE marked.
4885 SEQ_GEQ(ack, tp->snd_recover)) {
4886 EXIT_CONGRECOVERY(tp->t_flags);
4887 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd);
4888 tp->snd_recover = tp->snd_max + 1;
4889 if (tp->t_flags2 & TF2_ECN_PERMIT)
4890 tp->t_flags2 |= TF2_ECN_SND_CWR;
4895 tp->t_bytes_acked = 0;
4896 EXIT_RECOVERY(tp->t_flags);
4897 tp->snd_ssthresh = max(2, min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 /
4898 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp);
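/*
 * E.g. (illustrative): with snd_wnd >= cwnd_to_use = 100,000 bytes
 * and maxseg = 1448, ssthresh becomes 34 * 1448 = 49,232 bytes, i.e.
 * half the flight rounded down to whole segments and never below 2
 * segments.
 */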
4899 orig_cwnd = tp->snd_cwnd;
4900 tp->snd_cwnd = ctf_fixed_maxseg(tp);
4901 rack_log_to_prr(rack, 16, orig_cwnd);
4902 if (tp->t_flags2 & TF2_ECN_PERMIT)
4903 tp->t_flags2 |= TF2_ECN_SND_CWR;
4906 KMOD_TCPSTAT_INC(tcps_sndrexmitbad);
4907 /* RTO was unnecessary, so reset everything. */
4908 tp->snd_cwnd = tp->snd_cwnd_prev;
4909 tp->snd_ssthresh = tp->snd_ssthresh_prev;
4910 tp->snd_recover = tp->snd_recover_prev;
4911 if (tp->t_flags & TF_WASFRECOVERY) {
4912 ENTER_FASTRECOVERY(tp->t_flags);
4913 tp->t_flags &= ~TF_WASFRECOVERY;
4915 if (tp->t_flags & TF_WASCRECOVERY) {
4916 ENTER_CONGRECOVERY(tp->t_flags);
4917 tp->t_flags &= ~TF_WASCRECOVERY;
4919 tp->snd_nxt = tp->snd_max;
4920 tp->t_badrxtwin = 0;
4923 if ((CC_ALGO(tp)->cong_signal != NULL) &&
4925 tp->ccv->curack = ack;
4926 CC_ALGO(tp)->cong_signal(tp->ccv, type);
4928 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) {
4929 rack_log_to_prr(rack, 15, cwnd_enter);
4930 rack->r_ctl.dsack_byte_cnt = 0;
4931 rack->r_ctl.retran_during_recovery = 0;
4932 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter;
4933 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter;
4934 rack->r_ent_rec_ns = 1;
4939 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp)
4943 INP_WLOCK_ASSERT(tp->t_inpcb);
4945 #ifdef NETFLIX_STATS
4946 KMOD_TCPSTAT_INC(tcps_idle_restarts);
4947 if (tp->t_state == TCPS_ESTABLISHED)
4948 KMOD_TCPSTAT_INC(tcps_idle_estrestarts);
4950 if (CC_ALGO(tp)->after_idle != NULL)
4951 CC_ALGO(tp)->after_idle(tp->ccv);
4953 if (tp->snd_cwnd == 1)
4954 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */
4956 i_cwnd = rc_init_window(rack);
4959 * Being idle is no different than the initial window. If the cc
4960 * clamps it down below the initial window raise it to the initial
4963 if (tp->snd_cwnd < i_cwnd) {
4964 tp->snd_cwnd = i_cwnd;
4969 * Indicate whether this ack should be delayed. We can delay the ack if
4970 * following conditions are met:
4971 * - There is no delayed ack timer in progress.
4972 * - Our last ack wasn't a 0-sized window. We never want to delay
4973 * the ack that opens up a 0-sized window.
4974 * - LRO wasn't used for this segment. We make sure by checking that the
4975 * segment size is not larger than the MSS.
4976 * - Delayed acks are enabled or this is a half-synchronized T/TCP
4979 #define DELAY_ACK(tp, tlen) \
4980 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \
4981 ((tp->t_flags & TF_DELACK) == 0) && \
4982 (tlen <= tp->t_maxseg) && \
4983 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN)))
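/*
 * Illustration of DELAY_ACK (hypothetical values): a 1448-byte in-MSS
 * segment arriving with no DELACK pending, no zero-window ACK
 * outstanding and delayed ACKs enabled will be delayed; a 2896-byte
 * chunk handed up by LRO (tlen > t_maxseg) will not be delayed.
 */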
4985 static struct rack_sendmap *
4986 rack_find_lowest_rsm(struct tcp_rack *rack)
4988 struct rack_sendmap *rsm;
4991 * Walk the time-order transmitted list looking for an rsm that is
4992 * not acked. This will be the one that was sent the longest time
4993 * ago that is still outstanding.
4995 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
4996 if (rsm->r_flags & RACK_ACKED) {
5005 static struct rack_sendmap *
5006 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm)
5008 struct rack_sendmap *prsm;
5011 * Walk the sequence-ordered list backward until we arrive at
5012 * the highest seq not acked. In theory when this is called it
5013 * should be the last segment (which it was not).
5015 counter_u64_add(rack_find_high, 1);
5017 RB_FOREACH_REVERSE_FROM(prsm, rack_rb_tree_head, rsm) {
5018 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) {
5027 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts)
5033 * lro is the flag we use to determine if we have seen reordering.
5034 * If it gets set we have seen reordering. The reorder logic either
5035 * works in one of two ways:
5037 * If reorder-fade is configured, then we track the last time we saw
5038 * re-ordering occur. If we reach the point where enough time has
5039 * passed we no longer consider reordering to be occurring.
5041 * Or if reorder-fade is 0, then once we see reordering we consider
5042 * the connection to always be subject to reordering and just set lro
5045 * In the end if lro is non-zero we add the extra time for reordering.
5050 if (rack->r_ctl.rc_reorder_ts) {
5051 if (rack->r_ctl.rc_reorder_fade) {
5052 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) {
5053 lro = cts - rack->r_ctl.rc_reorder_ts;
5056 * No time has passed since the last
5057 * reorder, mark it as reordering.
5062 /* Negative time? */
5065 if (lro > rack->r_ctl.rc_reorder_fade) {
5066 /* Turn off reordering seen too */
5067 rack->r_ctl.rc_reorder_ts = 0;
5071 /* Reordering does not fade */
5077 thresh = srtt + rack->r_ctl.rc_pkt_delay;
5079 /* It must be set, if not you get 1/4 rtt */
5080 if (rack->r_ctl.rc_reorder_shift)
5081 thresh += (srtt >> rack->r_ctl.rc_reorder_shift);
5083 thresh += (srtt >> 2);
5087 /* We don't let the rack timeout be above a RTO */
5088 if (thresh > rack->rc_tp->t_rxtcur) {
5089 thresh = rack->rc_tp->t_rxtcur;
5091 /* And we don't want it above the RTO max either */
5092 if (thresh > rack_rto_max) {
5093 thresh = rack_rto_max;
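/*
 * Worked example (hypothetical numbers, all in microseconds): with
 * srtt = 40000, rc_pkt_delay = 0, reordering previously seen and
 * rc_reorder_shift = 3, thresh = 40000 + (40000 >> 3) = 45000; it is
 * then clamped so it never exceeds t_rxtcur or rack_rto_max.
 */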
5099 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack,
5100 struct rack_sendmap *rsm, uint32_t srtt)
5102 struct rack_sendmap *prsm;
5103 uint32_t thresh, len;
5108 if (rack->r_ctl.rc_tlp_threshold)
5109 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold);
5111 thresh = (srtt * 2);
5113 /* Get the previous sent packet, if any */
5114 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
5115 counter_u64_add(rack_enter_tlp_calc, 1);
5116 len = rsm->r_end - rsm->r_start;
5117 if (rack->rack_tlp_threshold_use == TLP_USE_ID) {
5118 /* Exactly like the ID */
5119 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) {
5120 uint32_t alt_thresh;
5122 * Compensate for delayed-ack with the d-ack time.
5124 counter_u64_add(rack_used_tlpmethod, 1);
5125 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
5126 if (alt_thresh > thresh)
5127 thresh = alt_thresh;
5129 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) {
5131 prsm = TAILQ_PREV(rsm, rack_head, r_tnext);
5132 if (prsm && (len <= segsiz)) {
5134 * Two packets outstanding, thresh should be (2*srtt) +
5135 * possible inter-packet delay (if any).
5137 uint32_t inter_gap = 0;
5140 counter_u64_add(rack_used_tlpmethod, 1);
5141 idx = rsm->r_rtr_cnt - 1;
5142 nidx = prsm->r_rtr_cnt - 1;
5143 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) {
5144 /* Yes it was sent later (or at the same time) */
5145 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx];
5147 thresh += inter_gap;
5148 } else if (len <= segsiz) {
5150 * Possibly compensate for delayed-ack.
5152 uint32_t alt_thresh;
5154 counter_u64_add(rack_used_tlpmethod2, 1);
5155 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
5156 if (alt_thresh > thresh)
5157 thresh = alt_thresh;
5159 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) {
5161 if (len <= segsiz) {
5162 uint32_t alt_thresh;
5164 * Compensate for delayed-ack with the d-ack time.
5166 counter_u64_add(rack_used_tlpmethod, 1);
5167 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
5168 if (alt_thresh > thresh)
5169 thresh = alt_thresh;
5172 /* Not above an RTO */
5173 if (thresh > tp->t_rxtcur) {
5174 thresh = tp->t_rxtcur;
5176 /* Not above a RTO max */
5177 if (thresh > rack_rto_max) {
5178 thresh = rack_rto_max;
5180 /* Apply user supplied min TLP */
5181 if (thresh < rack_tlp_min) {
5182 thresh = rack_tlp_min;
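/*
 * Worked example (hypothetical numbers, in microseconds): with
 * srtt = 40000 and rc_tlp_threshold = 2, the base thresh is
 * 40000 + 40000/2 = 60000.  With only one segment's worth of data
 * outstanding, the delayed-ACK compensated alternative
 * srtt + srtt/2 + rack_delayed_ack_time is used if it is larger.
 * Finally thresh is clamped to at most t_rxtcur and rack_rto_max and
 * to at least rack_tlp_min.
 */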
5188 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack)
5191 * We want the rack_rtt which is the
5192 * last rtt we measured. However if that
5193 * does not exist we fall back to the srtt (which
5194 * we probably will never do) and then as a last
5195 * resort we use RACK_INITIAL_RTO if no srtt is available.
5198 if (rack->rc_rack_rtt)
5199 return (rack->rc_rack_rtt);
5200 else if (tp->t_srtt == 0)
5201 return (RACK_INITIAL_RTO);
5202 return (tp->t_srtt);
5205 static struct rack_sendmap *
5206 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused)
5209 * Check to see that we don't need to fall into recovery. We will
5210 * need to do so if our oldest transmit is past the time we should
5213 struct tcp_rack *rack;
5214 struct rack_sendmap *rsm;
5216 uint32_t srtt, thresh;
5218 rack = (struct tcp_rack *)tp->t_fb_ptr;
5219 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) {
5222 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
5226 if (rsm->r_flags & RACK_ACKED) {
5227 rsm = rack_find_lowest_rsm(rack);
5231 idx = rsm->r_rtr_cnt - 1;
5232 srtt = rack_grab_rtt(tp, rack);
5233 thresh = rack_calc_thresh_rack(rack, srtt, tsused);
5234 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) {
5237 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) {
5240 /* Ok, if we reach here we are overdue and this segment can be sent */
5241 if (IN_RECOVERY(tp->t_flags) == 0) {
5243 * For the one that enters us into recovery record undo
5246 rack->r_ctl.rc_rsm_start = rsm->r_start;
5247 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
5248 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
5250 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una);
5255 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack)
5261 t = (tp->t_srtt + (tp->t_rttvar << 2));
5262 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift],
5263 rack_persist_min, rack_persist_max);
5264 if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
5266 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT;
5267 ret_val = (uint32_t)tt;
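/*
 * Worked example (hypothetical numbers, in microseconds): with
 * t_srtt = 100000 and t_rttvar = 25000, t = 100000 + (25000 << 2) =
 * 200000.  At t_rxtshift = 2 the backoff factor tcp_backoff[2] = 4,
 * so tt = 800000 before being clamped into
 * [rack_persist_min, rack_persist_max].
 */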
5272 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack)
5275 * Start the FR timer, we do this based on getting the first one in
5276 * the rc_tmap. Note that if it is NULL we must stop the timer. In all
5277 * events we need to stop the running timer (if it is running) before
5278 * starting the new one.
5280 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse;
5283 int32_t is_tlp_timer = 0;
5284 struct rack_sendmap *rsm;
5286 if (rack->t_timers_stopped) {
5287 /* All timers have been stopped none are to run */
5290 if (rack->rc_in_persist) {
5291 /* We can't start any timer in persists */
5292 return (rack_get_persists_timer_val(tp, rack));
5294 rack->rc_on_min_to = 0;
5295 if ((tp->t_state < TCPS_ESTABLISHED) ||
5296 ((tp->t_flags & TF_SACK_PERMIT) == 0)) {
5299 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
5300 if ((rsm == NULL) || sup_rack) {
5301 /* Nothing on the send map or no rack */
5303 time_since_sent = 0;
5304 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
5307 * Should we discount the RTX timer any?
5309 * We want to discount it the smallest amount.
5310 * If a timer (Rack/TLP or RXT) has gone off more
5311 * recently, that's the discount we want to use (now - timer time).
5312 * If the retransmit of the oldest packet was more recent, then
5313 * we want to use that (now - oldest-packet-last_transmit_time).
5316 idx = rsm->r_rtr_cnt - 1;
5317 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx])))
5318 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time;
5320 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx];
5321 if (TSTMP_GT(cts, tstmp_touse))
5322 time_since_sent = cts - tstmp_touse;
5324 if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) {
5325 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT;
5327 if (to > time_since_sent)
5328 to -= time_since_sent;
5330 to = rack->r_ctl.rc_min_to;
5333 /* Special case for KEEPINIT */
5334 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) &&
5335 (TP_KEEPINIT(tp) != 0) &&
5338 * We have to put a ceiling on the rxt timer
5339 * of the keep-init timeout.
5341 uint32_t max_time, red;
5343 max_time = TICKS_2_USEC(TP_KEEPINIT(tp));
5344 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) {
5345 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]);
5351 /* Reduce timeout to the keep value if needed */
5359 if (rsm->r_flags & RACK_ACKED) {
5360 rsm = rack_find_lowest_rsm(rack);
5366 if (rack->sack_attack_disable) {
5368 * We don't want to do
5369 * any TLP's if you are an attacker.
5370 * Though if you are doing what
5371 * is expected you may still have
5372 * SACK-PASSED marks.
5376 /* Convert from ms to usecs */
5377 if ((rsm->r_flags & RACK_SACK_PASSED) || (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
5378 if ((tp->t_flags & TF_SENTFIN) &&
5379 ((tp->snd_max - tp->snd_una) == 1) &&
5380 (rsm->r_flags & RACK_HAS_FIN)) {
5382 * We don't start a rack timer if all we have is a
5387 if ((rack->use_rack_rr == 0) &&
5388 (IN_FASTRECOVERY(tp->t_flags)) &&
5389 (rack->rack_no_prr == 0) &&
5390 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) {
5392 * We are not cheating, in recovery and
5393 * not enough ACKs yet to get our next
5394 * retransmission out.
5396 * Note that classified attackers do not
5397 * get to use the rack-cheat.
5401 srtt = rack_grab_rtt(tp, rack);
5402 thresh = rack_calc_thresh_rack(rack, srtt, cts);
5403 idx = rsm->r_rtr_cnt - 1;
5404 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh;
5405 if (SEQ_GEQ(exp, cts)) {
5407 if (to < rack->r_ctl.rc_min_to) {
5408 to = rack->r_ctl.rc_min_to;
5409 if (rack->r_rr_config == 3)
5410 rack->rc_on_min_to = 1;
5413 to = rack->r_ctl.rc_min_to;
5414 if (rack->r_rr_config == 3)
5415 rack->rc_on_min_to = 1;
5418 /* Ok we need to do a TLP not RACK */
5420 if ((rack->rc_tlp_in_progress != 0) &&
5421 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) {
5423 * The previous send was a TLP and we have sent
5424 * N TLP's without sending new data.
5428 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
5430 /* We found no rsm to TLP with. */
5433 if (rsm->r_flags & RACK_HAS_FIN) {
5434 /* If it's a FIN we don't do TLP */
5438 idx = rsm->r_rtr_cnt - 1;
5439 time_since_sent = 0;
5440 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time))
5441 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx];
5443 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time;
5444 if (TSTMP_GT(cts, tstmp_touse))
5445 time_since_sent = cts - tstmp_touse;
5448 if ((rack->rc_srtt_measure_made == 0) &&
5449 (tp->t_srtt == 1)) {
5451 * If another stack has run and set srtt to 1,
5452 * then the srtt was 0, so let's use the initial.
5454 srtt = RACK_INITIAL_RTO;
5456 srtt_cur = tp->t_srtt;
5460 srtt = RACK_INITIAL_RTO;
5462 * If the SRTT is not keeping up and the
5463 * rack RTT has spiked we want to use
5464 * the last RTT not the smoothed one.
5466 if (rack_tlp_use_greater &&
5468 (srtt < rack_grab_rtt(tp, rack))) {
5469 srtt = rack_grab_rtt(tp, rack);
5471 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt);
5472 if (thresh > time_since_sent) {
5473 to = thresh - time_since_sent;
5475 to = rack->r_ctl.rc_min_to;
5476 rack_log_alt_to_to_cancel(rack,
5478 time_since_sent, /* flex2 */
5479 tstmp_touse, /* flex3 */
5480 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */
5481 (uint32_t)rsm->r_tim_lastsent[idx],
5485 if (to < rack_tlp_min) {
5488 if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) {
5490 * If the TLP time works out to be larger than the max
5491 * RTO, let's not do TLP, just RTO.
5496 if (is_tlp_timer == 0) {
5497 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK;
5499 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP;
5507 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5509 if (rack->rc_in_persist == 0) {
5510 if (tp->t_flags & TF_GPUTINPROG) {
5512 * Stop the goodput now, the calling of the
5513 * measurement function clears the flag.
5515 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__);
5517 #ifdef NETFLIX_SHARED_CWND
5518 if (rack->r_ctl.rc_scw) {
5519 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
5520 rack->rack_scwnd_is_idle = 1;
5523 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
5524 if (rack->r_ctl.rc_went_idle_time == 0)
5525 rack->r_ctl.rc_went_idle_time = 1;
5526 rack_timer_cancel(tp, rack, cts, __LINE__);
5528 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
5529 rack_rto_min, rack_rto_max);
5530 rack->rc_in_persist = 1;
5535 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5537 if (rack->rc_inp->inp_in_hpts) {
5538 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
5539 rack->r_ctl.rc_hpts_flags = 0;
5541 #ifdef NETFLIX_SHARED_CWND
5542 if (rack->r_ctl.rc_scw) {
5543 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
5544 rack->rack_scwnd_is_idle = 0;
5547 if (rack->rc_gp_dyn_mul &&
5548 (rack->use_fixed_rate == 0) &&
5549 (rack->rc_always_pace)) {
5551 * Do we count this as if a probe-rtt just occurred?
5554 uint32_t time_idle, idle_min;
5556 time_idle = tcp_get_usecs(NULL) - rack->r_ctl.rc_went_idle_time;
5557 idle_min = rack_min_probertt_hold;
5558 if (rack_probertt_gpsrtt_cnt_div) {
5560 extra = (uint64_t)rack->r_ctl.rc_gp_srtt *
5561 (uint64_t)rack_probertt_gpsrtt_cnt_mul;
5562 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div;
5563 idle_min += (uint32_t)extra;
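/*
 * Illustration (hypothetical sysctl values): with rc_gp_srtt = 50000
 * usecs, gpsrtt_cnt_mul = 4 and gpsrtt_cnt_div = 4, the extra term is
 * 50000 usecs, so idle_min = rack_min_probertt_hold + 50000.  Only if
 * the idle period reached that total is it counted as a probe-rtt.
 */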
5565 if (time_idle >= idle_min) {
5566 /* Yes, we count it as a probe-rtt. */
5569 us_cts = tcp_get_usecs(NULL);
5570 if (rack->in_probe_rtt == 0) {
5571 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
5572 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts;
5573 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts;
5574 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts;
5576 rack_exit_probertt(rack, us_cts);
5580 rack->rc_in_persist = 0;
5581 rack->r_ctl.rc_went_idle_time = 0;
5583 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
5584 rack_rto_min, rack_rto_max);
5585 rack->r_ctl.rc_agg_delayed = 0;
5588 rack->r_ctl.rc_agg_early = 0;
5592 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts,
5593 struct hpts_diag *diag, struct timeval *tv)
5595 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
5596 union tcp_log_stackspecific log;
5598 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
5599 log.u_bbr.flex1 = diag->p_nxt_slot;
5600 log.u_bbr.flex2 = diag->p_cur_slot;
5601 log.u_bbr.flex3 = diag->slot_req;
5602 log.u_bbr.flex4 = diag->inp_hptsslot;
5603 log.u_bbr.flex5 = diag->slot_remaining;
5604 log.u_bbr.flex6 = diag->need_new_to;
5605 log.u_bbr.flex7 = diag->p_hpts_active;
5606 log.u_bbr.flex8 = diag->p_on_min_sleep;
5607 /* Hijack other fields as needed */
5608 log.u_bbr.epoch = diag->have_slept;
5609 log.u_bbr.lt_epoch = diag->yet_to_sleep;
5610 log.u_bbr.pkts_out = diag->co_ret;
5611 log.u_bbr.applimited = diag->hpts_sleep_time;
5612 log.u_bbr.delivered = diag->p_prev_slot;
5613 log.u_bbr.inflight = diag->p_runningtick;
5614 log.u_bbr.bw_inuse = diag->wheel_tick;
5615 log.u_bbr.rttProp = diag->wheel_cts;
5616 log.u_bbr.timeStamp = cts;
5617 log.u_bbr.delRate = diag->maxticks;
5618 log.u_bbr.cur_del_rate = diag->p_curtick;
5619 log.u_bbr.cur_del_rate <<= 32;
5620 log.u_bbr.cur_del_rate |= diag->p_lasttick;
5621 TCP_LOG_EVENTP(rack->rc_tp, NULL,
5622 &rack->rc_inp->inp_socket->so_rcv,
5623 &rack->rc_inp->inp_socket->so_snd,
5624 BBR_LOG_HPTSDIAG, 0,
5625 0, &log, false, tv);
5631 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type)
5633 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
5634 union tcp_log_stackspecific log;
5637 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
5638 log.u_bbr.flex1 = sb->sb_flags;
5639 log.u_bbr.flex2 = len;
5640 log.u_bbr.flex3 = sb->sb_state;
5641 log.u_bbr.flex8 = type;
5642 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
5643 TCP_LOG_EVENTP(rack->rc_tp, NULL,
5644 &rack->rc_inp->inp_socket->so_rcv,
5645 &rack->rc_inp->inp_socket->so_snd,
5647 len, &log, false, &tv);
5652 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
5653 int32_t slot, uint32_t tot_len_this_send, int sup_rack)
5655 struct hpts_diag diag;
5658 uint32_t delayed_ack = 0;
5659 uint32_t hpts_timeout;
5660 uint32_t entry_slot = slot;
5666 if ((tp->t_state == TCPS_CLOSED) ||
5667 (tp->t_state == TCPS_LISTEN)) {
5670 if (inp->inp_in_hpts) {
5671 /* Already on the pacer */
5674 stopped = rack->rc_tmr_stopped;
5675 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) {
5676 left = rack->r_ctl.rc_timer_exp - cts;
5678 rack->r_ctl.rc_timer_exp = 0;
5679 rack->r_ctl.rc_hpts_flags = 0;
5680 us_cts = tcp_get_usecs(&tv);
5681 /* Now early/late accounting */
5682 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL);
5683 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) {
5685 * We have an early carry-over set;
5686 * we can always add more time so we
5687 * can always make this compensation.
5689 * Note if ACKs are allowed to wake us, do not
5690 * penalize the next timer for being woken
5691 * by an ACK, aka the rc_agg_early (non-paced mode).
5693 slot += rack->r_ctl.rc_agg_early;
5695 rack->r_ctl.rc_agg_early = 0;
5699 * This is harder, we can
5700 * compensate some but it
5701 * really depends on what
5702 * the current pacing time is.
5704 if (rack->r_ctl.rc_agg_delayed >= slot) {
5706 * We can't compensate for it all.
5707 * And we have to have some time
5708 * on the clock. We always have a minimum of
5709 * 10 slots (10 x 10, i.e. 100 usecs).
5711 if (slot <= HPTS_TICKS_PER_USEC) {
5713 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_USEC - slot);
5714 slot = HPTS_TICKS_PER_USEC;
5716 /* We take off some */
5717 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_USEC);
5718 slot = HPTS_TICKS_PER_USEC;
5721 slot -= rack->r_ctl.rc_agg_delayed;
5722 rack->r_ctl.rc_agg_delayed = 0;
5723 /* Make sure we have 100 useconds at minimum */
5724 if (slot < HPTS_TICKS_PER_USEC) {
5725 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_USEC - slot;
5726 slot = HPTS_TICKS_PER_USEC;
5728 if (rack->r_ctl.rc_agg_delayed == 0)
5733 /* We are pacing too */
5734 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT;
5736 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack);
5737 #ifdef NETFLIX_EXP_DETECTION
5738 if (rack->sack_attack_disable &&
5739 (slot < tcp_sad_pacing_interval)) {
5741 * We have a potential attacker on
5742 * the line. We have possibly some
5743 * (or no) pacing time set. We want to
5744 * slow down the processing of sacks by some
5745 * amount (if it is an attacker). Set the default
5746 * slot for attackers in place (unless the original
5747 * interval is longer). It's stored in
5748 * micro-seconds, so let's convert to msecs.
5750 slot = tcp_sad_pacing_interval;
5753 if (tp->t_flags & TF_DELACK) {
5754 delayed_ack = TICKS_2_USEC(tcp_delacktime);
5755 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK;
5757 if (delayed_ack && ((hpts_timeout == 0) ||
5758 (delayed_ack < hpts_timeout)))
5759 hpts_timeout = delayed_ack;
5761 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
5763 * If no timers are going to run and we will fall off the hptsi
5764 * wheel, we resort to a keep-alive timer if its configured.
5766 if ((hpts_timeout == 0) &&
5768 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
5769 (tp->t_state <= TCPS_CLOSING)) {
5771 * Ok we have no timer (persists, rack, tlp, rxt or
5772 * del-ack), we don't have segments being paced. So
5773 * all that is left is the keepalive timer.
5775 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
5776 /* Get the established keep-alive time */
5777 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp));
5780 * Get the initial setup keep-alive time,
5781 * note that this is probably not going to
5782 * happen, since rack will be running a rxt timer
5783 * if a SYN of some sort is outstanding. It is
5784 * actually handled in rack_timeout_rxt().
5786 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp));
5788 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP;
5789 if (rack->in_probe_rtt) {
5791 * We want to instead not wake up a long time from
5792 * now but to wake up about the time we would
5793 * exit probe-rtt and initiate a keep-alive ack.
5794 * This will get us out of probe-rtt and update our min-rtt.
5797 hpts_timeout = rack_min_probertt_hold;
5801 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) ==
5802 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) {
5804 * RACK, TLP, persists and RXT timers all are restartable
5805 * based on actions input .. i.e we received a packet (ack
5806 * or sack) and that changes things (rw, or snd_una etc).
5807 * Thus we can restart them with a new value. For
5808 * keep-alive, delayed_ack we keep track of what was left
5809 * and restart the timer with a smaller value.
5811 if (left < hpts_timeout)
5812 hpts_timeout = left;
5816 * Hack alert for now we can't time-out over 2,147,483
5817 * seconds (a bit more than 596 hours), which is probably ok
5820 if (hpts_timeout > 0x7ffffffe)
5821 hpts_timeout = 0x7ffffffe;
5822 rack->r_ctl.rc_timer_exp = cts + hpts_timeout;
5824 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL);
5825 if ((rack->gp_ready == 0) &&
5826 (rack->use_fixed_rate == 0) &&
5827 (hpts_timeout < slot) &&
5828 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) {
5830 * We have no good estimate yet for the
5831 * old clunky burst mitigation or the
5832 * real pacing. And the tlp or rxt is smaller
5833 * than the pacing calculation. Lets not
5834 * pace that long since we know the calculation
5835 * so far is not accurate.
5837 slot = hpts_timeout;
5839 rack->r_ctl.last_pacing_time = slot;
5841 * Turn off all the flags for queuing by default. The
5842 * flags have important meanings to what happens when
5843 * LRO interacts with the transport. Most likely (by default now)
5844 * mbuf_queueing and ack compression are on. So the transport
5845 * has a couple of flags that control what happens (if those
5846 * are not on then these flags won't have any effect since it
5847 * won't go through the queuing LRO path).
5849 * INP_MBUF_QUEUE_READY - This flag says that I am busy
5850 * pacing output, so don't disturb. But
5851 * it also means LRO can wake me if there
5852 * is a SACK arrival.
5854 * INP_DONT_SACK_QUEUE - This flag is used in conjunction
5855 * with the above flag (QUEUE_READY) and
5856 * when present it says don't even wake me
5857 * if a SACK arrives.
5859 * The idea behind these flags is that if we are pacing we
5860 * set the MBUF_QUEUE_READY and only get woken up if
5861 * a SACK arrives (which could change things) or if
5862 * our pacing timer expires. If, however, we have a rack
5863 * timer running, then we don't even want a sack to wake
5864 * us since the rack timer has to expire before we can send.
5866 * Other cases should usually have none of the flags set
5867 * so LRO can call into us.
5869 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY);
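/*
 * Summary of the cases handled below (restating the comment above):
 *   - pacing timer set (slot != 0): set INP_MBUF_QUEUE_READY;
 *   - pacing timer plus a RACK timer and r_rr_config != 3: also set
 *     INP_DONT_SACK_QUEUE;
 *   - rc_ack_can_sendout_data: clear both again (acks may send data);
 *   - only an hpts timeout, or nothing at all: leave both flags clear.
 */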
5871 rack->r_ctl.rc_last_output_to = us_cts + slot;
5873 * A pacing timer (slot) is being set, in
5874 * such a case we cannot send (we are blocked by
5875 * the timer). So let's tell LRO that it should not
5876 * wake us unless there is a SACK. Note this only
5877 * will be effective if mbuf queueing is on or
5878 * compressed acks are being processed.
5880 inp->inp_flags2 |= INP_MBUF_QUEUE_READY;
5882 * But wait if we have a Rack timer running
5883 * even a SACK should not disturb us (with
5884 * the exception of r_rr_config 3).
5886 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) &&
5887 (rack->r_rr_config != 3))
5888 inp->inp_flags2 |= INP_DONT_SACK_QUEUE;
5889 if (rack->rc_ack_can_sendout_data) {
5891 * Ahh but wait, this is that special case
5892 * where the pacing timer can be disturbed;
5893 * back out the changes (used for the non-paced case
5896 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY);
5898 if ((rack->use_rack_rr) &&
5899 (rack->r_rr_config < 2) &&
5900 ((hpts_timeout) && (hpts_timeout < slot))) {
5902 * Arrange for the hpts to kick back in after the
5903 * t-o if the t-o does not cause a send.
5905 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout),
5907 rack_log_hpts_diag(rack, us_cts, &diag, &tv);
5908 rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
5910 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(slot),
5912 rack_log_hpts_diag(rack, us_cts, &diag, &tv);
5913 rack_log_to_start(rack, cts, hpts_timeout, slot, 1);
5915 } else if (hpts_timeout) {
5917 * With respect to inp_flags2 here, let's let any new acks wake
5918 * us up. Since we are not pacing (no pacing timer), output
5919 * can happen, so we should let it. If it's a Rack timer, then any inbound
5920 * packet probably won't change the sending (we will be blocked)
5921 * but it may change the prr stats, so letting it in (the defaults set
5922 * at the start of this block) is good enough.
5924 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout),
5926 rack_log_hpts_diag(rack, us_cts, &diag, &tv);
5927 rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
5929 /* No timer starting */
5931 if (SEQ_GT(tp->snd_max, tp->snd_una)) {
5932 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?",
5933 tp, rack, tot_len_this_send, cts, slot, hpts_timeout);
5937 rack->rc_tmr_stopped = 0;
5939 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv);
5943 * RACK Timer, here we simply do logging and housekeeping.
5944 * The normal rack_output() function will call the
5945 * appropriate thing to check if we need to do a RACK retransmit.
5946 * We return 1, saying don't proceed with rack_output only
5947 * when all timers have been stopped (destroyed PCB?).
5950 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
5953 * This timer simply provides an internal trigger to send out data.
5954 * The check_recovery_mode call will see if there are needed
5955 * retransmissions; if so we will enter fast-recovery. The output
5956 * call may or may not do the same thing depending on sysctl settings.
5959 struct rack_sendmap *rsm;
5961 if (tp->t_timers->tt_flags & TT_STOPPED) {
5964 counter_u64_add(rack_to_tot, 1);
5965 if (rack->r_state && (rack->r_state != tp->t_state))
5966 rack_set_state(tp, rack);
5967 rack->rc_on_min_to = 0;
5968 rsm = rack_check_recovery_mode(tp, cts);
5969 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm);
5971 rack->r_ctl.rc_resend = rsm;
5972 rack->r_timer_override = 1;
5973 if (rack->use_rack_rr) {
5975 * Don't accumulate extra pacing delay
5976 * we are allowing the rack timer to
5977 * override pacing, i.e. rrr takes precedence
5978 * if the pacing interval is longer than the rrr
5979 * time (in other words we get the min of the pacing
5980 * time and the rrr time).
5982 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
5985 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK;
5987 /* restart a timer and return 1 */
5988 rack_start_hpts_timer(rack, tp, cts,
5996 rack_adjust_orig_mlen(struct rack_sendmap *rsm)
5998 if (rsm->m->m_len > rsm->orig_m_len) {
6000 * Mbuf grew, caused by sbcompress; our offset does not change.
6003 rsm->orig_m_len = rsm->m->m_len;
6004 } else if (rsm->m->m_len < rsm->orig_m_len) {
6006 * Mbuf shrank, trimmed off the top by an ack; our offset must be adjusted.
6009 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len);
6010 rsm->orig_m_len = rsm->m->m_len;
6015 rack_setup_offset_for_rsm(struct rack_sendmap *src_rsm, struct rack_sendmap *rsm)
6020 if (src_rsm->orig_m_len != src_rsm->m->m_len) {
6021 /* Fix up the orig_m_len and possibly the mbuf offset */
6022 rack_adjust_orig_mlen(src_rsm);
6025 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start);
6026 while (soff >= m->m_len) {
6027 /* Move out past this mbuf */
6030 KASSERT((m != NULL),
6031 ("rsm:%p nrsm:%p hit at soff:%u null m",
6032 src_rsm, rsm, soff));
6036 rsm->orig_m_len = m->m_len;
6039 static __inline void
6040 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm,
6041 struct rack_sendmap *rsm, uint32_t start)
6045 nrsm->r_start = start;
6046 nrsm->r_end = rsm->r_end;
6047 nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
6048 nrsm->r_flags = rsm->r_flags;
6049 nrsm->r_dupack = rsm->r_dupack;
6050 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed;
6051 nrsm->r_rtr_bytes = 0;
6052 rsm->r_end = nrsm->r_start;
6053 nrsm->r_just_ret = rsm->r_just_ret;
6054 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
6055 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
6058 * Now we need to find nrsm's new location in the mbuf chain
6059 * we basically calculate a new offset, which is soff +
6060 * how much is left in the original rsm. Then we walk out the mbuf
6061 * chain to find the right position; it may be the same mbuf
6064 KASSERT((rsm->m != NULL),
6065 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack));
6066 rack_setup_offset_for_rsm(rsm, nrsm);
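/*
 * Example (hypothetical sequence numbers): if rsm covered [1000, 2000)
 * and start is 1500, then after the clone rsm covers [1000, 1500) and
 * nrsm covers [1500, 2000); rack_setup_offset_for_rsm() then walks the
 * mbuf chain forward from rsm->soff by the 500 bytes that remain in
 * rsm to find nrsm's starting mbuf and offset.
 */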
6069 static struct rack_sendmap *
6070 rack_merge_rsm(struct tcp_rack *rack,
6071 struct rack_sendmap *l_rsm,
6072 struct rack_sendmap *r_rsm)
6075 * We are merging two ack'd RSM's,
6076 * the l_rsm is on the left (lower seq
6077 * values) and the r_rsm is on the right
6078 * (higher seq value). The simplest way
6079 * to merge these is to move the right
6080 * one into the left. I don't think there
6081 * is any reason we need to try to find
6082 * the oldest (or last oldest retransmitted).
6084 struct rack_sendmap *rm;
6086 rack_log_map_chg(rack->rc_tp, rack, NULL,
6087 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__);
6088 l_rsm->r_end = r_rsm->r_end;
6089 if (l_rsm->r_dupack < r_rsm->r_dupack)
6090 l_rsm->r_dupack = r_rsm->r_dupack;
6091 if (r_rsm->r_rtr_bytes)
6092 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes;
6093 if (r_rsm->r_in_tmap) {
6094 /* This really should not happen */
6095 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext);
6096 r_rsm->r_in_tmap = 0;
6100 if (r_rsm->r_flags & RACK_HAS_FIN)
6101 l_rsm->r_flags |= RACK_HAS_FIN;
6102 if (r_rsm->r_flags & RACK_TLP)
6103 l_rsm->r_flags |= RACK_TLP;
6104 if (r_rsm->r_flags & RACK_RWND_COLLAPSED)
6105 l_rsm->r_flags |= RACK_RWND_COLLAPSED;
6106 if ((r_rsm->r_flags & RACK_APP_LIMITED) &&
6107 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) {
6109 * If both are app-limited then let the
6110 * free lower the count. If right is app
6111 * limited and left is not, transfer.
6113 l_rsm->r_flags |= RACK_APP_LIMITED;
6114 r_rsm->r_flags &= ~RACK_APP_LIMITED;
6115 if (r_rsm == rack->r_ctl.rc_first_appl)
6116 rack->r_ctl.rc_first_appl = l_rsm;
6118 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm);
6121 panic("removing head in rack:%p rsm:%p rm:%p",
6125 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) {
6126 /* Transfer the split limit to the map we free */
6127 r_rsm->r_limit_type = l_rsm->r_limit_type;
6128 l_rsm->r_limit_type = 0;
6130 rack_free(rack, r_rsm);
6135 * TLP Timer, here we simply set up what segment we want to
6136 * have the TLP expire on; the normal rack_output() will then send it out.
6139 * We return 1, saying don't proceed with rack_output only
6140 * when all timers have been stopped (destroyed PCB?).
6143 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6148 struct rack_sendmap *rsm = NULL;
6149 struct rack_sendmap *insret;
6152 uint32_t out, avail;
6153 int collapsed_win = 0;
6155 if (tp->t_timers->tt_flags & TT_STOPPED) {
6158 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
6159 /* Its not time yet */
6162 if (ctf_progress_timeout_check(tp, true)) {
6163 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
6164 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
6168 * A TLP timer has expired. We have been idle for 2 rtts. So we now
6169 * need to figure out how to force a full MSS segment out.
6171 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL);
6172 rack->r_ctl.retran_during_recovery = 0;
6173 rack->r_ctl.dsack_byte_cnt = 0;
6174 counter_u64_add(rack_tlp_tot, 1);
6175 if (rack->r_state && (rack->r_state != tp->t_state))
6176 rack_set_state(tp, rack);
6177 so = tp->t_inpcb->inp_socket;
6178 avail = sbavail(&so->so_snd);
6179 out = tp->snd_max - tp->snd_una;
6180 if (out > tp->snd_wnd) {
6181 /* special case, we need a retransmission */
6186 * Check our send oldest always settings, and if
6187 * there is an oldest to send jump to the need_retran.
6189 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0))
6193 /* New data is available */
6195 if (amm > ctf_fixed_maxseg(tp)) {
6196 amm = ctf_fixed_maxseg(tp);
6197 if ((amm + out) > tp->snd_wnd) {
6198 /* We are rwnd limited */
6201 } else if (amm < ctf_fixed_maxseg(tp)) {
6202 /* not enough to fill a MTU */
6205 if (IN_FASTRECOVERY(tp->t_flags)) {
6207 if (rack->rack_no_prr == 0) {
6208 if (out + amm <= tp->snd_wnd) {
6209 rack->r_ctl.rc_prr_sndcnt = amm;
6210 rack_log_to_prr(rack, 4, 0);
6215 /* Set the send-new override */
6216 if (out + amm <= tp->snd_wnd)
6217 rack->r_ctl.rc_tlp_new_data = amm;
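/*
 * Illustration (hypothetical numbers): with avail = 10000 bytes in the
 * socket buffer, out = 5000 bytes outstanding, snd_wnd = 8000 and a
 * 1460-byte segment size, the new-data amount is capped at one segment
 * (1460).  Since 5000 + 1460 <= 8000 we are not rwnd limited, so either
 * rc_prr_sndcnt (in recovery with PRR) or rc_tlp_new_data is set to
 * 1460 and the TLP sends new data instead of a retransmission.
 */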
6221 rack->r_ctl.rc_tlpsend = NULL;
6222 counter_u64_add(rack_tlp_newdata, 1);
6227 * Ok we need to arrange the last un-acked segment to be re-sent, or
6228 * optionally the first un-acked segment.
6230 if (collapsed_win == 0) {
6231 if (rack_always_send_oldest)
6232 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
6234 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
6235 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) {
6236 rsm = rack_find_high_nonack(rack, rsm);
6240 counter_u64_add(rack_tlp_does_nada, 1);
6242 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
6248 * We must find the last segment
6249 * that was acceptable to the client.
6251 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
6252 if ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0) {
6258 /* None? if so send the first */
6259 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
6261 counter_u64_add(rack_tlp_does_nada, 1);
6263 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
6269 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) {
6271 * We need to split this, the last segment, in two.
6273 struct rack_sendmap *nrsm;
6275 nrsm = rack_alloc_full_limit(rack);
6278 * No memory to split, we will just exit and punt
6279 * off to the RXT timer.
6281 counter_u64_add(rack_tlp_does_nada, 1);
6284 rack_clone_rsm(rack, nrsm, rsm,
6285 (rsm->r_end - ctf_fixed_maxseg(tp)));
6286 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
6287 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
6289 if (insret != NULL) {
6290 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
6291 nrsm, insret, rack, rsm);
6294 if (rsm->r_in_tmap) {
6295 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
6296 nrsm->r_in_tmap = 1;
6298 rsm->r_flags &= (~RACK_HAS_FIN);
6301 rack->r_ctl.rc_tlpsend = rsm;
6303 rack->r_timer_override = 1;
6304 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
6307 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
6312 * Delayed ack Timer, here we simply need to set up the
6313 * ACK_NOW flag and remove the DELACK flag. From there
6314 * the output routine will send the ack out.
6316 * We only return 1, saying don't proceed, if all timers
6317 * are stopped (destroyed PCB?).
6320 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6322 if (tp->t_timers->tt_flags & TT_STOPPED) {
6325 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL);
6326 tp->t_flags &= ~TF_DELACK;
6327 tp->t_flags |= TF_ACKNOW;
6328 KMOD_TCPSTAT_INC(tcps_delack);
6329 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
6334 * Persists timer, here we simply send the
6335 * same thing as a keepalive would:
6336 * the one-byte send.
6338 * We only return 1, saying don't proceed, if all timers
6339 * are stopped (destroyed PCB?).
6342 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6344 struct tcptemp *t_template;
6350 if (tp->t_timers->tt_flags & TT_STOPPED) {
6353 if (rack->rc_in_persist == 0)
6355 if (ctf_progress_timeout_check(tp, false)) {
6356 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
6357 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
6358 tcp_set_inp_to_drop(inp, ETIMEDOUT);
6361 KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
6364 * Persistence timer into zero window. Force a byte to be output, if possible.
6366 KMOD_TCPSTAT_INC(tcps_persisttimeo);
6368 * Hack: if the peer is dead/unreachable, we do not time out if the
6369 * window is closed. After a full backoff, drop the connection if
6370 * the idle time (no responses to probes) reaches the maximum
6371 * backoff that we would use if retransmitting.
6373 if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
6374 (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
6375 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) {
6376 KMOD_TCPSTAT_INC(tcps_persistdrop);
6378 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
6379 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
6382 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) &&
6383 tp->snd_una == tp->snd_max)
6384 rack_exit_persist(tp, rack, cts);
6385 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT;
6387 * If the user has closed the socket then drop a persisting
6388 * connection after a much reduced timeout.
6390 if (tp->t_state > TCPS_CLOSE_WAIT &&
6391 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) {
6393 KMOD_TCPSTAT_INC(tcps_persistdrop);
6394 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
6395 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
6398 t_template = tcpip_maketemplate(rack->rc_inp);
6400 /* only set it if we were answered */
6401 if (rack->forced_ack == 0) {
6402 rack->forced_ack = 1;
6403 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL);
6405 tcp_respond(tp, t_template->tt_ipgen,
6406 &t_template->tt_t, (struct mbuf *)NULL,
6407 tp->rcv_nxt, tp->snd_una - 1, 0);
6408 /* This sends an ack */
6409 if (tp->t_flags & TF_DELACK)
6410 tp->t_flags &= ~TF_DELACK;
6411 free(t_template, M_TEMP);
6413 if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
6416 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL);
6417 rack_start_hpts_timer(rack, tp, cts,
6423 * If a keepalive goes off, we had no other timers
6424 * happening. We always return 1 here since this
6425 * routine either drops the connection or sends
6426 * out a segment via tcp_respond().
6429 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6431 struct tcptemp *t_template;
6434 if (tp->t_timers->tt_flags & TT_STOPPED) {
6437 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP;
6439 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL);
6441 * Keep-alive timer went off; send something or drop connection if
6442 * idle for too long.
6444 KMOD_TCPSTAT_INC(tcps_keeptimeo);
6445 if (tp->t_state < TCPS_ESTABLISHED)
6447 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
6448 tp->t_state <= TCPS_CLOSING) {
6449 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
6452 * Send a packet designed to force a response if the peer is
6453 * up and reachable: either an ACK if the connection is
6454 * still alive, or an RST if the peer has closed the
6455 * connection due to timeout or reboot. Using sequence
6456 * number tp->snd_una-1 causes the transmitted zero-length
6457 * segment to lie outside the receive window; by the
6458 * protocol spec, this requires the correspondent TCP to
6461 KMOD_TCPSTAT_INC(tcps_keepprobe);
6462 t_template = tcpip_maketemplate(inp);
6464 if (rack->forced_ack == 0) {
6465 rack->forced_ack = 1;
6466 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL);
6468 tcp_respond(tp, t_template->tt_ipgen,
6469 &t_template->tt_t, (struct mbuf *)NULL,
6470 tp->rcv_nxt, tp->snd_una - 1, 0);
6471 free(t_template, M_TEMP);
6474 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
6477 KMOD_TCPSTAT_INC(tcps_keepdrops);
6478 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX);
6479 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
6484 * Retransmit helper function, clear up all the ack
6485 * flags and take care of important bookkeeping.
6488 rack_remxt_tmr(struct tcpcb *tp)
6491 * The retransmit timer went off; all sack'd blocks must be un-acked.
6494 struct rack_sendmap *rsm, *trsm = NULL;
6495 struct tcp_rack *rack;
6497 rack = (struct tcp_rack *)tp->t_fb_ptr;
6498 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__);
6499 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL);
6500 if (rack->r_state && (rack->r_state != tp->t_state))
6501 rack_set_state(tp, rack);
6503 * Ideally we would like to be able to
6504 * mark SACK-PASS on anything not acked here.
6506 * However, if we do that we would burst out
6507 * all that data 1ms apart. This would be unwise,
6508 * so for now we will just let the normal rxt timer
6509 * and tlp timer take care of it.
6511 * Also we really need to stick them back in sequence
6512 * order. This way we send in the proper order and any
6513 * sacks that come floating in will "re-ack" the data.
6514 * To do this we zap the tmap with an INIT and then
6515 * walk through and place every rsm in the RB tree
6516 * back in its seq ordered place.
6518 TAILQ_INIT(&rack->r_ctl.rc_tmap);
6519 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
6521 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
6522 /* We must re-add it back to the tlist */
6524 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
6526 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext);
6530 if (rsm->r_flags & RACK_ACKED)
6531 rsm->r_flags |= RACK_WAS_ACKED;
6532 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS);
6534 /* Clear the count (we just un-acked them) */
6535 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una;
6536 rack->r_ctl.rc_sacked = 0;
6537 rack->r_ctl.rc_sacklast = NULL;
6538 rack->r_ctl.rc_agg_delayed = 0;
6540 rack->r_ctl.rc_agg_early = 0;
6542 /* Clear the tlp rtx mark */
6543 rack->r_ctl.rc_resend = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
6544 if (rack->r_ctl.rc_resend != NULL)
6545 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT;
6546 rack->r_ctl.rc_prr_sndcnt = 0;
6547 rack_log_to_prr(rack, 6, 0);
6548 rack->r_timer_override = 1;
6549 if ((((tp->t_flags & TF_SACK_PERMIT) == 0)
6550 #ifdef NETFLIX_EXP_DETECTION
6551 || (rack->sack_attack_disable != 0)
6553 ) && ((tp->t_flags & TF_SENTFIN) == 0)) {
6555 * For non-sack customers new data
6556 * needs to go out as retransmits until
6557 * we retransmit up to snd_max.
6559 rack->r_must_retran = 1;
6560 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp,
6561 rack->r_ctl.rc_sacked);
6563 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max;
6567 rack_convert_rtts(struct tcpcb *tp)
6569 if (tp->t_srtt > 1) {
6572 val = tp->t_srtt >> TCP_RTT_SHIFT;
6573 frac = tp->t_srtt & 0x1f;
6574 tp->t_srtt = TICKS_2_USEC(val);
6576 * frac is the fractional part of the srtt (if any)
6577 * but it's in ticks and every bit represents 1/TCP_RTT_SCALE (1/32) of a tick.
6582 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE);
6584 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE));
6592 val = tp->t_rttvar >> TCP_RTTVAR_SHIFT;
6593 frac = tp->t_rttvar & 0x1f;
6594 tp->t_rttvar = TICKS_2_USEC(val);
6596 * frac is the fractional part of the rttvar (if any)
6597 * but it's in ticks and every bit represents 1/TCP_RTT_SCALE (1/32) of a tick.
6602 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE);
6604 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE));
6606 tp->t_rttvar += frac;
6609 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
6610 rack_rto_min, rack_rto_max);
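/*
 * Worked example (assuming hz = 1000): a ticks-scaled t_srtt of 165
 * encodes 5 ticks plus 5/32 of a tick.  val = 165 >> TCP_RTT_SHIFT = 5,
 * which TICKS_2_USEC() turns into 5000 usecs; frac = 5 contributes
 * 5 * HPTS_USEC_IN_MSEC / TCP_RTT_SCALE = 156 usecs, giving a converted
 * srtt of roughly 5156 usecs.  t_rxtcur is then recomputed from the
 * converted values and clamped into [rack_rto_min, rack_rto_max].
 */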
6614 rack_cc_conn_init(struct tcpcb *tp)
6616 struct tcp_rack *rack;
6619 rack = (struct tcp_rack *)tp->t_fb_ptr;
6623 * Now convert to rack's internal format, if required.
6626 if ((srtt == 0) && (tp->t_srtt != 0))
6627 rack_convert_rtts(tp);
6629 * We want a chance to stay in slowstart as
6630 * we create a connection. TCP spec says that
6631 * initially ssthresh is infinite. For our
6632 * purposes that is the snd_wnd.
6634 if (tp->snd_ssthresh < tp->snd_wnd) {
6635 tp->snd_ssthresh = tp->snd_wnd;
6638 * We also want to ensure an IW's worth of
6639 * data can get in flight.
6641 if (rc_init_window(rack) < tp->snd_cwnd)
6642 tp->snd_cwnd = rc_init_window(rack);
6646 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise
6647 * we will set up to retransmit the lowest seq number outstanding.
6650 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
6658 if (tp->t_timers->tt_flags & TT_STOPPED) {
6661 if (ctf_progress_timeout_check(tp, false)) {
6662 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN);
6663 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
6664 tcp_set_inp_to_drop(inp, ETIMEDOUT);
6667 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT;
6668 rack->r_ctl.retran_during_recovery = 0;
6669 rack->r_ctl.dsack_byte_cnt = 0;
6670 if (IN_FASTRECOVERY(tp->t_flags))
6671 tp->t_flags |= TF_WASFRECOVERY;
6673 tp->t_flags &= ~TF_WASFRECOVERY;
6674 if (IN_CONGRECOVERY(tp->t_flags))
6675 tp->t_flags |= TF_WASCRECOVERY;
6677 tp->t_flags &= ~TF_WASCRECOVERY;
6678 if (TCPS_HAVEESTABLISHED(tp->t_state) &&
6679 (tp->snd_una == tp->snd_max)) {
6680 /* Nothing outstanding .. nothing to do */
6684 * Rack can only run one timer at a time, so we cannot
6685 * run a KEEPINIT (gating SYN sending) and a retransmit
6686 * timer for the SYN. So if we are in a front state and
6687 * have a KEEPINIT timer we need to check the first transmit
6688 * against now to see if we have exceeded the KEEPINIT time
6691 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) &&
6692 (TP_KEEPINIT(tp) != 0)) {
6693 struct rack_sendmap *rsm;
6695 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
6697 /* Ok we have something outstanding to test keepinit with */
6698 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) &&
6699 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) {
6700 /* We have exceeded the KEEPINIT time */
6701 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX);
6707 * Retransmission timer went off. Message has not been acked within
6708 * retransmit interval. Back off to a longer retransmit interval
6709 * and retransmit one segment.
6712 if ((rack->r_ctl.rc_resend == NULL) ||
6713 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) {
6715 * If the rwnd collapsed on
6716 * the one we are retransmitting
6717 * it does not count against the retransmit counter.
6722 if (tp->t_rxtshift > TCP_MAXRXTSHIFT) {
6723 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN);
6725 tp->t_rxtshift = TCP_MAXRXTSHIFT;
6726 KMOD_TCPSTAT_INC(tcps_timeoutdrop);
6728 tcp_set_inp_to_drop(rack->rc_inp,
6729 (tp->t_softerror ? (uint16_t) tp->t_softerror : ETIMEDOUT));
6732 if (tp->t_state == TCPS_SYN_SENT) {
6734 * If the SYN was retransmitted, indicate CWND to be limited
6735 * to 1 segment in cc_conn_init().
6738 } else if (tp->t_rxtshift == 1) {
6740 * first retransmit; record ssthresh and cwnd so they can be
6741 * recovered if this turns out to be a "bad" retransmit. A
6742 * retransmit is considered "bad" if an ACK for this segment
6743 * is received within RTT/2 interval; the assumption here is
6744 * that the ACK was already in flight. See "On Estimating
6745 * End-to-End Network Path Properties" by Allman and Paxson
6748 tp->snd_cwnd_prev = tp->snd_cwnd;
6749 tp->snd_ssthresh_prev = tp->snd_ssthresh;
6750 tp->snd_recover_prev = tp->snd_recover;
6751 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2);
6752 tp->t_flags |= TF_PREVVALID;
6753 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0)
6754 tp->t_flags &= ~TF_PREVVALID;
6755 KMOD_TCPSTAT_INC(tcps_rexmttimeo);
6756 if ((tp->t_state == TCPS_SYN_SENT) ||
6757 (tp->t_state == TCPS_SYN_RECEIVED))
6758 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift];
6760 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift];
6762 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt,
6763 max(rack_rto_min, rexmt), rack_rto_max);
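/*
 * Worked example (hypothetical numbers, in microseconds): with
 * t_srtt = 40000, t_rttvar = 10000 and t_rxtshift = 3, the base value
 * max(rack_rto_min, 40000 + (10000 << 2)) is 80000 (assuming
 * rack_rto_min <= 80000); tcp_backoff[3] = 8 gives rexmt = 640000,
 * which is then bounded above by rack_rto_max when t_rxtcur is set.
 */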
6765 * We enter the path for PLMTUD if the connection is established or in
6766 * FIN_WAIT_1 status; the reason for the latter is that if the
6767 * amount of data we send is very small, we could send it in a couple
6768 * of packets and proceed straight to FIN. In that case we won't
6769 * catch the ESTABLISHED state.
6772 isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? true : false;
6776 if (((V_tcp_pmtud_blackhole_detect == 1) ||
6777 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) ||
6778 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) &&
6779 ((tp->t_state == TCPS_ESTABLISHED) ||
6780 (tp->t_state == TCPS_FIN_WAIT_1))) {
6782 * Idea here is that at each stage of mtu probe (usually,
6783 * 1448 -> 1188 -> 524) should be given 2 chances to recover
6784 * before further clamping down. 'tp->t_rxtshift % 2 == 0'
6785 * should take care of that.
6787 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) ==
6788 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) &&
6789 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 &&
6790 tp->t_rxtshift % 2 == 0)) {
6792 * Enter Path MTU Black-hole Detection mechanism:
6793 * - Disable Path MTU Discovery (IP "DF" bit).
6794 * - Reduce MTU to a lower value than what we negotiated with the peer.
6797 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) {
6798 /* Record that we may have found a black hole. */
6799 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
6800 /* Keep track of previous MSS. */
6801 tp->t_pmtud_saved_maxseg = tp->t_maxseg;
6805 * Reduce the MSS to blackhole value or to the
6806 * default in an attempt to retransmit.
6810 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) {
6811 /* Use the sysctl tuneable blackhole MSS. */
6812 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss;
6813 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated);
6814 } else if (isipv6) {
6815 /* Use the default MSS. */
6816 tp->t_maxseg = V_tcp_v6mssdflt;
6818 * Disable Path MTU Discovery when we switch to the minimum MSS.
6821 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
6822 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
6825 #if defined(INET6) && defined(INET)
6829 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) {
6830 /* Use the sysctl tuneable blackhole MSS. */
6831 tp->t_maxseg = V_tcp_pmtud_blackhole_mss;
6832 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated);
6834 /* Use the default MSS. */
6835 tp->t_maxseg = V_tcp_mssdflt;
6837 * Disable Path MTU Discovery when we switch to the minimum MSS.
6840 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
6841 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
6846 * If further retransmissions are still unsuccessful
6847 * with a lowered MTU, maybe this isn't a blackhole
6848 * and we restore the previous MSS and blackhole
6849 * detection flags. The limit '6' is determined by
6850 * giving each probe stage (1448, 1188, 524) 2
6851 * chances to recover.
6853 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
6854 (tp->t_rxtshift >= 6)) {
6855 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
6856 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
6857 tp->t_maxseg = tp->t_pmtud_saved_maxseg;
6858 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed);
6863 * Disable RFC1323 and SACK if we haven't got any response to
6864 * our third SYN to work around some broken terminal servers
6865 * (most of which have hopefully been retired) that have bad VJ
6866 * header compression code which trashes TCP segments containing
6867 * unknown-to-them TCP options.
6869 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) &&
6870 (tp->t_rxtshift == 3))
6871 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT);
6873 * If we backed off this far, our srtt estimate is probably bogus.
6874 * Clobber it so we'll take the next rtt measurement as our srtt;
6875 * move the current srtt into rttvar to keep the current retransmit times until then.
6878 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
6880 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
6881 in6_losing(tp->t_inpcb);
6884 in_losing(tp->t_inpcb);
6885 tp->t_rttvar += tp->t_srtt;
6888 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
6889 tp->snd_recover = tp->snd_max;
6890 tp->t_flags |= TF_ACKNOW;
6892 rack_cong_signal(tp, CC_RTO, tp->snd_una);
6898 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling)
6901 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK);
6906 if (tp->t_state == TCPS_LISTEN) {
6907 /* no timers on listen sockets */
6908 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)
6912 if ((timers & PACE_TMR_RACK) &&
6913 rack->rc_on_min_to) {
6915 * For the rack timer when we
6916 * are on a min-timeout (which means rrr_conf = 3)
6917 * we don't want to check the timer. It may
6918 * be going off for a pace and that's ok; we
6919 * want to send the retransmit (if it's ready).
6921 * If it's on a normal rack timer (non-min) then
6922 * we will check if it's expired.
6924 goto skip_time_check;
6926 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
6929 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
6931 rack_log_to_processing(rack, cts, ret, 0);
6934 if (hpts_calling == 0) {
6936 * A user send or queued mbuf (sack) has called us? We
6937 * return 0 and let the pacing guards
6938 * deal with it if they should or
6939 * should not cause a send.
6942 rack_log_to_processing(rack, cts, ret, 0);
6946 * Ok, our timer went off early and we are not paced; false
6947 * alarm, go back to sleep.
6950 left = rack->r_ctl.rc_timer_exp - cts;
6951 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(left));
6952 rack_log_to_processing(rack, cts, ret, left);
6956 rack->rc_tmr_stopped = 0;
6957 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK;
6958 if (timers & PACE_TMR_DELACK) {
6959 ret = rack_timeout_delack(tp, rack, cts);
6960 } else if (timers & PACE_TMR_RACK) {
6961 rack->r_ctl.rc_tlp_rxt_last_time = cts;
6962 rack->r_fast_output = 0;
6963 ret = rack_timeout_rack(tp, rack, cts);
6964 } else if (timers & PACE_TMR_TLP) {
6965 rack->r_ctl.rc_tlp_rxt_last_time = cts;
6966 ret = rack_timeout_tlp(tp, rack, cts);
6967 } else if (timers & PACE_TMR_RXT) {
6968 rack->r_ctl.rc_tlp_rxt_last_time = cts;
6969 rack->r_fast_output = 0;
6970 ret = rack_timeout_rxt(tp, rack, cts);
6971 } else if (timers & PACE_TMR_PERSIT) {
6972 ret = rack_timeout_persist(tp, rack, cts);
6973 } else if (timers & PACE_TMR_KEEP) {
6974 ret = rack_timeout_keepalive(tp, rack, cts);
6976 rack_log_to_processing(rack, cts, ret, timers);
6981 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line)
6984 uint32_t us_cts, flags_on_entry;
6985 uint8_t hpts_removed = 0;
6987 flags_on_entry = rack->r_ctl.rc_hpts_flags;
6988 us_cts = tcp_get_usecs(&tv);
6989 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
6990 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) ||
6991 ((tp->snd_max - tp->snd_una) == 0))) {
6992 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
6994 /* If we were not delayed cancel out the flag. */
6995 if ((tp->snd_max - tp->snd_una) == 0)
6996 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
6997 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry);
6999 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
7000 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
7001 if (rack->rc_inp->inp_in_hpts &&
7002 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) {
7004 * Canceling timers when we have no output being
7005 * paced. We also must remove ourselves from the hpts.
7008 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
7011 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK);
7013 if (hpts_removed == 0)
7014 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry);
7018 rack_timer_stop(struct tcpcb *tp, uint32_t timer_type)
7024 rack_stopall(struct tcpcb *tp)
7026 struct tcp_rack *rack;
7027 rack = (struct tcp_rack *)tp->t_fb_ptr;
7028 rack->t_timers_stopped = 1;
7033 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, uint32_t delta)
7039 rack_timer_active(struct tcpcb *tp, uint32_t timer_type)
7045 rack_stop_all_timers(struct tcpcb *tp)
7047 struct tcp_rack *rack;
7050 * Assure no timers are running.
7052 if (tcp_timer_active(tp, TT_PERSIST)) {
7053 /* We enter in persists, set the flag appropriately */
7054 rack = (struct tcp_rack *)tp->t_fb_ptr;
7055 rack->rc_in_persist = 1;
7057 tcp_timer_suspend(tp, TT_PERSIST);
7058 tcp_timer_suspend(tp, TT_REXMT);
7059 tcp_timer_suspend(tp, TT_KEEP);
7060 tcp_timer_suspend(tp, TT_DELACK);
7064 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
7065 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag)
7068 uint16_t stripped_flags;
7071 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
7073 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) {
7074 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS;
7075 rsm->r_flags |= RACK_OVERMAX;
7077 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) {
7078 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start);
7079 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start);
7081 idx = rsm->r_rtr_cnt - 1;
7082 rsm->r_tim_lastsent[idx] = ts;
7083 stripped_flags = rsm->r_flags & ~(RACK_SENT_SP|RACK_SENT_FP);
7084 if (rsm->r_flags & RACK_ACKED) {
7085 /* Probably MTU discovery messing with us */
7086 rsm->r_flags &= ~RACK_ACKED;
7087 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
7089 if (rsm->r_in_tmap) {
7090 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7093 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7095 if (rsm->r_flags & RACK_SACK_PASSED) {
7096 /* We have retransmitted due to the SACK pass */
7097 rsm->r_flags &= ~RACK_SACK_PASSED;
7098 rsm->r_flags |= RACK_WAS_SACKPASS;
7103 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
7104 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint16_t add_flag)
7107 * We (re-)transmitted starting at rsm->r_start for some length
7108 * (possibly less than r_end).
7110 struct rack_sendmap *nrsm, *insret;
7115 c_end = rsm->r_start + len;
7116 if (SEQ_GEQ(c_end, rsm->r_end)) {
7118 * We retransmitted the whole piece or more than the whole
7119 * slopping into the next rsm.
7121 rack_update_rsm(tp, rack, rsm, ts, add_flag);
7122 if (c_end == rsm->r_end) {
7128 /* Hangs over the end, return what's left */
7129 act_len = rsm->r_end - rsm->r_start;
7130 *lenp = (len - act_len);
7131 return (rsm->r_end);
7133 /* We don't get out of this block. */
7136 * Here we retransmitted less than the whole thing which means we
7137 * have to split this into what was transmitted and what was not.
7139 nrsm = rack_alloc_full_limit(rack);
7142 * We can't get memory, so lets not proceed.
7148 * So here we are going to take the original rsm and make it what we
7149 * retransmitted. nrsm will be the tail portion we did not
7150 * retransmit. For example say the chunk was 1, 11 (10 bytes). And
7151 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to
7152 * 1, 6 and the new piece will be 6, 11.
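 *
 * Pictorially (sequences are half-open, [start, end)):
 *   before: rsm  [1 -------------------- 11)
 *   after : rsm  [1 ----- 6)   nrsm [6 ----- 11)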
7154 rack_clone_rsm(rack, nrsm, rsm, c_end);
7156 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
7157 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
7159 if (insret != NULL) {
7160 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
7161 nrsm, insret, rack, rsm);
7164 if (rsm->r_in_tmap) {
7165 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
7166 nrsm->r_in_tmap = 1;
7168 rsm->r_flags &= (~RACK_HAS_FIN);
7169 rack_update_rsm(tp, rack, rsm, ts, add_flag);
7170 /* Log a split of rsm into rsm and nrsm */
7171 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
7177 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
7178 uint32_t seq_out, uint8_t th_flags, int32_t err, uint64_t cts,
7179 struct rack_sendmap *hintrsm, uint16_t add_flag, struct mbuf *s_mb, uint32_t s_moff)
7181 struct tcp_rack *rack;
7182 struct rack_sendmap *rsm, *nrsm, *insret, fe;
7183 register uint32_t snd_max, snd_una;
7186 * Add to the RACK log of packets in flight or retransmitted. If
7187 * there is a TS option we will use the TS echoed, if not we will
7190 * Retransmissions will increment the count and move the ts to its
7191 * proper place. Note that if options do not include TS's then we
7192 * won't be able to effectively use the ACK for an RTT on a retran.
7194 * Notes about r_start and r_end. Lets consider a send starting at
7195 * sequence 1 for 10 bytes. In such an example the r_start would be
7196 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11.
7197 * This means that r_end is actually the first sequence for the next
7202 * If err is set what do we do XXXrrs? should we not add the thing?
7203 * -- i.e. return if err != 0 or should we pretend we sent it? --
7204 * i.e. proceed with add ** do this for now.
7206 INP_WLOCK_ASSERT(tp->t_inpcb);
7209 * We don't log errors -- we could but snd_max does not
7210 * advance in this case either.
7214 if (th_flags & TH_RST) {
7216 * We don't log resets and we return immediately from
7221 rack = (struct tcp_rack *)tp->t_fb_ptr;
7222 snd_una = tp->snd_una;
7223 snd_max = tp->snd_max;
7224 if (th_flags & (TH_SYN | TH_FIN)) {
7226 * The call to rack_log_output is made before bumping
7227 * snd_max. This means we can record one extra byte on a SYN
7228 * or FIN if seq_out is adding more on and a FIN is present
7229 * (and we are not resending).
7231 if ((th_flags & TH_SYN) && (seq_out == tp->iss))
7233 if (th_flags & TH_FIN)
7235 if (SEQ_LT(snd_max, tp->snd_nxt)) {
7237 * The add/update has not been done for the FIN/SYN
7240 snd_max = tp->snd_nxt;
7243 if (SEQ_LEQ((seq_out + len), snd_una)) {
7244 /* Are we sending an old segment to induce an ack (keep-alive)? */
7247 if (SEQ_LT(seq_out, snd_una)) {
7248 /* huh? should we panic? */
7251 end = seq_out + len;
7253 if (SEQ_GEQ(end, seq_out))
7254 len = end - seq_out;
7259 /* We don't log zero window probes */
7262 rack->r_ctl.rc_time_last_sent = cts;
7263 if (IN_FASTRECOVERY(tp->t_flags)) {
7264 rack->r_ctl.rc_prr_out += len;
7266 /* First question is it a retransmission or new? */
7267 if (seq_out == snd_max) {
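/*
 * Sends that begin exactly at snd_max are brand new data, so we
 * allocate a fresh map entry; anything else is treated as a
 * (possibly partial) retransmission and handled further below.
 */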
7270 rsm = rack_alloc(rack);
7273 * Hmm out of memory and the tcb got destroyed while
7278 if (th_flags & TH_FIN) {
7279 rsm->r_flags = RACK_HAS_FIN|add_flag;
7281 rsm->r_flags = add_flag;
7283 rsm->r_tim_lastsent[0] = cts;
7285 rsm->r_rtr_bytes = 0;
7286 if (th_flags & TH_SYN) {
7287 /* The data space is one beyond snd_una */
7288 rsm->r_flags |= RACK_HAS_SYN;
7290 rsm->r_start = seq_out;
7291 rsm->r_end = rsm->r_start + len;
7294 * save off the mbuf location that
7295 * sndmbuf_noadv returned (which is
7296 * where we started copying from)..
7300 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */
7302 if (rsm->m->m_len <= rsm->soff) {
7304 * XXXrrs Question, will this happen?
7306 * If sbsndptr is set at the correct place
7307 * then s_moff should always be somewhere
7308 * within rsm->m. But if the sbsndptr was
7309 * off then that won't be true. If it occurs
7310 * we need to walk out to the correct location.
7315 while (lm->m_len <= rsm->soff) {
7316 rsm->soff -= lm->m_len;
7318 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u",
7319 __func__, rack, s_moff, s_mb, rsm->soff));
7322 counter_u64_add(rack_sbsndptr_wrong, 1);
7324 counter_u64_add(rack_sbsndptr_right, 1);
7325 rsm->orig_m_len = rsm->m->m_len;
7327 rsm->orig_m_len = 0;
7328 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
7330 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__);
7331 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7333 if (insret != NULL) {
7334 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
7335 nrsm, insret, rack, rsm);
7338 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7341 * Special case detection, is there just a single
7342 * packet outstanding when we are not in recovery?
7344 * If this is true mark it so.
7346 if ((IN_FASTRECOVERY(tp->t_flags) == 0) &&
7347 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) {
7348 struct rack_sendmap *prsm;
7350 prsm = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
7352 prsm->r_one_out_nr = 1;
7357 * If we reach here its a retransmission and we need to find it.
7359 memset(&fe, 0, sizeof(fe));
7361 if (hintrsm && (hintrsm->r_start == seq_out)) {
7365 /* No hints sorry */
7368 if ((rsm) && (rsm->r_start == seq_out)) {
7369 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag);
7376 /* Ok it was not the last pointer go through it the hard way. */
7378 fe.r_start = seq_out;
7379 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
7381 if (rsm->r_start == seq_out) {
7382 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag);
7389 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) {
7390 /* Transmitted within this piece */
7392 * Ok we must split off the front and then let the
7393 * update do the rest
7395 nrsm = rack_alloc_full_limit(rack);
7397 rack_update_rsm(tp, rack, rsm, cts, add_flag);
7401 * copy rsm to nrsm and then trim the front of rsm
7402 * to not include this part.
7404 rack_clone_rsm(rack, nrsm, rsm, seq_out);
7405 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
7406 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
7408 if (insret != NULL) {
7409 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
7410 nrsm, insret, rack, rsm);
7413 if (rsm->r_in_tmap) {
7414 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
7415 nrsm->r_in_tmap = 1;
7417 rsm->r_flags &= (~RACK_HAS_FIN);
7418 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag);
7426 * Hmm, not found in map. Did they retransmit both old and on into the
7429 if (seq_out == tp->snd_max) {
7431 } else if (SEQ_LT(seq_out, tp->snd_max)) {
7433 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n",
7434 seq_out, len, tp->snd_una, tp->snd_max);
7435 printf("Starting Dump of all rack entries\n");
7436 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
7437 printf("rsm:%p start:%u end:%u\n",
7438 rsm, rsm->r_start, rsm->r_end);
7440 printf("Dump complete\n");
7441 panic("seq_out not found rack:%p tp:%p",
7447 * Hmm beyond sndmax? (only if we are using the new rtt-pack
7450 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p",
7451 seq_out, len, tp->snd_max, tp);
7457 * Record one of the RTT updates from an ack into
7458 * our sample structure.
7462 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt,
7463 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt)
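/*
 * Record one RTT sample.  The confidence argument grades the sample:
 * 2 means it came from a SACK'd segment, 1 is a cum-ack we trust, and
 * 0 means low confidence (e.g. an app-limited or just-returned send,
 * see the checks below and in rack_update_rtt()).
 */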
7465 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
7466 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) {
7467 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt;
7469 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
7470 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) {
7471 rack->r_ctl.rack_rs.rs_rtt_highest = rtt;
7473 if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
7474 if (us_rtt < rack->r_ctl.rc_gp_lowrtt)
7475 rack->r_ctl.rc_gp_lowrtt = us_rtt;
7476 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd)
7477 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
7479 if ((confidence == 1) &&
7481 (rsm->r_just_ret) ||
7482 (rsm->r_one_out_nr &&
7483 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) {
7485 * If the rsm had a just-return
7486 * hit on it then we can't trust the
7487 * rtt measurement for buffer determination.
7488 * Note that a confidence of 2 indicates
7489 * SACK'd, which overrides the r_just_ret or
7490 * the r_one_out_nr. If it was a CUM-ACK and
7491 * we had only two outstanding, but get an
7492 * ack for only 1, then that also lowers our
7497 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
7498 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) {
7499 if (rack->r_ctl.rack_rs.confidence == 0) {
7501 * We take anything with no current confidence
7504 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
7505 rack->r_ctl.rack_rs.confidence = confidence;
7506 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
7507 } else if (confidence || rack->r_ctl.rack_rs.confidence) {
7509 * Once we have a confident number,
7510 * we can update it with a smaller
7511 * value since this confident number
7512 * may include the DSACK time until
7513 * the next segment (the second one) arrived.
7515 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
7516 rack->r_ctl.rack_rs.confidence = confidence;
7517 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
7520 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence);
7521 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID;
7522 rack->r_ctl.rack_rs.rs_rtt_tot += rtt;
7523 rack->r_ctl.rack_rs.rs_rtt_cnt++;
7527 * Collect new round-trip time estimate
7528 * and update averages and current timeout.
7531 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp)
7534 uint32_t o_srtt, o_var;
7535 int32_t hrtt_up = 0;
7538 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY)
7539 /* No valid sample */
7541 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) {
7542 /* We are to use the lowest RTT seen in a single ack */
7543 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest;
7544 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) {
7545 /* We are to use the highest RTT seen in a single ack */
7546 rtt = rack->r_ctl.rack_rs.rs_rtt_highest;
7547 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) {
7548 /* We are to use the average RTT seen in a single ack */
7549 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot /
7550 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt);
7553 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method);
7559 if (rack->rc_gp_rtt_set == 0) {
7561 * With no RTT we have to accept
7562 * even one we are not confident of.
7564 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt;
7565 rack->rc_gp_rtt_set = 1;
7566 } else if (rack->r_ctl.rack_rs.confidence) {
7567 /* update the running gp srtt */
7568 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8);
7569 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8;
7571 if (rack->r_ctl.rack_rs.confidence) {
7573 * record the low and high for highly buffered path computation,
7574 * we only do this if we are confident (not a retransmission).
7576 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) {
7577 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
7580 if (rack->rc_highly_buffered == 0) {
7582 * Currently once we declare a path as
7583 * highly buffered there is no going
7584 * back, which may be a problem...
7586 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) {
7587 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt,
7588 rack->r_ctl.rc_highest_us_rtt,
7589 rack->r_ctl.rc_lowest_us_rtt,
7591 rack->rc_highly_buffered = 1;
7595 if ((rack->r_ctl.rack_rs.confidence) ||
7596 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) {
7598 * If we are highly confident of it <or> it was
7599 * never retransmitted we accept it as the last us_rtt.
7601 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
7602 /* The lowest rtt can be set if it was not retransmitted */
7603 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) {
7604 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
7605 if (rack->r_ctl.rc_lowest_us_rtt == 0)
7606 rack->r_ctl.rc_lowest_us_rtt = 1;
7609 o_srtt = tp->t_srtt;
7610 o_var = tp->t_rttvar;
7611 rack = (struct tcp_rack *)tp->t_fb_ptr;
7612 if (tp->t_srtt != 0) {
7614 * We keep a simple srtt in microseconds, like our rtt
7615 * measurement. We don't need to do any tricks with shifting
7616 * etc. Instead we just add in 1/8th of the new measurement
7617 * and subtract out 1/8 of the old srtt. We do the same with
7618 * the variance after finding the absolute value of the
7619 * difference between this sample and the current srtt.
7621 delta = tp->t_srtt - rtt;
7622 /* Take off 1/8th of the current sRTT */
7623 tp->t_srtt -= (tp->t_srtt >> 3);
7624 /* Add in 1/8th of the new RTT just measured */
7625 tp->t_srtt += (rtt >> 3);
7626 if (tp->t_srtt <= 0)
7628 /* Now lets make the absolute value of the variance */
7631 /* Subtract out 1/8th */
7632 tp->t_rttvar -= (tp->t_rttvar >> 3);
7633 /* Add in 1/8th of the new variance we just saw */
7634 tp->t_rttvar += (delta >> 3);
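/*
 * Worked example of the 1/8 gain filters above (values in usecs):
 * with t_srtt = 40000 and a new rtt = 32000, delta = 8000;
 * t_srtt becomes 40000 - 5000 + 4000 = 39000, and with
 * t_rttvar = 8000 it stays 8000 - 1000 + 1000 = 8000.
 */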
7635 if (tp->t_rttvar <= 0)
7637 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
7638 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
7641 * No rtt measurement yet - use the unsmoothed rtt. Set the
7642 * variance to half the rtt (so our first retransmit happens at 3 * rtt).
7646 tp->t_rttvar = rtt >> 1;
7647 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
7649 rack->rc_srtt_measure_made = 1;
7650 KMOD_TCPSTAT_INC(tcps_rttupdated);
7653 if (rack_stats_gets_ms_rtt == 0) {
7654 /* Send in the microsecond rtt used for rxt timeout purposes */
7655 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt));
7656 } else if (rack_stats_gets_ms_rtt == 1) {
7657 /* Send in the millisecond rtt used for rxt timeout purposes */
7661 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC;
7662 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt));
7663 } else if (rack_stats_gets_ms_rtt == 2) {
7664 /* Send in the millisecond rtt as close to the path RTT as we can get */
7668 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC;
7669 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt));
7671 /* Send in the microsecond rtt as close to the path RTT as we can get */
7672 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt));
7677 * the retransmit should happen at rtt + 4 * rttvar. Because of the
7678 * way we do the smoothing, srtt and rttvar will each average +1/2
7679 * tick of bias. When we compute the retransmit timer, we want 1/2
7680 * tick of rounding and 1 extra tick because of +-1/2 tick
7681 * uncertainty in the firing of the timer. The bias will give us
7682 * exactly the 1.5 tick we need. But, because the bias is
7683 * statistical, we have to test that we don't drop below the minimum
7684 * feasible timer (which is 2 ticks).
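/*
 * Concretely: RACK_REXMTVAL() yields the srtt + 4 * rttvar estimate
 * described above, and RACK_TCPT_RANGESET() clamps the result into
 * [max(rack_rto_min, rtt + 2), rack_rto_max] before storing it in
 * t_rxtcur.
 */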
7687 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
7688 max(rack_rto_min, rtt + 2), rack_rto_max);
7689 rack_log_rtt_sample(rack, rtt);
7690 tp->t_softerror = 0;
7695 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts)
7698 * Apply the inbound us-rtt at us_cts to the filter.
7702 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
7703 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt,
7705 if (rack->r_ctl.last_pacing_time &&
7706 rack->rc_gp_dyn_mul &&
7707 (rack->r_ctl.last_pacing_time > us_rtt))
7708 rack->pacing_longer_than_rtt = 1;
7710 rack->pacing_longer_than_rtt = 0;
7711 if (old_rtt > us_rtt) {
7712 /* We just hit a new lower rtt time */
7713 rack_log_rtt_shrinks(rack, us_cts, old_rtt,
7714 __LINE__, RACK_RTTS_NEWRTT);
7716 * Only count it if it's lower than what we saw within our
7719 if ((old_rtt - us_rtt) > rack_min_rtt_movement) {
7720 if (rack_probertt_lower_within &&
7721 rack->rc_gp_dyn_mul &&
7722 (rack->use_fixed_rate == 0) &&
7723 (rack->rc_always_pace)) {
7725 * We are seeing a new lower rtt very close
7726 * to the time that we would have entered probe-rtt.
7727 * This is probably due to the fact that a peer flow
7728 * has entered probe-rtt. Let's go in now too.
7732 val = rack_probertt_lower_within * rack_time_between_probertt;
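/*
 * val acts as a head-start window: if we are already within roughly
 * that much of the next scheduled probe-rtt, the check below lets
 * this new lower rtt pull us into probe-rtt early instead of waiting
 * out the full rack_time_between_probertt.
 */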
7734 if ((rack->in_probe_rtt == 0) &&
7735 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) {
7736 rack_enter_probertt(rack, us_cts);
7739 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
7745 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
7746 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack)
7749 uint32_t t, len_acked;
7751 if ((rsm->r_flags & RACK_ACKED) ||
7752 (rsm->r_flags & RACK_WAS_ACKED))
7755 if (rsm->r_no_rtt_allowed) {
7759 if (ack_type == CUM_ACKED) {
7760 if (SEQ_GT(th_ack, rsm->r_end)) {
7761 len_acked = rsm->r_end - rsm->r_start;
7764 len_acked = th_ack - rsm->r_start;
7768 len_acked = rsm->r_end - rsm->r_start;
7771 if (rsm->r_rtr_cnt == 1) {
7774 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
7777 if (!tp->t_rttlow || tp->t_rttlow > t)
7779 if (!rack->r_ctl.rc_rack_min_rtt ||
7780 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
7781 rack->r_ctl.rc_rack_min_rtt = t;
7782 if (rack->r_ctl.rc_rack_min_rtt == 0) {
7783 rack->r_ctl.rc_rack_min_rtt = 1;
7786 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]))
7787 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
7789 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
7792 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time));
7793 if (ack_type == SACKED) {
7794 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1);
7795 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt);
7798 * We need to set up what our confidence
7801 * If the rsm was app limited and it is
7802 * less than a mss in length (the end
7803 * of the send) then we have a gap. If we
7804 * were app limited but say we were sending
7805 * multiple MSS's then we are more confident
7808 * When we are not app-limited then we see if
7809 * the rsm is being included in the current
7810 * measurement, we tell this by the app_limited_needs_set
7813 * Note that being cwnd blocked is not app-limited.
7814 * Also, the pacing delay between packets when we
7815 * are sending only 1 or 2 MSS's will show up
7816 * in the RTT. We probably need to examine this algorithm
7817 * a bit more and enhance it to account for the delay
7818 * between rsm's. We could do that by saving off the
7819 * pacing delay of each rsm (in an rsm) and then
7820 * factoring that in somehow though for now I am
7825 if (rsm->r_flags & RACK_APP_LIMITED) {
7826 if (all && (len_acked <= ctf_fixed_maxseg(tp)))
7830 } else if (rack->app_limited_needs_set == 0) {
7835 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2);
7836 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt,
7837 calc_conf, rsm, rsm->r_rtr_cnt);
7839 if ((rsm->r_flags & RACK_TLP) &&
7840 (!IN_FASTRECOVERY(tp->t_flags))) {
7841 /* Segment was a TLP and our retrans matched */
7842 if (rack->r_ctl.rc_tlp_cwnd_reduce) {
7843 rack->r_ctl.rc_rsm_start = tp->snd_max;
7844 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
7845 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
7846 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una);
7849 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) {
7850 /* New more recent rack_tmit_time */
7851 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
7852 rack->rc_rack_rtt = t;
7857 * We clear the soft/rxtshift since we got an ack.
7858 * There is no assurance we will call the commit() function
7859 * so we need to clear these to avoid incorrect handling.
7862 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
7863 rack_rto_min, rack_rto_max);
7864 tp->t_softerror = 0;
7865 if (to && (to->to_flags & TOF_TS) &&
7866 (ack_type == CUM_ACKED) &&
7868 ((rsm->r_flags & RACK_OVERMAX) == 0)) {
7870 * Now which timestamp does it match? In this block the ACK
7871 * must be coming from a previous transmission.
7873 for (i = 0; i < rsm->r_rtr_cnt; i++) {
7874 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) {
7875 t = cts - (uint32_t)rsm->r_tim_lastsent[i];
7878 if ((i + 1) < rsm->r_rtr_cnt) {
7880 * The peer ack'd from our previous
7881 * transmission. We have a spurious
7882 * retransmission and thus we don't
7883 * want to update our rack_rtt.
7887 if (!tp->t_rttlow || tp->t_rttlow > t)
7889 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
7890 rack->r_ctl.rc_rack_min_rtt = t;
7891 if (rack->r_ctl.rc_rack_min_rtt == 0) {
7892 rack->r_ctl.rc_rack_min_rtt = 1;
7895 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time,
7896 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) {
7897 /* New more recent rack_tmit_time */
7898 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
7899 rack->rc_rack_rtt = t;
7901 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3);
7902 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm,
7910 * Ok, it's a SACK block that we retransmitted, or a Windows
7911 * machine without timestamps. We can tell nothing from the
7912 * time-stamp since it's not there, or from the time the peer last
7913 * received a segment that moved forward its cum-ack point.
7916 i = rsm->r_rtr_cnt - 1;
7917 t = cts - (uint32_t)rsm->r_tim_lastsent[i];
7920 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
7922 * We retransmitted and the ack came back in less
7923 * than the smallest rtt we have observed. We most
7924 * likely did an improper retransmit as outlined in
7925 * 6.2 Step 2 point 2 in the rack-draft so we
7926 * don't want to update our rack_rtt. In
7927 * theory (in the future) we might want to think about reverting our
7928 * cwnd state, but we won't for now.
7931 } else if (rack->r_ctl.rc_rack_min_rtt) {
7933 * We retransmitted it and the retransmit did the
7936 if (!rack->r_ctl.rc_rack_min_rtt ||
7937 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
7938 rack->r_ctl.rc_rack_min_rtt = t;
7939 if (rack->r_ctl.rc_rack_min_rtt == 0) {
7940 rack->r_ctl.rc_rack_min_rtt = 1;
7943 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[i])) {
7944 /* New more recent rack_tmit_time */
7945 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i];
7946 rack->rc_rack_rtt = t;
7955 * Mark the SACK_PASSED flag on all entries prior to rsm, send-wise.
7958 rack_log_sack_passed(struct tcpcb *tp,
7959 struct tcp_rack *rack, struct rack_sendmap *rsm)
7961 struct rack_sendmap *nrsm;
7964 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap,
7965 rack_head, r_tnext) {
7967 /* Skip the original segment, it is acked */
7970 if (nrsm->r_flags & RACK_ACKED) {
7972 * Skip ack'd segments, though we
7973 * should not see these, since tmap
7974 * should not have ack'd segments.
7978 if (nrsm->r_flags & RACK_SACK_PASSED) {
7980 * We found one that is already marked
7981 * passed, we have been here before and
7982 * so all others below this are marked.
7986 nrsm->r_flags |= RACK_SACK_PASSED;
7987 nrsm->r_flags &= ~RACK_WAS_SACKPASS;
7992 rack_need_set_test(struct tcpcb *tp,
7993 struct tcp_rack *rack,
7994 struct rack_sendmap *rsm,
8000 if ((tp->t_flags & TF_GPUTINPROG) &&
8001 SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
8003 * We were app limited, and this ack
8004 * butts up or goes beyond the point where we want
8005 * to start our next measurement. We need
8006 * to record the new gput_ts here and
8007 * possibly update the start sequence.
8011 if (rsm->r_rtr_cnt > 1) {
8013 * This is a retransmit, can we
8014 * really make any assessment at this
8015 * point? We are not really sure of
8016 * the timestamp, is it this or the
8017 * previous transmission?
8019 * Lets wait for something better that
8020 * is not retransmitted.
8026 rack->app_limited_needs_set = 0;
8027 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
8028 /* Do we start at a new end? */
8029 if ((use_which == RACK_USE_BEG) &&
8030 SEQ_GEQ(rsm->r_start, tp->gput_seq)) {
8032 * When we get an ACK that just eats
8033 * up some of the rsm, we set RACK_USE_BEG
8034 * since what's at r_start (i.e. th_ack)
8035 * is left unacked and that's where the
8036 * measurement now starts.
8038 tp->gput_seq = rsm->r_start;
8039 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8041 if ((use_which == RACK_USE_END) &&
8042 SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
8044 * We use the end when the cumack
8045 * is moving forward and completely
8046 * deleting the rsm passed so basically
8047 * r_end holds th_ack.
8049 * For SACK's we also want to use the end
8050 * since this piece just got sacked and
8051 * we want to target anything after that
8052 * in our measurement.
8054 tp->gput_seq = rsm->r_end;
8055 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8057 if (use_which == RACK_USE_END_OR_THACK) {
8059 * special case for ack moving forward,
8060 * not a sack, we need to move all the
8061 * way up to where this ack cum-ack moves
8064 if (SEQ_GT(th_ack, rsm->r_end))
8065 tp->gput_seq = th_ack;
8067 tp->gput_seq = rsm->r_end;
8068 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8070 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) {
8072 * We moved beyond this guy's range, re-calculate
8073 * the new end point.
8075 if (rack->rc_gp_filled == 0) {
8076 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp)));
8078 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
8082 * We are moving the goal post, we may be able to clear the
8083 * measure_saw_probe_rtt flag.
8085 if ((rack->in_probe_rtt == 0) &&
8086 (rack->measure_saw_probe_rtt) &&
8087 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
8088 rack->measure_saw_probe_rtt = 0;
8089 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts,
8090 seq, tp->gput_seq, 0, 5, line, NULL);
8091 if (rack->rc_gp_filled &&
8092 ((tp->gput_ack - tp->gput_seq) <
8093 max(rc_init_window(rack), (MIN_GP_WIN *
8094 ctf_fixed_maxseg(tp))))) {
8095 uint32_t ideal_amount;
8097 ideal_amount = rack_get_measure_window(tp, rack);
8098 if (ideal_amount > sbavail(&tp->t_inpcb->inp_socket->so_snd)) {
8100 * There is no sense in continuing this measurement
8101 * because it's too small to gain us anything we
8102 * trust. Skip it and that way we can start a new
8103 * measurement more quickly.
8105 tp->t_flags &= ~TF_GPUTINPROG;
8106 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq,
8107 0, 0, 0, 6, __LINE__, NULL);
8110 * Reset the window further out.
8112 tp->gput_ack = tp->gput_seq + ideal_amount;
8119 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack,
8120 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, int *moved_two)
8122 uint32_t start, end, changed = 0;
8123 struct rack_sendmap stack_map;
8124 struct rack_sendmap *rsm, *nrsm, fe, *insret, *prev, *next;
8125 int32_t used_ref = 1;
8128 start = sack->start;
8131 memset(&fe, 0, sizeof(fe));
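/*
 * fe is a throw-away search key: only its r_start is filled in and it
 * is handed to RB_FIND below to locate the sendmap entry that covers
 * the start of this SACK block.
 */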
8133 if ((rsm == NULL) ||
8134 (SEQ_LT(end, rsm->r_start)) ||
8135 (SEQ_GEQ(start, rsm->r_end)) ||
8136 (SEQ_LT(start, rsm->r_start))) {
8138 * We are not in the right spot,
8139 * find the correct spot in the tree.
8143 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
8150 /* Ok we have an ACK for some piece of this rsm */
8151 if (rsm->r_start != start) {
8152 if ((rsm->r_flags & RACK_ACKED) == 0) {
8154 * Need to split this into two pieces, the before and after;
8155 * the before remains in the map, the after must be
8156 * added. In other words we have:
8157 * rsm |--------------|
8161 * and nrsm will be the sacked piece
8164 * But before we start down that path let's
8165 * see if the sack spans over on top of
8166 * the next guy and it is already sacked.
8168 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8169 if (next && (next->r_flags & RACK_ACKED) &&
8170 SEQ_GEQ(end, next->r_start)) {
8172 * So the next one is already acked, and
8173 * we can thus by hookery use our stack_map
8174 * to reflect the piece being sacked and
8175 * then adjust the two tree entries moving
8176 * the start and ends around. So we start like:
8177 * rsm |------------| (not-acked)
8178 * next |-----------| (acked)
8179 * sackblk |-------->
8180 * We want to end like so:
8181 * rsm |------| (not-acked)
8182 * next |-----------------| (acked)
8184 * Where nrsm is a temporary stack piece we
8185 * use to update all the gizmos.
8187 /* Copy up our fudge block */
8189 memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
8190 /* Now adjust our tree blocks */
8192 next->r_start = start;
8193 /* Now we must adjust back where next->m is */
8194 rack_setup_offset_for_rsm(rsm, next);
8196 /* We don't need to adjust rsm, it did not change */
8197 /* Clear out the dup ack count of the remainder */
8199 rsm->r_just_ret = 0;
8200 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
8201 /* Now lets make sure our fudge block is right */
8202 nrsm->r_start = start;
8203 /* Now lets update all the stats and such */
8204 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0);
8205 if (rack->app_limited_needs_set)
8206 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END);
8207 changed += (nrsm->r_end - nrsm->r_start);
8208 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
8209 if (nrsm->r_flags & RACK_SACK_PASSED) {
8210 counter_u64_add(rack_reorder_seen, 1);
8211 rack->r_ctl.rc_reorder_ts = cts;
8214 * Now we want to go up from rsm (the
8215 * one left un-acked) to the next one
8216 * in the tmap. We do this so when
8217 * we walk backwards we include marking
8218 * sack-passed on rsm (The one passed in
8219 * is skipped since it is generally called
8220 * on something sacked before removing it
8223 if (rsm->r_in_tmap) {
8224 nrsm = TAILQ_NEXT(rsm, r_tnext);
8226 * Now that we have the next
8227 * one walk backwards from there.
8229 if (nrsm && nrsm->r_in_tmap)
8230 rack_log_sack_passed(tp, rack, nrsm);
8232 /* Now are we done? */
8233 if (SEQ_LT(end, next->r_end) ||
8234 (end == next->r_end)) {
8235 /* Done with block */
8238 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__);
8239 counter_u64_add(rack_sack_used_next_merge, 1);
8240 /* Position for the next block */
8241 start = next->r_end;
8242 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, next);
8247 * We can't use any hookery here, so we
8248 * need to split the map. We enter like
8252 * We will add the new block nrsm and
8253 * that will be the new portion, and then
8254 * fall through after resetting rsm. So we
8255 * split and look like this:
8259 * We then fall through resetting
8260 * rsm to nrsm, so the next block
8263 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
8266 * failed XXXrrs what can we do but lose the sack
8271 counter_u64_add(rack_sack_splits, 1);
8272 rack_clone_rsm(rack, nrsm, rsm, start);
8273 rsm->r_just_ret = 0;
8274 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
8276 if (insret != NULL) {
8277 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
8278 nrsm, insret, rack, rsm);
8281 if (rsm->r_in_tmap) {
8282 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
8283 nrsm->r_in_tmap = 1;
8285 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__);
8286 rsm->r_flags &= (~RACK_HAS_FIN);
8287 /* Position us to point to the new nrsm that starts the sack blk */
8291 /* Already sacked this piece */
8292 counter_u64_add(rack_sack_skipped_acked, 1);
8294 if (end == rsm->r_end) {
8295 /* Done with block */
8296 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8298 } else if (SEQ_LT(end, rsm->r_end)) {
8299 /* A partial sack to an already sacked block */
8301 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8305 * The end goes beyond this guy
8306 * reposition the start to the
8310 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8316 if (SEQ_GEQ(end, rsm->r_end)) {
8318 * The end of this block is either beyond this guy or right
8319 * at this guy. I.e.:
8325 if ((rsm->r_flags & RACK_ACKED) == 0) {
8326 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
8327 changed += (rsm->r_end - rsm->r_start);
8328 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
8329 if (rsm->r_in_tmap) /* should be true */
8330 rack_log_sack_passed(tp, rack, rsm);
8331 /* Is reordering occurring? */
8332 if (rsm->r_flags & RACK_SACK_PASSED) {
8333 rsm->r_flags &= ~RACK_SACK_PASSED;
8334 counter_u64_add(rack_reorder_seen, 1);
8335 rack->r_ctl.rc_reorder_ts = cts;
8337 if (rack->app_limited_needs_set)
8338 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
8339 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
8340 rsm->r_flags |= RACK_ACKED;
8341 rsm->r_flags &= ~RACK_TLP;
8342 if (rsm->r_in_tmap) {
8343 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8346 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__);
8348 counter_u64_add(rack_sack_skipped_acked, 1);
8351 if (end == rsm->r_end) {
8352 /* This block only - done, setup for next */
8356 * There is more not covered by this rsm, move on
8357 * to the next block in the RB tree.
8359 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8367 * The end of this sack block is smaller than
8372 if ((rsm->r_flags & RACK_ACKED) == 0) {
8373 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8374 if (prev && (prev->r_flags & RACK_ACKED)) {
8376 * Goal, we want the right remainder of rsm to shrink
8377 * in place and span from (rsm->r_start = end) to rsm->r_end.
8378 * We want to expand prev to go all the way
8379 * to prev->r_end <- end.
8380 * so in the tree we have before:
8381 * prev |--------| (acked)
8382 * rsm |-------| (non-acked)
8384 * We churn it so we end up with
8385 * prev |----------| (acked)
8386 * rsm |-----| (non-acked)
8387 * nrsm |-| (temporary)
8390 memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
8393 /* Now adjust nrsm (stack copy) to be
8394 * the one that is the small
8395 * piece that was "sacked".
8399 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
8401 * Now that the rsm has had its start moved forward
8402 * let's go ahead and get its new place in the world.
8404 rack_setup_offset_for_rsm(prev, rsm);
8406 * Now nrsm is our new little piece
8407 * that is acked (which was merged
8408 * to prev). Update the rtt and changed
8409 * based on that. Also check for reordering.
8411 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0);
8412 if (rack->app_limited_needs_set)
8413 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END);
8414 changed += (nrsm->r_end - nrsm->r_start);
8415 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
8416 if (nrsm->r_flags & RACK_SACK_PASSED) {
8417 counter_u64_add(rack_reorder_seen, 1);
8418 rack->r_ctl.rc_reorder_ts = cts;
8420 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__);
8422 counter_u64_add(rack_sack_used_prev_merge, 1);
8425 * This is the case where our previous
8426 * block is not acked either, so we must
8427 * split the block in two.
8429 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
8431 /* failed rrs what can we do but lose the sack info? */
8435 * In this case nrsm becomes
8436 * nrsm->r_start = end;
8437 * nrsm->r_end = rsm->r_end;
8438 * which is un-acked.
8440 * rsm->r_end = nrsm->r_start;
8441 * i.e. the remaining un-acked
8442 * piece is left on the left
8445 * So we start like this
8446 * rsm |----------| (not acked)
8448 * build it so we have
8450 * nrsm |------| (not acked)
8452 counter_u64_add(rack_sack_splits, 1);
8453 rack_clone_rsm(rack, nrsm, rsm, end);
8454 rsm->r_flags &= (~RACK_HAS_FIN);
8455 rsm->r_just_ret = 0;
8456 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
8458 if (insret != NULL) {
8459 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
8460 nrsm, insret, rack, rsm);
8463 if (rsm->r_in_tmap) {
8464 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
8465 nrsm->r_in_tmap = 1;
8468 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
8469 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
8470 changed += (rsm->r_end - rsm->r_start);
8471 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
8472 if (rsm->r_in_tmap) /* should be true */
8473 rack_log_sack_passed(tp, rack, rsm);
8474 /* Is reordering occurring? */
8475 if (rsm->r_flags & RACK_SACK_PASSED) {
8476 rsm->r_flags &= ~RACK_SACK_PASSED;
8477 counter_u64_add(rack_reorder_seen, 1);
8478 rack->r_ctl.rc_reorder_ts = cts;
8480 if (rack->app_limited_needs_set)
8481 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
8482 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
8483 rsm->r_flags |= RACK_ACKED;
8484 rsm->r_flags &= ~RACK_TLP;
8485 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__);
8486 if (rsm->r_in_tmap) {
8487 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8491 } else if (start != end){
8493 * The block was already acked.
8495 counter_u64_add(rack_sack_skipped_acked, 1);
8499 if (rsm && (rsm->r_flags & RACK_ACKED)) {
8501 * Now can we merge where we worked
8502 * with either the previous or the next block?
8505 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8507 if (next->r_flags & RACK_ACKED) {
8508 /* yep this and next can be merged */
8509 rsm = rack_merge_rsm(rack, rsm, next);
8510 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8514 /* Now what about the previous? */
8515 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8517 if (prev->r_flags & RACK_ACKED) {
8518 /* yep the previous and this can be merged */
8519 rsm = rack_merge_rsm(rack, prev, rsm);
8520 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8525 if (used_ref == 0) {
8526 counter_u64_add(rack_sack_proc_all, 1);
8528 counter_u64_add(rack_sack_proc_short, 1);
8530 /* Save off the next one for quick reference. */
8532 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8535 *prsm = rack->r_ctl.rc_sacklast = nrsm;
8536 /* Pass back the moved. */
8542 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack)
8544 struct rack_sendmap *tmap;
8547 while (rsm && (rsm->r_flags & RACK_ACKED)) {
8548 /* It's no longer sacked, mark it so */
8549 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
8551 if (rsm->r_in_tmap) {
8552 panic("rack:%p rsm:%p flags:0x%x in tmap?",
8553 rack, rsm, rsm->r_flags);
8556 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS);
8557 /* Rebuild it into our tmap */
8559 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8562 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext);
8565 tmap->r_in_tmap = 1;
8566 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8569 * Now let's possibly clear the sack filter so we start
8570 * recognizing sacks that cover this area.
8572 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack);
8577 rack_do_decay(struct tcp_rack *rack)
8581 #define timersub(tvp, uvp, vvp) \
8583 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
8584 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
8585 if ((vvp)->tv_usec < 0) { \
8587 (vvp)->tv_usec += 1000000; \
8591 timersub(&rack->r_ctl.act_rcv_time, &rack->r_ctl.rc_last_time_decay, &res);
8594 rack->r_ctl.input_pkt++;
8595 if ((rack->rc_in_persist) ||
8596 (res.tv_sec >= 1) ||
8597 (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) {
8599 * Check for decay of non-SAD;
8600 * we want all SAD detection metrics to
8601 * decay 1/4 per second (or more) of time passed.
8605 pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt;
8606 /* Update our saved tracking values */
8607 rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt;
8608 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time;
8609 /* Now do we escape without decay? */
8610 #ifdef NETFLIX_EXP_DETECTION
8611 if (rack->rc_in_persist ||
8612 (rack->rc_tp->snd_max == rack->rc_tp->snd_una) ||
8613 (pkt_delta < tcp_sad_low_pps)){
8615 * We don't decay idle connections
8616 * or ones that have a low input pps.
8620 /* Decay the counters */
8621 rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count,
8623 rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count,
8625 rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra,
8627 rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move,
8634 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to)
8636 struct rack_sendmap *rsm, *rm;
8639 * The ACK point is advancing to th_ack, we must drop off
8640 * the packets in the rack log and calculate any eligible
8643 rack->r_wanted_output = 1;
8645 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
8647 if ((th_ack - 1) == tp->iss) {
8649 * For the SYN incoming case we will not
8650 * have called tcp_output for the sending of
8651 * the SYN, so there will be no map. All
8652 * other cases should probably be a panic.
8656 if (tp->t_flags & TF_SENTFIN) {
8657 /* if we sent a FIN we often will not have a map */
8661 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u snd_nxt:%u\n",
8663 tp->t_state, th_ack, rack,
8664 tp->snd_una, tp->snd_max, tp->snd_nxt);
8668 if (SEQ_LT(th_ack, rsm->r_start)) {
8669 /* Huh map is missing this */
8671 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n",
8673 th_ack, tp->t_state, rack->r_state);
8677 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack);
8678 /* Now do we consume the whole thing? */
8679 if (SEQ_GEQ(th_ack, rsm->r_end)) {
8680 /* Its all consumed. */
8682 uint8_t newly_acked;
8684 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__);
8685 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes;
8686 rsm->r_rtr_bytes = 0;
8687 /* Record the time of highest cumack sent */
8688 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8689 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
8692 panic("removing head in rack:%p rsm:%p rm:%p",
8696 if (rsm->r_in_tmap) {
8697 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8701 if (rsm->r_flags & RACK_ACKED) {
8703 * It was acked on the scoreboard -- remove
8706 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
8708 } else if (rsm->r_flags & RACK_SACK_PASSED) {
8710 * There are segments ACKED on the
8711 * scoreboard further up. We are seeing
8714 rsm->r_flags &= ~RACK_SACK_PASSED;
8715 counter_u64_add(rack_reorder_seen, 1);
8716 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
8717 rsm->r_flags |= RACK_ACKED;
8718 rack->r_ctl.rc_reorder_ts = cts;
8719 if (rack->r_ent_rec_ns) {
8721 * We have sent no more, and we saw a sack
8724 rack->r_might_revert = 1;
8727 if ((rsm->r_flags & RACK_TO_REXT) &&
8728 (tp->t_flags & TF_RCVD_TSTMP) &&
8729 (to->to_flags & TOF_TS) &&
8730 (tp->t_flags & TF_PREVVALID)) {
8732 * We can use the timestamp to see
8733 * if this retransmission was from the
8734 * first transmit. If so we made a mistake.
8736 tp->t_flags &= ~TF_PREVVALID;
8737 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) {
8738 /* The first transmit is what this ack is for */
8739 rack_cong_signal(tp, CC_RTO_ERR, th_ack);
8742 left = th_ack - rsm->r_end;
8743 if (rack->app_limited_needs_set && newly_acked)
8744 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK);
8745 /* Free back to zone */
8746 rack_free(rack, rsm);
8750 /* Check for reneging */
8751 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
8752 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) {
8754 * The peer has moved snd_una up to
8755 * the edge of this send, i.e. one
8756 * that it had previously acked. The only
8757 * way that can be true if the peer threw
8758 * away data (space issues) that it had
8759 * previously sacked (else it would have
8760 * given us snd_una up to rsm->r_end).
8761 * We need to undo the acked markings here.
8763 * Note we have to look to make sure th_ack is
8764 * our rsm->r_start in case we get an old ack
8765 * where th_ack is behind snd_una.
8767 rack_peer_reneges(rack, rsm, th_ack);
8771 if (rsm->r_flags & RACK_ACKED) {
8773 * It was acked on the scoreboard -- remove it from
8774 * total for the part being cum-acked.
8776 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start);
8779 * Clear the dup ack count for
8780 * the piece that remains.
8783 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
8784 if (rsm->r_rtr_bytes) {
8786 * It was retransmitted; adjust the
8787 * sack holes for what was acked.
8791 ack_am = (th_ack - rsm->r_start);
8792 if (ack_am >= rsm->r_rtr_bytes) {
8793 rack->r_ctl.rc_holes_rxt -= ack_am;
8794 rsm->r_rtr_bytes -= ack_am;
8798 * Update where the piece starts and record
8799 * the time of send of highest cumack sent.
8801 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8802 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__);
8803 /* Now we need to move our offset forward too */
8804 if (rsm->orig_m_len != rsm->m->m_len) {
8805 /* Fix up the orig_m_len and possibly the mbuf offset */
8806 rack_adjust_orig_mlen(rsm);
8808 rsm->soff += (th_ack - rsm->r_start);
8809 rsm->r_start = th_ack;
8810 /* Now do we need to move the mbuf fwd too? */
8811 while (rsm->soff >= rsm->m->m_len) {
8812 rsm->soff -= rsm->m->m_len;
8813 rsm->m = rsm->m->m_next;
8814 KASSERT((rsm->m != NULL),
8815 (" nrsm:%p hit at soff:%u null m",
8818 rsm->orig_m_len = rsm->m->m_len;
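/*
 * rsm->m/rsm->soff now point at the first byte that is still
 * unacknowledged (the new r_start), and orig_m_len is refreshed so a
 * later trim can tell whether the underlying mbuf has changed length
 * (e.g. had more data appended) since we last looked.
 */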
8819 if (rack->app_limited_needs_set)
8820 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG);
8824 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack)
8826 struct rack_sendmap *rsm;
8827 int sack_pass_fnd = 0;
8829 if (rack->r_might_revert) {
8831 * Ok we have reordering, have not sent anything, we
8832 * might want to revert the congestion state if nothing
8833 * further has SACK_PASSED on it. Let's check.
8835 * We also get here when we have DSACKs come in for
8836 * all the data that we FR'd. Note that a rxt or tlp
8837 * timer clears this from happening.
8840 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
8841 if (rsm->r_flags & RACK_SACK_PASSED) {
8846 if (sack_pass_fnd == 0) {
8848 * We went into recovery
8849 * incorrectly due to reordering!
8853 rack->r_ent_rec_ns = 0;
8854 orig_cwnd = tp->snd_cwnd;
8855 tp->snd_cwnd = rack->r_ctl.rc_cwnd_at_erec;
8856 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec;
8857 tp->snd_recover = tp->snd_una;
8858 rack_log_to_prr(rack, 14, orig_cwnd);
8859 EXIT_RECOVERY(tp->t_flags);
8861 rack->r_might_revert = 0;
8865 #ifdef NETFLIX_EXP_DETECTION
8867 rack_do_detection(struct tcpcb *tp, struct tcp_rack *rack, uint32_t bytes_this_ack, uint32_t segsiz)
8869 if ((rack->do_detection || tcp_force_detection) &&
8870 tcp_sack_to_ack_thresh &&
8871 tcp_sack_to_move_thresh &&
8872 ((rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum) || rack->sack_attack_disable)) {
8874 * We have thresholds set to find
8875 * possible attackers and disable sack.
8878 uint64_t ackratio, moveratio, movetotal;
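/*
 * The ratios below are fixed point scaled by 1000 (no floating point
 * in the kernel): ackratio approximates SACK blocks seen per ACK
 * * 1000, and moveratio is the share of SACK processing that needed
 * extra movement, also * 1000.
 */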
8881 rack_log_sad(rack, 1);
8882 ackratio = (uint64_t)(rack->r_ctl.sack_count);
8883 ackratio *= (uint64_t)(1000);
8884 if (rack->r_ctl.ack_count)
8885 ackratio /= (uint64_t)(rack->r_ctl.ack_count);
8887 /* We really should not hit here */
8890 if ((rack->sack_attack_disable == 0) &&
8891 (ackratio > rack_highest_sack_thresh_seen))
8892 rack_highest_sack_thresh_seen = (uint32_t)ackratio;
8893 movetotal = rack->r_ctl.sack_moved_extra;
8894 movetotal += rack->r_ctl.sack_noextra_move;
8895 moveratio = rack->r_ctl.sack_moved_extra;
8896 moveratio *= (uint64_t)1000;
8898 moveratio /= movetotal;
8900 /* No moves, that's pretty good */
8903 if ((rack->sack_attack_disable == 0) &&
8904 (moveratio > rack_highest_move_thresh_seen))
8905 rack_highest_move_thresh_seen = (uint32_t)moveratio;
8906 if (rack->sack_attack_disable == 0) {
8907 if ((ackratio > tcp_sack_to_ack_thresh) &&
8908 (moveratio > tcp_sack_to_move_thresh)) {
8909 /* Disable sack processing */
8910 rack->sack_attack_disable = 1;
8911 if (rack->r_rep_attack == 0) {
8912 rack->r_rep_attack = 1;
8913 counter_u64_add(rack_sack_attacks_detected, 1);
8915 if (tcp_attack_on_turns_on_logging) {
8917 * Turn on logging, used for debugging
8920 rack->rc_tp->t_logstate = tcp_attack_on_turns_on_logging;
8922 /* Clamp the cwnd at flight size */
8923 rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd;
8924 rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
8925 rack_log_sad(rack, 2);
8928 /* We are sack-disabled; check for false positives */
8929 if ((ackratio <= tcp_restoral_thresh) ||
8930 (rack->r_ctl.rc_num_maps_alloced < tcp_map_minimum)) {
8931 rack->sack_attack_disable = 0;
8932 rack_log_sad(rack, 3);
8933 /* Restart counting */
8934 rack->r_ctl.sack_count = 0;
8935 rack->r_ctl.sack_moved_extra = 0;
8936 rack->r_ctl.sack_noextra_move = 1;
8937 rack->r_ctl.ack_count = max(1,
8938 (bytes_this_ack / segsiz));
8940 if (rack->r_rep_reverse == 0) {
8941 rack->r_rep_reverse = 1;
8942 counter_u64_add(rack_sack_attacks_reversed, 1);
8944 /* Restore the cwnd */
8945 if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd)
8946 rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd;
8954 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end)
8959 if (SEQ_GT(end, start))
8964 * We keep track of how many DSACK blocks we get
8965 * after a recovery incident.
8967 rack->r_ctl.dsack_byte_cnt += am;
8968 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) &&
8969 rack->r_ctl.retran_during_recovery &&
8970 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) {
8972 * False recovery; the most likely culprit is reordering. If
8973 * nothing else is missing we need to revert.
8975 rack->r_might_revert = 1;
8976 rack_handle_might_revert(rack->rc_tp, rack);
8977 rack->r_might_revert = 0;
8978 rack->r_ctl.retran_during_recovery = 0;
8979 rack->r_ctl.dsack_byte_cnt = 0;
8984 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack)
8986 /* Deal with changed and PRR here (in recovery only) */
8987 uint32_t pipe, snd_una;
8989 rack->r_ctl.rc_prr_delivered += changed;
8991 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) {
8993 * It is all outstanding, we are application limited
8994 * and thus we don't need more room to send anything.
8995 * Note we use tp->snd_una here and not th_ack because
8996 * the data as yet not been cut from the sb.
8998 rack->r_ctl.rc_prr_sndcnt = 0;
9001 /* Compute prr_sndcnt */
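/*
 * This mirrors Proportional Rate Reduction (RFC 6937): while pipe is
 * still above ssthresh we send roughly
 * (prr_delivered * ssthresh / recovery_fs) - prr_out, so the sending
 * rate ramps down in proportion to deliveries; once pipe falls below
 * ssthresh the else-branch grows back toward ssthresh in a
 * slow-start-like fashion, bounded by what was actually delivered.
 */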
9002 if (SEQ_GT(tp->snd_una, th_ack)) {
9003 snd_una = tp->snd_una;
9007 pipe = ((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt;
9008 if (pipe > tp->snd_ssthresh) {
9011 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh;
9012 if (rack->r_ctl.rc_prr_recovery_fs > 0)
9013 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs;
9015 rack->r_ctl.rc_prr_sndcnt = 0;
9016 rack_log_to_prr(rack, 9, 0);
9020 if (sndcnt > (long)rack->r_ctl.rc_prr_out)
9021 sndcnt -= rack->r_ctl.rc_prr_out;
9024 rack->r_ctl.rc_prr_sndcnt = sndcnt;
9025 rack_log_to_prr(rack, 10, 0);
9029 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out)
9030 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out);
9033 if (changed > limit)
9035 limit += ctf_fixed_maxseg(tp);
9036 if (tp->snd_ssthresh > pipe) {
9037 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit);
9038 rack_log_to_prr(rack, 11, 0);
9040 rack->r_ctl.rc_prr_sndcnt = min(0, limit);
9041 rack_log_to_prr(rack, 12, 0);
9047 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck)
9050 struct tcp_rack *rack;
9051 struct rack_sendmap *rsm;
9052 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1];
9053 register uint32_t th_ack;
9054 int32_t i, j, k, num_sack_blks = 0;
9055 uint32_t cts, acked, ack_point, sack_changed = 0;
9056 int loop_start = 0, moved_two = 0;
9060 INP_WLOCK_ASSERT(tp->t_inpcb);
9061 if (th->th_flags & TH_RST) {
9062 /* We don't log resets */
9065 rack = (struct tcp_rack *)tp->t_fb_ptr;
9066 cts = tcp_get_usecs(NULL);
9067 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
9069 th_ack = th->th_ack;
9070 if (rack->sack_attack_disable == 0)
9071 rack_do_decay(rack);
9072 if (BYTES_THIS_ACK(tp, th) >= ctf_fixed_maxseg(rack->rc_tp)) {
9074 * You only get credit for
9075 * an MSS or greater (and you get extra
9076 * credit for larger cum-ack moves).
9080 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp);
9081 rack->r_ctl.ack_count += ac;
9082 counter_u64_add(rack_ack_total, ac);
9084 if (rack->r_ctl.ack_count > 0xfff00000) {
9086 * reduce the number to keep us under
9089 rack->r_ctl.ack_count /= 2;
9090 rack->r_ctl.sack_count /= 2;
9092 if (SEQ_GT(th_ack, tp->snd_una)) {
9093 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__);
9094 tp->t_acktime = ticks;
9096 if (rsm && SEQ_GT(th_ack, rsm->r_start))
9097 changed = th_ack - rsm->r_start;
9099 rack_process_to_cumack(tp, rack, th_ack, cts, to);
9101 if ((to->to_flags & TOF_SACK) == 0) {
9102 /* We are done, nothing left and no sack. */
9103 rack_handle_might_revert(tp, rack);
9105 * For cases where we struck a dup-ack
9106 * with no SACK, add to the changes so
9107 * PRR will work right.
9109 if (dup_ack_struck && (changed == 0)) {
9110 changed += ctf_fixed_maxseg(rack->rc_tp);
9114 /* Sack block processing */
9115 if (SEQ_GT(th_ack, tp->snd_una))
9118 ack_point = tp->snd_una;
9119 for (i = 0; i < to->to_nsacks; i++) {
9120 bcopy((to->to_sacks + i * TCPOLEN_SACK),
9121 &sack, sizeof(sack));
9122 sack.start = ntohl(sack.start);
9123 sack.end = ntohl(sack.end);
9124 if (SEQ_GT(sack.end, sack.start) &&
9125 SEQ_GT(sack.start, ack_point) &&
9126 SEQ_LT(sack.start, tp->snd_max) &&
9127 SEQ_GT(sack.end, ack_point) &&
9128 SEQ_LEQ(sack.end, tp->snd_max)) {
9129 sack_blocks[num_sack_blks] = sack;
9131 #ifdef NETFLIX_STATS
9132 } else if (SEQ_LEQ(sack.start, th_ack) &&
9133 SEQ_LEQ(sack.end, th_ack)) {
9135 * It's a D-SACK block.
9137 tcp_record_dsack(sack.start, sack.end);
9139 rack_note_dsack(rack, sack.start, sack.end);
9143 * Sort the SACK blocks so we can update the rack scoreboard with
9146 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks,
9147 num_sack_blks, th->th_ack);
9148 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks);
9149 if (num_sack_blks == 0) {
9150 /* Nothing to sack (DSACKs?) */
9151 goto out_with_totals;
9153 if (num_sack_blks < 2) {
9154 /* Only one, we don't need to sort */
9157 /* Sort the sacks */
9158 for (i = 0; i < num_sack_blks; i++) {
9159 for (j = i + 1; j < num_sack_blks; j++) {
9160 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) {
9161 sack = sack_blocks[i];
9162 sack_blocks[i] = sack_blocks[j];
9163 sack_blocks[j] = sack;
9168 * Now are any of the sack block ends the same (yes some
9169 * implementations send these)?
9172 if (num_sack_blks == 0)
9173 goto out_with_totals;
9174 if (num_sack_blks > 1) {
9175 for (i = 0; i < num_sack_blks; i++) {
9176 for (j = i + 1; j < num_sack_blks; j++) {
9177 if (sack_blocks[i].end == sack_blocks[j].end) {
9179 * Ok, these two have the same end; we
9180 * want the smallest end and then
9181 * throw away the larger and start
9184 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) {
9186 * The second block covers
9187 * more area, use that
9189 sack_blocks[i].start = sack_blocks[j].start;
9192 * Now collapse out the dup-sack and
9195 for (k = (j + 1); k < num_sack_blks; k++) {
9196 sack_blocks[j].start = sack_blocks[k].start;
9197 sack_blocks[j].end = sack_blocks[k].end;
9208 * First let's look to see if
9209 * we have retransmitted and
9210 * can use the transmit next?
9212 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
9214 SEQ_GT(sack_blocks[0].end, rsm->r_start) &&
9215 SEQ_LT(sack_blocks[0].start, rsm->r_end)) {
9217 * We probably did the FR and the next
9218 * SACK coming in continues as we would expect.
9220 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &moved_two);
9222 rack->r_wanted_output = 1;
9224 sack_changed += acked;
9226 if (num_sack_blks == 1) {
9228 * This is what we would expect from
9229 * a normal implementation to happen
9230 * after we have retransmitted the FR,
9231 * i.e. the sack-filter pushes down
9232 * to 1 block and the next to be retransmitted
9233 * is the sequence in the sack block (as more
9234 * are acked). Count this as ACK'd data to boost
9235 * up the chances of recovering any false positives.
9237 rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp));
9238 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp)));
9239 counter_u64_add(rack_express_sack, 1);
9240 if (rack->r_ctl.ack_count > 0xfff00000) {
9242 * reduce the number to keep us under
9245 rack->r_ctl.ack_count /= 2;
9246 rack->r_ctl.sack_count /= 2;
9248 goto out_with_totals;
9251 * Start the loop through the
9252 * rest of blocks, past the first block.
9258 /* It's a sack of some sort */
9259 rack->r_ctl.sack_count++;
9260 if (rack->r_ctl.sack_count > 0xfff00000) {
9262 * reduce the number to keep us under
9265 rack->r_ctl.ack_count /= 2;
9266 rack->r_ctl.sack_count /= 2;
9268 counter_u64_add(rack_sack_total, 1);
9269 if (rack->sack_attack_disable) {
9270 /* An attacker disablement is in place */
9271 if (num_sack_blks > 1) {
9272 rack->r_ctl.sack_count += (num_sack_blks - 1);
9273 rack->r_ctl.sack_moved_extra++;
9274 counter_u64_add(rack_move_some, 1);
9275 if (rack->r_ctl.sack_moved_extra > 0xfff00000) {
9276 rack->r_ctl.sack_moved_extra /= 2;
9277 rack->r_ctl.sack_noextra_move /= 2;
9282 rsm = rack->r_ctl.rc_sacklast;
9283 for (i = loop_start; i < num_sack_blks; i++) {
9284 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &moved_two);
9286 rack->r_wanted_output = 1;
9288 sack_changed += acked;
9292 * If we did not get a SACK for at least an MSS and
9293 * had to move at all, or if we moved more than our
9294 * threshold, it counts against the "extra" move.
9296 rack->r_ctl.sack_moved_extra += moved_two;
9297 counter_u64_add(rack_move_some, 1);
9300 * else we did not have to move
9301 * any more than we would expect.
9303 rack->r_ctl.sack_noextra_move++;
9304 counter_u64_add(rack_move_none, 1);
9306 if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) {
9308 * If the SACK was not a full MSS then
9309 * we add to sack_count the number of
9310 * MSS's (or possibly more than
9311 * an MSS if it's a TSO send) we had to skip by.
9313 rack->r_ctl.sack_count += moved_two;
9314 counter_u64_add(rack_sack_total, moved_two);
9317 * Now we need to setup for the next
9318 * round. First we make sure we won't
9319 * exceed the size of our uint32_t on
9320 * the various counts, and then clear out
9323 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) ||
9324 (rack->r_ctl.sack_noextra_move > 0xfff00000)) {
9325 rack->r_ctl.sack_moved_extra /= 2;
9326 rack->r_ctl.sack_noextra_move /= 2;
9328 if (rack->r_ctl.sack_count > 0xfff00000) {
9329 rack->r_ctl.ack_count /= 2;
9330 rack->r_ctl.sack_count /= 2;
9335 if (num_sack_blks > 1) {
9337 * You get an extra stroke if
9338 * you have more than one sack-blk; this
9339 * could be where we are skipping forward
9340 * and the sack-filter is still working, or
9341 * it could be an attacker constantly
9344 rack->r_ctl.sack_moved_extra++;
9345 counter_u64_add(rack_move_some, 1);
9348 #ifdef NETFLIX_EXP_DETECTION
9349 rack_do_detection(tp, rack, BYTES_THIS_ACK(tp, th), ctf_fixed_maxseg(rack->rc_tp));
9352 /* Something changed cancel the rack timer */
9353 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
9355 tsused = tcp_get_usecs(NULL);
9356 rsm = tcp_rack_output(tp, rack, tsused);
9357 if ((!IN_FASTRECOVERY(tp->t_flags)) &&
9359 /* Enter recovery */
9360 rack->r_ctl.rc_rsm_start = rsm->r_start;
9361 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
9362 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
9363 entered_recovery = 1;
9364 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una);
9366 * When we enter recovery we need to assure we send
9369 if (rack->rack_no_prr == 0) {
9370 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
9371 rack_log_to_prr(rack, 8, 0);
9373 rack->r_timer_override = 1;
9375 rack->r_ctl.rc_agg_early = 0;
9376 } else if (IN_FASTRECOVERY(tp->t_flags) &&
9378 (rack->r_rr_config == 3)) {
9380 * Assure we can output and we get no
9381 * remembered pace time except the retransmit.
9383 rack->r_timer_override = 1;
9384 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
9385 rack->r_ctl.rc_resend = rsm;
9387 if (IN_FASTRECOVERY(tp->t_flags) &&
9388 (rack->rack_no_prr == 0) &&
9389 (entered_recovery == 0)) {
9390 rack_update_prr(tp, rack, changed, th_ack);
9391 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) &&
9392 ((rack->rc_inp->inp_in_hpts == 0) &&
9393 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) {
9395 * If you are pacing output you don't want
9399 rack->r_ctl.rc_agg_early = 0;
9400 rack->r_timer_override = 1;
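/*
 * Note on the recovery-entry path above: when CC_NDUPACK pushes us
 * into recovery and PRR is enabled (rack_no_prr == 0), rc_prr_sndcnt
 * is seeded with one MSS and r_timer_override is set, so the first
 * retransmission can go out immediately rather than waiting for PRR
 * credit to accumulate from later ACKs.
 */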
9406 rack_strike_dupack(struct tcp_rack *rack)
9408 struct rack_sendmap *rsm;
9410 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
9411 while (rsm && (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
9412 rsm = TAILQ_NEXT(rsm, r_tnext);
9414 if (rsm && (rsm->r_dupack < 0xff)) {
9416 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) {
9420 * Here we see if we need to retransmit. For
9421 * a SACK type connection if enough time has passed
9422 * we will get a return of the rsm. For a non-sack
9423 * connection we will get the rsm returned if the
9424 * dupack value is 3 or more.
9426 cts = tcp_get_usecs(&tv);
9427 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts);
9428 if (rack->r_ctl.rc_resend != NULL) {
9429 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) {
9430 rack_cong_signal(rack->rc_tp, CC_NDUPACK,
9431 rack->rc_tp->snd_una);
9433 rack->r_wanted_output = 1;
9434 rack->r_timer_override = 1;
9435 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3);
9438 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3);
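/*
 * In other words, each duplicate ACK advances r_dupack on the oldest
 * transmit-map entry that has not yet reached DUP_ACK_THRESHOLD.
 * Using the threshold of three noted above, the third duplicate ACK
 * on a non-SACK connection makes tcp_rack_output() hand that entry
 * back as rc_resend, recovery is entered via CC_NDUPACK if we are not
 * already in it, and output is forced so the retransmission goes out.
 */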
9444 rack_check_bottom_drag(struct tcpcb *tp,
9445 struct tcp_rack *rack,
9446 struct socket *so, int32_t acked)
9448 uint32_t segsiz, minseg;
9450 segsiz = ctf_fixed_maxseg(tp);
9453 if (tp->snd_max == tp->snd_una) {
9455 * We are doing dynamic pacing and we are way
9456 * under. Basically everything got acked while
9457 * we were still waiting on the pacer to expire.
9459 * This means we need to boost the b/w in
9460 * addition to any earlier boosting of
9463 rack->rc_dragged_bottom = 1;
9464 rack_validate_multipliers_at_or_above100(rack);
9466 * Let's use the segment bytes acked plus
9467 * the lowest RTT seen as the basis to
9468 * form a b/w estimate. This will be off
9469 * due to the fact that the true estimate
9470 * should be around 1/2 the time of the RTT
9471 * but we can settle for that.
9473 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) &&
9475 uint64_t bw, calc_bw, rtt;
9477 rtt = rack->r_ctl.rack_rs.rs_us_rtt;
9479 /* no us sample, is there a ms one? */
9480 if (rack->r_ctl.rack_rs.rs_rtt_lowest) {
9481 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest;
9483 goto no_measurement;
9487 calc_bw = bw * 1000000;
9489 if (rack->r_ctl.last_max_bw &&
9490 (rack->r_ctl.last_max_bw < calc_bw)) {
9492 * If we have a last calculated max bw
9495 calc_bw = rack->r_ctl.last_max_bw;
9497 /* now plop it in */
9498 if (rack->rc_gp_filled == 0) {
9499 if (calc_bw > ONE_POINT_TWO_MEG) {
9501 * If we have no measurement
9502 * don't let us set in more than
9503 * 1.2Mbps. If we are still too
9504 * low after pacing with this we
9505 * will hopefully have a max b/w
9506 * available to sanity check things.
9508 calc_bw = ONE_POINT_TWO_MEG;
9510 rack->r_ctl.rc_rtt_diff = 0;
9511 rack->r_ctl.gp_bw = calc_bw;
9512 rack->rc_gp_filled = 1;
9513 if (rack->r_ctl.num_measurements < RACK_REQ_AVG)
9514 rack->r_ctl.num_measurements = RACK_REQ_AVG;
9515 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
9516 } else if (calc_bw > rack->r_ctl.gp_bw) {
9517 rack->r_ctl.rc_rtt_diff = 0;
9518 if (rack->r_ctl.num_measurements < RACK_REQ_AVG)
9519 rack->r_ctl.num_measurements = RACK_REQ_AVG;
9520 rack->r_ctl.gp_bw = calc_bw;
9521 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
9523 rack_increase_bw_mul(rack, -1, 0, 0, 1);
9524 if ((rack->gp_ready == 0) &&
9525 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) {
9526 /* We have enough measurements now */
9528 rack_set_cc_pacing(rack);
9529 if (rack->defer_options)
9530 rack_apply_deferred_options(rack);
9533 * For acks over 1mss we do an extra boost to simulate
9534 * where we would get 2 acks (we want 110 for the mul).
9537 rack_increase_bw_mul(rack, -1, 0, 0, 1);
9540 * zero rtt possibly? Settle for just an old increase.
9543 rack_increase_bw_mul(rack, -1, 0, 0, 1);
9545 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) &&
9546 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)),
9548 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) &&
9549 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) &&
9550 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <=
9551 (segsiz * rack_req_segs))) {
9553 * We are doing dynamic GP pacing and
9554 * we have everything except 1MSS or less
9555 * bytes left out. We are still pacing away.
9556 * And there is data that could be sent. This
9557 * means we are inserting delayed ack time in
9558 * our measurements because we are pacing too slow.
9560 rack_validate_multipliers_at_or_above100(rack);
9561 rack->rc_dragged_bottom = 1;
9562 rack_increase_bw_mul(rack, -1, 0, 0, 1);
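/*
 * A rough worked example of the estimate formed above, assuming an
 * MSS of 1448 bytes and that calc_bw ends up as the acked bytes
 * scaled to bytes per second over the lowest RTT: if 10 segments
 * (14480 bytes) were acked and the lowest RTT sample is 20000 usec,
 *
 *	calc_bw = 14480 * 1000000 / 20000 = 724000 bytes/sec
 *
 * (about 5.8 Mbps). As the comment notes this understates the true
 * rate, since the data really arrived in roughly half an RTT, but it
 * is good enough to seed or raise gp_bw so the pacer stops dragging
 * bottom.
 */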
9569 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount)
9572 * The fast output path is enabled and we
10573 * have moved the cumack forward. Let's see if
9574 * we can expand forward the fast path length by
9575 * that amount. What we would ideally like to
9576 * do is increase the number of bytes in the
9577 * fast path block (left_to_send) by the
9578 * acked amount. However we have to gate that
9580 * 1) The amount outstanding and the rwnd of the peer
9581 * (i.e. we don't want to exceed the rwnd of the peer).
9583 * 2) The amount of data left in the socket buffer (i.e.
9584 * we can't send beyond what is in the buffer).
9586 * Note that this does not take into account any increase
9587 * in the cwnd. We will only extend the fast path by
9590 uint32_t new_total, gating_val;
9592 new_total = acked_amount + rack->r_ctl.fsb.left_to_send;
9593 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)),
9594 (tp->snd_wnd - (tp->snd_max - tp->snd_una)));
9595 if (new_total <= gating_val) {
9596 /* We can increase left_to_send by the acked amount */
9597 counter_u64_add(rack_extended_rfo, 1);
9598 rack->r_ctl.fsb.left_to_send = new_total;
9599 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))),
9600 ("rack:%p left_to_send:%u sbavail:%u out:%u",
9601 rack, rack->r_ctl.fsb.left_to_send,
9602 sbavail(&rack->rc_inp->inp_socket->so_snd),
9603 (tp->snd_max - tp->snd_una)));
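/*
 * A small worked example of the gate above: with 100000 bytes in the
 * socket buffer, a 60000 byte send window and 40000 bytes already
 * outstanding, gating_val = min(100000 - 40000, 60000 - 40000) =
 * 20000. If left_to_send was 5000 and the ACK freed 8000 bytes,
 * new_total is 13000, which fits under the gate, so the fast-send
 * block simply grows; had new_total exceeded 20000, the fast path
 * would be left as-is and the normal path would recompute its limits
 * on the next send.
 */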
9609 rack_adjust_sendmap(struct tcp_rack *rack, struct sockbuf *sb, tcp_seq snd_una)
9612 * Here any sendmap entry that points to the
9613 * beginning mbuf must be adjusted to the correct
9614 * offset. This must be called with:
9615 * 1) The socket buffer locked
9616 * 2) snd_una adjusted to its new position.
9618 * Note that (2) implies rack_ack_received has also
9621 * We grab the first mbuf in the socket buffer and
9622 * then go through the front of the sendmap, recalculating
9623 * the stored offset for any sendmap entry that has
9624 * that mbuf. We must use the sb functions to do this
9625 * since it's possible an add was done as well as
9626 * the subtraction we may have just completed. This should
9627 * not be a penalty though, since we just referenced the sb
9628 * to go in and trim off the mbufs that we freed (of course
9629 * there will be a penalty for the sendmap references though).
9632 struct rack_sendmap *rsm;
9634 SOCKBUF_LOCK_ASSERT(sb);
9636 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
9637 if ((rsm == NULL) || (m == NULL)) {
9638 /* Nothing outstanding */
9641 while (rsm->m == m) {
9647 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff);
9648 if (rsm->orig_m_len != m->m_len) {
9649 rack_adjust_orig_mlen(rsm);
9651 if (rsm->soff != soff) {
9653 * This is not a fatal error, we anticipate it
9654 * might happen (the else code), so we count it here
9655 * so that under invariant we can see that it really
9658 counter_u64_add(rack_adjust_map_bw, 1);
9662 rsm->orig_m_len = rsm->m->m_len;
9664 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff);
9665 rsm->orig_m_len = rsm->m->m_len;
9667 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree,
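/*
 * For illustration: suppose the cumulative ACK just trimmed 3000
 * bytes off the head of the send buffer and the old head mbuf was
 * freed. Any sendmap entry that still points at that mbuf must be
 * re-resolved; sbsndmbuf(sb, rsm->r_start - snd_una, &soff) walks the
 * (possibly changed) chain and hands back both the mbuf and the byte
 * offset within it, so a later retransmission or pacing send copies
 * from the correct place instead of a stale pointer.
 */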
9675 * Return value of 1, we do not need to call rack_process_data().
9676 * return value of 0, rack_process_data can be called.
9677 * For ret_val, if it's 0 the TCP is locked; if it's non-zero
9678 * it's unlocked and probably unsafe to touch the TCB.
9681 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
9682 struct tcpcb *tp, struct tcpopt *to,
9683 uint32_t tiwin, int32_t tlen,
9684 int32_t * ofia, int32_t thflags, int32_t *ret_val)
9686 int32_t ourfinisacked = 0;
9687 int32_t nsegs, acked_amount;
9690 struct tcp_rack *rack;
9691 int32_t under_pacing = 0;
9692 int32_t recovery = 0;
9694 rack = (struct tcp_rack *)tp->t_fb_ptr;
9695 if (SEQ_GT(th->th_ack, tp->snd_max)) {
9696 __ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val,
9697 &rack->r_ctl.challenge_ack_ts,
9698 &rack->r_ctl.challenge_ack_cnt);
9699 rack->r_wanted_output = 1;
9702 if (rack->gp_ready &&
9703 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
9706 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) {
9707 int in_rec, dup_ack_struck = 0;
9709 in_rec = IN_FASTRECOVERY(tp->t_flags);
9710 if (rack->rc_in_persist) {
9712 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
9713 rack_rto_min, rack_rto_max);
9715 if ((th->th_ack == tp->snd_una) && (tiwin == tp->snd_wnd)) {
9716 rack_strike_dupack(rack);
9719 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), dup_ack_struck);
9721 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
9723 * Old ack, behind (or duplicate to) the last one rcv'd
9724 * Note: We mark reordering is occurring if it's
9725 * less than and we have not closed our window.
9727 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) {
9728 counter_u64_add(rack_reorder_seen, 1);
9729 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
9734 * If we reach this point, ACK is not a duplicate, i.e., it ACKs
9735 * something we sent.
9737 if (tp->t_flags & TF_NEEDSYN) {
9739 * T/TCP: Connection was half-synchronized, and our SYN has
9740 * been ACK'd (so connection is now fully synchronized). Go
9741 * to non-starred state, increment snd_una for ACK of SYN,
9742 * and check if we can do window scaling.
9744 tp->t_flags &= ~TF_NEEDSYN;
9746 /* Do window scaling? */
9747 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
9748 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
9749 tp->rcv_scale = tp->request_r_scale;
9750 /* Send window already scaled. */
9753 nsegs = max(1, m->m_pkthdr.lro_nsegs);
9754 INP_WLOCK_ASSERT(tp->t_inpcb);
9756 acked = BYTES_THIS_ACK(tp, th);
9757 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs);
9758 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
9760 * If we just performed our first retransmit, and the ACK arrives
9761 * within our recovery window, then it was a mistake to do the
9762 * retransmit in the first place. Recover our original cwnd and
9763 * ssthresh, and proceed to transmit where we left off.
9765 if ((tp->t_flags & TF_PREVVALID) &&
9766 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
9767 tp->t_flags &= ~TF_PREVVALID;
9768 if (tp->t_rxtshift == 1 &&
9769 (int)(ticks - tp->t_badrxtwin) < 0)
9770 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack);
9773 /* assure we are not backed off */
9775 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
9776 rack_rto_min, rack_rto_max);
9777 rack->rc_tlp_in_progress = 0;
9778 rack->r_ctl.rc_tlp_cnt_out = 0;
9780 * If it is the RXT timer we want to
9781 * stop it, so we can restart a TLP.
9783 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
9784 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
9785 #ifdef NETFLIX_HTTP_LOGGING
9786 tcp_http_check_for_comp(rack->rc_tp, th->th_ack);
9790 * If we have a timestamp reply, update smoothed round trip time. If
9791 * no timestamp is present but transmit timer is running and timed
9792 * sequence number was acked, update smoothed round trip time. Since
9793 * we now have an rtt measurement, cancel the timer backoff (cf.,
9794 * Phil Karn's retransmit alg.). Recompute the initial retransmit
9797 * Some boxes send broken timestamp replies during the SYN+ACK
9798 * phase, ignore timestamps of 0 or we could calculate a huge RTT
9799 * and blow up the retransmit timer.
9802 * If all outstanding data is acked, stop retransmit timer and
9803 * remember to restart (more output or persist). If there is more
9804 * data to be acked, restart retransmit timer, using current
9805 * (possibly backed-off) value.
9809 *ofia = ourfinisacked;
9812 if (IN_RECOVERY(tp->t_flags)) {
9813 if (SEQ_LT(th->th_ack, tp->snd_recover) &&
9814 (SEQ_LT(th->th_ack, tp->snd_max))) {
9815 tcp_rack_partialack(tp);
9817 rack_post_recovery(tp, th->th_ack);
9822 * Let the congestion control algorithm update congestion control
9823 * related information. This typically means increasing the
9824 * congestion window.
9826 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, recovery);
9827 SOCKBUF_LOCK(&so->so_snd);
9828 acked_amount = min(acked, (int)sbavail(&so->so_snd));
9829 tp->snd_wnd -= acked_amount;
9830 mfree = sbcut_locked(&so->so_snd, acked_amount);
9831 if ((sbused(&so->so_snd) == 0) &&
9832 (acked > acked_amount) &&
9833 (tp->t_state >= TCPS_FIN_WAIT_1) &&
9834 (tp->t_flags & TF_SENTFIN)) {
9836 * We must be sure our fin
9837 * was sent and acked (we can be
9838 * in FIN_WAIT_1 without having
9843 tp->snd_una = th->th_ack;
9844 if (acked_amount && sbavail(&so->so_snd))
9845 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una);
9846 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2);
9847 SOCKBUF_UNLOCK(&so->so_snd);
9848 tp->t_flags |= TF_WAKESOW;
9850 if (SEQ_GT(tp->snd_una, tp->snd_recover))
9851 tp->snd_recover = tp->snd_una;
9853 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) {
9854 tp->snd_nxt = tp->snd_una;
9857 (rack->use_fixed_rate == 0) &&
9858 (rack->in_probe_rtt == 0) &&
9859 rack->rc_gp_dyn_mul &&
9860 rack->rc_always_pace) {
9861 /* Check if we are dragging bottom */
9862 rack_check_bottom_drag(tp, rack, so, acked);
9864 if (tp->snd_una == tp->snd_max) {
9865 /* Nothing left outstanding */
9866 tp->t_flags &= ~TF_PREVVALID;
9867 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
9868 rack->r_ctl.retran_during_recovery = 0;
9869 rack->r_ctl.dsack_byte_cnt = 0;
9870 if (rack->r_ctl.rc_went_idle_time == 0)
9871 rack->r_ctl.rc_went_idle_time = 1;
9872 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
9873 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0)
9875 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
9876 /* Set need output so persist might get set */
9877 rack->r_wanted_output = 1;
9878 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
9879 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
9880 (sbavail(&so->so_snd) == 0) &&
9881 (tp->t_flags2 & TF2_DROP_AF_DATA)) {
9883 * The socket was gone and the
9884 * peer sent data (now or in the past), time to
9888 /* tcp_close will kill the inp pre-log the Reset */
9889 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
9891 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen);
9896 *ofia = ourfinisacked;
9901 rack_collapsed_window(struct tcp_rack *rack)
9904 * Now we must walk the
9905 * send map and divide the
9906 * ones left stranded. These
9907 * guys can't cause us to abort
9908 * the connection and are really
9909 * "unsent". However if a buggy
9910 * client actually did keep some
9911 * of the data i.e. collapsed the win
9912 * and refused to ack and then opened
9913 * the win and acked that data, we would
9914 * get into an ack war; the simpler
9915 * method then of just pretending we
9916 * did not send those segments something
9919 struct rack_sendmap *rsm, *nrsm, fe, *insret;
9922 max_seq = rack->rc_tp->snd_una + rack->rc_tp->snd_wnd;
9923 memset(&fe, 0, sizeof(fe));
9924 fe.r_start = max_seq;
9925 /* Find the first seq past or at maxseq */
9926 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
9928 /* Nothing to do, strange */
9929 rack->rc_has_collapsed = 0;
9933 * Now do we need to split at
9934 * the collapse point?
9936 if (SEQ_GT(max_seq, rsm->r_start)) {
9937 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
9939 /* We can't get a rsm, mark all? */
9944 rack_clone_rsm(rack, nrsm, rsm, max_seq);
9945 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm);
9947 if (insret != NULL) {
9948 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p",
9949 nrsm, insret, rack, rsm);
9952 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, max_seq, __LINE__);
9953 if (rsm->r_in_tmap) {
9954 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
9955 nrsm->r_in_tmap = 1;
9958 * Set in the new RSM as the
9959 * collapsed starting point
9964 counter_u64_add(rack_collapsed_win, 1);
9965 RB_FOREACH_FROM(nrsm, rack_rb_tree_head, rsm) {
9966 nrsm->r_flags |= RACK_RWND_COLLAPSED;
9968 rack->rc_has_collapsed = 1;
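/*
 * Example of the split above: with snd_una = 1000 and the peer
 * shrinking snd_wnd to 5000, max_seq is 6000. A sendmap entry
 * covering [4000, 8000) straddles that point, so it is cloned and
 * split into [4000, 6000) and [6000, 8000); the second piece and
 * every entry after it are flagged RACK_RWND_COLLAPSED and treated as
 * stranded/"unsent" until rack_un_collapse_window() clears the flag
 * once the window opens back up.
 */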
9972 rack_un_collapse_window(struct tcp_rack *rack)
9974 struct rack_sendmap *rsm;
9976 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
9977 if (rsm->r_flags & RACK_RWND_COLLAPSED)
9978 rsm->r_flags &= ~RACK_RWND_COLLAPSED;
9982 rack->rc_has_collapsed = 0;
9986 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack,
9987 int32_t tlen, int32_t tfo_syn)
9989 if (DELAY_ACK(tp, tlen) || tfo_syn) {
9990 if (rack->rc_dack_mode &&
9992 (rack->rc_dack_toggle == 1)) {
9993 goto no_delayed_ack;
9995 rack_timer_cancel(tp, rack,
9996 rack->r_ctl.rc_rcvtime, __LINE__);
9997 tp->t_flags |= TF_DELACK;
10000 rack->r_wanted_output = 1;
10001 tp->t_flags |= TF_ACKNOW;
10002 if (rack->rc_dack_mode) {
10003 if (tp->t_flags & TF_DELACK)
10004 rack->rc_dack_toggle = 1;
10006 rack->rc_dack_toggle = 0;
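/*
 * The toggle above is intended to bound how long ACKs are delayed
 * when rc_dack_mode is set: once a delayed ACK is pending and
 * rc_dack_toggle is 1 (together with the elided part of the
 * condition), the next in-order segment skips the delay, forces
 * TF_ACKNOW and refreshes the toggle, so roughly every other segment
 * receives an immediate ACK.
 */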
10012 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack)
10015 * If fast output is in progress, let's validate that
10016 * the new window did not shrink on us and make it
10017 * so fast output should end.
10019 if (rack->r_fast_output) {
10023 * Calculate what we will send if left as is
10024 * and compare that to our send window.
10026 out = ctf_outstanding(tp);
10027 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) {
10028 /* ok we have an issue */
10029 if (out >= tp->snd_wnd) {
10030 /* Turn off fast output the window is met or collapsed */
10031 rack->r_fast_output = 0;
10033 /* we have some room left */
10034 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out;
10035 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) {
10036 /* If not at least 1 full segment never mind */
10037 rack->r_fast_output = 0;
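/*
 * Example of the check above, assuming an MSS of 1448 bytes: with
 * 25000 bytes outstanding, left_to_send at 10000 and the peer now
 * advertising a 30000 byte window, 35000 > 30000 so left_to_send is
 * clipped to 30000 - 25000 = 5000 and fast output continues. Had the
 * window come in at 26000 instead, only 1000 bytes would remain,
 * less than one segment, so fast output is switched off and the
 * normal output path takes over.
 */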
10045 * Return value of 1, the TCB is unlocked and most
10046 * likely gone, return value of 0, the TCP is still
10050 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so,
10051 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
10052 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
10055 * Update window information. Don't look at window if no ACK: TACs
10056 * send garbage on first SYN.
10060 struct tcp_rack *rack;
10062 rack = (struct tcp_rack *)tp->t_fb_ptr;
10063 INP_WLOCK_ASSERT(tp->t_inpcb);
10064 nsegs = max(1, m->m_pkthdr.lro_nsegs);
10065 if ((thflags & TH_ACK) &&
10066 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
10067 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
10068 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
10069 /* keep track of pure window updates */
10071 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
10072 KMOD_TCPSTAT_INC(tcps_rcvwinupd);
10073 tp->snd_wnd = tiwin;
10074 rack_validate_fo_sendwin_up(tp, rack);
10075 tp->snd_wl1 = th->th_seq;
10076 tp->snd_wl2 = th->th_ack;
10077 if (tp->snd_wnd > tp->max_sndwnd)
10078 tp->max_sndwnd = tp->snd_wnd;
10079 rack->r_wanted_output = 1;
10080 } else if (thflags & TH_ACK) {
10081 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) {
10082 tp->snd_wnd = tiwin;
10083 rack_validate_fo_sendwin_up(tp, rack);
10084 tp->snd_wl1 = th->th_seq;
10085 tp->snd_wl2 = th->th_ack;
10088 if (tp->snd_wnd < ctf_outstanding(tp))
10089 /* The peer collapsed the window */
10090 rack_collapsed_window(rack);
10091 else if (rack->rc_has_collapsed)
10092 rack_un_collapse_window(rack);
10093 /* Was persist timer active and now we have window space? */
10094 if ((rack->rc_in_persist != 0) &&
10095 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
10096 rack->r_ctl.rc_pace_min_segs))) {
10097 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime);
10098 tp->snd_nxt = tp->snd_max;
10099 /* Make sure we output to start the timer */
10100 rack->r_wanted_output = 1;
10102 /* Do we enter persists? */
10103 if ((rack->rc_in_persist == 0) &&
10104 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
10105 TCPS_HAVEESTABLISHED(tp->t_state) &&
10106 (tp->snd_max == tp->snd_una) &&
10107 sbavail(&tp->t_inpcb->inp_socket->so_snd) &&
10108 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) {
10110 * Here the rwnd is less than
10111 * the pacing size, we are established,
10112 * nothing is outstanding, and there is
10113 * data to send. Enter persists.
10115 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
10117 if (tp->t_flags2 & TF2_DROP_AF_DATA) {
10122 * don't process the URG bit, ignore them drag
10125 tp->rcv_up = tp->rcv_nxt;
10126 INP_WLOCK_ASSERT(tp->t_inpcb);
10129 * Process the segment text, merging it into the TCP sequencing
10130 * queue, and arranging for acknowledgment of receipt if necessary.
10131 * This process logically involves adjusting tp->rcv_wnd as data is
10132 * presented to the user (this happens in tcp_usrreq.c, case
10133 * PRU_RCVD). If a FIN has already been received on this connection
10134 * then we just ignore the text.
10136 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
10137 IS_FASTOPEN(tp->t_flags));
10138 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) &&
10139 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
10140 tcp_seq save_start = th->th_seq;
10141 tcp_seq save_rnxt = tp->rcv_nxt;
10142 int save_tlen = tlen;
10144 m_adj(m, drop_hdrlen); /* delayed header drop */
10146 * Insert segment which includes th into TCP reassembly
10147 * queue with control block tp. Set thflags to whether
10148 * reassembly now includes a segment with FIN. This handles
10149 * the common case inline (segment is the next to be
10150 * received on an established connection, and the queue is
10151 * empty), avoiding linkage into and removal from the queue
10152 * and repetition of various conversions. Set DELACK for
10153 * segments received in order, but ack immediately when
10154 * segments are out of order (so fast retransmit can work).
10156 if (th->th_seq == tp->rcv_nxt &&
10158 (TCPS_HAVEESTABLISHED(tp->t_state) ||
10160 #ifdef NETFLIX_SB_LIMITS
10161 u_int mcnt, appended;
10163 if (so->so_rcv.sb_shlim) {
10164 mcnt = m_memcnt(m);
10166 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
10167 CFO_NOSLEEP, NULL) == false) {
10168 counter_u64_add(tcp_sb_shlim_fails, 1);
10174 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn);
10175 tp->rcv_nxt += tlen;
10177 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
10178 (tp->t_fbyte_in == 0)) {
10179 tp->t_fbyte_in = ticks;
10180 if (tp->t_fbyte_in == 0)
10181 tp->t_fbyte_in = 1;
10182 if (tp->t_fbyte_out && tp->t_fbyte_in)
10183 tp->t_flags2 |= TF2_FBYTES_COMPLETE;
10185 thflags = th->th_flags & TH_FIN;
10186 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs);
10187 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen);
10188 SOCKBUF_LOCK(&so->so_rcv);
10189 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
10192 #ifdef NETFLIX_SB_LIMITS
10195 sbappendstream_locked(&so->so_rcv, m, 0);
10197 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1);
10198 SOCKBUF_UNLOCK(&so->so_rcv);
10199 tp->t_flags |= TF_WAKESOR;
10200 #ifdef NETFLIX_SB_LIMITS
10201 if (so->so_rcv.sb_shlim && appended != mcnt)
10202 counter_fo_release(so->so_rcv.sb_shlim,
10207 * XXX: Due to the header drop above "th" is
10208 * theoretically invalid by now. Fortunately
10209 * m_adj() doesn't actually free any mbufs when
10210 * trimming from the head.
10212 tcp_seq temp = save_start;
10214 thflags = tcp_reass(tp, th, &temp, &tlen, m);
10215 tp->t_flags |= TF_ACKNOW;
10217 if ((tp->t_flags & TF_SACK_PERMIT) &&
10219 TCPS_HAVEESTABLISHED(tp->t_state)) {
10220 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) {
10222 * DSACK actually handled in the fastpath
10225 RACK_OPTS_INC(tcp_sack_path_1);
10226 tcp_update_sack_list(tp, save_start,
10227 save_start + save_tlen);
10228 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) {
10229 if ((tp->rcv_numsacks >= 1) &&
10230 (tp->sackblks[0].end == save_start)) {
10232 * Partial overlap, recorded at todrop
10235 RACK_OPTS_INC(tcp_sack_path_2a);
10236 tcp_update_sack_list(tp,
10237 tp->sackblks[0].start,
10238 tp->sackblks[0].end);
10240 RACK_OPTS_INC(tcp_sack_path_2b);
10241 tcp_update_dsack_list(tp, save_start,
10242 save_start + save_tlen);
10244 } else if (tlen >= save_tlen) {
10245 /* Update of sackblks. */
10246 RACK_OPTS_INC(tcp_sack_path_3);
10247 tcp_update_dsack_list(tp, save_start,
10248 save_start + save_tlen);
10249 } else if (tlen > 0) {
10250 RACK_OPTS_INC(tcp_sack_path_4);
10251 tcp_update_dsack_list(tp, save_start,
10252 save_start + tlen);
10257 thflags &= ~TH_FIN;
10261 * If FIN is received ACK the FIN and let the user know that the
10262 * connection is closing.
10264 if (thflags & TH_FIN) {
10265 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
10267 /* The socket upcall is handled by socantrcvmore. */
10268 tp->t_flags &= ~TF_WAKESOR;
10270 * If connection is half-synchronized (ie NEEDSYN
10271 * flag on) then delay ACK, so it may be piggybacked
10272 * when SYN is sent. Otherwise, since we received a
10273 * FIN then no more input can be expected, send ACK
10276 if (tp->t_flags & TF_NEEDSYN) {
10277 rack_timer_cancel(tp, rack,
10278 rack->r_ctl.rc_rcvtime, __LINE__);
10279 tp->t_flags |= TF_DELACK;
10281 tp->t_flags |= TF_ACKNOW;
10285 switch (tp->t_state) {
10287 * In SYN_RECEIVED and ESTABLISHED STATES enter the
10288 * CLOSE_WAIT state.
10290 case TCPS_SYN_RECEIVED:
10291 tp->t_starttime = ticks;
10293 case TCPS_ESTABLISHED:
10294 rack_timer_cancel(tp, rack,
10295 rack->r_ctl.rc_rcvtime, __LINE__);
10296 tcp_state_change(tp, TCPS_CLOSE_WAIT);
10300 * If still in FIN_WAIT_1 STATE FIN has not been
10301 * acked so enter the CLOSING state.
10303 case TCPS_FIN_WAIT_1:
10304 rack_timer_cancel(tp, rack,
10305 rack->r_ctl.rc_rcvtime, __LINE__);
10306 tcp_state_change(tp, TCPS_CLOSING);
10310 * In FIN_WAIT_2 state enter the TIME_WAIT state,
10311 * starting the time-wait timer, turning off the
10312 * other standard timers.
10314 case TCPS_FIN_WAIT_2:
10315 rack_timer_cancel(tp, rack,
10316 rack->r_ctl.rc_rcvtime, __LINE__);
10322 * Return any desired output.
10324 if ((tp->t_flags & TF_ACKNOW) ||
10325 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) {
10326 rack->r_wanted_output = 1;
10328 INP_WLOCK_ASSERT(tp->t_inpcb);
10333 * Here nothing is really faster, it's just that we
10334 * have broken out the fast-data path also just like
10338 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so,
10339 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
10340 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos)
10343 int32_t newsize = 0; /* automatic sockbuf scaling */
10344 struct tcp_rack *rack;
10345 #ifdef NETFLIX_SB_LIMITS
10346 u_int mcnt, appended;
10350 * The size of tcp_saveipgen must be the size of the max ip header,
10353 u_char tcp_saveipgen[IP6_HDR_LEN];
10354 struct tcphdr tcp_savetcp;
10359 * If last ACK falls within this segment's sequence numbers, record
10360 * the timestamp. NOTE that the test is modified according to the
10361 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
10363 if (__predict_false(th->th_seq != tp->rcv_nxt)) {
10366 if (__predict_false(tp->snd_nxt != tp->snd_max)) {
10369 if (tiwin && tiwin != tp->snd_wnd) {
10372 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) {
10375 if (__predict_false((to->to_flags & TOF_TS) &&
10376 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) {
10379 if (__predict_false((th->th_ack != tp->snd_una))) {
10382 if (__predict_false(tlen > sbspace(&so->so_rcv))) {
10385 if ((to->to_flags & TOF_TS) != 0 &&
10386 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
10387 tp->ts_recent_age = tcp_ts_getticks();
10388 tp->ts_recent = to->to_tsval;
10390 rack = (struct tcp_rack *)tp->t_fb_ptr;
10392 * This is a pure, in-sequence data packet with nothing on the
10393 * reassembly queue and we have enough buffer space to take it.
10395 nsegs = max(1, m->m_pkthdr.lro_nsegs);
10397 #ifdef NETFLIX_SB_LIMITS
10398 if (so->so_rcv.sb_shlim) {
10399 mcnt = m_memcnt(m);
10401 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
10402 CFO_NOSLEEP, NULL) == false) {
10403 counter_u64_add(tcp_sb_shlim_fails, 1);
10409 /* Clean receiver SACK report if present */
10410 if (tp->rcv_numsacks)
10411 tcp_clean_sackreport(tp);
10412 KMOD_TCPSTAT_INC(tcps_preddat);
10413 tp->rcv_nxt += tlen;
10415 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
10416 (tp->t_fbyte_in == 0)) {
10417 tp->t_fbyte_in = ticks;
10418 if (tp->t_fbyte_in == 0)
10419 tp->t_fbyte_in = 1;
10420 if (tp->t_fbyte_out && tp->t_fbyte_in)
10421 tp->t_flags2 |= TF2_FBYTES_COMPLETE;
10424 * Pull snd_wl1 up to prevent seq wrap relative to th_seq.
10426 tp->snd_wl1 = th->th_seq;
10428 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt.
10430 tp->rcv_up = tp->rcv_nxt;
10431 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs);
10432 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen);
10434 if (so->so_options & SO_DEBUG)
10435 tcp_trace(TA_INPUT, ostate, tp,
10436 (void *)tcp_saveipgen, &tcp_savetcp, 0);
10438 newsize = tcp_autorcvbuf(m, th, so, tp, tlen);
10440 /* Add data to socket buffer. */
10441 SOCKBUF_LOCK(&so->so_rcv);
10442 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
10446 * Set new socket buffer size. Give up when limit is
10450 if (!sbreserve_locked(&so->so_rcv,
10451 newsize, so, NULL))
10452 so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
10453 m_adj(m, drop_hdrlen); /* delayed header drop */
10454 #ifdef NETFLIX_SB_LIMITS
10457 sbappendstream_locked(&so->so_rcv, m, 0);
10458 ctf_calc_rwin(so, tp);
10460 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1);
10461 SOCKBUF_UNLOCK(&so->so_rcv);
10462 tp->t_flags |= TF_WAKESOR;
10463 #ifdef NETFLIX_SB_LIMITS
10464 if (so->so_rcv.sb_shlim && mcnt != appended)
10465 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended);
10467 rack_handle_delayed_ack(tp, rack, tlen, 0);
10468 if (tp->snd_una == tp->snd_max)
10469 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
10474 * This subfunction is used to try to highly optimize the
10475 * fast path. We again allow window updates that are
10476 * in sequence to remain in the fast-path. We also add
10477 * in the __predict's to attempt to help the compiler.
10478 * Note that if we return a 0, then we can *not* process
10479 * it and the caller should push the packet into the
10483 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
10484 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
10485 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts)
10491 * The size of tcp_saveipgen must be the size of the max ip header,
10494 u_char tcp_saveipgen[IP6_HDR_LEN];
10495 struct tcphdr tcp_savetcp;
10498 int32_t under_pacing = 0;
10499 struct tcp_rack *rack;
10501 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
10502 /* Old ack, behind (or duplicate to) the last one rcv'd */
10505 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) {
10506 /* Above what we have sent? */
10509 if (__predict_false(tp->snd_nxt != tp->snd_max)) {
10510 /* We are retransmitting */
10513 if (__predict_false(tiwin == 0)) {
10517 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) {
10518 /* We need a SYN or a FIN, unlikely.. */
10521 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) {
10522 /* Timestamp is behind .. old ack with seq wrap? */
10525 if (__predict_false(IN_RECOVERY(tp->t_flags))) {
10526 /* Still recovering */
10529 rack = (struct tcp_rack *)tp->t_fb_ptr;
10530 if (rack->r_ctl.rc_sacked) {
10531 /* We have sack holes on our scoreboard */
10534 /* Ok if we reach here, we can process a fast-ack */
10535 if (rack->gp_ready &&
10536 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
10539 nsegs = max(1, m->m_pkthdr.lro_nsegs);
10540 rack_log_ack(tp, to, th, 0, 0);
10541 /* Did the window get updated? */
10542 if (tiwin != tp->snd_wnd) {
10543 tp->snd_wnd = tiwin;
10544 rack_validate_fo_sendwin_up(tp, rack);
10545 tp->snd_wl1 = th->th_seq;
10546 if (tp->snd_wnd > tp->max_sndwnd)
10547 tp->max_sndwnd = tp->snd_wnd;
10549 /* Do we exit persists? */
10550 if ((rack->rc_in_persist != 0) &&
10551 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
10552 rack->r_ctl.rc_pace_min_segs))) {
10553 rack_exit_persist(tp, rack, cts);
10555 /* Do we enter persists? */
10556 if ((rack->rc_in_persist == 0) &&
10557 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
10558 TCPS_HAVEESTABLISHED(tp->t_state) &&
10559 (tp->snd_max == tp->snd_una) &&
10560 sbavail(&tp->t_inpcb->inp_socket->so_snd) &&
10561 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) {
10563 * Here the rwnd is less than
10564 * the pacing size, we are established,
10565 * nothing is outstanding, and there is
10566 * data to send. Enter persists.
10568 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
10571 * If last ACK falls within this segment's sequence numbers, record
10572 * the timestamp. NOTE that the test is modified according to the
10573 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
10575 if ((to->to_flags & TOF_TS) != 0 &&
10576 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
10577 tp->ts_recent_age = tcp_ts_getticks();
10578 tp->ts_recent = to->to_tsval;
10581 * This is a pure ack for outstanding data.
10583 KMOD_TCPSTAT_INC(tcps_predack);
10586 * "bad retransmit" recovery.
10588 if ((tp->t_flags & TF_PREVVALID) &&
10589 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
10590 tp->t_flags &= ~TF_PREVVALID;
10591 if (tp->t_rxtshift == 1 &&
10592 (int)(ticks - tp->t_badrxtwin) < 0)
10593 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack);
10596 * Recalculate the transmit timer / rtt.
10598 * Some boxes send broken timestamp replies during the SYN+ACK
10599 * phase, ignore timestamps of 0 or we could calculate a huge RTT
10600 * and blow up the retransmit timer.
10602 acked = BYTES_THIS_ACK(tp, th);
10605 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
10606 hhook_run_tcp_est_in(tp, th, to);
10608 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs);
10609 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
10611 struct mbuf *mfree;
10613 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0);
10614 SOCKBUF_LOCK(&so->so_snd);
10615 mfree = sbcut_locked(&so->so_snd, acked);
10616 tp->snd_una = th->th_ack;
10617 /* Note we want to hold the sb lock through the sendmap adjust */
10618 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una);
10619 /* Wake up the socket if we have room to write more */
10620 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2);
10621 SOCKBUF_UNLOCK(&so->so_snd);
10622 tp->t_flags |= TF_WAKESOW;
10624 tp->t_rxtshift = 0;
10625 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
10626 rack_rto_min, rack_rto_max);
10627 rack->rc_tlp_in_progress = 0;
10628 rack->r_ctl.rc_tlp_cnt_out = 0;
10630 * If it is the RXT timer we want to
10631 * stop it, so we can restart a TLP.
10633 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
10634 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
10635 #ifdef NETFLIX_HTTP_LOGGING
10636 tcp_http_check_for_comp(rack->rc_tp, th->th_ack);
10640 * Let the congestion control algorithm update congestion control
10641 * related information. This typically means increasing the
10642 * congestion window.
10644 if (tp->snd_wnd < ctf_outstanding(tp)) {
10645 /* The peer collapsed the window */
10646 rack_collapsed_window(rack);
10647 } else if (rack->rc_has_collapsed)
10648 rack_un_collapse_window(rack);
10651 * Pull snd_wl2 up to prevent seq wrap relative to th_ack.
10653 tp->snd_wl2 = th->th_ack;
10656 /* ND6_HINT(tp); *//* Some progress has been made. */
10659 * If all outstanding data are acked, stop retransmit timer,
10660 * otherwise restart timer using current (possibly backed-off)
10661 * value. If process is waiting for space, wakeup/selwakeup/signal.
10662 * If data are ready to send, let tcp_output decide between more
10663 * output or persist.
10666 if (so->so_options & SO_DEBUG)
10667 tcp_trace(TA_INPUT, ostate, tp,
10668 (void *)tcp_saveipgen,
10671 if (under_pacing &&
10672 (rack->use_fixed_rate == 0) &&
10673 (rack->in_probe_rtt == 0) &&
10674 rack->rc_gp_dyn_mul &&
10675 rack->rc_always_pace) {
10676 /* Check if we are dragging bottom */
10677 rack_check_bottom_drag(tp, rack, so, acked);
10679 if (tp->snd_una == tp->snd_max) {
10680 tp->t_flags &= ~TF_PREVVALID;
10681 rack->r_ctl.retran_during_recovery = 0;
10682 rack->r_ctl.dsack_byte_cnt = 0;
10683 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
10684 if (rack->r_ctl.rc_went_idle_time == 0)
10685 rack->r_ctl.rc_went_idle_time = 1;
10686 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
10687 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0)
10689 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
10691 if (acked && rack->r_fast_output)
10692 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked);
10693 if (sbavail(&so->so_snd)) {
10694 rack->r_wanted_output = 1;
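/*
 * Summarizing the guards above: the compressed path only handles a
 * pure ACK that is strictly newer than snd_una, no higher than
 * snd_max, arriving while we are not retransmitting, with a non-zero
 * window, no pending SYN/FIN work, a timestamp no older than
 * ts_recent, no recovery in progress and nothing SACKed on the
 * scoreboard. Anything else returns 0 so the caller falls back to
 * the normal (slow) processing path.
 */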
10700 * Return value of 1, the TCB is unlocked and most
10701 * likely gone, return value of 0, the TCP is still
10705 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
10706 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
10707 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
10709 int32_t ret_val = 0;
10711 int32_t ourfinisacked = 0;
10712 struct tcp_rack *rack;
10714 ctf_calc_rwin(so, tp);
10716 * If the state is SYN_SENT: if seg contains an ACK, but not for our
10717 * SYN, drop the input. if seg contains a RST, then drop the
10718 * connection. if seg does not contain SYN, then drop it. Otherwise
10719 * this is an acceptable SYN segment initialize tp->rcv_nxt and
10720 * tp->irs if seg contains ack then advance tp->snd_una if seg
10721 * contains an ECE and ECN support is enabled, the stream is ECN
10722 * capable. if SYN has been acked change to ESTABLISHED else
10723 * SYN_RCVD state arrange for segment to be acked (eventually)
10724 * continue processing rest of data/controls.
10726 if ((thflags & TH_ACK) &&
10727 (SEQ_LEQ(th->th_ack, tp->iss) ||
10728 SEQ_GT(th->th_ack, tp->snd_max))) {
10729 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
10730 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
10733 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) {
10734 TCP_PROBE5(connect__refused, NULL, tp,
10735 mtod(m, const char *), tp, th);
10736 tp = tcp_drop(tp, ECONNREFUSED);
10737 ctf_do_drop(m, tp);
10740 if (thflags & TH_RST) {
10741 ctf_do_drop(m, tp);
10744 if (!(thflags & TH_SYN)) {
10745 ctf_do_drop(m, tp);
10748 tp->irs = th->th_seq;
10749 tcp_rcvseqinit(tp);
10750 rack = (struct tcp_rack *)tp->t_fb_ptr;
10751 if (thflags & TH_ACK) {
10752 int tfo_partial = 0;
10754 KMOD_TCPSTAT_INC(tcps_connects);
10757 mac_socketpeer_set_from_mbuf(m, so);
10759 /* Do window scaling on this connection? */
10760 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
10761 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
10762 tp->rcv_scale = tp->request_r_scale;
10764 tp->rcv_adv += min(tp->rcv_wnd,
10765 TCP_MAXWIN << tp->rcv_scale);
10767 * If not all the data that was sent in the TFO SYN
10768 * has been acked, resend the remainder right away.
10770 if (IS_FASTOPEN(tp->t_flags) &&
10771 (tp->snd_una != tp->snd_max)) {
10772 tp->snd_nxt = th->th_ack;
10776 * If there's data, delay ACK; if there's also a FIN ACKNOW
10777 * will be turned on later.
10779 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) {
10780 rack_timer_cancel(tp, rack,
10781 rack->r_ctl.rc_rcvtime, __LINE__);
10782 tp->t_flags |= TF_DELACK;
10784 rack->r_wanted_output = 1;
10785 tp->t_flags |= TF_ACKNOW;
10786 rack->rc_dack_toggle = 0;
10788 if (((thflags & (TH_CWR | TH_ECE)) == TH_ECE) &&
10789 (V_tcp_do_ecn == 1)) {
10790 tp->t_flags2 |= TF2_ECN_PERMIT;
10791 KMOD_TCPSTAT_INC(tcps_ecn_shs);
10793 if (SEQ_GT(th->th_ack, tp->snd_una)) {
10795 * We advance snd_una for the
10796 * fast open case. If th_ack is
10797 * acknowledging data beyond
10798 * snd_una we can't just call
10799 * ack-processing since the
10800 * data stream in our send-map
10801 * will start at snd_una + 1 (one
10802 * beyond the SYN). If it's just
10803 * equal we don't need to do that
10804 * and there is no send_map.
10809 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions:
10810 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1
10812 tp->t_starttime = ticks;
10813 if (tp->t_flags & TF_NEEDFIN) {
10814 tcp_state_change(tp, TCPS_FIN_WAIT_1);
10815 tp->t_flags &= ~TF_NEEDFIN;
10816 thflags &= ~TH_SYN;
10818 tcp_state_change(tp, TCPS_ESTABLISHED);
10819 TCP_PROBE5(connect__established, NULL, tp,
10820 mtod(m, const char *), tp, th);
10821 rack_cc_conn_init(tp);
10825 * Received initial SYN in SYN-SENT[*] state => simultaneous
10826 * open. If segment contains CC option and there is a
10827 * cached CC, apply TAO test. If it succeeds, connection is *
10828 * half-synchronized. Otherwise, do 3-way handshake:
10829 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If
10830 * there was no CC option, clear cached CC value.
10832 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
10833 tcp_state_change(tp, TCPS_SYN_RECEIVED);
10835 INP_WLOCK_ASSERT(tp->t_inpcb);
10837 * Advance th->th_seq to correspond to first data byte. If data,
10838 * trim to stay within window, dropping FIN if necessary.
10841 if (tlen > tp->rcv_wnd) {
10842 todrop = tlen - tp->rcv_wnd;
10844 tlen = tp->rcv_wnd;
10845 thflags &= ~TH_FIN;
10846 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin);
10847 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
10849 tp->snd_wl1 = th->th_seq - 1;
10850 tp->rcv_up = th->th_seq;
10852 * Client side of transaction: already sent SYN and data. If the
10853 * remote host used T/TCP to validate the SYN, our data will be
10854 * ACK'd; if so, enter normal data segment processing in the middle
10855 * of step 5, ack processing. Otherwise, goto step 6.
10857 if (thflags & TH_ACK) {
10858 /* For syn-sent we need to possibly update the rtt */
10859 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
10862 mcts = tcp_ts_getticks();
10863 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC;
10864 if (!tp->t_rttlow || tp->t_rttlow > t)
10866 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4);
10867 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2);
10868 tcp_rack_xmit_timer_commit(rack, tp);
10870 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val))
10872 /* We may have changed to FIN_WAIT_1 above */
10873 if (tp->t_state == TCPS_FIN_WAIT_1) {
10875 * In FIN_WAIT_1 STATE in addition to the processing
10876 * for the ESTABLISHED state if our FIN is now
10877 * acknowledged then enter FIN_WAIT_2.
10879 if (ourfinisacked) {
10881 * If we can't receive any more data, then
10882 * closing user can proceed. Starting the
10883 * timer is contrary to the specification,
10884 * but if we don't get a FIN we'll hang
10887 * XXXjl: we should release the tp also, and
10888 * use a compressed state.
10890 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
10891 soisdisconnected(so);
10892 tcp_timer_activate(tp, TT_2MSL,
10893 (tcp_fast_finwait2_recycle ?
10894 tcp_finwait2_timeout :
10897 tcp_state_change(tp, TCPS_FIN_WAIT_2);
10901 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
10902 tiwin, thflags, nxt_pkt));
10906 * Return value of 1, the TCB is unlocked and most
10907 * likely gone, return value of 0, the TCP is still
10911 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
10912 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
10913 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
10915 struct tcp_rack *rack;
10916 int32_t ret_val = 0;
10917 int32_t ourfinisacked = 0;
10919 ctf_calc_rwin(so, tp);
10920 if ((thflags & TH_ACK) &&
10921 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
10922 SEQ_GT(th->th_ack, tp->snd_max))) {
10923 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
10924 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
10927 rack = (struct tcp_rack *)tp->t_fb_ptr;
10928 if (IS_FASTOPEN(tp->t_flags)) {
10930 * When a TFO connection is in SYN_RECEIVED, the
10931 * only valid packets are the initial SYN, a
10932 * retransmit/copy of the initial SYN (possibly with
10933 * a subset of the original data), a valid ACK, a
10936 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) {
10937 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
10938 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
10940 } else if (thflags & TH_SYN) {
10941 /* non-initial SYN is ignored */
10942 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) ||
10943 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) ||
10944 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) {
10945 ctf_do_drop(m, NULL);
10948 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) {
10949 ctf_do_drop(m, NULL);
10953 if ((thflags & TH_RST) ||
10954 (tp->t_fin_is_rst && (thflags & TH_FIN)))
10955 return (ctf_process_rst(m, th, so, tp));
10957 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
10958 * it's less than ts_recent, drop it.
10960 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
10961 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
10962 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
10966 * In the SYN-RECEIVED state, validate that the packet belongs to
10967 * this connection before trimming the data to fit the receive
10968 * window. Check the sequence number versus IRS since we know the
10969 * sequence numbers haven't wrapped. This is a partial fix for the
10970 * "LAND" DoS attack.
10972 if (SEQ_LT(th->th_seq, tp->irs)) {
10973 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
10974 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
10977 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
10978 &rack->r_ctl.challenge_ack_ts,
10979 &rack->r_ctl.challenge_ack_cnt)) {
10983 * If last ACK falls within this segment's sequence numbers, record
10984 * its timestamp. NOTE: 1) That the test incorporates suggestions
10985 * from the latest proposal of the tcplw@cray.com list (Braden
10986 * 1993/04/26). 2) That updating only on newer timestamps interferes
10987 * with our earlier PAWS tests, so this check should be solely
10988 * predicated on the sequence space of this segment. 3) That we
10989 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
10990 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
10991 * SEG.Len, This modified check allows us to overcome RFC1323's
10992 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
10993 * p.869. In such cases, we can still calculate the RTT correctly
10994 * when RCV.NXT == Last.ACK.Sent.
10996 if ((to->to_flags & TOF_TS) != 0 &&
10997 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
10998 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
10999 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11000 tp->ts_recent_age = tcp_ts_getticks();
11001 tp->ts_recent = to->to_tsval;
11003 tp->snd_wnd = tiwin;
11004 rack_validate_fo_sendwin_up(tp, rack);
11006 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
11007 * is on (half-synchronized state), then queue data for later
11008 * processing; else drop segment and return.
11010 if ((thflags & TH_ACK) == 0) {
11011 if (IS_FASTOPEN(tp->t_flags)) {
11012 rack_cc_conn_init(tp);
11014 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11015 tiwin, thflags, nxt_pkt));
11017 KMOD_TCPSTAT_INC(tcps_connects);
11019 /* Do window scaling? */
11020 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
11021 (TF_RCVD_SCALE | TF_REQ_SCALE)) {
11022 tp->rcv_scale = tp->request_r_scale;
11025 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* ->
11028 tp->t_starttime = ticks;
11029 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) {
11030 tcp_fastopen_decrement_counter(tp->t_tfo_pending);
11031 tp->t_tfo_pending = NULL;
11033 if (tp->t_flags & TF_NEEDFIN) {
11034 tcp_state_change(tp, TCPS_FIN_WAIT_1);
11035 tp->t_flags &= ~TF_NEEDFIN;
11037 tcp_state_change(tp, TCPS_ESTABLISHED);
11038 TCP_PROBE5(accept__established, NULL, tp,
11039 mtod(m, const char *), tp, th);
11041 * TFO connections call cc_conn_init() during SYN
11042 * processing. Calling it again here for such connections
11043 * is not harmless as it would undo the snd_cwnd reduction
11044 * that occurs when a TFO SYN|ACK is retransmitted.
11046 if (!IS_FASTOPEN(tp->t_flags))
11047 rack_cc_conn_init(tp);
11050 * Account for the ACK of our SYN prior to
11051 * regular ACK processing below, except for
11052 * simultaneous SYN, which is handled later.
11054 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN))
11057 * If segment contains data or ACK, will call tcp_reass() later; if
11058 * not, do so now to pass queued data to user.
11060 if (tlen == 0 && (thflags & TH_FIN) == 0)
11061 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0,
11063 tp->snd_wl1 = th->th_seq - 1;
11064 /* For syn-recv we need to possibly update the rtt */
11065 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
11068 mcts = tcp_ts_getticks();
11069 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC;
11070 if (!tp->t_rttlow || tp->t_rttlow > t)
11072 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5);
11073 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2);
11074 tcp_rack_xmit_timer_commit(rack, tp);
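/*
 * A rough sketch of the math above: to_tsecr echoes the millisecond
 * tick value we stamped on the segment now being ACKed and mcts is the
 * current millisecond tick count, so (mcts - to_tsecr) is the RTT in
 * milliseconds and multiplying by HPTS_USEC_IN_MSEC (1000) yields the
 * microseconds rack's timers expect.  For example mcts = 5042 and
 * tsecr = 5000 gives t = 42 * 1000 = 42000 usec; the "+ 1" passed to
 * the filter presumably keeps a 0 ms sample from being mistaken for no
 * sample at all.
 */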
11076 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
11079 if (tp->t_state == TCPS_FIN_WAIT_1) {
11080 /* We could have gone to FIN_WAIT_1 (or EST) above */
11082 * In FIN_WAIT_1 STATE in addition to the processing for the
11083 * ESTABLISHED state if our FIN is now acknowledged then
11084 * enter FIN_WAIT_2.
11086 if (ourfinisacked) {
11088 * If we can't receive any more data, then closing
11089 * user can proceed. Starting the timer is contrary
11090 * to the specification, but if we don't get a FIN
11091 * we'll hang forever.
11093 * XXXjl: we should release the tp also, and use a
11094 * compressed state.
11096 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
11097 soisdisconnected(so);
11098 tcp_timer_activate(tp, TT_2MSL,
11099 (tcp_fast_finwait2_recycle ?
11100 tcp_finwait2_timeout :
11103 tcp_state_change(tp, TCPS_FIN_WAIT_2);
11106 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11107 tiwin, thflags, nxt_pkt));
11111 * Return value of 1, the TCB is unlocked and most
11112 * likely gone, return value of 0, the TCP is still
11116 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so,
11117 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
11118 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
11120 int32_t ret_val = 0;
11121 struct tcp_rack *rack;
11124 * Header prediction: check for the two common cases of a
11125 * uni-directional data xfer. If the packet has no control flags,
11126 * is in-sequence, the window didn't change and we're not
11127 * retransmitting, it's a candidate. If the length is zero and the
11128 * ack moved forward, we're the sender side of the xfer. Just free
11129 * the data acked & wake any higher level process that was blocked
11130 * waiting for space. If the length is non-zero and the ack didn't
11131 * move, we're the receiver side. If we're getting packets in-order
11132 * (the reassembly queue is empty), add the data to the socket
11133 * buffer and note that we need a delayed ack. Make sure that the
11134 * hidden state-flags are also off. Since we check for
11135 * TCPS_ESTABLISHED first, it can only be TF_NEEDSYN.
11137 rack = (struct tcp_rack *)tp->t_fb_ptr;
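/*
 * In short, the fast paths below are only attempted when all four
 * predicates hold: no SACK blocks in the options, the header flags are
 * exactly a bare ACK, the reassembly queue is empty, and the segment is
 * the next one expected (th_seq == rcv_nxt).  rack_fastack() then
 * covers the pure-ACK (sender side) case and rack_do_fastnewdata() the
 * in-order new data (receiver side) case; anything else falls through
 * to the full slow path that follows.
 */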
11138 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) &&
11139 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) &&
11140 __predict_true(SEGQ_EMPTY(tp)) &&
11141 __predict_true(th->th_seq == tp->rcv_nxt)) {
11143 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen,
11144 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) {
11148 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen,
11149 tiwin, nxt_pkt, iptos)) {
11154 ctf_calc_rwin(so, tp);
11156 if ((thflags & TH_RST) ||
11157 (tp->t_fin_is_rst && (thflags & TH_FIN)))
11158 return (ctf_process_rst(m, th, so, tp));
11161 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
11162 * synchronized state.
11164 if (thflags & TH_SYN) {
11165 ctf_challenge_ack(m, th, tp, &ret_val);
11169 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
11170 * it's less than ts_recent, drop it.
11172 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11173 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
11174 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
11177 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11178 &rack->r_ctl.challenge_ack_ts,
11179 &rack->r_ctl.challenge_ack_cnt)) {
11183 * If last ACK falls within this segment's sequence numbers, record
11184 * its timestamp. NOTE: 1) That the test incorporates suggestions
11185 * from the latest proposal of the tcplw@cray.com list (Braden
11186 * 1993/04/26). 2) That updating only on newer timestamps interferes
11187 * with our earlier PAWS tests, so this check should be solely
11188 * predicated on the sequence space of this segment. 3) That we
11189 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11190 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
11191 * SEG.Len. This modified check allows us to overcome RFC1323's
11192 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
11193 * p.869. In such cases, we can still calculate the RTT correctly
11194 * when RCV.NXT == Last.ACK.Sent.
11196 if ((to->to_flags & TOF_TS) != 0 &&
11197 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11198 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11199 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11200 tp->ts_recent_age = tcp_ts_getticks();
11201 tp->ts_recent = to->to_tsval;
11204 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
11205 * is on (half-synchronized state), then queue data for later
11206 * processing; else drop segment and return.
11208 if ((thflags & TH_ACK) == 0) {
11209 if (tp->t_flags & TF_NEEDSYN) {
11210 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11211 tiwin, thflags, nxt_pkt));
11213 } else if (tp->t_flags & TF_ACKNOW) {
11214 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
11215 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
11218 ctf_do_drop(m, NULL);
11225 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) {
11228 if (sbavail(&so->so_snd)) {
11229 if (ctf_progress_timeout_check(tp, true)) {
11230 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
11231 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
11232 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11236 /* State changes only happen in rack_process_data() */
11237 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11238 tiwin, thflags, nxt_pkt));
11242 * Return value of 1, the TCB is unlocked and most
11243 * likely gone, return value of 0, the TCP is still
11247 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so,
11248 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
11249 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
11251 int32_t ret_val = 0;
11252 struct tcp_rack *rack;
11254 rack = (struct tcp_rack *)tp->t_fb_ptr;
11255 ctf_calc_rwin(so, tp);
11256 if ((thflags & TH_RST) ||
11257 (tp->t_fin_is_rst && (thflags & TH_FIN)))
11258 return (ctf_process_rst(m, th, so, tp));
11260 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
11261 * synchronized state.
11263 if (thflags & TH_SYN) {
11264 ctf_challenge_ack(m, th, tp, &ret_val);
11268 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
11269 * it's less than ts_recent, drop it.
11271 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11272 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
11273 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
11276 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11277 &rack->r_ctl.challenge_ack_ts,
11278 &rack->r_ctl.challenge_ack_cnt)) {
11282 * If last ACK falls within this segment's sequence numbers, record
11283 * its timestamp. NOTE: 1) That the test incorporates suggestions
11284 * from the latest proposal of the tcplw@cray.com list (Braden
11285 * 1993/04/26). 2) That updating only on newer timestamps interferes
11286 * with our earlier PAWS tests, so this check should be solely
11287 * predicated on the sequence space of this segment. 3) That we
11288 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11289 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
11290 * SEG.Len. This modified check allows us to overcome RFC1323's
11291 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
11292 * p.869. In such cases, we can still calculate the RTT correctly
11293 * when RCV.NXT == Last.ACK.Sent.
11295 if ((to->to_flags & TOF_TS) != 0 &&
11296 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11297 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11298 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11299 tp->ts_recent_age = tcp_ts_getticks();
11300 tp->ts_recent = to->to_tsval;
11303 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
11304 * is on (half-synchronized state), then queue data for later
11305 * processing; else drop segment and return.
11307 if ((thflags & TH_ACK) == 0) {
11308 if (tp->t_flags & TF_NEEDSYN) {
11309 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11310 tiwin, thflags, nxt_pkt));
11312 } else if (tp->t_flags & TF_ACKNOW) {
11313 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
11314 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
11317 ctf_do_drop(m, NULL);
11324 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) {
11327 if (sbavail(&so->so_snd)) {
11328 if (ctf_progress_timeout_check(tp, true)) {
11329 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
11330 tp, tick, PROGRESS_DROP, __LINE__);
11331 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
11332 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11336 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11337 tiwin, thflags, nxt_pkt));
11341 rack_check_data_after_close(struct mbuf *m,
11342 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so)
11344 struct tcp_rack *rack;
11346 rack = (struct tcp_rack *)tp->t_fb_ptr;
11347 if (rack->rc_allow_data_af_clo == 0) {
11349 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE);
11350 /* tcp_close will kill the inp, so pre-log the reset */
11351 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
11352 tp = tcp_close(tp);
11353 KMOD_TCPSTAT_INC(tcps_rcvafterclose);
11354 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen));
11357 if (sbavail(&so->so_snd) == 0)
11359 /* Ok we allow data that is ignored and a followup reset */
11360 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE);
11361 tp->rcv_nxt = th->th_seq + *tlen;
11362 tp->t_flags2 |= TF2_DROP_AF_DATA;
11363 rack->r_wanted_output = 1;
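/*
 * To summarize this helper: with rc_allow_data_af_clo clear (the
 * default) any data arriving after close tears the connection down and
 * sends a reset.  With the knob set, and with data of our own still
 * queued, the incoming bytes are instead swallowed (TF2_DROP_AF_DATA)
 * and r_wanted_output is raised so the output path can send the
 * follow-up the comment above refers to.
 */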
11369 * Return value of 1, the TCB is unlocked and most
11370 * likely gone, return value of 0, the TCP is still
11374 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so,
11375 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
11376 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
11378 int32_t ret_val = 0;
11379 int32_t ourfinisacked = 0;
11380 struct tcp_rack *rack;
11382 rack = (struct tcp_rack *)tp->t_fb_ptr;
11383 ctf_calc_rwin(so, tp);
11385 if ((thflags & TH_RST) ||
11386 (tp->t_fin_is_rst && (thflags & TH_FIN)))
11387 return (ctf_process_rst(m, th, so, tp));
11389 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
11390 * synchronized state.
11392 if (thflags & TH_SYN) {
11393 ctf_challenge_ack(m, th, tp, &ret_val);
11397 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
11398 * it's less than ts_recent, drop it.
11400 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11401 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
11402 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
11405 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11406 &rack->r_ctl.challenge_ack_ts,
11407 &rack->r_ctl.challenge_ack_cnt)) {
11411 * If new data are received on a connection after the user processes
11412 * are gone, then RST the other end.
11414 if ((so->so_state & SS_NOFDREF) && tlen) {
11415 if (rack_check_data_after_close(m, tp, &tlen, th, so))
11419 * If last ACK falls within this segment's sequence numbers, record
11420 * its timestamp. NOTE: 1) That the test incorporates suggestions
11421 * from the latest proposal of the tcplw@cray.com list (Braden
11422 * 1993/04/26). 2) That updating only on newer timestamps interferes
11423 * with our earlier PAWS tests, so this check should be solely
11424 * predicated on the sequence space of this segment. 3) That we
11425 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11426 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
11427 * SEG.Len, This modified check allows us to overcome RFC1323's
11428 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
11429 * p.869. In such cases, we can still calculate the RTT correctly
11430 * when RCV.NXT == Last.ACK.Sent.
11432 if ((to->to_flags & TOF_TS) != 0 &&
11433 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11434 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11435 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11436 tp->ts_recent_age = tcp_ts_getticks();
11437 tp->ts_recent = to->to_tsval;
11440 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
11441 * is on (half-synchronized state), then queue data for later
11442 * processing; else drop segment and return.
11444 if ((thflags & TH_ACK) == 0) {
11445 if (tp->t_flags & TF_NEEDSYN) {
11446 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11447 tiwin, thflags, nxt_pkt));
11448 } else if (tp->t_flags & TF_ACKNOW) {
11449 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
11450 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
11453 ctf_do_drop(m, NULL);
11460 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
11463 if (ourfinisacked) {
11465 * If we can't receive any more data, then closing user can
11466 * proceed. Starting the timer is contrary to the
11467 * specification, but if we don't get a FIN we'll hang
11470 * XXXjl: we should release the tp also, and use a
11471 * compressed state.
11473 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
11474 soisdisconnected(so);
11475 tcp_timer_activate(tp, TT_2MSL,
11476 (tcp_fast_finwait2_recycle ?
11477 tcp_finwait2_timeout :
11480 tcp_state_change(tp, TCPS_FIN_WAIT_2);
11482 if (sbavail(&so->so_snd)) {
11483 if (ctf_progress_timeout_check(tp, true)) {
11484 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
11485 tp, tick, PROGRESS_DROP, __LINE__);
11486 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
11487 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11491 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11492 tiwin, thflags, nxt_pkt));
11496 * Return value of 1, the TCB is unlocked and most
11497 * likely gone, return value of 0, the TCP is still
11501 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so,
11502 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
11503 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
11505 int32_t ret_val = 0;
11506 int32_t ourfinisacked = 0;
11507 struct tcp_rack *rack;
11509 rack = (struct tcp_rack *)tp->t_fb_ptr;
11510 ctf_calc_rwin(so, tp);
11512 if ((thflags & TH_RST) ||
11513 (tp->t_fin_is_rst && (thflags & TH_FIN)))
11514 return (ctf_process_rst(m, th, so, tp));
11516 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
11517 * synchronized state.
11519 if (thflags & TH_SYN) {
11520 ctf_challenge_ack(m, th, tp, &ret_val);
11524 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
11525 * it's less than ts_recent, drop it.
11527 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11528 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
11529 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
11532 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11533 &rack->r_ctl.challenge_ack_ts,
11534 &rack->r_ctl.challenge_ack_cnt)) {
11538 * If new data are received on a connection after the user processes
11539 * are gone, then RST the other end.
11541 if ((so->so_state & SS_NOFDREF) && tlen) {
11542 if (rack_check_data_after_close(m, tp, &tlen, th, so))
11546 * If last ACK falls within this segment's sequence numbers, record
11547 * its timestamp. NOTE: 1) That the test incorporates suggestions
11548 * from the latest proposal of the tcplw@cray.com list (Braden
11549 * 1993/04/26). 2) That updating only on newer timestamps interferes
11550 * with our earlier PAWS tests, so this check should be solely
11551 * predicated on the sequence space of this segment. 3) That we
11552 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11553 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
11554 * SEG.Len, This modified check allows us to overcome RFC1323's
11555 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
11556 * p.869. In such cases, we can still calculate the RTT correctly
11557 * when RCV.NXT == Last.ACK.Sent.
11559 if ((to->to_flags & TOF_TS) != 0 &&
11560 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11561 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11562 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11563 tp->ts_recent_age = tcp_ts_getticks();
11564 tp->ts_recent = to->to_tsval;
11567 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
11568 * is on (half-synchronized state), then queue data for later
11569 * processing; else drop segment and return.
11571 if ((thflags & TH_ACK) == 0) {
11572 if (tp->t_flags & TF_NEEDSYN) {
11573 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11574 tiwin, thflags, nxt_pkt));
11575 } else if (tp->t_flags & TF_ACKNOW) {
11576 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
11577 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
11580 ctf_do_drop(m, NULL);
11587 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
11590 if (ourfinisacked) {
11595 if (sbavail(&so->so_snd)) {
11596 if (ctf_progress_timeout_check(tp, true)) {
11597 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
11598 tp, tick, PROGRESS_DROP, __LINE__);
11599 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
11600 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11604 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11605 tiwin, thflags, nxt_pkt));
11609 * Return value of 1, the TCB is unlocked and most
11610 * likely gone, return value of 0, the TCP is still
11614 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
11615 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
11616 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
11618 int32_t ret_val = 0;
11619 int32_t ourfinisacked = 0;
11620 struct tcp_rack *rack;
11622 rack = (struct tcp_rack *)tp->t_fb_ptr;
11623 ctf_calc_rwin(so, tp);
11625 if ((thflags & TH_RST) ||
11626 (tp->t_fin_is_rst && (thflags & TH_FIN)))
11627 return (ctf_process_rst(m, th, so, tp));
11629 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
11630 * synchronized state.
11632 if (thflags & TH_SYN) {
11633 ctf_challenge_ack(m, th, tp, &ret_val);
11637 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
11638 * it's less than ts_recent, drop it.
11640 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11641 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
11642 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
11645 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11646 &rack->r_ctl.challenge_ack_ts,
11647 &rack->r_ctl.challenge_ack_cnt)) {
11651 * If new data are received on a connection after the user processes
11652 * are gone, then RST the other end.
11654 if ((so->so_state & SS_NOFDREF) && tlen) {
11655 if (rack_check_data_after_close(m, tp, &tlen, th, so))
11659 * If last ACK falls within this segment's sequence numbers, record
11660 * its timestamp. NOTE: 1) That the test incorporates suggestions
11661 * from the latest proposal of the tcplw@cray.com list (Braden
11662 * 1993/04/26). 2) That updating only on newer timestamps interferes
11663 * with our earlier PAWS tests, so this check should be solely
11664 * predicated on the sequence space of this segment. 3) That we
11665 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11666 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
11667 * SEG.Len, This modified check allows us to overcome RFC1323's
11668 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
11669 * p.869. In such cases, we can still calculate the RTT correctly
11670 * when RCV.NXT == Last.ACK.Sent.
11672 if ((to->to_flags & TOF_TS) != 0 &&
11673 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11674 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11675 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11676 tp->ts_recent_age = tcp_ts_getticks();
11677 tp->ts_recent = to->to_tsval;
11680 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
11681 * is on (half-synchronized state), then queue data for later
11682 * processing; else drop segment and return.
11684 if ((thflags & TH_ACK) == 0) {
11685 if (tp->t_flags & TF_NEEDSYN) {
11686 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11687 tiwin, thflags, nxt_pkt));
11688 } else if (tp->t_flags & TF_ACKNOW) {
11689 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
11690 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
11693 ctf_do_drop(m, NULL);
11698 * case TCPS_LAST_ACK: Ack processing.
11700 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
11703 if (ourfinisacked) {
11704 tp = tcp_close(tp);
11705 ctf_do_drop(m, tp);
11708 if (sbavail(&so->so_snd)) {
11709 if (ctf_progress_timeout_check(tp, true)) {
11710 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
11711 tp, tick, PROGRESS_DROP, __LINE__);
11712 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
11713 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11717 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11718 tiwin, thflags, nxt_pkt));
11722 * Return value of 1, the TCB is unlocked and most
11723 * likely gone, return value of 0, the TCP is still
11727 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
11728 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
11729 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
11731 int32_t ret_val = 0;
11732 int32_t ourfinisacked = 0;
11733 struct tcp_rack *rack;
11735 rack = (struct tcp_rack *)tp->t_fb_ptr;
11736 ctf_calc_rwin(so, tp);
11738 /* Reset receive buffer auto scaling when not in bulk receive mode. */
11739 if ((thflags & TH_RST) ||
11740 (tp->t_fin_is_rst && (thflags & TH_FIN)))
11741 return (ctf_process_rst(m, th, so, tp));
11743 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
11744 * synchronized state.
11746 if (thflags & TH_SYN) {
11747 ctf_challenge_ack(m, th, tp, &ret_val);
11751 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
11752 * it's less than ts_recent, drop it.
11754 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
11755 TSTMP_LT(to->to_tsval, tp->ts_recent)) {
11756 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
11759 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
11760 &rack->r_ctl.challenge_ack_ts,
11761 &rack->r_ctl.challenge_ack_cnt)) {
11765 * If new data are received on a connection after the user processes
11766 * are gone, then RST the other end.
11768 if ((so->so_state & SS_NOFDREF) &&
11770 if (rack_check_data_after_close(m, tp, &tlen, th, so))
11774 * If last ACK falls within this segment's sequence numbers, record
11775 * its timestamp. NOTE: 1) That the test incorporates suggestions
11776 * from the latest proposal of the tcplw@cray.com list (Braden
11777 * 1993/04/26). 2) That updating only on newer timestamps interferes
11778 * with our earlier PAWS tests, so this check should be solely
11779 * predicated on the sequence space of this segment. 3) That we
11780 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
11781 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
11782 * SEG.Len, This modified check allows us to overcome RFC1323's
11783 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
11784 * p.869. In such cases, we can still calculate the RTT correctly
11785 * when RCV.NXT == Last.ACK.Sent.
11787 if ((to->to_flags & TOF_TS) != 0 &&
11788 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
11789 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
11790 ((thflags & (TH_SYN | TH_FIN)) != 0))) {
11791 tp->ts_recent_age = tcp_ts_getticks();
11792 tp->ts_recent = to->to_tsval;
11795 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
11796 * is on (half-synchronized state), then queue data for later
11797 * processing; else drop segment and return.
11799 if ((thflags & TH_ACK) == 0) {
11800 if (tp->t_flags & TF_NEEDSYN) {
11801 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11802 tiwin, thflags, nxt_pkt));
11803 } else if (tp->t_flags & TF_ACKNOW) {
11804 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
11805 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
11808 ctf_do_drop(m, NULL);
11815 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
11818 if (sbavail(&so->so_snd)) {
11819 if (ctf_progress_timeout_check(tp, true)) {
11820 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
11821 tp, tick, PROGRESS_DROP, __LINE__);
11822 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
11823 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
11827 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
11828 tiwin, thflags, nxt_pkt));
11832 rack_clear_rate_sample(struct tcp_rack *rack)
11834 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY;
11835 rack->r_ctl.rack_rs.rs_rtt_cnt = 0;
11836 rack->r_ctl.rack_rs.rs_rtt_tot = 0;
11840 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override)
11842 uint64_t bw_est, rate_wanted;
11844 uint32_t user_max, orig_min, orig_max;
11846 orig_min = rack->r_ctl.rc_pace_min_segs;
11847 orig_max = rack->r_ctl.rc_pace_max_segs;
11848 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs;
11849 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs)
11851 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp);
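/*
 * A quick map of the cases below: a forced max-seg, or a fixed rate
 * with no matching hardware rate entry, simply uses the user supplied
 * maximum (user_max); a fixed rate that matches the hardware rate
 * (crte) asks tcp_get_pacing_burst_size() for a burst sized to it;
 * ordinary pacing with any bandwidth estimate (gp_bw, peak rate or
 * init_rate) sizes the burst via rack_get_pacing_len() at the wanted
 * rate (optionally taken from *fill_override); and with no estimate at
 * all we limit bursts to the initial window.  The result is finally
 * clamped to PACE_MAX_IP_BYTES.
 */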
11852 if (rack->use_fixed_rate || rack->rc_force_max_seg) {
11853 if (user_max != rack->r_ctl.rc_pace_max_segs)
11856 if (rack->rc_force_max_seg) {
11857 rack->r_ctl.rc_pace_max_segs = user_max;
11858 } else if (rack->use_fixed_rate) {
11859 bw_est = rack_get_bw(rack);
11860 if ((rack->r_ctl.crte == NULL) ||
11861 (bw_est != rack->r_ctl.crte->rate)) {
11862 rack->r_ctl.rc_pace_max_segs = user_max;
11864 /* We are pacing right at the hardware rate */
11867 segsiz = min(ctf_fixed_maxseg(tp),
11868 rack->r_ctl.rc_pace_min_segs);
11869 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(
11870 tp, bw_est, segsiz, 0,
11871 rack->r_ctl.crte, NULL);
11873 } else if (rack->rc_always_pace) {
11874 if (rack->r_ctl.gp_bw ||
11875 #ifdef NETFLIX_PEAKRATE
11876 rack->rc_tp->t_maxpeakrate ||
11878 rack->r_ctl.init_rate) {
11879 /* We have a rate of some sort set */
11882 bw_est = rack_get_bw(rack);
11883 orig = rack->r_ctl.rc_pace_max_segs;
11885 rate_wanted = *fill_override;
11887 rate_wanted = rack_get_output_bw(rack, bw_est, NULL, NULL);
11889 /* We have something */
11890 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack,
11892 ctf_fixed_maxseg(rack->rc_tp));
11894 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs;
11895 if (orig != rack->r_ctl.rc_pace_max_segs)
11897 } else if ((rack->r_ctl.gp_bw == 0) &&
11898 (rack->r_ctl.rc_pace_max_segs == 0)) {
11900 * If we have nothing limit us to bursting
11901 * out IW sized pieces.
11904 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack);
11907 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) {
11909 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES;
11912 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2);
11917 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack)
11920 struct ip6_hdr *ip6 = NULL;
11923 struct ip *ip = NULL;
11925 struct udphdr *udp = NULL;
11927 /* OK, let's fill in the fast block; it can only be used with no IP options! */
11929 if (rack->r_is_v6) {
11930 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
11931 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
11933 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr);
11934 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
11935 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
11936 udp->uh_dport = tp->t_port;
11937 rack->r_ctl.fsb.udp = udp;
11938 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1);
11941 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1);
11942 rack->r_ctl.fsb.udp = NULL;
11944 tcpip_fillheaders(rack->rc_inp,
11946 ip6, rack->r_ctl.fsb.th);
11950 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr);
11951 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
11953 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr);
11954 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
11955 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
11956 udp->uh_dport = tp->t_port;
11957 rack->r_ctl.fsb.udp = udp;
11958 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1);
11961 rack->r_ctl.fsb.udp = NULL;
11962 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1);
11964 tcpip_fillheaders(rack->rc_inp,
11966 ip, rack->r_ctl.fsb.th);
11968 rack->r_fsb_inited = 1;
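/*
 * The prebuilt header filled in above is laid out back to back:
 *
 *   v6: [ struct ip6_hdr ][ struct udphdr, only if t_port ][ struct tcphdr ]
 *   v4: [ struct ip      ][ struct udphdr, only if t_port ][ struct tcphdr ]
 *
 * fsb.th and fsb.udp point into this buffer and tcp_ip_hdr_len holds
 * its total length, letting the fast output path copy a ready-made
 * header rather than rebuilding one per packet.  As noted above, this
 * only works when there are no IP options.
 */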
11972 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack)
11975 * Allocate the larger of the two spaces, V6 if available else just
11976 * V4, and include a udphdr (overbook).
11979 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr);
11981 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr);
11983 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len,
11984 M_TCPFSB, M_NOWAIT|M_ZERO);
11985 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) {
11988 rack->r_fsb_inited = 0;
11993 rack_init(struct tcpcb *tp)
11995 struct tcp_rack *rack = NULL;
11996 struct rack_sendmap *insret;
11997 uint32_t iwin, snt, us_cts;
12000 tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT);
12001 if (tp->t_fb_ptr == NULL) {
12003 * We need to allocate memory but can't. The INP and INP_INFO
12004 * locks are held and they are recursive (this happens during setup), so a
12005 * scheme to drop the locks fails :(
12010 memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack));
12012 rack = (struct tcp_rack *)tp->t_fb_ptr;
12013 RB_INIT(&rack->r_ctl.rc_mtree);
12014 TAILQ_INIT(&rack->r_ctl.rc_free);
12015 TAILQ_INIT(&rack->r_ctl.rc_tmap);
12017 rack->rc_inp = tp->t_inpcb;
12019 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
12020 /* Probably not needed but lets be sure */
12021 rack_clear_rate_sample(rack);
12023 * Save off the default values, socket options will poke
12024 * at these if pacing is not on or we have not yet
12025 * reached where pacing is on (gp_ready/fixed enabled).
12026 * When they get set into the CC module (when gp_ready
12027 * is enabled or we enable fixed) then we will set these
12028 * values into the CC and place in here the old values
12029 * so we have a restoral. Then we will set the flag
12030 * rc_pacing_cc_set. That way whenever we turn off pacing
12031 * or switch off this stack, we will know to go restore
12032 * the saved values.
12034 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn;
12035 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn;
12036 /* We want abe like behavior as well */
12037 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN;
12038 rack->r_ctl.rc_reorder_fade = rack_reorder_fade;
12039 rack->rc_allow_data_af_clo = rack_ignore_data_after_close;
12040 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh;
12042 rack->use_rack_rr = 1;
12043 if (V_tcp_delack_enabled)
12044 tp->t_delayed_ack = 1;
12046 tp->t_delayed_ack = 0;
12047 #ifdef TCP_ACCOUNTING
12048 if (rack_tcp_accounting) {
12049 tp->t_flags2 |= TF2_TCP_ACCOUNTING;
12052 if (rack_enable_shared_cwnd)
12053 rack->rack_enable_scwnd = 1;
12054 rack->rc_user_set_max_segs = rack_hptsi_segments;
12055 rack->rc_force_max_seg = 0;
12056 if (rack_use_imac_dack)
12057 rack->rc_dack_mode = 1;
12058 TAILQ_INIT(&rack->r_ctl.opt_list);
12059 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh;
12060 rack->r_ctl.rc_pkt_delay = rack_pkt_delay;
12061 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp;
12062 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff;
12063 rack->r_ctl.rc_highest_us_rtt = 0;
12064 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap;
12065 if (rack_use_cmp_acks)
12066 rack->r_use_cmp_ack = 1;
12067 if (rack_disable_prr)
12068 rack->rack_no_prr = 1;
12069 if (rack_gp_no_rec_chg)
12070 rack->rc_gp_no_rec_chg = 1;
12071 if (rack_pace_every_seg && tcp_can_enable_pacing()) {
12072 rack->rc_always_pace = 1;
12073 if (rack->use_fixed_rate || rack->gp_ready)
12074 rack_set_cc_pacing(rack);
12076 rack->rc_always_pace = 0;
12077 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack)
12078 rack->r_mbuf_queue = 1;
12080 rack->r_mbuf_queue = 0;
12081 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
12082 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
12084 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
12085 rack_set_pace_segments(tp, rack, __LINE__, NULL);
12086 if (rack_limits_scwnd)
12087 rack->r_limit_scw = 1;
12089 rack->r_limit_scw = 0;
12090 rack->rc_labc = V_tcp_abc_l_var;
12091 rack->r_ctl.rc_high_rwnd = tp->snd_wnd;
12092 rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
12093 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method;
12094 rack->rack_tlp_threshold_use = rack_tlp_threshold_use;
12095 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr;
12096 rack->r_ctl.rc_min_to = rack_min_to;
12097 microuptime(&rack->r_ctl.act_rcv_time);
12098 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time;
12099 rack->r_running_late = 0;
12100 rack->r_running_early = 0;
12101 rack->rc_init_win = rack_default_init_window;
12102 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss;
12103 if (rack_hw_up_only)
12104 rack->r_up_only = 1;
12105 if (rack_do_dyn_mul) {
12106 /* When dynamic adjustment is on CA needs to start at 100% */
12107 rack->rc_gp_dyn_mul = 1;
12108 if (rack_do_dyn_mul >= 100)
12109 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul;
12111 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca;
12112 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec;
12113 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
12114 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time);
12115 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN,
12116 rack_probertt_filter_life);
12117 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
12118 rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
12119 rack->r_ctl.rc_time_of_last_probertt = us_cts;
12120 rack->r_ctl.challenge_ack_ts = tcp_ts_getticks();
12121 rack->r_ctl.rc_time_probertt_starts = 0;
12122 /* We require at least one measurement, even if the sysctl is 0 */
12123 if (rack_req_measurements)
12124 rack->r_ctl.req_measurements = rack_req_measurements;
12126 rack->r_ctl.req_measurements = 1;
12127 if (rack_enable_hw_pacing)
12128 rack->rack_hdw_pace_ena = 1;
12129 if (rack_hw_rate_caps)
12130 rack->r_rack_hw_rate_caps = 1;
12131 /* Do we force on detection? */
12132 #ifdef NETFLIX_EXP_DETECTION
12133 if (tcp_force_detection)
12134 rack->do_detection = 1;
12137 rack->do_detection = 0;
12138 if (rack_non_rxt_use_cr)
12139 rack->rack_rec_nonrxt_use_cr = 1;
12140 err = rack_init_fsb(tp, rack);
12142 uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
12143 tp->t_fb_ptr = NULL;
12146 if (tp->snd_una != tp->snd_max) {
12147 /* Create a send map for the current outstanding data */
12148 struct rack_sendmap *rsm;
12150 rsm = rack_alloc(rack);
12152 uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
12153 tp->t_fb_ptr = NULL;
12156 rsm->r_no_rtt_allowed = 1;
12157 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
12158 rsm->r_rtr_cnt = 1;
12159 rsm->r_rtr_bytes = 0;
12160 if (tp->t_flags & TF_SENTFIN) {
12161 rsm->r_end = tp->snd_max - 1;
12162 rsm->r_flags |= RACK_HAS_FIN;
12164 rsm->r_end = tp->snd_max;
12166 if (tp->snd_una == tp->iss) {
12167 /* The data space is one beyond snd_una */
12168 rsm->r_flags |= RACK_HAS_SYN;
12169 rsm->r_start = tp->iss;
12170 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una);
12172 rsm->r_start = tp->snd_una;
12174 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) {
12175 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff);
12176 rsm->orig_m_len = rsm->m->m_len;
12179 * This can happen if we have a stand-alone FIN or
12183 rsm->orig_m_len = 0;
12186 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
12188 if (insret != NULL) {
12189 panic("Insert in rb tree fails ret:%p rack:%p rsm:%p",
12190 insret, rack, rsm);
12193 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
12194 rsm->r_in_tmap = 1;
12197 * Timers in Rack are kept in microseconds so let's
12198 * convert any initial incoming variables
12199 * from ticks into usecs. Note that we
12200 * also change the values of t_srtt and t_rttvar, if
12201 * they are non-zero. They are kept with a 5
12202 * bit decimal so we have to carefully convert
12203 * these to get the full precision.
12205 rack_convert_rtts(tp);
12206 tp->t_rttlow = TICKS_2_USEC(tp->t_rttlow);
12207 if (rack_def_profile)
12208 rack_set_profile(rack, rack_def_profile);
12209 /* Cancel the GP measurement in progress */
12210 tp->t_flags &= ~TF_GPUTINPROG;
12211 if (SEQ_GT(tp->snd_max, tp->iss))
12212 snt = tp->snd_max - tp->iss;
12215 iwin = rc_init_window(rack);
12217 /* We are not past the initial window
12218 * so we need to make sure cwnd is
12221 if (tp->snd_cwnd < iwin)
12222 tp->snd_cwnd = iwin;
12224 * If we are within the initial window
12225 * we want ssthresh to be unlimited. Setting
12226 * it to the rwnd (which the default stack does
12227 * and older racks) is not really a good idea
12228 * since we want to be in SS and grow both the
12229 * cwnd and the rwnd (via dynamic rwnd growth). If
12230 * we set it to the rwnd then as the peer grows its
12231 * rwnd we will be stuck in CA and never hit SS.
12233 * It's far better to raise it up high (this takes the
12234 * risk that there has been a loss already; probably
12235 * we should have an indicator in all stacks of loss
12236 * but we don't), but considering the normal use this
12237 * is a risk worth taking. The consequences of not
12238 * hitting SS are far worse than going one more time
12239 * into it early on (before we have sent even an IW).
12240 * It is highly unlikely that we will have had a loss
12241 * before getting the IW out.
12243 tp->snd_ssthresh = 0xffffffff;
12245 rack_stop_all_timers(tp);
12246 /* Lets setup the fsb block */
12247 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
12248 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur,
12249 __LINE__, RACK_RTTS_INIT);
12254 rack_handoff_ok(struct tcpcb *tp)
12256 if ((tp->t_state == TCPS_CLOSED) ||
12257 (tp->t_state == TCPS_LISTEN)) {
12258 /* Sure no problem though it may not stick */
12261 if ((tp->t_state == TCPS_SYN_SENT) ||
12262 (tp->t_state == TCPS_SYN_RECEIVED)) {
12264 * We really don't know if you support sack,
12265 * you have to get to ESTAB or beyond to tell.
12269 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) {
12271 * Rack will only send a FIN after all data is acknowledged.
12272 * So in this case we have more data outstanding. We can't
12273 * switch stacks until either all data and only the FIN
12274 * is left (in which case rack_init() now knows how
12275 * to deal with that) <or> all is acknowledged and we
12276 * are only left with incoming data, though why you
12277 * would want to switch to rack after all data is acknowledged
12278 * I have no idea (rrs)!
12282 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){
12286 * If we reach here we don't do SACK on this connection so we can
12294 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged)
12298 if (tp->t_fb_ptr) {
12299 struct tcp_rack *rack;
12300 struct rack_sendmap *rsm, *nrsm, *rm;
12302 rack = (struct tcp_rack *)tp->t_fb_ptr;
12303 if (tp->t_in_pkt) {
12305 * Since we are switching we need to process any
12306 * inbound packets in case a compressed ack is
12307 * in queue or the new stack does not support
12308 * mbuf queuing. These packets in theory should
12309 * have been handled by the old stack anyway.
12311 if ((rack->rc_inp->inp_flags & (INP_DROPPED|INP_TIMEWAIT)) ||
12312 (rack->rc_inp->inp_flags2 & INP_FREED)) {
12313 /* Kill all the packets */
12314 struct mbuf *save, *m;
12317 tp->t_in_pkt = NULL;
12318 tp->t_tail_pkt = NULL;
12320 save = m->m_nextpkt;
12321 m->m_nextpkt = NULL;
12326 /* Process all the packets */
12327 ctf_do_queued_segments(rack->rc_inp->inp_socket, rack->rc_tp, 0);
12329 if ((tp->t_inpcb) &&
12330 (tp->t_inpcb->inp_flags2 & INP_MBUF_ACKCMP))
12333 /* Tally whether we used large or small mbufs (if ack-cmp was used). */
12334 if (rack->rc_inp->inp_flags2 & INP_MBUF_L_ACKS)
12335 counter_u64_add(rack_large_ackcmp, 1);
12337 counter_u64_add(rack_small_ackcmp, 1);
12340 tp->t_flags &= ~TF_FORCEDATA;
12341 #ifdef NETFLIX_SHARED_CWND
12342 if (rack->r_ctl.rc_scw) {
12345 if (rack->r_limit_scw)
12346 limit = max(1, rack->r_ctl.rc_lowest_us_rtt);
12349 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw,
12350 rack->r_ctl.rc_scw_index,
12352 rack->r_ctl.rc_scw = NULL;
12355 if (rack->r_ctl.fsb.tcp_ip_hdr) {
12356 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB);
12357 rack->r_ctl.fsb.tcp_ip_hdr = NULL;
12358 rack->r_ctl.fsb.th = NULL;
12360 /* Convert back to ticks, restoring the shifted fixed-point format */
12361 if (tp->t_srtt > 1) {
12362 uint32_t val, frac;
12364 val = USEC_2_TICKS(tp->t_srtt);
12365 frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz);
12366 tp->t_srtt = val << TCP_RTT_SHIFT;
12368 * frac is the fractional part that is left
12369 * over from converting to hz and shifting.
12370 * We need to convert this to the 5 bit
12375 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC);
12377 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC);
12379 tp->t_srtt += frac;
12382 if (tp->t_rttvar) {
12383 uint32_t val, frac;
12385 val = USEC_2_TICKS(tp->t_rttvar);
12386 frac = tp->t_rttvar % (HPTS_USEC_IN_SEC / hz);
12387 tp->t_rttvar = val << TCP_RTTVAR_SHIFT;
12389 * frac is the fractional part that is left
12390 * over from converting to hz and shifting.
12391 * We need to convert this to the 5 bit
12396 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC);
12398 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC);
12400 tp->t_rttvar += frac;
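/*
 * Worked example of the conversion, assuming hz = 1000 (one tick =
 * 1000 usec): a t_srtt of 45250 usec gives val = 45 ticks and
 * frac = 250 usec; the remainder is rescaled into the 5-bit fraction,
 * 250 * TCP_RTT_SCALE / 1000 = 8, so the stored value becomes
 * (45 << TCP_RTT_SHIFT) + 8 = 1448.  t_rttvar goes through the same
 * steps with TCP_RTTVAR_SHIFT.
 */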
12403 tp->t_rxtcur = USEC_2_TICKS(tp->t_rxtcur);
12404 tp->t_rttlow = USEC_2_TICKS(tp->t_rttlow);
12405 if (rack->rc_always_pace) {
12406 tcp_decrement_paced_conn();
12407 rack_undo_cc_pacing(rack);
12408 rack->rc_always_pace = 0;
12410 /* Clean up any options if they were not applied */
12411 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) {
12412 struct deferred_opt_list *dol;
12414 dol = TAILQ_FIRST(&rack->r_ctl.opt_list);
12415 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next);
12416 free(dol, M_TCPDO);
12418 /* rack does not use force data but other stacks may clear it */
12419 if (rack->r_ctl.crte != NULL) {
12420 tcp_rel_pacing_rate(rack->r_ctl.crte, tp);
12421 rack->rack_hdrw_pacing = 0;
12422 rack->r_ctl.crte = NULL;
12424 #ifdef TCP_BLACKBOX
12425 tcp_log_flowend(tp);
12427 RB_FOREACH_SAFE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm) {
12428 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
12431 panic("At fini, rack:%p rsm:%p rm:%p",
12435 uma_zfree(rack_zone, rsm);
12437 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
12439 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
12440 uma_zfree(rack_zone, rsm);
12441 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
12443 rack->rc_free_cnt = 0;
12444 uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
12445 tp->t_fb_ptr = NULL;
12448 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
12449 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_QUEUE_READY;
12450 tp->t_inpcb->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
12451 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_ACKCMP;
12452 /* Cancel the GP measurement in progress */
12453 tp->t_flags &= ~TF_GPUTINPROG;
12454 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_L_ACKS;
12456 /* Make sure snd_nxt is correctly set */
12457 tp->snd_nxt = tp->snd_max;
12461 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack)
12463 switch (tp->t_state) {
12464 case TCPS_SYN_SENT:
12465 rack->r_state = TCPS_SYN_SENT;
12466 rack->r_substate = rack_do_syn_sent;
12468 case TCPS_SYN_RECEIVED:
12469 rack->r_state = TCPS_SYN_RECEIVED;
12470 rack->r_substate = rack_do_syn_recv;
12472 case TCPS_ESTABLISHED:
12473 rack_set_pace_segments(tp, rack, __LINE__, NULL);
12474 rack->r_state = TCPS_ESTABLISHED;
12475 rack->r_substate = rack_do_established;
12477 case TCPS_CLOSE_WAIT:
12478 rack_set_pace_segments(tp, rack, __LINE__, NULL);
12479 rack->r_state = TCPS_CLOSE_WAIT;
12480 rack->r_substate = rack_do_close_wait;
12482 case TCPS_FIN_WAIT_1:
12483 rack_set_pace_segments(tp, rack, __LINE__, NULL);
12484 rack->r_state = TCPS_FIN_WAIT_1;
12485 rack->r_substate = rack_do_fin_wait_1;
12488 rack_set_pace_segments(tp, rack, __LINE__, NULL);
12489 rack->r_state = TCPS_CLOSING;
12490 rack->r_substate = rack_do_closing;
12492 case TCPS_LAST_ACK:
12493 rack_set_pace_segments(tp, rack, __LINE__, NULL);
12494 rack->r_state = TCPS_LAST_ACK;
12495 rack->r_substate = rack_do_lastack;
12497 case TCPS_FIN_WAIT_2:
12498 rack_set_pace_segments(tp, rack, __LINE__, NULL);
12499 rack->r_state = TCPS_FIN_WAIT_2;
12500 rack->r_substate = rack_do_fin_wait_2;
12504 case TCPS_TIME_WAIT:
12508 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
12509 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
12514 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb)
12517 * We received an ack, and then did not
12518 * call send or were bounced out because
12519 * hpts was running. Now a timer is up as well, is
12520 * it the right timer?
12522 struct rack_sendmap *rsm;
12525 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
12526 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT))
12528 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
12529 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) &&
12530 (tmr_up == PACE_TMR_RXT)) {
12531 /* Should be an RXT */
12535 /* Nothing outstanding? */
12536 if (tp->t_flags & TF_DELACK) {
12537 if (tmr_up == PACE_TMR_DELACK)
12538 /* We are supposed to have delayed ack up and we do */
12540 } else if (sbavail(&tp->t_inpcb->inp_socket->so_snd) && (tmr_up == PACE_TMR_RXT)) {
12542 * if we hit enobufs then we would expect the possibility
12543 * of nothing outstanding and the RXT up (and the hptsi timer).
12546 } else if (((V_tcp_always_keepalive ||
12547 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
12548 (tp->t_state <= TCPS_CLOSING)) &&
12549 (tmr_up == PACE_TMR_KEEP) &&
12550 (tp->snd_max == tp->snd_una)) {
12551 /* We should have keep alive up and we do */
12555 if (SEQ_GT(tp->snd_max, tp->snd_una) &&
12556 ((tmr_up == PACE_TMR_TLP) ||
12557 (tmr_up == PACE_TMR_RACK) ||
12558 (tmr_up == PACE_TMR_RXT))) {
12560 * Either a Rack, TLP or RXT is fine if we
12561 * have outstanding data.
12564 } else if (tmr_up == PACE_TMR_DELACK) {
12566 * If the delayed ack was going to go off
12567 * before the rtx/tlp/rack timer were going to
12568 * expire, then that would be the timer in control.
12569 * Note we don't check the time here trusting the
12575 * OK, the timer originally started is not what we want now.
12576 * We will force hpts to be stopped, if it is running, and restart
12577 * with the slot set to what was in the saved slot.
12579 if (rack->rc_inp->inp_in_hpts) {
12580 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
12583 us_cts = tcp_get_usecs(NULL);
12584 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
12586 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts);
12588 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
12590 tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT);
12592 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
12593 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
12598 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts, uint32_t high_seq)
12600 tp->snd_wnd = tiwin;
12601 rack_validate_fo_sendwin_up(tp, rack);
12604 if (tp->snd_wnd > tp->max_sndwnd)
12605 tp->max_sndwnd = tp->snd_wnd;
12606 if (tp->snd_wnd < (tp->snd_max - high_seq)) {
12607 /* The peer collapsed the window */
12608 rack_collapsed_window(rack);
12609 } else if (rack->rc_has_collapsed)
12610 rack_un_collapse_window(rack);
12611 /* Do we exit persists? */
12612 if ((rack->rc_in_persist != 0) &&
12613 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
12614 rack->r_ctl.rc_pace_min_segs))) {
12615 rack_exit_persist(tp, rack, cts);
12617 /* Do we enter persists? */
12618 if ((rack->rc_in_persist == 0) &&
12619 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
12620 TCPS_HAVEESTABLISHED(tp->t_state) &&
12621 (tp->snd_max == tp->snd_una) &&
12622 sbavail(&tp->t_inpcb->inp_socket->so_snd) &&
12623 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) {
12625 * Here the rwnd is less than
12626 * the pacing size, we are established,
12627 * nothing is outstanding, and there is
12628 * data to send. Enter persists.
12630 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
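/*
 * The persist boundary used above is min(rc_high_rwnd / 2,
 * rc_pace_min_segs).  For example, with a highest-seen window of
 * 64000 bytes and a 1448 byte minimum pacing segment the boundary is
 * 1448: we drop into persists only when the peer's window falls below
 * one segment while nothing is in flight and data is waiting, and we
 * exit again once the window opens back up to at least that much.
 */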
12635 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq)
12638 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
12639 union tcp_log_stackspecific log;
12640 struct timeval ltv;
12641 char tcp_hdr_buf[60];
12643 struct timespec ts;
12644 uint32_t orig_snd_una;
12647 #ifdef NETFLIX_HTTP_LOGGING
12648 struct http_sendfile_track *http_req;
12650 if (SEQ_GT(ae->ack, tp->snd_una)) {
12651 http_req = tcp_http_find_req_for_seq(tp, (ae->ack-1));
12653 http_req = tcp_http_find_req_for_seq(tp, ae->ack);
12656 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
12657 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
12658 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
12659 if (rack->rack_no_prr == 0)
12660 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
12662 log.u_bbr.flex1 = 0;
12663 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
12664 log.u_bbr.use_lt_bw <<= 1;
12665 log.u_bbr.use_lt_bw |= rack->r_might_revert;
12666 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
12667 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked);
12668 log.u_bbr.pkts_out = tp->t_maxseg;
12669 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
12670 log.u_bbr.flex7 = 1;
12671 log.u_bbr.lost = ae->flags;
12672 log.u_bbr.cwnd_gain = ackval;
12673 log.u_bbr.pacing_gain = 0x2;
12674 if (ae->flags & TSTMP_HDWR) {
12675 /* Record the hardware timestamp if present */
12676 log.u_bbr.flex3 = M_TSTMP;
12677 ts.tv_sec = ae->timestamp / 1000000000;
12678 ts.tv_nsec = ae->timestamp % 1000000000;
12679 ltv.tv_sec = ts.tv_sec;
12680 ltv.tv_usec = ts.tv_nsec / 1000;
12681 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v);
12682 } else if (ae->flags & TSTMP_LRO) {
12683 /* Record the LRO arrival timestamp */
12684 log.u_bbr.flex3 = M_TSTMP_LRO;
12685 ts.tv_sec = ae->timestamp / 1000000000;
12686 ts.tv_nsec = ae->timestamp % 1000000000;
12687 ltv.tv_sec = ts.tv_sec;
12688 ltv.tv_usec = ts.tv_nsec / 1000;
12689 log.u_bbr.flex5 = tcp_tv_to_usectick(<v);
12691 log.u_bbr.timeStamp = tcp_get_usecs(<v);
12692 /* Log the rcv time */
12693 log.u_bbr.delRate = ae->timestamp;
12694 #ifdef NETFLIX_HTTP_LOGGING
12695 log.u_bbr.applimited = tp->t_http_closed;
12696 log.u_bbr.applimited <<= 8;
12697 log.u_bbr.applimited |= tp->t_http_open;
12698 log.u_bbr.applimited <<= 8;
12699 log.u_bbr.applimited |= tp->t_http_req;
12701 /* Copy out any client req info */
12703 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC);
12705 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC);
12706 log.u_bbr.rttProp = http_req->timestamp;
12707 log.u_bbr.cur_del_rate = http_req->start;
12708 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) {
12709 log.u_bbr.flex8 |= 1;
12711 log.u_bbr.flex8 |= 2;
12712 log.u_bbr.bw_inuse = http_req->end;
12714 log.u_bbr.flex6 = http_req->start_seq;
12715 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) {
12716 log.u_bbr.flex8 |= 4;
12717 log.u_bbr.epoch = http_req->end_seq;
12721 memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf));
12722 th = (struct tcphdr *)tcp_hdr_buf;
12723 th->th_seq = ae->seq;
12724 th->th_ack = ae->ack;
12725 th->th_win = ae->win;
12726 /* Now fill in the ports */
12727 th->th_sport = tp->t_inpcb->inp_fport;
12728 th->th_dport = tp->t_inpcb->inp_lport;
12729 th->th_flags = ae->flags & 0xff;
12730 /* Now do we have a timestamp option? */
12731 if (ae->flags & HAS_TSTMP) {
12735 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2);
12736 cp = (u_char *)(th + 1);
12741 *cp = TCPOPT_TIMESTAMP;
12743 *cp = TCPOLEN_TIMESTAMP;
12745 val = htonl(ae->ts_value);
12746 bcopy((char *)&val,
12747 (char *)cp, sizeof(uint32_t));
12748 val = htonl(ae->ts_echo);
12749 bcopy((char *)&val,
12750 (char *)(cp + 4), sizeof(uint32_t));
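/*
 * The option bytes built here follow the usual RFC 1323 appendix-A
 * timestamp layout: two one-byte pad options, the TCPOPT_TIMESTAMP
 * kind, the TCPOLEN_TIMESTAMP length, then the 4-byte TSval and 4-byte
 * TSecr, TCPOLEN_TSTAMP_APPA (12) bytes in all.  That is why th_off is
 * (20 + 12) / 4 = 8 32-bit words with a timestamp and 20 / 4 = 5
 * without.
 */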
12752 th->th_off = (sizeof(struct tcphdr) >> 2);
12755 * For sane logging we need to play a little trick.
12756 * If the ack were fully processed we would have moved
12757 * snd_una to high_seq, but since compressed acks are
12758 * processed in two phases, at this point (logging) snd_una
12759 * won't be advanced. So we would see multiple acks showing
12760 * the advancement. We can prevent that by "pretending" that
12761 * snd_una was advanced and then un-advancing it so that the
12762 * logging code has the right value for tlb_snd_una.
12764 if (tp->snd_una != high_seq) {
12765 orig_snd_una = tp->snd_una;
12766 tp->snd_una = high_seq;
12770 TCP_LOG_EVENTP(tp, th,
12771 &tp->t_inpcb->inp_socket->so_rcv,
12772 &tp->t_inpcb->inp_socket->so_snd, TCP_LOG_IN, 0,
12773 0, &log, true, <v);
12775 tp->snd_una = orig_snd_una;
12782 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv)
12785 * Handle a "special" compressed ack mbuf. Each incoming
12786 * ack has only four possible dispositions:
12788 * A) It moves the cum-ack forward
12789 * B) It is behind the cum-ack.
12790 * C) It is a window-update ack.
12791 * D) It is a dup-ack.
12793 * Note that we can have between 1 and TCP_COMP_ACK_ENTRIES acks
12794 * in the incoming mbuf. We also still need to pay attention
12795 * to nxt_pkt since there may be another packet after this
12798 #ifdef TCP_ACCOUNTING
12803 struct timespec ts;
12804 struct tcp_rack *rack;
12805 struct tcp_ackent *ae;
12806 uint32_t tiwin, us_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack;
12807 int cnt, i, did_out, ourfinisacked = 0;
12808 int win_up_req = 0;
12809 struct tcpopt to_holder, *to = NULL;
12811 int under_pacing = 1;
12814 #ifdef TCP_ACCOUNTING
12817 rack = (struct tcp_rack *)tp->t_fb_ptr;
12818 if (rack->gp_ready &&
12819 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT))
12824 if (rack->r_state != tp->t_state)
12825 rack_set_state(tp, rack);
12828 KASSERT((m->m_len >= sizeof(struct tcp_ackent)),
12829 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len));
12830 cnt = m->m_len / sizeof(struct tcp_ackent);
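/*
 * Each struct tcp_ackent holds the interesting fields of one stripped
 * ACK (sequence, ack, window, header flags and any timestamp values),
 * so m_len divided by the entry size is the number of ACKs compressed
 * into this single mbuf; the loop below replays them in order.
 */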
12832 if (idx >= MAX_NUM_OF_CNTS)
12833 idx = MAX_NUM_OF_CNTS - 1;
12834 counter_u64_add(rack_proc_comp_ack[idx], 1);
12835 counter_u64_add(rack_multi_single_eq, cnt);
12836 high_seq = tp->snd_una;
12837 the_win = tp->snd_wnd;
12838 win_seq = tp->snd_wl1;
12839 win_upd_ack = tp->snd_wl2;
12840 cts = us_cts = tcp_tv_to_usectick(tv);
12841 segsiz = ctf_fixed_maxseg(tp);
12842 if ((rack->rc_gp_dyn_mul) &&
12843 (rack->use_fixed_rate == 0) &&
12844 (rack->rc_always_pace)) {
12845 /* Check in on probertt */
12846 rack_check_probe_rtt(rack, us_cts);
12848 for (i = 0; i < cnt; i++) {
12849 #ifdef TCP_ACCOUNTING
12850 ts_val = get_cyclecount();
12852 rack_clear_rate_sample(rack);
12853 ae = ((mtod(m, struct tcp_ackent *)) + i);
12854 /* Setup the window */
12855 tiwin = ae->win << tp->snd_scale;
12856 /* figure out the type of ack */
12857 if (SEQ_LT(ae->ack, high_seq)) {
12859 ae->ack_val_set = ACK_BEHIND;
12860 } else if (SEQ_GT(ae->ack, high_seq)) {
12862 ae->ack_val_set = ACK_CUMACK;
12863 } else if (tiwin == the_win) {
12865 ae->ack_val_set = ACK_DUPACK;
12868 ae->ack_val_set = ACK_RWND;
12870 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq);
12871 /* Validate timestamp */
12872 if (ae->flags & HAS_TSTMP) {
12873 /* Setup for a timestamp */
12874 to->to_flags = TOF_TS;
12875 ae->ts_echo -= tp->ts_offset;
12876 to->to_tsecr = ae->ts_echo;
12877 to->to_tsval = ae->ts_value;
12879 * If echoed timestamp is later than the current time, fall back to
12880 * non RFC1323 RTT calculation. Normalize timestamp if syncookies
12881 * were used when this connection was established.
12883 if (TSTMP_GT(ae->ts_echo, cts))
12885 if (tp->ts_recent &&
12886 TSTMP_LT(ae->ts_value, tp->ts_recent)) {
12887 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) {
12888 #ifdef TCP_ACCOUNTING
12889 rdstc = get_cyclecount();
12890 if (rdstc > ts_val) {
12891 counter_u64_add(tcp_proc_time[ae->ack_val_set] ,
12893 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
12894 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val);
12901 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) &&
12902 SEQ_LEQ(tp->last_ack_sent, ae->seq)) {
12903 tp->ts_recent_age = tcp_ts_getticks();
12904 tp->ts_recent = ae->ts_value;
12907 /* Setup for a no options */
12910 /* Update the rcv time and perform idle reduction possibly */
12911 if (tp->t_idle_reduce &&
12912 (tp->snd_max == tp->snd_una) &&
12913 ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) {
12914 counter_u64_add(rack_input_idle_reduces, 1);
12915 rack_cc_after_idle(rack, tp);
12917 tp->t_rcvtime = ticks;
12918 /* Now what about ECN? */
12919 if (tp->t_flags2 & TF2_ECN_PERMIT) {
12920 if (ae->flags & TH_CWR) {
12921 tp->t_flags2 &= ~TF2_ECN_SND_ECE;
12922 tp->t_flags |= TF_ACKNOW;
12924 switch (ae->codepoint & IPTOS_ECN_MASK) {
12926 tp->t_flags2 |= TF2_ECN_SND_ECE;
12927 KMOD_TCPSTAT_INC(tcps_ecn_ce);
12929 case IPTOS_ECN_ECT0:
12930 KMOD_TCPSTAT_INC(tcps_ecn_ect0);
12932 case IPTOS_ECN_ECT1:
12933 KMOD_TCPSTAT_INC(tcps_ecn_ect1);
12937 /* Process a packet differently from RFC3168. */
12938 cc_ecnpkt_handler_flags(tp, ae->flags, ae->codepoint);
12939 /* Congestion experienced. */
12940 if (ae->flags & TH_ECE) {
12941 rack_cong_signal(tp, CC_ECN, ae->ack);
12944 #ifdef TCP_ACCOUNTING
12945 /* Count for the specific type of ack in */
12946 counter_u64_add(tcp_cnt_counters[ae->ack_val_set], 1);
12947 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
12948 tp->tcp_cnt_counters[ae->ack_val_set]++;
12952 * Note how we could move these up in the determination
12953 * above, but we don't, so that the timestamp checks (and ECN)
12954 * are done first before we do any processing on the ACK.
12955 * The non-compressed path through the code has this
12956 * weakness (noted by @jtl) that it actually does some
12957 * processing before verifying the timestamp information.
12958 * We don't take that path here which is why we set
12959 * the ack_val_set first, do the timestamp and ecn
12960 * processing, and then look at what we have setup.
12962 if (ae->ack_val_set == ACK_BEHIND) {
12964 * Case B flag reordering, if window is not closed
12965 * or it could be a keep-alive or persists
12967 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) {
12968 counter_u64_add(rack_reorder_seen, 1);
12969 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
12971 } else if (ae->ack_val_set == ACK_DUPACK) {
12974 rack_strike_dupack(rack);
12975 } else if (ae->ack_val_set == ACK_RWND) {
12979 win_upd_ack = ae->ack;
12985 if (SEQ_GT(ae->ack, tp->snd_max)) {
12987 * We just send an ack since the incoming
12988 * ack is beyond the largest seq we sent.
12990 if ((tp->t_flags & TF_ACKNOW) == 0) {
12991 ctf_ack_war_checks(tp, &rack->r_ctl.challenge_ack_ts, &rack->r_ctl.challenge_ack_cnt);
12992 if (tp->t_flags & TF_ACKNOW)
12993 rack->r_wanted_output = 1;
12997 /* If the window changed setup to update */
12998 if (tiwin != tp->snd_wnd) {
13000 win_upd_ack = ae->ack;
13004 #ifdef TCP_ACCOUNTING
13005 /* Account for the acks */
13006 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13007 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz);
13009 counter_u64_add(tcp_cnt_counters[CNT_OF_ACKS_IN],
13010 (((ae->ack - high_seq) + segsiz - 1) / segsiz));
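/*
 * Illustrative note: the expression above is a ceiling division, so
 * the cum-ack advance is charged in whole segments. For example, an
 * advance of (segsiz + 1) bytes is counted as 2 acked segments, not 1.
 */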
13012 high_seq = ae->ack;
13013 /* Setup our act_rcv_time */
13014 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) {
13015 ts.tv_sec = ae->timestamp / 1000000000;
13016 ts.tv_nsec = ae->timestamp % 1000000000;
13017 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
13018 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
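/*
 * Note on the conversion above: ae->timestamp carries the LRO or
 * hardware arrival time in nanoseconds, so it is split into whole
 * seconds and a nanosecond remainder, and the remainder is then
 * reduced to the microsecond granularity that act_rcv_time uses.
 */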
13020 rack->r_ctl.act_rcv_time = *tv;
13022 rack_process_to_cumack(tp, rack, ae->ack, cts, to);
13025 /* And lets be sure to commit the rtt measurements for this ack */
13026 tcp_rack_xmit_timer_commit(rack, tp);
13027 #ifdef TCP_ACCOUNTING
13028 rdstc = get_cyclecount();
13029 if (rdstc > ts_val) {
13030 counter_u64_add(tcp_proc_time[ae->ack_val_set] , (rdstc - ts_val));
13031 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13032 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val);
13033 if (ae->ack_val_set == ACK_CUMACK)
13034 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val);
13039 #ifdef TCP_ACCOUNTING
13040 ts_val = get_cyclecount();
13042 acked_amount = acked = (high_seq - tp->snd_una);
13044 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq);
13047 if (rack->sack_attack_disable == 0)
13048 rack_do_decay(rack);
13049 if (acked >= segsiz) {
13051 * You only get credit for
13052 * MSS and greater (and you get extra
13053 * credit for larger cum-ack moves).
13057 ac = acked / segsiz;
13058 rack->r_ctl.ack_count += ac;
13059 counter_u64_add(rack_ack_total, ac);
13061 if (rack->r_ctl.ack_count > 0xfff00000) {
13063 * reduce the number to keep us under
13066 rack->r_ctl.ack_count /= 2;
13067 rack->r_ctl.sack_count /= 2;
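/*
 * A short rationale (inferred from the surrounding code): halving
 * both ack_count and sack_count when ack_count nears wrap keeps the
 * counters bounded while roughly preserving the sack-to-ack ratio
 * that the sack-attack detection relies on.
 */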
13069 if (tp->t_flags & TF_NEEDSYN) {
13071 * T/TCP: Connection was half-synchronized, and our SYN has
13072 * been ACK'd (so connection is now fully synchronized). Go
13073 * to non-starred state, increment snd_una for ACK of SYN,
13074 * and check if we can do window scaling.
13076 tp->t_flags &= ~TF_NEEDSYN;
13078 acked_amount = acked = (high_seq - tp->snd_una);
13080 if (acked > sbavail(&so->so_snd))
13081 acked_amount = sbavail(&so->so_snd);
13082 #ifdef NETFLIX_EXP_DETECTION
13084 * We only care on a cum-ack move if we are in a sack-disabled
13085 * state. We have already added in to the ack_count, and we never
13086 * would disable on a cum-ack move, so we only care to do the
13087 * detection if it may "undo" it, i.e. we were in disabled already.
13089 if (rack->sack_attack_disable)
13090 rack_do_detection(tp, rack, acked_amount, segsiz);
13092 if (IN_FASTRECOVERY(tp->t_flags) &&
13093 (rack->rack_no_prr == 0))
13094 rack_update_prr(tp, rack, acked_amount, high_seq);
13095 if (IN_RECOVERY(tp->t_flags)) {
13096 if (SEQ_LT(high_seq, tp->snd_recover) &&
13097 (SEQ_LT(high_seq, tp->snd_max))) {
13098 tcp_rack_partialack(tp);
13100 rack_post_recovery(tp, high_seq);
13104 /* Handle the rack-log-ack part (sendmap) */
13105 if ((sbused(&so->so_snd) == 0) &&
13106 (acked > acked_amount) &&
13107 (tp->t_state >= TCPS_FIN_WAIT_1) &&
13108 (tp->t_flags & TF_SENTFIN)) {
13110 * We must be sure our fin
13111 * was sent and acked (we can be
13112 * in FIN_WAIT_1 without having
13117 * Lets make sure snd_una is updated
13118 * since most likely acked_amount = 0 (it
13121 tp->snd_una = high_seq;
13123 /* Did we make a RTO error? */
13124 if ((tp->t_flags & TF_PREVVALID) &&
13125 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
13126 tp->t_flags &= ~TF_PREVVALID;
13127 if (tp->t_rxtshift == 1 &&
13128 (int)(ticks - tp->t_badrxtwin) < 0)
13129 rack_cong_signal(tp, CC_RTO_ERR, high_seq);
13131 /* Handle the data in the socket buffer */
13132 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1);
13133 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
13134 if (acked_amount > 0) {
13135 struct mbuf *mfree;
13137 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, recovery);
13138 SOCKBUF_LOCK(&so->so_snd);
13139 mfree = sbcut_locked(&so->so_snd, acked);
13140 tp->snd_una = high_seq;
13141 /* Note we want to hold the sb lock through the sendmap adjust */
13142 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una);
13143 /* Wake up the socket if we have room to write more */
13144 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2);
13145 SOCKBUF_UNLOCK(&so->so_snd);
13146 tp->t_flags |= TF_WAKESOW;
13149 /* update progress */
13150 tp->t_acktime = ticks;
13151 rack_log_progress_event(rack, tp, tp->t_acktime,
13152 PROGRESS_UPDATE, __LINE__);
13153 /* Clear out shifts and such */
13154 tp->t_rxtshift = 0;
13155 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
13156 rack_rto_min, rack_rto_max);
13157 rack->rc_tlp_in_progress = 0;
13158 rack->r_ctl.rc_tlp_cnt_out = 0;
13159 /* Send recover and snd_nxt must be dragged along */
13160 if (SEQ_GT(tp->snd_una, tp->snd_recover))
13161 tp->snd_recover = tp->snd_una;
13162 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
13163 tp->snd_nxt = tp->snd_una;
13165 * If the RXT timer is running we want to
13166 * stop it, so we can restart a TLP (or new RXT).
13168 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
13169 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
13170 #ifdef NETFLIX_HTTP_LOGGING
13171 tcp_http_check_for_comp(rack->rc_tp, high_seq);
13173 tp->snd_wl2 = high_seq;
13175 if (under_pacing &&
13176 (rack->use_fixed_rate == 0) &&
13177 (rack->in_probe_rtt == 0) &&
13178 rack->rc_gp_dyn_mul &&
13179 rack->rc_always_pace) {
13180 /* Check if we are dragging bottom */
13181 rack_check_bottom_drag(tp, rack, so, acked);
13183 if (tp->snd_una == tp->snd_max) {
13184 tp->t_flags &= ~TF_PREVVALID;
13185 rack->r_ctl.retran_during_recovery = 0;
13186 rack->r_ctl.dsack_byte_cnt = 0;
13187 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
13188 if (rack->r_ctl.rc_went_idle_time == 0)
13189 rack->r_ctl.rc_went_idle_time = 1;
13190 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
13191 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0)
13193 /* Set so we might enter persists... */
13194 rack->r_wanted_output = 1;
13195 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
13196 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
13197 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
13198 (sbavail(&so->so_snd) == 0) &&
13199 (tp->t_flags2 & TF2_DROP_AF_DATA)) {
13201 * The socket was gone and the
13202 * peer sent data (not now, in the past), time to
13205 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
13206 /* tcp_close will kill the inp pre-log the Reset */
13207 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
13208 #ifdef TCP_ACCOUNTING
13209 rdstc = get_cyclecount();
13210 if (rdstc > ts_val) {
13211 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val));
13212 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13213 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13214 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13219 tp = tcp_close(tp);
13221 #ifdef TCP_ACCOUNTING
13227 * We would normally do drop-with-reset which would
13228 * send back a reset. We can't since we don't have
13229 * all the needed bits. Instead lets arrange for
13230 * a call to tcp_output(). That way since we
13231 * are in the closed state we will generate a reset.
13233 * Note if tcp_accounting is on we don't unpin since
13234 * we do that after the goto label.
13236 goto send_out_a_rst;
13238 if ((sbused(&so->so_snd) == 0) &&
13239 (tp->t_state >= TCPS_FIN_WAIT_1) &&
13240 (tp->t_flags & TF_SENTFIN)) {
13242 * If we can't receive any more data, then closing user can
13243 * proceed. Starting the timer is contrary to the
13244 * specification, but if we don't get a FIN we'll hang
13248 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
13249 soisdisconnected(so);
13250 tcp_timer_activate(tp, TT_2MSL,
13251 (tcp_fast_finwait2_recycle ?
13252 tcp_finwait2_timeout :
13255 if (ourfinisacked == 0) {
13257 * We don't change to fin-wait-2 if we have our fin acked
13258 * which means we are probably in TCPS_CLOSING.
13260 tcp_state_change(tp, TCPS_FIN_WAIT_2);
13264 /* Wake up the socket if we have room to write more */
13265 if (sbavail(&so->so_snd)) {
13266 rack->r_wanted_output = 1;
13267 if (ctf_progress_timeout_check(tp, true)) {
13268 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
13269 tp, tick, PROGRESS_DROP, __LINE__);
13270 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
13272 * We cheat here and don't send a RST, we should send one
13273 * when the pacer drops the connection.
13275 #ifdef TCP_ACCOUNTING
13276 rdstc = get_cyclecount();
13277 if (rdstc > ts_val) {
13278 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val));
13279 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13280 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13281 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13286 INP_WUNLOCK(rack->rc_inp);
13291 if (ourfinisacked) {
13292 switch(tp->t_state) {
13294 #ifdef TCP_ACCOUNTING
13295 rdstc = get_cyclecount();
13296 if (rdstc > ts_val) {
13297 counter_u64_add(tcp_proc_time[ACK_CUMACK] ,
13299 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13300 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13301 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13310 case TCPS_LAST_ACK:
13311 #ifdef TCP_ACCOUNTING
13312 rdstc = get_cyclecount();
13313 if (rdstc > ts_val) {
13314 counter_u64_add(tcp_proc_time[ACK_CUMACK] ,
13316 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13317 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13318 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13323 tp = tcp_close(tp);
13324 ctf_do_drop(m, tp);
13327 case TCPS_FIN_WAIT_1:
13328 #ifdef TCP_ACCOUNTING
13329 rdstc = get_cyclecount();
13330 if (rdstc > ts_val) {
13331 counter_u64_add(tcp_proc_time[ACK_CUMACK] ,
13333 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13334 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13335 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13339 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
13340 soisdisconnected(so);
13341 tcp_timer_activate(tp, TT_2MSL,
13342 (tcp_fast_finwait2_recycle ?
13343 tcp_finwait2_timeout :
13346 tcp_state_change(tp, TCPS_FIN_WAIT_2);
13352 if (rack->r_fast_output) {
13354 * We are doing fast output; can we expand that?
13356 rack_gain_for_fastoutput(rack, tp, so, acked_amount);
13358 #ifdef TCP_ACCOUNTING
13359 rdstc = get_cyclecount();
13360 if (rdstc > ts_val) {
13361 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val));
13362 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13363 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
13364 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
13368 } else if (win_up_req) {
13369 rdstc = get_cyclecount();
13370 if (rdstc > ts_val) {
13371 counter_u64_add(tcp_proc_time[ACK_RWND] , (rdstc - ts_val));
13372 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13373 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val);
13378 /* Now is there a next packet, if so we are done */
13382 #ifdef TCP_ACCOUNTING
13385 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs);
13388 rack_handle_might_revert(tp, rack);
13389 ctf_calc_rwin(so, tp);
13390 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) {
13392 (void)tp->t_fb->tfb_tcp_output(tp);
13395 rack_free_trim(rack);
13396 #ifdef TCP_ACCOUNTING
13399 rack_timer_audit(tp, rack, &so->so_snd);
13400 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs);
13406 rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
13407 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos,
13408 int32_t nxt_pkt, struct timeval *tv)
13410 #ifdef TCP_ACCOUNTING
13413 int32_t thflags, retval, did_out = 0;
13414 int32_t way_out = 0;
13417 struct timespec ts;
13419 struct tcp_rack *rack;
13420 struct rack_sendmap *rsm;
13421 int32_t prev_state = 0;
13422 #ifdef TCP_ACCOUNTING
13423 int ack_val_set = 0xf;
13427 * tv passed from common code is from either M_TSTMP_LRO or
13428 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present.
13430 if (m->m_flags & M_ACKCMP) {
13431 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv));
13433 if (m->m_flags & M_ACKCMP) {
13434 panic("Impossible reach m has ackcmp? m:%p tp:%p", m, tp);
13436 counter_u64_add(rack_proc_non_comp_ack, 1);
13437 thflags = th->th_flags;
13438 #ifdef TCP_ACCOUNTING
13440 if (thflags & TH_ACK)
13441 ts_val = get_cyclecount();
13443 cts = tcp_tv_to_usectick(tv);
13444 rack = (struct tcp_rack *)tp->t_fb_ptr;
13446 if ((m->m_flags & M_TSTMP) ||
13447 (m->m_flags & M_TSTMP_LRO)) {
13448 mbuf_tstmp2timespec(m, &ts);
13449 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
13450 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
13452 rack->r_ctl.act_rcv_time = *tv;
13453 kern_prefetch(rack, &prev_state);
13456 * Unscale the window into a 32-bit value. For the SYN_SENT state
13457 * the scale is zero.
13459 tiwin = th->th_win << tp->snd_scale;
13461 * Parse options on any incoming segment.
13463 memset(&to, 0, sizeof(to));
13464 tcp_dooptions(&to, (u_char *)(th + 1),
13465 (th->th_off << 2) - sizeof(struct tcphdr),
13466 (thflags & TH_SYN) ? TO_SYN : 0);
13467 #ifdef TCP_ACCOUNTING
13468 if (thflags & TH_ACK) {
13470 * We have a tradeoff here. We can either do what we are
13471 * doing i.e. pinning to this CPU and then doing the accounting
13472 * <or> we could do a critical enter, setup the rdtsc and cpu
13473 * as in below, and then validate we are on the same CPU on
13474 * exit. I have chosen not to do the critical enter since
13475 * that often will gain you a context switch, and instead lock
13476 * us (line above this if) to the same CPU with sched_pin(). This
13477 * means we may be context switched out for a higher priority
13478 * interrupt but we won't be moved to another CPU.
13480 * If this occurs (which it won't very often since we most likely
13481 * are running this code in interrupt context and only a higher
13482 * priority will bump us ... clock?) we will falsely add in
13483 * to the time the interrupt processing time plus the ack processing
13484 * time. This is ok since it's a rare event.
13486 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin,
13487 ctf_fixed_maxseg(tp));
13490 NET_EPOCH_ASSERT();
13491 INP_WLOCK_ASSERT(tp->t_inpcb);
13492 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
13494 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
13496 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
13497 union tcp_log_stackspecific log;
13498 struct timeval ltv;
13499 #ifdef NETFLIX_HTTP_LOGGING
13500 struct http_sendfile_track *http_req;
13502 if (SEQ_GT(th->th_ack, tp->snd_una)) {
13503 http_req = tcp_http_find_req_for_seq(tp, (th->th_ack-1));
13505 http_req = tcp_http_find_req_for_seq(tp, th->th_ack);
13508 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
13509 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
13510 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
13511 if (rack->rack_no_prr == 0)
13512 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
13514 log.u_bbr.flex1 = 0;
13515 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
13516 log.u_bbr.use_lt_bw <<= 1;
13517 log.u_bbr.use_lt_bw |= rack->r_might_revert;
13518 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
13519 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
13520 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
13521 log.u_bbr.flex3 = m->m_flags;
13522 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
13523 log.u_bbr.lost = thflags;
13524 log.u_bbr.pacing_gain = 0x1;
13525 #ifdef TCP_ACCOUNTING
13526 log.u_bbr.cwnd_gain = ack_val_set;
13528 log.u_bbr.flex7 = 2;
13529 if (m->m_flags & M_TSTMP) {
13530 /* Record the hardware timestamp if present */
13531 mbuf_tstmp2timespec(m, &ts);
13532 ltv.tv_sec = ts.tv_sec;
13533 ltv.tv_usec = ts.tv_nsec / 1000;
13534 log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv);
13535 } else if (m->m_flags & M_TSTMP_LRO) {
13536 /* Record the LRO the arrival timestamp */
13537 mbuf_tstmp2timespec(m, &ts);
13538 ltv.tv_sec = ts.tv_sec;
13539 ltv.tv_usec = ts.tv_nsec / 1000;
13540 log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv);
13542 log.u_bbr.timeStamp = tcp_get_usecs(&ltv);
13543 /* Log the rcv time */
13544 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp;
13545 #ifdef NETFLIX_HTTP_LOGGING
13546 log.u_bbr.applimited = tp->t_http_closed;
13547 log.u_bbr.applimited <<= 8;
13548 log.u_bbr.applimited |= tp->t_http_open;
13549 log.u_bbr.applimited <<= 8;
13550 log.u_bbr.applimited |= tp->t_http_req;
13552 /* Copy out any client req info */
13554 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC);
13556 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC);
13557 log.u_bbr.rttProp = http_req->timestamp;
13558 log.u_bbr.cur_del_rate = http_req->start;
13559 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) {
13560 log.u_bbr.flex8 |= 1;
13562 log.u_bbr.flex8 |= 2;
13563 log.u_bbr.bw_inuse = http_req->end;
13565 log.u_bbr.flex6 = http_req->start_seq;
13566 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) {
13567 log.u_bbr.flex8 |= 4;
13568 log.u_bbr.epoch = http_req->end_seq;
13572 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
13573 tlen, &log, true, &ltv);
13575 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
13578 goto done_with_input;
13581 * If a segment with the ACK-bit set arrives in the SYN-SENT state
13582 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9.
13584 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
13585 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
13586 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
13587 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
13588 #ifdef TCP_ACCOUNTING
13595 * Parse options on any incoming segment.
13597 tcp_dooptions(&to, (u_char *)(th + 1),
13598 (th->th_off << 2) - sizeof(struct tcphdr),
13599 (thflags & TH_SYN) ? TO_SYN : 0);
13602 * If timestamps were negotiated during SYN/ACK and a
13603 * segment without a timestamp is received, silently drop
13604 * the segment, unless it is a RST segment or missing timestamps are
13606 * See section 3.2 of RFC 7323.
13608 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) &&
13609 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) {
13612 goto done_with_input;
13616 * Segment received on connection. Reset idle time and keep-alive
13617 * timer. XXX: This should be done after segment validation to
13618 * ignore broken/spoofed segs.
13620 if (tp->t_idle_reduce &&
13621 (tp->snd_max == tp->snd_una) &&
13622 ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) {
13623 counter_u64_add(rack_input_idle_reduces, 1);
13624 rack_cc_after_idle(rack, tp);
13626 tp->t_rcvtime = ticks;
13628 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin);
13630 if (tiwin > rack->r_ctl.rc_high_rwnd)
13631 rack->r_ctl.rc_high_rwnd = tiwin;
13633 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move
13634 * this to occur after we've validated the segment.
13636 if (tp->t_flags2 & TF2_ECN_PERMIT) {
13637 if (thflags & TH_CWR) {
13638 tp->t_flags2 &= ~TF2_ECN_SND_ECE;
13639 tp->t_flags |= TF_ACKNOW;
13641 switch (iptos & IPTOS_ECN_MASK) {
13643 tp->t_flags2 |= TF2_ECN_SND_ECE;
13644 KMOD_TCPSTAT_INC(tcps_ecn_ce);
13646 case IPTOS_ECN_ECT0:
13647 KMOD_TCPSTAT_INC(tcps_ecn_ect0);
13649 case IPTOS_ECN_ECT1:
13650 KMOD_TCPSTAT_INC(tcps_ecn_ect1);
13654 /* Process a packet differently from RFC3168. */
13655 cc_ecnpkt_handler(tp, th, iptos);
13657 /* Congestion experienced. */
13658 if (thflags & TH_ECE) {
13659 rack_cong_signal(tp, CC_ECN, th->th_ack);
13664 * If echoed timestamp is later than the current time, fall back to
13665 * non RFC1323 RTT calculation. Normalize timestamp if syncookies
13666 * were used when this connection was established.
13668 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
13669 to.to_tsecr -= tp->ts_offset;
13670 if (TSTMP_GT(to.to_tsecr, cts))
13675 * If its the first time in we need to take care of options and
13676 * verify we can do SACK for rack!
13678 if (rack->r_state == 0) {
13679 /* Should be init'd by rack_init() */
13680 KASSERT(rack->rc_inp != NULL,
13681 ("%s: rack->rc_inp unexpectedly NULL", __func__));
13682 if (rack->rc_inp == NULL) {
13683 rack->rc_inp = tp->t_inpcb;
13687 * Process options only when we get SYN/ACK back. The SYN
13688 * case for incoming connections is handled in tcp_syncache.
13689 * According to RFC1323 the window field in a SYN (i.e., a
13690 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX
13691 * this is traditional behavior, may need to be cleaned up.
13693 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
13694 /* Handle parallel SYN for ECN */
13695 if (!(thflags & TH_ACK) &&
13696 ((thflags & (TH_CWR | TH_ECE)) == (TH_CWR | TH_ECE)) &&
13697 ((V_tcp_do_ecn == 1) || (V_tcp_do_ecn == 2))) {
13698 tp->t_flags2 |= TF2_ECN_PERMIT;
13699 tp->t_flags2 |= TF2_ECN_SND_ECE;
13700 TCPSTAT_INC(tcps_ecn_shs);
13702 if ((to.to_flags & TOF_SCALE) &&
13703 (tp->t_flags & TF_REQ_SCALE)) {
13704 tp->t_flags |= TF_RCVD_SCALE;
13705 tp->snd_scale = to.to_wscale;
13707 tp->t_flags &= ~TF_REQ_SCALE;
13709 * Initial send window. It will be updated with the
13710 * next incoming segment to the scaled value.
13712 tp->snd_wnd = th->th_win;
13713 rack_validate_fo_sendwin_up(tp, rack);
13714 if ((to.to_flags & TOF_TS) &&
13715 (tp->t_flags & TF_REQ_TSTMP)) {
13716 tp->t_flags |= TF_RCVD_TSTMP;
13717 tp->ts_recent = to.to_tsval;
13718 tp->ts_recent_age = cts;
13720 tp->t_flags &= ~TF_REQ_TSTMP;
13721 if (to.to_flags & TOF_MSS) {
13722 tcp_mss(tp, to.to_mss);
13724 if ((tp->t_flags & TF_SACK_PERMIT) &&
13725 (to.to_flags & TOF_SACKPERM) == 0)
13726 tp->t_flags &= ~TF_SACK_PERMIT;
13727 if (IS_FASTOPEN(tp->t_flags)) {
13728 if (to.to_flags & TOF_FASTOPEN) {
13731 if (to.to_flags & TOF_MSS)
13734 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
13738 tcp_fastopen_update_cache(tp, mss,
13739 to.to_tfo_len, to.to_tfo_cookie);
13741 tcp_fastopen_disable_path(tp);
13745 * At this point we are at the initial call. Here we decide
13746 * if we are doing RACK or not. We do this by seeing if
13747 * TF_SACK_PERMIT is set and the sack-not-required is clear.
13748 * The code now does do dup-ack counting so if you don't
13749 * switch back you won't get rack & TLP, but you will still
13753 if ((rack_sack_not_required == 0) &&
13754 ((tp->t_flags & TF_SACK_PERMIT) == 0)) {
13755 tcp_switch_back_to_default(tp);
13756 (*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen,
13758 #ifdef TCP_ACCOUNTING
13763 tcp_set_hpts(tp->t_inpcb);
13764 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack);
13766 if (thflags & TH_FIN)
13767 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN);
13768 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
13769 if ((rack->rc_gp_dyn_mul) &&
13770 (rack->use_fixed_rate == 0) &&
13771 (rack->rc_always_pace)) {
13772 /* Check in on probertt */
13773 rack_check_probe_rtt(rack, us_cts);
13775 if (rack->forced_ack) {
13779 * A persist or keep-alive was forced out, update our
13780 * min rtt time. Note we do not worry about lost
13781 * retransmissions since KEEP-ALIVES and persists
13782 * are usually way long on times of sending (though
13783 * if we were really paranoid or worried we could
13784 * at least use timestamps if available to validate).
13786 rack->forced_ack = 0;
13787 us_rtt = us_cts - rack->r_ctl.forced_ack_ts;
13790 rack_log_rtt_upd(tp, rack, us_rtt, 0, NULL, 3);
13791 rack_apply_updated_usrtt(rack, us_rtt, us_cts);
13794 * This is the one exception case where we set the rack state
13795 * always. All other times (timers etc) we must have a rack-state
13796 * set (so we assure we have done the checks above for SACK).
13798 rack->r_ctl.rc_rcvtime = cts;
13799 if (rack->r_state != tp->t_state)
13800 rack_set_state(tp, rack);
13801 if (SEQ_GT(th->th_ack, tp->snd_una) &&
13802 (rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree)) != NULL)
13803 kern_prefetch(rsm, &prev_state);
13804 prev_state = rack->r_state;
13805 rack_clear_rate_sample(rack);
13806 retval = (*rack->r_substate) (m, th, so,
13807 tp, &to, drop_hdrlen,
13808 tlen, tiwin, thflags, nxt_pkt, iptos);
13810 if ((retval == 0) &&
13811 (tp->t_inpcb == NULL)) {
13812 panic("retval:%d tp:%p t_inpcb:NULL state:%d",
13813 retval, tp, prev_state);
13818 * If retval is 1 the tcb is unlocked and most likely the tp
13821 INP_WLOCK_ASSERT(tp->t_inpcb);
13822 if ((rack->rc_gp_dyn_mul) &&
13823 (rack->rc_always_pace) &&
13824 (rack->use_fixed_rate == 0) &&
13825 rack->in_probe_rtt &&
13826 (rack->r_ctl.rc_time_probertt_starts == 0)) {
13828 * If we are going for target, lets recheck before
13831 rack_check_probe_rtt(rack, us_cts);
13833 if (rack->set_pacing_done_a_iw == 0) {
13834 /* How much has been acked? */
13835 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) {
13836 /* We have enough to set in the pacing segment size */
13837 rack->set_pacing_done_a_iw = 1;
13838 rack_set_pace_segments(tp, rack, __LINE__, NULL);
13841 tcp_rack_xmit_timer_commit(rack, tp);
13842 #ifdef TCP_ACCOUNTING
13844 * If we set the ack_val_set to what ack processing we are doing
13845 * we also want to track how many cycles we burned. Note
13846 * the bits after tcp_output we let be "free". This is because
13847 * we are also tracking the tcp_output times as well. Note the
13848 * use of 0xf here since we only have 11 counters (0 - 0xa) and
13849 * 0xf cannot be returned and is what we initialize it to, to
13850 * indicate we are not doing the tabulations.
13852 if (ack_val_set != 0xf) {
13855 crtsc = get_cyclecount();
13856 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val));
13857 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
13858 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val);
13862 if (nxt_pkt == 0) {
13863 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) {
13866 (void)tp->t_fb->tfb_tcp_output(tp);
13868 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
13869 rack_free_trim(rack);
13871 if ((nxt_pkt == 0) &&
13872 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) &&
13873 (SEQ_GT(tp->snd_max, tp->snd_una) ||
13874 (tp->t_flags & TF_DELACK) ||
13875 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
13876 (tp->t_state <= TCPS_CLOSING)))) {
13877 /* We could not send (probably in the hpts but stopped the timer earlier)? */
13878 if ((tp->snd_max == tp->snd_una) &&
13879 ((tp->t_flags & TF_DELACK) == 0) &&
13880 (rack->rc_inp->inp_in_hpts) &&
13881 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
13882 /* keep alive not needed if we are hptsi output yet */
13886 if (rack->rc_inp->inp_in_hpts) {
13887 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
13888 us_cts = tcp_get_usecs(NULL);
13889 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
13891 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts);
13894 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
13896 tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT);
13898 if (late && (did_out == 0)) {
13900 * We are late in the sending
13901 * and we did not call the output
13902 * (this probably should not happen).
13904 goto do_output_now;
13906 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
13909 } else if (nxt_pkt == 0) {
13910 /* Do we have the correct timer running? */
13911 rack_timer_audit(tp, rack, &so->so_snd);
13915 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, m->m_pkthdr.lro_nsegs));
13917 rack->r_wanted_output = 0;
13919 if (tp->t_inpcb == NULL) {
13920 panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d",
13922 retval, tp, prev_state);
13925 #ifdef TCP_ACCOUNTING
13928 * Track the time (see above).
13930 if (ack_val_set != 0xf) {
13933 crtsc = get_cyclecount();
13934 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val));
13936 * Note we *DO NOT* increment the per-tcb counters since
13937 * in the else the TP may be gone!!
13942 #ifdef TCP_ACCOUNTING
13949 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
13950 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos)
13954 /* First lets see if we have old packets */
13955 if (tp->t_in_pkt) {
13956 if (ctf_do_queued_segments(so, tp, 1)) {
13961 if (m->m_flags & M_TSTMP_LRO) {
13962 tv.tv_sec = m->m_pkthdr.rcv_tstmp /1000000000;
13963 tv.tv_usec = (m->m_pkthdr.rcv_tstmp % 1000000000)/1000;
13965 /* Should not happen; should we KASSERT instead? */
13966 tcp_get_usecs(&tv);
13968 if (rack_do_segment_nounlock(m, th, so, tp,
13969 drop_hdrlen, tlen, iptos, 0, &tv) == 0) {
13970 tcp_handle_wakeup(tp, so);
13971 INP_WUNLOCK(tp->t_inpcb);
13975 struct rack_sendmap *
13976 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused)
13978 struct rack_sendmap *rsm = NULL;
13980 uint32_t srtt = 0, thresh = 0, ts_low = 0;
13982 /* Return the next guy to be re-transmitted */
13983 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) {
13986 if (tp->t_flags & TF_SENTFIN) {
13987 /* retran the end FIN? */
13990 /* ok lets look at this one */
13991 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
13992 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) {
13995 rsm = rack_find_lowest_rsm(rack);
14000 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) &&
14001 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
14003 * No sack so we automatically do the 3 strikes and
14004 * retransmit (no rack timer would be started).
14009 if (rsm->r_flags & RACK_ACKED) {
14012 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) &&
14013 (rsm->r_dupack < DUP_ACK_THRESHOLD)) {
14014 /* Its not yet ready */
14017 srtt = rack_grab_rtt(tp, rack);
14018 idx = rsm->r_rtr_cnt - 1;
14019 ts_low = (uint32_t)rsm->r_tim_lastsent[idx];
14020 thresh = rack_calc_thresh_rack(rack, srtt, tsused);
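/*
 * Informal sketch of the checks below: thresh is the RACK reordering
 * threshold (roughly the smoothed RTT plus an allowance for
 * reordering), and a segment only becomes eligible for retransmit
 * once at least thresh (in the same time units as tsused and ts_low)
 * has elapsed since its most recent (re)transmission.
 */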
14021 if ((tsused == ts_low) ||
14022 (TSTMP_LT(tsused, ts_low))) {
14023 /* No time since sending */
14026 if ((tsused - ts_low) < thresh) {
14027 /* It has not been long enough yet */
14030 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) ||
14031 ((rsm->r_flags & RACK_SACK_PASSED) &&
14032 (rack->sack_attack_disable == 0))) {
14034 * We have passed the dup-ack threshold <or>
14035 * a SACK has indicated this is missing.
14036 * Note that if you are a declared attacker
14037 * it is only the dup-ack threshold that
14038 * will cause retransmits.
14040 /* log retransmit reason */
14041 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1);
14042 rack->r_fast_output = 0;
14049 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
14050 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method,
14051 int line, struct rack_sendmap *rsm)
14053 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
14054 union tcp_log_stackspecific log;
14057 memset(&log, 0, sizeof(log));
14058 log.u_bbr.flex1 = slot;
14059 log.u_bbr.flex2 = len;
14060 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs;
14061 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs;
14062 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss;
14063 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca;
14064 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data;
14065 log.u_bbr.use_lt_bw <<= 1;
14066 log.u_bbr.use_lt_bw |= rack->r_late;
14067 log.u_bbr.use_lt_bw <<= 1;
14068 log.u_bbr.use_lt_bw |= rack->r_early;
14069 log.u_bbr.use_lt_bw <<= 1;
14070 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set;
14071 log.u_bbr.use_lt_bw <<= 1;
14072 log.u_bbr.use_lt_bw |= rack->rc_gp_filled;
14073 log.u_bbr.use_lt_bw <<= 1;
14074 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt;
14075 log.u_bbr.use_lt_bw <<= 1;
14076 log.u_bbr.use_lt_bw |= rack->in_probe_rtt;
14077 log.u_bbr.use_lt_bw <<= 1;
14078 log.u_bbr.use_lt_bw |= rack->gp_ready;
14079 log.u_bbr.pkt_epoch = line;
14080 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed;
14081 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early;
14082 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec;
14083 log.u_bbr.bw_inuse = bw_est;
14084 log.u_bbr.delRate = bw;
14085 if (rack->r_ctl.gp_bw == 0)
14086 log.u_bbr.cur_del_rate = 0;
14088 log.u_bbr.cur_del_rate = rack_get_bw(rack);
14089 log.u_bbr.rttProp = len_time;
14090 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt;
14091 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit;
14092 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm);
14093 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) {
14094 /* We are in slow start */
14095 log.u_bbr.flex7 = 1;
14097 /* we are on congestion avoidance */
14098 log.u_bbr.flex7 = 0;
14100 log.u_bbr.flex8 = method;
14101 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
14102 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
14103 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec;
14104 log.u_bbr.cwnd_gain <<= 1;
14105 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss;
14106 log.u_bbr.cwnd_gain <<= 1;
14107 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca;
14108 TCP_LOG_EVENTP(rack->rc_tp, NULL,
14109 &rack->rc_inp->inp_socket->so_rcv,
14110 &rack->rc_inp->inp_socket->so_snd,
14111 BBR_LOG_HPTSI_CALC, 0,
14112 0, &log, false, &tv);
14117 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss)
14119 uint32_t new_tso, user_max;
14121 user_max = rack->rc_user_set_max_segs * mss;
14122 if (rack->rc_force_max_seg) {
14125 if (rack->use_fixed_rate &&
14126 ((rack->r_ctl.crte == NULL) ||
14127 (bw != rack->r_ctl.crte->rate))) {
14128 /* Use the user mss since we are not exactly matched */
14131 new_tso = tcp_get_pacing_burst_size(rack->rc_tp, bw, mss, rack_pace_one_seg, rack->r_ctl.crte, NULL);
14132 if (new_tso > user_max)
14133 new_tso = user_max;
14138 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced)
14140 uint64_t lentim, fill_bw;
14142 /* Lets first see if we are full, if so continue with normal rate */
14143 rack->r_via_fill_cw = 0;
14144 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use)
14146 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd)
14148 if (rack->r_ctl.rc_last_us_rtt == 0)
14150 if (rack->rc_pace_fill_if_rttin_range &&
14151 (rack->r_ctl.rc_last_us_rtt >=
14152 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) {
14153 /* The rtt is huge, N * smallest, lets not fill */
14157 * first lets calculate the b/w based on the last us-rtt
14160 fill_bw = rack->r_ctl.cwnd_to_use;
14161 /* Take the rwnd if its smaller */
14162 if (fill_bw > rack->rc_tp->snd_wnd)
14163 fill_bw = rack->rc_tp->snd_wnd;
14164 if (rack->r_fill_less_agg) {
14166 * Now take away the inflight (this will reduce our
14167 * aggressiveness and yeah, if we get that much out in 1RTT
14168 * we will have had acks come back and still be behind).
14170 fill_bw -= ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
14172 /* Now lets make it into a b/w */
14173 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC;
14174 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt;
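/*
 * Worked example (illustrative numbers): with an effective window of
 * 500000 bytes and a last measured RTT of 50000 usec, fill_bw becomes
 * 500000 * 1000000 / 50000 = 10000000 bytes/sec, i.e. the rate that
 * would drain one window per RTT.
 */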
14175 /* We are below the min b/w */
14177 *rate_wanted = fill_bw;
14178 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted))
14180 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap))
14181 fill_bw = rack->r_ctl.bw_rate_cap;
14182 rack->r_via_fill_cw = 1;
14183 if (rack->r_rack_hw_rate_caps &&
14184 (rack->r_ctl.crte != NULL)) {
14185 uint64_t high_rate;
14187 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte);
14188 if (fill_bw > high_rate) {
14189 /* We are capping bw at the highest rate table entry */
14190 if (*rate_wanted > high_rate) {
14191 /* The original rate was also capped */
14192 rack->r_via_fill_cw = 0;
14194 rack_log_hdwr_pacing(rack,
14195 fill_bw, high_rate, __LINE__,
14197 fill_bw = high_rate;
14201 } else if ((rack->r_ctl.crte == NULL) &&
14202 (rack->rack_hdrw_pacing == 0) &&
14203 (rack->rack_hdw_pace_ena) &&
14204 rack->r_rack_hw_rate_caps &&
14205 (rack->rack_attempt_hdwr_pace == 0) &&
14206 (rack->rc_inp->inp_route.ro_nh != NULL) &&
14207 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
14209 * Ok we may have a first attempt that is greater than our top rate
14212 uint64_t high_rate;
14214 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp);
14216 if (fill_bw > high_rate) {
14217 fill_bw = high_rate;
14224 * Ok fill_bw holds our mythical b/w to fill the cwnd
14225 * in a rtt, what does that time wise equate too?
14227 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC;
14229 *rate_wanted = fill_bw;
14230 if (non_paced || (lentim < slot)) {
14231 rack_log_pacing_delay_calc(rack, len, slot, fill_bw,
14232 0, lentim, 12, __LINE__, NULL);
14233 return ((int32_t)lentim);
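/*
 * In other words, the fill-cw derived time only replaces the caller's
 * slot when it is smaller (or when the caller is not pacing at all),
 * i.e. when filling the cwnd calls for pacing faster than the rate
 * the caller had already computed.
 */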
14239 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz)
14241 struct rack_sendmap *lrsm;
14243 int can_start_hw_pacing = 1;
14246 if (rack->rc_always_pace == 0) {
14248 * We use the most optimistic possible cwnd/srtt for
14249 * sending calculations. This will make our
14250 * calculation anticipate getting more through
14251 * quicker than possible. But that's ok, we don't want
14252 * the peer to have a gap in data sending.
14254 uint32_t srtt, cwnd, tr_perms = 0;
14255 int32_t reduce = 0;
14259 * We keep no precise pacing with the old method
14260 * instead we use the pacer to mitigate bursts.
14262 if (rack->r_ctl.rc_rack_min_rtt)
14263 srtt = rack->r_ctl.rc_rack_min_rtt;
14265 srtt = max(tp->t_srtt, 1);
14266 if (rack->r_ctl.rc_rack_largest_cwnd)
14267 cwnd = rack->r_ctl.rc_rack_largest_cwnd;
14269 cwnd = rack->r_ctl.cwnd_to_use;
14270 /* Inflate cwnd by 1000 so srtt of usecs is in ms */
14271 tr_perms = (cwnd * 1000) / srtt;
14272 if (tr_perms == 0) {
14273 tr_perms = ctf_fixed_maxseg(tp);
14276 * Calculate how long this will take to drain, if
14277 * the calculation comes out to zero, thats ok we
14278 * will use send_a_lot to possibly spin around for
14279 * more increasing tot_len_this_send to the point
14280 * that its going to require a pace, or we hit the
14281 * cwnd. Which in that case we are just waiting for
14284 slot = len / tr_perms;
14285 /* Now do we reduce the time so we don't run dry? */
14286 if (slot && rack_slot_reduction) {
14287 reduce = (slot / rack_slot_reduction);
14288 if (reduce < slot) {
14293 slot *= HPTS_USEC_IN_MSEC;
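/*
 * Worked example for the legacy (non-pacing) path above, with assumed
 * numbers: cwnd = 100000 bytes and srtt = 20000 usec give
 * tr_perms = (100000 * 1000) / 20000 = 5000 bytes per msec, so a
 * 14480 byte send yields slot = 2 msec, or 2000 usec after the
 * HPTS_USEC_IN_MSEC scaling (ignoring any rack_slot_reduction).
 */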
14296 * We always consider ourselves app limited with old-style
14297 * sends that are not retransmits. This could be the initial
14298 * measurement, but that's ok, it's all set up and specially
14299 * handled. If another send leaks out, then that too will
14300 * be marked app-limited.
14302 lrsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
14303 if (lrsm && ((lrsm->r_flags & RACK_APP_LIMITED) == 0)) {
14304 rack->r_ctl.rc_first_appl = lrsm;
14305 lrsm->r_flags |= RACK_APP_LIMITED;
14306 rack->r_ctl.rc_app_limited_cnt++;
14309 if (rack->rc_pace_to_cwnd) {
14310 uint64_t rate_wanted = 0;
14312 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1);
14313 rack->rc_ack_can_sendout_data = 1;
14314 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL);
14316 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL);
14318 uint64_t bw_est, res, lentim, rate_wanted;
14319 uint32_t orig_val, srtt, segs, oh;
14323 if ((rack->r_rr_config == 1) && rsm) {
14324 return (rack->r_ctl.rc_min_to);
14326 if (rack->use_fixed_rate) {
14327 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack);
14328 } else if ((rack->r_ctl.init_rate == 0) &&
14329 #ifdef NETFLIX_PEAKRATE
14330 (rack->rc_tp->t_maxpeakrate == 0) &&
14332 (rack->r_ctl.gp_bw == 0)) {
14333 /* no way to yet do an estimate */
14334 bw_est = rate_wanted = 0;
14336 bw_est = rack_get_bw(rack);
14337 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped);
14339 if ((bw_est == 0) || (rate_wanted == 0) ||
14340 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) {
14342 * No way yet to make a b/w estimate or
14343 * our raise is set incorrectly.
14347 /* We need to account for all the overheads */
14348 segs = (len + segsiz - 1) / segsiz;
14350 * We need the diff between 1514 bytes (e-mtu with e-hdr)
14351 * and how much data we put in each packet. Yes this
14352 * means we may be off if we are larger than 1500 bytes
14353 * or smaller. But this just makes us more conservative.
14355 if (rack_hw_rate_min &&
14356 (bw_est < rack_hw_rate_min))
14357 can_start_hw_pacing = 0;
14358 if (ETHERNET_SEGMENT_SIZE > segsiz)
14359 oh = ETHERNET_SEGMENT_SIZE - segsiz;
14363 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC;
14364 res = lentim / rate_wanted;
14365 slot = (uint32_t)res;
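/*
 * Rough illustration (assumed numbers): lentim scales the length
 * (plus any per-segment overhead accounted above) to microseconds,
 * so sending roughly 64000 bytes at a wanted rate of 8000000
 * bytes/sec works out to about 64000 * 1000000 / 8000000 = 8000 usec
 * of pacing delay.
 */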
14366 orig_val = rack->r_ctl.rc_pace_max_segs;
14367 if (rack->r_ctl.crte == NULL) {
14369 * Only do this if we are not hardware pacing
14370 * since if we are doing hw-pacing below we will
14371 * make a call after setting up or changing
14374 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
14375 } else if (rack->rc_inp->inp_snd_tag == NULL) {
14377 * We lost our rate somehow, this can happen
14378 * if the interface changed underneath us.
14380 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
14381 rack->r_ctl.crte = NULL;
14382 /* Lets re-allow attempting to setup pacing */
14383 rack->rack_hdrw_pacing = 0;
14384 rack->rack_attempt_hdwr_pace = 0;
14385 rack_log_hdwr_pacing(rack,
14386 rate_wanted, bw_est, __LINE__,
14389 /* Did we change the TSO size, if so log it */
14390 if (rack->r_ctl.rc_pace_max_segs != orig_val)
14391 rack_log_pacing_delay_calc(rack, len, slot, orig_val, 0, 0, 15, __LINE__, NULL);
14392 prev_fill = rack->r_via_fill_cw;
14393 if ((rack->rc_pace_to_cwnd) &&
14395 (rack->use_fixed_rate == 0) &&
14396 (rack->in_probe_rtt == 0) &&
14397 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) {
14399 * We want to pace at our rate *or* faster to
14400 * fill the cwnd to the max if its not full.
14402 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0);
14404 if ((rack->rc_inp->inp_route.ro_nh != NULL) &&
14405 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
14406 if ((rack->rack_hdw_pace_ena) &&
14407 (can_start_hw_pacing > 0) &&
14408 (rack->rack_hdrw_pacing == 0) &&
14409 (rack->rack_attempt_hdwr_pace == 0)) {
14411 * Lets attempt to turn on hardware pacing
14414 rack->rack_attempt_hdwr_pace = 1;
14415 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp,
14416 rack->rc_inp->inp_route.ro_nh->nh_ifp,
14419 &err, &rack->r_ctl.crte_prev_rate);
14420 if (rack->r_ctl.crte) {
14421 rack->rack_hdrw_pacing = 1;
14422 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, segsiz,
14423 0, rack->r_ctl.crte,
14425 rack_log_hdwr_pacing(rack,
14426 rate_wanted, rack->r_ctl.crte->rate, __LINE__,
14428 rack->r_ctl.last_hw_bw_req = rate_wanted;
14430 counter_u64_add(rack_hw_pace_init_fail, 1);
14432 } else if (rack->rack_hdrw_pacing &&
14433 (rack->r_ctl.last_hw_bw_req != rate_wanted)) {
14434 /* Do we need to adjust our rate? */
14435 const struct tcp_hwrate_limit_table *nrte;
14437 if (rack->r_up_only &&
14438 (rate_wanted < rack->r_ctl.crte->rate)) {
14440 * We have four possible states here
14441 * having to do with the previous time
14443 * previous | this-time
14444 * A) 0 | 0 -- fill_cw not in the picture
14445 * B) 1 | 0 -- we were doing a fill-cw but now are not
14446 * C) 1 | 1 -- all rates from fill_cw
14447 * D) 0 | 1 -- we were doing non-fill and now we are filling
14449 * For case A, C and D we don't allow a drop. But for
14450 * case B, where we are now on our steady rate, we do
14454 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0)))
14457 if ((rate_wanted > rack->r_ctl.crte->rate) ||
14458 (rate_wanted <= rack->r_ctl.crte_prev_rate)) {
14459 if (rack_hw_rate_to_low &&
14460 (bw_est < rack_hw_rate_to_low)) {
14462 * The pacing rate is too low for hardware, but
14463 * do allow hardware pacing to be restarted.
14465 rack_log_hdwr_pacing(rack,
14466 bw_est, rack->r_ctl.crte->rate, __LINE__,
14468 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
14469 rack->r_ctl.crte = NULL;
14470 rack->rack_attempt_hdwr_pace = 0;
14471 rack->rack_hdrw_pacing = 0;
14472 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
14475 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte,
14477 rack->rc_inp->inp_route.ro_nh->nh_ifp,
14480 &err, &rack->r_ctl.crte_prev_rate);
14481 if (nrte == NULL) {
14482 /* Lost the rate */
14483 rack->rack_hdrw_pacing = 0;
14484 rack->r_ctl.crte = NULL;
14485 rack_log_hdwr_pacing(rack,
14486 rate_wanted, 0, __LINE__,
14488 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
14489 counter_u64_add(rack_hw_pace_lost, 1);
14490 } else if (nrte != rack->r_ctl.crte) {
14491 rack->r_ctl.crte = nrte;
14492 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted,
14496 rack_log_hdwr_pacing(rack,
14497 rate_wanted, rack->r_ctl.crte->rate, __LINE__,
14499 rack->r_ctl.last_hw_bw_req = rate_wanted;
14502 /* We just need to adjust the segment size */
14503 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
14504 rack_log_hdwr_pacing(rack,
14505 rate_wanted, rack->r_ctl.crte->rate, __LINE__,
14507 rack->r_ctl.last_hw_bw_req = rate_wanted;
14511 if ((rack->r_ctl.crte != NULL) &&
14512 (rack->r_ctl.crte->rate == rate_wanted)) {
14514 * We need to add an extra if the rates
14515 * are exactly matched. The idea is
14516 * we want the software to make sure the
14517 * queue is empty before adding more, this
14518 * gives us N MSS extra pace times where
14521 slot += (rack->r_ctl.crte->time_between * rack_hw_pace_extra_slots);
14524 if (rack_limit_time_with_srtt &&
14525 (rack->use_fixed_rate == 0) &&
14526 #ifdef NETFLIX_PEAKRATE
14527 (rack->rc_tp->t_maxpeakrate == 0) &&
14529 (rack->rack_hdrw_pacing == 0)) {
14531 * Sanity check, we do not allow the pacing delay
14532 * to be longer than the SRTT of the path. If it is
14533 * a slow path, then adding a packet should increase
14534 * the RTT and compensate for this i.e. the srtt will
14535 * be greater so the allowed pacing time will be greater.
14537 * Note this restriction does not apply where a peak rate
14538 * is set, or we are doing fixed pacing or hardware pacing.
14540 if (rack->rc_tp->t_srtt)
14541 srtt = rack->rc_tp->t_srtt;
14543 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */
14545 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL);
14549 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm);
14551 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) {
14553 * If this rate is seeing enobufs when it
14554 * goes to send then either the nic is out
14555 * of gas or we are mis-estimating the time
14556 * somehow and not letting the queue empty
14557 * completely. Lets add to the pacing time.
14559 int hw_boost_delay;
14561 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult;
14562 if (hw_boost_delay > rack_enobuf_hw_max)
14563 hw_boost_delay = rack_enobuf_hw_max;
14564 else if (hw_boost_delay < rack_enobuf_hw_min)
14565 hw_boost_delay = rack_enobuf_hw_min;
14566 slot += hw_boost_delay;
14569 counter_u64_add(rack_calc_nonzero, 1);
14571 counter_u64_add(rack_calc_zero, 1);
14576 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack,
14577 tcp_seq startseq, uint32_t sb_offset)
14579 struct rack_sendmap *my_rsm = NULL;
14580 struct rack_sendmap fe;
14582 if (tp->t_state < TCPS_ESTABLISHED) {
14584 * We don't start any measurements if we are
14585 * not at least established.
14589 tp->t_flags |= TF_GPUTINPROG;
14590 rack->r_ctl.rc_gp_lowrtt = 0xffffffff;
14591 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
14592 tp->gput_seq = startseq;
14593 rack->app_limited_needs_set = 0;
14594 if (rack->in_probe_rtt)
14595 rack->measure_saw_probe_rtt = 1;
14596 else if ((rack->measure_saw_probe_rtt) &&
14597 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
14598 rack->measure_saw_probe_rtt = 0;
14599 if (rack->rc_gp_filled)
14600 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
14602 /* Special case initial measurement */
14605 tp->gput_ts = tcp_get_usecs(&tv);
14606 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
14609 * We take a guess out into the future,
14610 * if we have no measurement and no
14611 * initial rate, we measure the first
14612 * initial-windows worth of data to
14613 * speed up getting some GP measurement and
14614 * thus start pacing.
14616 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) {
14617 rack->app_limited_needs_set = 1;
14618 tp->gput_ack = startseq + max(rc_init_window(rack),
14619 (MIN_GP_WIN * ctf_fixed_maxseg(tp)));
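/*
 * In other words the very first measurement, with no prior rate
 * information, ends once at least an initial window's worth of data
 * (or MIN_GP_WIN segments, whichever is larger) beyond startseq has
 * been cumulatively acked.
 */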
14620 rack_log_pacing_delay_calc(rack,
14625 rack->r_ctl.rc_app_limited_cnt,
14632 * We are out somewhere in the sb
14633 * can we use the already outstanding data?
14636 if (rack->r_ctl.rc_app_limited_cnt == 0) {
14638 * Yes first one is good and in this case
14639 * the tp->gput_ts is correctly set based on
14640 * the last ack that arrived (no need to
14641 * set things up when an ack comes in).
14643 my_rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
14644 if ((my_rsm == NULL) ||
14645 (my_rsm->r_rtr_cnt != 1)) {
14646 /* retransmission? */
14650 if (rack->r_ctl.rc_first_appl == NULL) {
14652 * If rc_first_appl is NULL
14653 * then the cnt should be 0.
14654 * This is probably an error, maybe
14655 * a KASSERT would be appropriate.
14660 * If we have a marker pointer to the last one that is
14661 * app limited we can use that, but we need to set
14662 * things up so that when it gets ack'ed we record
14663 * the ack time (if its not already acked).
14665 rack->app_limited_needs_set = 1;
14667 * We want to get to the rsm that is either
14668 * next with space i.e. over 1 MSS or the one
14669 * after that (after the app-limited).
14671 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree,
14672 rack->r_ctl.rc_first_appl);
14674 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp))
14675 /* Have to use the next one */
14676 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree,
14679 /* Use after the first MSS of it is acked */
14680 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp);
14684 if ((my_rsm == NULL) ||
14685 (my_rsm->r_rtr_cnt != 1)) {
14687 * Either its a retransmit or
14688 * the last is the app-limited one.
14693 tp->gput_seq = my_rsm->r_start;
14695 if (my_rsm->r_flags & RACK_ACKED) {
14697 * This one has been acked use the arrival ack time
14699 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival;
14700 rack->app_limited_needs_set = 0;
14702 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)];
14703 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
14704 rack_log_pacing_delay_calc(rack,
14709 rack->r_ctl.rc_app_limited_cnt,
14717 * We don't know how long we may have been
14718 * idle or if this is the first-send. Lets
14719 * setup the flag so we will trim off
14720 * the first ack'd data so we get a true
14723 rack->app_limited_needs_set = 1;
14724 tp->gput_ack = startseq + rack_get_measure_window(tp, rack);
14725 /* Find this guy so we can pull the send time */
14726 fe.r_start = startseq;
14727 my_rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
14729 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)];
14730 if (my_rsm->r_flags & RACK_ACKED) {
14732 * Unlikely since it's probably what was
14733 * just transmitted (but I am paranoid).
14735 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival;
14736 rack->app_limited_needs_set = 0;
14738 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) {
14739 /* This also is unlikely */
14740 tp->gput_seq = my_rsm->r_start;
14744 * TSNH unless we have some send-map limit,
14745 * and even at that it should not be hitting
14746 * that limit (we should have stopped sending).
14751 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
14753 rack_log_pacing_delay_calc(rack,
14758 rack->r_ctl.rc_app_limited_cnt,
14759 9, __LINE__, NULL);
14762 static inline uint32_t
14763 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use,
14764 uint32_t avail, int32_t sb_offset)
14769 if (tp->snd_wnd > cwnd_to_use)
14770 sendwin = cwnd_to_use;
14772 sendwin = tp->snd_wnd;
14773 if (ctf_outstanding(tp) >= tp->snd_wnd) {
14774 /* We never want to go over our peers rcv-window */
14779 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked);
14780 if (flight >= sendwin) {
14782 * We have in flight what we are allowed by cwnd (if
14783 * it was rwnd blocking it would have hit above out
14788 len = sendwin - flight;
14789 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) {
14790 /* We would send too much (beyond the rwnd) */
14791 len = tp->snd_wnd - ctf_outstanding(tp);
14793 if ((len + sb_offset) > avail) {
14795 * We don't have that much in the SB, how much is
14798 len = avail - sb_offset;
14805 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags,
14806 unsigned ipoptlen, int32_t orig_len, int32_t len, int error,
14807 int rsm_is_null, int optlen, int line, uint16_t mode)
14809 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
14810 union tcp_log_stackspecific log;
14813 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
14814 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
14815 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
14816 log.u_bbr.flex1 = error;
14817 log.u_bbr.flex2 = flags;
14818 log.u_bbr.flex3 = rsm_is_null;
14819 log.u_bbr.flex4 = ipoptlen;
14820 log.u_bbr.flex5 = tp->rcv_numsacks;
14821 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
14822 log.u_bbr.flex7 = optlen;
14823 log.u_bbr.flex8 = rack->r_fsb_inited;
14824 log.u_bbr.applimited = rack->r_fast_output;
14825 log.u_bbr.bw_inuse = rack_get_bw(rack);
14826 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL);
14827 log.u_bbr.cwnd_gain = mode;
14828 log.u_bbr.pkts_out = orig_len;
14829 log.u_bbr.lt_epoch = len;
14830 log.u_bbr.delivered = line;
14831 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
14832 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
14833 tcp_log_event_(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0,
14834 len, &log, false, NULL, NULL, 0, &tv);
14839 static struct mbuf *
14840 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen,
14841 struct rack_fast_send_blk *fsb,
14842 int32_t seglimit, int32_t segsize)
14845 struct ktls_session *tls, *ntls;
14846 struct mbuf *start;
14848 struct mbuf *m, *n, **np, *smb;
14851 int32_t len = *plen;
14853 int32_t len_cp = 0;
14854 uint32_t mlen, frags;
14856 soff = off = the_off;
14861 if (hw_tls && (m->m_flags & M_EXTPG))
14862 tls = m->m_epg_tls;
14874 if (m->m_flags & M_EXTPG)
14875 ntls = m->m_epg_tls;
14880 * Avoid mixing TLS records with handshake
14881 * data or TLS records from different
14891 mlen = min(len, m->m_len - off);
14894 * For M_EXTPG mbufs, add 3 segments
14895 * + 1 in case we are crossing page boundaries
14896 * + 2 in case the TLS hdr/trailer are used
14897 * It is cheaper to just add the segments
14898 * than it is to take the cache miss to look
14899 * at the mbuf ext_pgs state in detail.
14901 if (m->m_flags & M_EXTPG) {
14902 fragsize = min(segsize, PAGE_SIZE);
14905 fragsize = segsize;
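/*
 * Illustrative sketch (hypothetical numbers): for an unmapped
 * (M_EXTPG) mbuf with PAGE_SIZE = 4096 and segsize = 65535, fragsize
 * is clamped to 4096, so copying mlen = 16384 bytes is charged
 * howmany(16384, 4096) = 4 descriptors on top of the 3 extra segments
 * accounted above for page-boundary crossings and a possible TLS
 * header/trailer. Should frags plus that charge reach seglimit, mlen
 * is trimmed back below so the interface's descriptor budget is never
 * exceeded.
 */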
14909 /* Break if we really can't fit anymore. */
14910 if ((frags + 1) >= seglimit) {
14916 * Reduce the size if we can't copy the whole
14917 * mbuf. If we can't copy the whole mbuf, also
14918 * adjust len so the loop will end after this
14921 if ((frags + howmany(mlen, fragsize)) >= seglimit) {
14922 mlen = (seglimit - frags - 1) * fragsize;
14924 *plen = len_cp + len;
14926 frags += howmany(mlen, fragsize);
14930 KASSERT(seglimit > 0,
14931 ("%s: seglimit went too low", __func__));
14933 n = m_get(M_NOWAIT, m->m_type);
14939 len_cp += n->m_len;
14940 if (m->m_flags & (M_EXT|M_EXTPG)) {
14941 n->m_data = m->m_data + off;
14944 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
14951 if (len || (soff == smb->m_len)) {
14953 * We have more, so we move forward, or
14954 * we have consumed the entire mbuf and
14955 * len has fallen to 0.
14967 * Save off the size of the mbuf. We do
14968 * this so that we can recognize when it
14969 * has been trimmed by sbcut() as acks
14972 fsb->o_m_len = smb->m_len;
14975 * This is the case where the next mbuf went to NULL. This
14976 * means with this copy we have sent everything in the sb.
14977 * In theory we could clear the fast_output flag, but let's
14978 * not, since it's possible that we could get more added
14979 * and acks that call the extend function which would let
14994 * This is a copy of m_copym(), taking the TSO segment size/limit
14995 * constraints into account, and advancing the sndptr as it goes.
14997 static struct mbuf *
14998 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen,
14999 int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff)
15001 struct mbuf *m, *n;
15004 soff = rack->r_ctl.fsb.off;
15005 m = rack->r_ctl.fsb.m;
15006 if (rack->r_ctl.fsb.o_m_len != m->m_len) {
15008 * The mbuf had the front of it chopped off by an ack
15009 * we need to adjust the soff/off by that difference.
15013 delta = rack->r_ctl.fsb.o_m_len - m->m_len;
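/*
 * Illustrative sketch (hypothetical numbers): if fsb.o_m_len was
 * recorded as 2048 when the fast-send block was set up and an
 * arriving ack let sbcut() trim 512 bytes off the front of that mbuf,
 * m->m_len is now 1536 and
 *
 *	delta = 2048 - 1536 = 512;
 *	soff -= delta;		// presumably applied just below
 *
 * so the copy still starts at the same byte of the stream even though
 * the mbuf's data pointer has moved.
 */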
15016 KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff));
15017 KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen));
15018 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?",
15020 rack, *plen, m, m->m_len));
15021 /* Save off the right location before we copy and advance */
15023 *s_mb = rack->r_ctl.fsb.m;
15024 n = rack_fo_base_copym(m, soff, plen,
15026 seglimit, segsize);
15031 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm,
15032 uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len)
15035 * Enter the fast retransmit path. We are given that a sched_pin is
15036 * in place (if accounting is compiled in) and the cycle count taken
15037 * at the entry is in ts_val. The concept here is that the rsm
15038 * now holds the mbuf offsets and such, so we can directly transmit
15039 * without a lot of overhead; the len field is already set for
15040 * us to prohibit us from sending too much (usually it is 1 MSS).
15042 struct ip *ip = NULL;
15043 struct udphdr *udp = NULL;
15044 struct tcphdr *th = NULL;
15045 struct mbuf *m = NULL;
15048 struct tcp_log_buffer *lgb;
15049 #ifdef TCP_ACCOUNTING
15055 u_char opt[TCP_MAXOLEN];
15056 uint32_t hdrlen, optlen;
15057 int32_t slot, segsiz, max_val, tso = 0, error, flags, ulen = 0;
15059 uint32_t if_hw_tsomaxsegcount = 0, startseq;
15060 uint32_t if_hw_tsomaxsegsize;
15062 struct ip6_hdr *ip6 = NULL;
15064 if (rack->r_is_v6) {
15065 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
15066 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
15070 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
15071 hdrlen = sizeof(struct tcpiphdr);
15073 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) {
15076 if (rsm->r_flags & RACK_TLP)
15078 startseq = rsm->r_start;
15079 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
15080 inp = rack->rc_inp;
15082 flags = tcp_outflags[tp->t_state];
15083 if (flags & (TH_SYN|TH_RST)) {
15086 if (rsm->r_flags & RACK_HAS_FIN) {
15087 /* We can't send a FIN here */
15090 if (flags & TH_FIN) {
15091 /* We never send a FIN */
15094 if (tp->t_flags & TF_RCVD_TSTMP) {
15095 to.to_tsval = ms_cts + tp->ts_offset;
15096 to.to_tsecr = tp->ts_recent;
15097 to.to_flags = TOF_TS;
15099 optlen = tcp_addoptions(&to, opt);
15101 udp = rack->r_ctl.fsb.udp;
15103 hdrlen += sizeof(struct udphdr);
15104 if (rack->r_ctl.rc_pace_max_segs)
15105 max_val = rack->r_ctl.rc_pace_max_segs;
15106 else if (rack->rc_user_set_max_segs)
15107 max_val = rack->rc_user_set_max_segs * segsiz;
15110 if ((tp->t_flags & TF_TSO) &&
15116 if (MHLEN < hdrlen + max_linkhdr)
15117 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
15120 m = m_gethdr(M_NOWAIT, MT_DATA);
15123 m->m_data += max_linkhdr;
15125 th = rack->r_ctl.fsb.th;
15126 /* Establish the len to send */
15129 if ((tso) && (len + optlen > tp->t_maxseg)) {
15130 uint32_t if_hw_tsomax;
15133 /* extract TSO information */
15134 if_hw_tsomax = tp->t_tsomax;
15135 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
15136 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
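/*
 * Illustrative sketch (hypothetical numbers): if the NIC reports
 * if_hw_tsomax = 65535 while hdrlen = 52 (IPv4 + TCP) and
 * max_linkhdr = 16, the largest payload we can hand down in a single
 * TSO burst is roughly
 *
 *	max_len = 65535 - 52 - 16 = 65467;
 *
 * and len is clamped to that below; if the subtraction were to go
 * non-positive, TSO would simply be turned off for this send.
 */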
15138 * Check if we should limit by maximum payload
15141 if (if_hw_tsomax != 0) {
15142 /* compute maximum TSO length */
15143 max_len = (if_hw_tsomax - hdrlen -
15145 if (max_len <= 0) {
15147 } else if (len > max_len) {
15151 if (len <= segsiz) {
15153 * In case there are too many small fragments don't
15161 if ((tso == 0) && (len > segsiz))
15163 us_cts = tcp_get_usecs(tv);
15165 (len <= MHLEN - hdrlen - max_linkhdr)) {
15168 th->th_seq = htonl(rsm->r_start);
15169 th->th_ack = htonl(tp->rcv_nxt);
15170 if(rsm->r_flags & RACK_HAD_PUSH)
15172 th->th_flags = flags;
15173 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale));
15174 if (th->th_win == 0) {
15175 tp->t_sndzerowin++;
15176 tp->t_flags |= TF_RXWIN0SENT;
15178 tp->t_flags &= ~TF_RXWIN0SENT;
15179 if (rsm->r_flags & RACK_TLP) {
15181 * TLP should not count in retran count, but
15184 counter_u64_add(rack_tlp_retran, 1);
15185 counter_u64_add(rack_tlp_retran_bytes, len);
15187 tp->t_sndrexmitpack++;
15188 KMOD_TCPSTAT_INC(tcps_sndrexmitpack);
15189 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len);
15192 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
15195 if (rsm->m == NULL)
15197 if (rsm->orig_m_len != rsm->m->m_len) {
15198 /* Fix up the orig_m_len and possibly the mbuf offset */
15199 rack_adjust_orig_mlen(rsm);
15201 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize);
15202 if (len <= segsiz) {
15204 * Must have run out of mbufs for the copy;
15205 * shorten it to no longer need TSO. Let's
15206 * not put on sendalot since we are low on
15211 if ((m->m_next == NULL) || (len <= 0)){
15216 ulen = hdrlen + len - sizeof(struct ip6_hdr);
15218 ulen = hdrlen + len - sizeof(struct ip);
15219 udp->uh_ulen = htons(ulen);
15221 m->m_pkthdr.rcvif = (struct ifnet *)0;
15222 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() needs this */
15224 if (rack->r_is_v6) {
15226 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
15227 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
15228 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
15229 th->th_sum = htons(0);
15230 UDPSTAT_INC(udps_opackets);
15232 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
15233 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
15234 th->th_sum = in6_cksum_pseudo(ip6,
15235 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
15240 #if defined(INET6) && defined(INET)
15246 m->m_pkthdr.csum_flags = CSUM_UDP;
15247 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
15248 udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
15249 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
15250 th->th_sum = htons(0);
15251 UDPSTAT_INC(udps_opackets);
15253 m->m_pkthdr.csum_flags = CSUM_TCP;
15254 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
15255 th->th_sum = in_pseudo(ip->ip_src.s_addr,
15256 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
15257 IPPROTO_TCP + len + optlen));
15259 /* IP version must be set here for ipv4/ipv6 checking later */
15260 KASSERT(ip->ip_v == IPVERSION,
15261 ("%s: IP version incorrect: %d", __func__, ip->ip_v));
15265 KASSERT(len > tp->t_maxseg - optlen,
15266 ("%s: len <= tso_segsz tp:%p", __func__, tp));
15267 m->m_pkthdr.csum_flags |= CSUM_TSO;
15268 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
15271 if (rack->r_is_v6) {
15272 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit;
15273 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
15274 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
15275 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
15277 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
15280 #if defined(INET) && defined(INET6)
15285 ip->ip_len = htons(m->m_pkthdr.len);
15286 ip->ip_ttl = rack->r_ctl.fsb.hoplimit;
15287 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
15288 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
15289 if (tp->t_port == 0 || len < V_tcp_minmss) {
15290 ip->ip_off |= htons(IP_DF);
15293 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
15297 /* Time to copy in our header */
15298 cpto = mtod(m, uint8_t *);
15299 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
15300 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
15302 bcopy(opt, th + 1, optlen);
15303 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
15305 th->th_off = sizeof(struct tcphdr) >> 2;
15307 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
15308 union tcp_log_stackspecific log;
15310 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
15311 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
15312 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
15313 if (rack->rack_no_prr)
15314 log.u_bbr.flex1 = 0;
15316 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
15317 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
15318 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
15319 log.u_bbr.flex4 = max_val;
15320 log.u_bbr.flex5 = 0;
15321 /* Save off the early/late values */
15322 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
15323 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
15324 log.u_bbr.bw_inuse = rack_get_bw(rack);
15325 log.u_bbr.flex8 = 1;
15326 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL);
15327 log.u_bbr.flex7 = 55;
15328 log.u_bbr.pkts_out = tp->t_maxseg;
15329 log.u_bbr.timeStamp = cts;
15330 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
15331 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use;
15332 log.u_bbr.delivered = 0;
15333 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK,
15334 len, &log, false, NULL, NULL, 0, tv);
15338 if (rack->r_is_v6) {
15339 error = ip6_output(m, NULL,
15341 0, NULL, NULL, inp);
15344 #if defined(INET) && defined(INET6)
15349 error = ip_output(m, NULL,
15356 lgb->tlb_errno = error;
15362 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv),
15363 rsm, RACK_SENT_FP, rsm->m, rsm->soff);
15364 if (doing_tlp && (rack->fast_rsm_hack == 0)) {
15365 rack->rc_tlp_in_progress = 1;
15366 rack->r_ctl.rc_tlp_cnt_out++;
15368 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
15369 rack->forced_ack = 0; /* If we send something zap the FA flag */
15370 if (IN_FASTRECOVERY(tp->t_flags) && rsm)
15371 rack->r_ctl.retran_during_recovery += len;
15375 idx = (len / segsiz) + 3;
15376 if (idx >= TCP_MSS_ACCT_ATIMER)
15377 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
15379 counter_u64_add(rack_out_size[idx], 1);
15381 if (tp->t_rtttime == 0) {
15382 tp->t_rtttime = ticks;
15383 tp->t_rtseq = startseq;
15384 KMOD_TCPSTAT_INC(tcps_segstimed);
15386 counter_u64_add(rack_fto_rsm_send, 1);
15387 if (error && (error == ENOBUFS)) {
15388 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
15389 if (rack->rc_enobuf < 0x7f)
15391 if (slot < (10 * HPTS_USEC_IN_MSEC))
15392 slot = 10 * HPTS_USEC_IN_MSEC;
15394 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz);
15396 (rack->rc_always_pace == 0) ||
15397 (rack->r_rr_config == 1)) {
15399 * We have no pacing set or we
15400 * are using old-style rack or
15401 * we are overridden to use the old 1 ms pacing.
15403 slot = rack->r_ctl.rc_min_to;
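/*
 * Illustrative sketch (hypothetical numbers) of the ENOBUFS backoff
 * above: each consecutive ENOBUFS grows the pacing slot by one
 * millisecond with a 10 ms floor, e.g.
 *
 *	rc_enobuf = 0  -> slot = 1 ms  -> clamped to 10 ms
 *	rc_enobuf = 3  -> slot = 4 ms  -> clamped to 10 ms
 *	rc_enobuf = 12 -> slot = 13 ms (no clamp needed)
 *
 * On a clean send the normal rack_get_pacing_delay() slot is used
 * instead, unless pacing is off or the old 1 ms behaviour is forced,
 * in which case rc_min_to is used.
 */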
15405 rack_start_hpts_timer(rack, tp, cts, slot, len, 0);
15406 if (rack->r_must_retran) {
15407 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start);
15408 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) {
15410 * We have retransmitted all we need.
15412 rack->r_must_retran = 0;
15413 rack->r_ctl.rc_out_at_rto = 0;
15416 #ifdef TCP_ACCOUNTING
15417 crtsc = get_cyclecount();
15418 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15419 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru;
15421 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru);
15422 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15423 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
15425 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val));
15426 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15427 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz);
15429 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((len + segsiz - 1) / segsiz));
15440 rack_sndbuf_autoscale(struct tcp_rack *rack)
15443 * Automatic sizing of send socket buffer. Often the send buffer
15444 * size is not optimally adjusted to the actual network conditions
15445 * at hand (bandwidth-delay product). Setting the buffer size too
15446 * small limits throughput on links with high bandwidth and high
15447 * delay (e.g. trans-continental/oceanic links). Setting the
15448 * buffer size too big consumes too much real kernel memory,
15449 * especially with many connections on busy servers.
15451 * The criteria to step up the send buffer one notch are:
15452 * 1. receive window of remote host is larger than send buffer
15453 * (with a fudge factor of 5/4th);
15454 * 2. send buffer is filled to 7/8th with data (so we actually
15455 * have data to make use of it);
15456 * 3. send buffer fill has not hit maximal automatic size;
15457 * 4. our send window (slow start and congestion controlled) is
15458 * larger than sent but unacknowledged data in send buffer.
15460 * Note that the rack version moves things much faster since
15461 * we want to avoid hitting cache lines in the rack_fast_output()
15462 * path so this is called much less often and thus moves
15463 * the SB forward by a percentage.
15467 uint32_t sendwin, scaleup;
15470 so = rack->rc_inp->inp_socket;
15471 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd);
15472 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
15473 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat &&
15474 sbused(&so->so_snd) >=
15475 (so->so_snd.sb_hiwat / 8 * 7) &&
15476 sbused(&so->so_snd) < V_tcp_autosndbuf_max &&
15477 sendwin >= (sbused(&so->so_snd) -
15478 (tp->snd_nxt - tp->snd_una))) {
15479 if (rack_autosndbuf_inc)
15480 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100;
15482 scaleup = V_tcp_autosndbuf_inc;
15483 if (scaleup < V_tcp_autosndbuf_inc)
15484 scaleup = V_tcp_autosndbuf_inc;
15485 scaleup += so->so_snd.sb_hiwat;
15486 if (scaleup > V_tcp_autosndbuf_max)
15487 scaleup = V_tcp_autosndbuf_max;
15488 if (!sbreserve_locked(&so->so_snd, scaleup, so, curthread))
15489 so->so_snd.sb_flags &= ~SB_AUTOSIZE;
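/*
 * Illustrative sketch (hypothetical numbers) of the growth step above:
 * with so_snd.sb_hiwat = 65536 and rack_autosndbuf_inc = 20 (percent),
 *
 *	scaleup = (20 * 65536) / 100 = 13107;	// floored at V_tcp_autosndbuf_inc
 *	scaleup += 65536;			// new target of 78643 bytes
 *	scaleup = min(scaleup, V_tcp_autosndbuf_max);
 *
 * so each pass grows the buffer by a percentage of its current size
 * rather than a fixed increment, which is what lets this version be
 * called far less often than the generic autoscaling path.
 */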
15495 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val,
15496 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err)
15499 * Enter to do fast output. We are given that the sched_pin is
15500 * in place (if accounting is compiled in) and the cycle count taken
15501 * at entry is in place in ts_val. The idea here is that
15502 * we know how many more bytes need to be sent (presumably either
15503 * during pacing or to fill the cwnd and that was greater than
15504 * the max-burst). We have how much to send and all the info we
15505 * need to just send.
15507 struct ip *ip = NULL;
15508 struct udphdr *udp = NULL;
15509 struct tcphdr *th = NULL;
15510 struct mbuf *m, *s_mb;
15513 struct tcp_log_buffer *lgb;
15514 #ifdef TCP_ACCOUNTING
15518 u_char opt[TCP_MAXOLEN];
15519 uint32_t hdrlen, optlen;
15521 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, flags, ulen = 0;
15522 uint32_t us_cts, s_soff;
15523 uint32_t if_hw_tsomaxsegcount = 0, startseq;
15524 uint32_t if_hw_tsomaxsegsize;
15525 uint16_t add_flag = RACK_SENT_FP;
15527 struct ip6_hdr *ip6 = NULL;
15529 if (rack->r_is_v6) {
15530 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
15531 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
15535 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
15536 hdrlen = sizeof(struct tcpiphdr);
15538 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) {
15542 startseq = tp->snd_max;
15543 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
15544 inp = rack->rc_inp;
15545 len = rack->r_ctl.fsb.left_to_send;
15547 flags = rack->r_ctl.fsb.tcp_flags;
15548 if (tp->t_flags & TF_RCVD_TSTMP) {
15549 to.to_tsval = ms_cts + tp->ts_offset;
15550 to.to_tsecr = tp->ts_recent;
15551 to.to_flags = TOF_TS;
15553 optlen = tcp_addoptions(&to, opt);
15555 udp = rack->r_ctl.fsb.udp;
15557 hdrlen += sizeof(struct udphdr);
15558 if (rack->r_ctl.rc_pace_max_segs)
15559 max_val = rack->r_ctl.rc_pace_max_segs;
15560 else if (rack->rc_user_set_max_segs)
15561 max_val = rack->rc_user_set_max_segs * segsiz;
15564 if ((tp->t_flags & TF_TSO) &&
15571 if (MHLEN < hdrlen + max_linkhdr)
15572 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
15575 m = m_gethdr(M_NOWAIT, MT_DATA);
15578 m->m_data += max_linkhdr;
15580 th = rack->r_ctl.fsb.th;
15581 /* Establish the len to send */
15584 if ((tso) && (len + optlen > tp->t_maxseg)) {
15585 uint32_t if_hw_tsomax;
15588 /* extract TSO information */
15589 if_hw_tsomax = tp->t_tsomax;
15590 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
15591 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
15593 * Check if we should limit by maximum payload
15596 if (if_hw_tsomax != 0) {
15597 /* compute maximum TSO length */
15598 max_len = (if_hw_tsomax - hdrlen -
15600 if (max_len <= 0) {
15602 } else if (len > max_len) {
15606 if (len <= segsiz) {
15608 * In case there are too many small fragments don't
15616 if ((tso == 0) && (len > segsiz))
15618 us_cts = tcp_get_usecs(tv);
15620 (len <= MHLEN - hdrlen - max_linkhdr)) {
15623 sb_offset = tp->snd_max - tp->snd_una;
15624 th->th_seq = htonl(tp->snd_max);
15625 th->th_ack = htonl(tp->rcv_nxt);
15626 th->th_flags = flags;
15627 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale));
15628 if (th->th_win == 0) {
15629 tp->t_sndzerowin++;
15630 tp->t_flags |= TF_RXWIN0SENT;
15632 tp->t_flags &= ~TF_RXWIN0SENT;
15633 tp->snd_up = tp->snd_una; /* drag it along, it's deprecated */
15634 KMOD_TCPSTAT_INC(tcps_sndpack);
15635 KMOD_TCPSTAT_ADD(tcps_sndbyte, len);
15637 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
15640 if (rack->r_ctl.fsb.m == NULL)
15643 /* s_mb and s_soff are saved for rack_log_output */
15644 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, &s_mb, &s_soff);
15645 if (len <= segsiz) {
15647 * Must have run out of mbufs for the copy;
15648 * shorten it to no longer need TSO. Let's
15649 * not put on sendalot since we are low on
15654 if (rack->r_ctl.fsb.rfo_apply_push &&
15655 (len == rack->r_ctl.fsb.left_to_send)) {
15656 th->th_flags |= TH_PUSH;
15657 add_flag |= RACK_HAD_PUSH;
15659 if ((m->m_next == NULL) || (len <= 0)){
15664 ulen = hdrlen + len - sizeof(struct ip6_hdr);
15666 ulen = hdrlen + len - sizeof(struct ip);
15667 udp->uh_ulen = htons(ulen);
15669 m->m_pkthdr.rcvif = (struct ifnet *)0;
15670 if (tp->t_state == TCPS_ESTABLISHED &&
15671 (tp->t_flags2 & TF2_ECN_PERMIT)) {
15673 * If the peer has ECN, mark data packets with ECN capable
15674 * transmission (ECT). Ignore pure ack packets,
15677 if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max)) {
15680 ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20);
15683 ip->ip_tos |= IPTOS_ECN_ECT0;
15684 KMOD_TCPSTAT_INC(tcps_ecn_ect0);
15686 * Reply with proper ECN notifications.
15687 * Only set CWR on new data segments.
15689 if (tp->t_flags2 & TF2_ECN_SND_CWR) {
15691 tp->t_flags2 &= ~TF2_ECN_SND_CWR;
15694 if (tp->t_flags2 & TF2_ECN_SND_ECE)
15697 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() needs this */
15699 if (rack->r_is_v6) {
15701 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
15702 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
15703 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
15704 th->th_sum = htons(0);
15705 UDPSTAT_INC(udps_opackets);
15707 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
15708 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
15709 th->th_sum = in6_cksum_pseudo(ip6,
15710 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
15715 #if defined(INET6) && defined(INET)
15721 m->m_pkthdr.csum_flags = CSUM_UDP;
15722 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
15723 udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
15724 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
15725 th->th_sum = htons(0);
15726 UDPSTAT_INC(udps_opackets);
15728 m->m_pkthdr.csum_flags = CSUM_TCP;
15729 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
15730 th->th_sum = in_pseudo(ip->ip_src.s_addr,
15731 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
15732 IPPROTO_TCP + len + optlen));
15734 /* IP version must be set here for ipv4/ipv6 checking later */
15735 KASSERT(ip->ip_v == IPVERSION,
15736 ("%s: IP version incorrect: %d", __func__, ip->ip_v));
15740 KASSERT(len > tp->t_maxseg - optlen,
15741 ("%s: len <= tso_segsz tp:%p", __func__, tp));
15742 m->m_pkthdr.csum_flags |= CSUM_TSO;
15743 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
15746 if (rack->r_is_v6) {
15747 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit;
15748 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
15749 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
15750 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
15752 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
15755 #if defined(INET) && defined(INET6)
15760 ip->ip_len = htons(m->m_pkthdr.len);
15761 ip->ip_ttl = rack->r_ctl.fsb.hoplimit;
15762 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
15763 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
15764 if (tp->t_port == 0 || len < V_tcp_minmss) {
15765 ip->ip_off |= htons(IP_DF);
15768 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
15772 /* Time to copy in our header */
15773 cpto = mtod(m, uint8_t *);
15774 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
15775 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
15777 bcopy(opt, th + 1, optlen);
15778 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
15780 th->th_off = sizeof(struct tcphdr) >> 2;
15782 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
15783 union tcp_log_stackspecific log;
15785 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
15786 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
15787 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
15788 if (rack->rack_no_prr)
15789 log.u_bbr.flex1 = 0;
15791 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
15792 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
15793 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
15794 log.u_bbr.flex4 = max_val;
15795 log.u_bbr.flex5 = 0;
15796 /* Save off the early/late values */
15797 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
15798 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
15799 log.u_bbr.bw_inuse = rack_get_bw(rack);
15800 log.u_bbr.flex8 = 0;
15801 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL);
15802 log.u_bbr.flex7 = 44;
15803 log.u_bbr.pkts_out = tp->t_maxseg;
15804 log.u_bbr.timeStamp = cts;
15805 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
15806 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use;
15807 log.u_bbr.delivered = 0;
15808 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK,
15809 len, &log, false, NULL, NULL, 0, tv);
15813 if (rack->r_is_v6) {
15814 error = ip6_output(m, NULL,
15816 0, NULL, NULL, inp);
15819 #if defined(INET) && defined(INET6)
15824 error = ip_output(m, NULL,
15830 lgb->tlb_errno = error;
15838 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv),
15839 NULL, add_flag, s_mb, s_soff);
15841 if (tp->snd_una == tp->snd_max) {
15842 rack->r_ctl.rc_tlp_rxt_last_time = cts;
15843 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
15844 tp->t_acktime = ticks;
15846 rack->forced_ack = 0; /* If we send something zap the FA flag */
15848 if ((tp->t_flags & TF_GPUTINPROG) == 0)
15849 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset);
15850 tp->snd_max += len;
15851 tp->snd_nxt = tp->snd_max;
15855 idx = (len / segsiz) + 3;
15856 if (idx >= TCP_MSS_ACCT_ATIMER)
15857 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
15859 counter_u64_add(rack_out_size[idx], 1);
15861 if (len <= rack->r_ctl.fsb.left_to_send)
15862 rack->r_ctl.fsb.left_to_send -= len;
15864 rack->r_ctl.fsb.left_to_send = 0;
15865 if (rack->r_ctl.fsb.left_to_send < segsiz) {
15866 rack->r_fast_output = 0;
15867 rack->r_ctl.fsb.left_to_send = 0;
15868 /* At the end of fast_output scale up the sb */
15869 SOCKBUF_LOCK(&rack->rc_inp->inp_socket->so_snd);
15870 rack_sndbuf_autoscale(rack);
15871 SOCKBUF_UNLOCK(&rack->rc_inp->inp_socket->so_snd);
15873 if (tp->t_rtttime == 0) {
15874 tp->t_rtttime = ticks;
15875 tp->t_rtseq = startseq;
15876 KMOD_TCPSTAT_INC(tcps_segstimed);
15878 if ((rack->r_ctl.fsb.left_to_send >= segsiz) &&
15883 th = rack->r_ctl.fsb.th;
15887 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
15888 counter_u64_add(rack_fto_send, 1);
15889 slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz);
15890 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0);
15891 #ifdef TCP_ACCOUNTING
15892 crtsc = get_cyclecount();
15893 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15894 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru;
15896 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru);
15897 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15898 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
15900 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val));
15901 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
15902 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz);
15904 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len + segsiz - 1) / segsiz));
15911 rack->r_fast_output = 0;
15916 rack_output(struct tcpcb *tp)
15920 uint32_t sb_offset, s_moff = 0;
15921 int32_t len, flags, error = 0;
15922 struct mbuf *m, *s_mb = NULL;
15924 uint32_t if_hw_tsomaxsegcount = 0;
15925 uint32_t if_hw_tsomaxsegsize;
15926 int32_t segsiz, minseg;
15927 long tot_len_this_send = 0;
15929 struct ip *ip = NULL;
15932 struct ipovly *ipov = NULL;
15934 struct udphdr *udp = NULL;
15935 struct tcp_rack *rack;
15939 uint8_t wanted_cookie = 0;
15940 u_char opt[TCP_MAXOLEN];
15941 unsigned ipoptlen, optlen, hdrlen, ulen=0;
15944 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
15945 unsigned ipsec_optlen = 0;
15948 int32_t idle, sendalot;
15949 int32_t sub_from_prr = 0;
15950 volatile int32_t sack_rxmit;
15951 struct rack_sendmap *rsm = NULL;
15955 int32_t sup_rack = 0;
15956 uint32_t cts, ms_cts, delayed, early;
15957 uint16_t add_flag = RACK_SENT_SP;
15958 uint8_t hpts_calling, doing_tlp = 0;
15959 uint32_t cwnd_to_use, pace_max_seg;
15960 int32_t do_a_prefetch = 0;
15961 int32_t prefetch_rsm = 0;
15962 int32_t orig_len = 0;
15964 int32_t prefetch_so_done = 0;
15965 struct tcp_log_buffer *lgb;
15967 struct sockbuf *sb;
15968 uint64_t ts_val = 0;
15969 #ifdef TCP_ACCOUNTING
15973 struct ip6_hdr *ip6 = NULL;
15976 uint8_t filled_all = 0;
15977 bool hw_tls = false;
15979 /* setup and take the cache hits here */
15980 rack = (struct tcp_rack *)tp->t_fb_ptr;
15981 #ifdef TCP_ACCOUNTING
15983 ts_val = get_cyclecount();
15985 hpts_calling = rack->rc_inp->inp_hpts_calls;
15986 NET_EPOCH_ASSERT();
15987 INP_WLOCK_ASSERT(rack->rc_inp);
15989 if (tp->t_flags & TF_TOE) {
15990 #ifdef TCP_ACCOUNTING
15993 return (tcp_offload_output(tp));
15997 * For TFO connections in SYN_RECEIVED, only allow the initial
15998 * SYN|ACK and those sent by the retransmit timer.
16000 if (IS_FASTOPEN(tp->t_flags) &&
16001 (tp->t_state == TCPS_SYN_RECEIVED) &&
16002 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */
16003 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */
16004 #ifdef TCP_ACCOUNTING
16010 if (rack->r_state) {
16011 /* Use the cache line loaded if possible */
16012 isipv6 = rack->r_is_v6;
16014 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0;
16018 cts = tcp_get_usecs(&tv);
16019 ms_cts = tcp_tv_to_mssectick(&tv);
16020 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) &&
16021 rack->rc_inp->inp_in_hpts) {
16023 * We are on the hpts for some timer but not hptsi output.
16024 * Remove from the hpts unconditionally.
16026 rack_timer_cancel(tp, rack, cts, __LINE__);
16028 /* Are we pacing and late? */
16029 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
16030 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) {
16031 /* We are delayed */
16032 delayed = cts - rack->r_ctl.rc_last_output_to;
16036 /* Do the timers, which may override the pacer */
16037 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
16038 if (rack_process_timers(tp, rack, cts, hpts_calling)) {
16039 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1);
16040 #ifdef TCP_ACCOUNTING
16046 if (rack->rc_in_persist) {
16047 if (rack->rc_inp->inp_in_hpts == 0) {
16048 /* Timer is not running */
16049 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
16051 #ifdef TCP_ACCOUNTING
16056 if ((rack->r_timer_override) ||
16057 (rack->rc_ack_can_sendout_data) ||
16059 (tp->t_state < TCPS_ESTABLISHED)) {
16060 rack->rc_ack_can_sendout_data = 0;
16061 if (rack->rc_inp->inp_in_hpts)
16062 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
16063 } else if (rack->rc_inp->inp_in_hpts) {
16065 * On the hpts we can't send even if ACKNOW is on; we will
16066 * when the hpts fires.
16068 #ifdef TCP_ACCOUNTING
16069 crtsc = get_cyclecount();
16070 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16071 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val);
16073 counter_u64_add(tcp_proc_time[SND_BLOCKED], (crtsc - ts_val));
16074 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16075 tp->tcp_cnt_counters[SND_BLOCKED]++;
16077 counter_u64_add(tcp_cnt_counters[SND_BLOCKED], 1);
16080 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1);
16083 rack->rc_inp->inp_hpts_calls = 0;
16084 /* Finish out both pacing early and late accounting */
16085 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
16086 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) {
16087 early = rack->r_ctl.rc_last_output_to - cts;
16091 rack->r_ctl.rc_agg_delayed += delayed;
16093 } else if (early) {
16094 rack->r_ctl.rc_agg_early += early;
16097 /* Now that early/late accounting is done turn off the flag */
16098 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
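/*
 * Illustrative sketch (hypothetical numbers) of the early/late
 * accounting just completed: if the pacer wanted us to run at
 * rc_last_output_to = 1000500 usecs but we ran at cts = 1000200, the
 * 300 usec of slack is folded into rc_agg_early; had we instead run
 * at cts = 1000900, 400 usec would have been added to rc_agg_delayed.
 * These aggregates are presumably consumed by later pacing decisions
 * to stretch or shrink their slots and pay the debt back.
 */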
16099 rack->r_wanted_output = 0;
16100 rack->r_timer_override = 0;
16101 if ((tp->t_state != rack->r_state) &&
16102 TCPS_HAVEESTABLISHED(tp->t_state)) {
16103 rack_set_state(tp, rack);
16105 if ((rack->r_fast_output) &&
16106 (tp->rcv_numsacks == 0)) {
16110 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error);
16114 inp = rack->rc_inp;
16115 so = inp->inp_socket;
16120 inp = rack->rc_inp;
16122 * For TFO connections in SYN_SENT or SYN_RECEIVED,
16123 * only allow the initial SYN or SYN|ACK and those sent
16124 * by the retransmit timer.
16126 if (IS_FASTOPEN(tp->t_flags) &&
16127 ((tp->t_state == TCPS_SYN_RECEIVED) ||
16128 (tp->t_state == TCPS_SYN_SENT)) &&
16129 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */
16130 (tp->t_rxtshift == 0)) { /* not a retransmit */
16131 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
16132 so = inp->inp_socket;
16134 goto just_return_nolock;
16137 * Determine length of data that should be transmitted, and flags
16138 * that will be used. If there is some data or critical controls
16139 * (SYN, RST) to send, then transmit; otherwise, investigate
16142 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
16143 if (tp->t_idle_reduce) {
16144 if (idle && ((ticks - tp->t_rcvtime) >= tp->t_rxtcur))
16145 rack_cc_after_idle(rack, tp);
16147 tp->t_flags &= ~TF_LASTIDLE;
16149 if (tp->t_flags & TF_MORETOCOME) {
16150 tp->t_flags |= TF_LASTIDLE;
16154 if ((tp->snd_una == tp->snd_max) &&
16155 rack->r_ctl.rc_went_idle_time &&
16156 TSTMP_GT(cts, rack->r_ctl.rc_went_idle_time)) {
16157 idle = cts - rack->r_ctl.rc_went_idle_time;
16158 if (idle > rack_min_probertt_hold) {
16159 /* Count as a probe rtt */
16160 if (rack->in_probe_rtt == 0) {
16161 rack->r_ctl.rc_lower_rtt_us_cts = cts;
16162 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts;
16163 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts;
16164 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts;
16166 rack_exit_probertt(rack, cts);
16171 if (rack_use_fsb && (rack->r_fsb_inited == 0))
16172 rack_init_fsb_block(tp, rack);
16175 * If we've recently taken a timeout, snd_max will be greater than
16176 * snd_nxt. There may be SACK information that allows us to avoid
16177 * resending already delivered data. Adjust snd_nxt accordingly.
16180 cts = tcp_get_usecs(&tv);
16181 ms_cts = tcp_tv_to_mssectick(&tv);
16184 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
16186 if (rack->r_ctl.rc_pace_max_segs == 0)
16187 pace_max_seg = rack->rc_user_set_max_segs * segsiz;
16189 pace_max_seg = rack->r_ctl.rc_pace_max_segs;
16190 sb_offset = tp->snd_max - tp->snd_una;
16191 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
16192 flags = tcp_outflags[tp->t_state];
16193 while (rack->rc_free_cnt < rack_free_cache) {
16194 rsm = rack_alloc(rack);
16196 if (inp->inp_hpts_calls)
16197 /* Retry in a ms */
16198 slot = (1 * HPTS_USEC_IN_MSEC);
16199 so = inp->inp_socket;
16201 goto just_return_nolock;
16203 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext);
16204 rack->rc_free_cnt++;
16207 if (inp->inp_hpts_calls)
16208 inp->inp_hpts_calls = 0;
16212 if (flags & TH_RST) {
16213 SOCKBUF_LOCK(&inp->inp_socket->so_snd);
16214 so = inp->inp_socket;
16218 if (rack->r_ctl.rc_resend) {
16219 /* Retransmit timer */
16220 rsm = rack->r_ctl.rc_resend;
16221 rack->r_ctl.rc_resend = NULL;
16222 rsm->r_flags &= ~RACK_TLP;
16223 len = rsm->r_end - rsm->r_start;
16226 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
16227 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
16228 __func__, __LINE__,
16229 rsm->r_start, tp->snd_una, tp, rack, rsm));
16230 sb_offset = rsm->r_start - tp->snd_una;
16233 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) {
16234 /* We have a retransmit that takes precedence */
16235 rsm->r_flags &= ~RACK_TLP;
16236 if ((!IN_FASTRECOVERY(tp->t_flags)) &&
16237 ((tp->t_flags & TF_WASFRECOVERY) == 0)) {
16238 /* Enter recovery if not induced by a time-out */
16239 rack->r_ctl.rc_rsm_start = rsm->r_start;
16240 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
16241 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
16242 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una);
16245 if (SEQ_LT(rsm->r_start, tp->snd_una)) {
16246 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n",
16247 tp, rack, rsm, rsm->r_start, tp->snd_una);
16250 len = rsm->r_end - rsm->r_start;
16251 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
16252 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
16253 __func__, __LINE__,
16254 rsm->r_start, tp->snd_una, tp, rack, rsm));
16255 sb_offset = rsm->r_start - tp->snd_una;
16261 KMOD_TCPSTAT_INC(tcps_sack_rexmits);
16262 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes,
16264 counter_u64_add(rack_rtm_prr_retran, 1);
16266 } else if (rack->r_ctl.rc_tlpsend) {
16267 /* Tail loss probe */
16273 * Check if we can do a TLP with a RACK'd packet;
16274 * this can happen if we are not doing the rack
16275 * cheat and we skipped to a TLP and it
16278 rsm = rack->r_ctl.rc_tlpsend;
16279 rsm->r_flags |= RACK_TLP;
16281 rack->r_ctl.rc_tlpsend = NULL;
16283 tlen = rsm->r_end - rsm->r_start;
16286 tp->t_sndtlppack++;
16287 tp->t_sndtlpbyte += tlen;
16288 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
16289 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
16290 __func__, __LINE__,
16291 rsm->r_start, tp->snd_una, tp, rack, rsm));
16292 sb_offset = rsm->r_start - tp->snd_una;
16293 cwin = min(tp->snd_wnd, tlen);
16296 if (rack->r_must_retran &&
16299 * Non-Sack and we had a RTO or MTU change, we
16300 * need to retransmit until we reach
16301 * the former snd_max (rack->r_ctl.rc_snd_max_at_rto).
16303 if (SEQ_GT(tp->snd_max, tp->snd_una)) {
16304 int sendwin, flight;
16306 sendwin = min(tp->snd_wnd, tp->snd_cwnd);
16307 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto);
16308 if (flight >= sendwin) {
16309 so = inp->inp_socket;
16311 goto just_return_nolock;
16313 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
16314 KASSERT(rsm != NULL, ("rsm is NULL rack:%p r_must_retran set", rack));
16317 rack->r_must_retran = 0;
16318 rack->r_ctl.rc_out_at_rto = 0;
16319 rack->r_must_retran = 0;
16320 so = inp->inp_socket;
16322 goto just_return_nolock;
16325 len = rsm->r_end - rsm->r_start;
16327 sb_offset = rsm->r_start - tp->snd_una;
16331 /* We must be done if there is nothing outstanding */
16332 rack->r_must_retran = 0;
16333 rack->r_ctl.rc_out_at_rto = 0;
16337 * Enforce a connection sendmap count limit if set
16338 * as long as we are not retransmitting.
16340 if ((rsm == NULL) &&
16341 (rack->do_detection == 0) &&
16342 (V_tcp_map_entries_limit > 0) &&
16343 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) {
16344 counter_u64_add(rack_to_alloc_limited, 1);
16345 if (!rack->alloc_limit_reported) {
16346 rack->alloc_limit_reported = 1;
16347 counter_u64_add(rack_alloc_limited_conns, 1);
16349 so = inp->inp_socket;
16351 goto just_return_nolock;
16353 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) {
16354 /* we are retransmitting the fin */
16358 * When retransmitting data do *not* include the
16359 * FIN. This could happen from a TLP probe.
16365 /* For debugging */
16366 rack->r_ctl.rc_rsm_at_retran = rsm;
16368 if (rsm && rack->r_fsb_inited && rack_use_rsm_rfo &&
16369 ((rsm->r_flags & RACK_HAS_FIN) == 0)) {
16372 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len);
16376 so = inp->inp_socket;
16378 if (do_a_prefetch == 0) {
16379 kern_prefetch(sb, &do_a_prefetch);
16382 #ifdef NETFLIX_SHARED_CWND
16383 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) &&
16384 rack->rack_enable_scwnd) {
16385 /* We are doing cwnd sharing */
16386 if (rack->gp_ready &&
16387 (rack->rack_attempted_scwnd == 0) &&
16388 (rack->r_ctl.rc_scw == NULL) &&
16390 /* The pcbid is in, lets make an attempt */
16391 counter_u64_add(rack_try_scwnd, 1);
16392 rack->rack_attempted_scwnd = 1;
16393 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp,
16394 &rack->r_ctl.rc_scw_index,
16397 if (rack->r_ctl.rc_scw &&
16398 (rack->rack_scwnd_is_idle == 1) &&
16399 sbavail(&so->so_snd)) {
16400 /* we are no longer out of data */
16401 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
16402 rack->rack_scwnd_is_idle = 0;
16404 if (rack->r_ctl.rc_scw) {
16405 /* First lets update and get the cwnd */
16406 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw,
16407 rack->r_ctl.rc_scw_index,
16408 tp->snd_cwnd, tp->snd_wnd, segsiz);
16413 * Get standard flags, and add SYN or FIN if requested by 'hidden'
16416 if (tp->t_flags & TF_NEEDFIN)
16418 if (tp->t_flags & TF_NEEDSYN)
16420 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) {
16422 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
16424 kern_prefetch(end_rsm, &prefetch_rsm);
16429 * If snd_nxt == snd_max and we have transmitted a FIN, the
16430 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a
16431 * negative length. This can also occur when TCP opens up its
16432 * congestion window while receiving additional duplicate acks after
16433 * fast-retransmit because TCP will reset snd_nxt to snd_max after
16434 * the fast-retransmit.
16436 * In the normal retransmit-FIN-only case, however, snd_nxt will be
16437 * set to snd_una, the sb_offset will be 0, and the length may wind
16440 * If sack_rxmit is true we are retransmitting from the scoreboard
16441 * in which case len is already set.
16443 if ((sack_rxmit == 0) &&
16444 (TCPS_HAVEESTABLISHED(tp->t_state) || IS_FASTOPEN(tp->t_flags))) {
16447 avail = sbavail(sb);
16448 if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail)
16449 sb_offset = tp->snd_nxt - tp->snd_una;
16452 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) {
16453 if (rack->r_ctl.rc_tlp_new_data) {
16454 /* TLP is forcing out new data */
16455 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) {
16456 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset);
16458 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) {
16459 if (tp->snd_wnd > sb_offset)
16460 len = tp->snd_wnd - sb_offset;
16464 len = rack->r_ctl.rc_tlp_new_data;
16466 rack->r_ctl.rc_tlp_new_data = 0;
16469 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset);
16471 if ((rack->r_ctl.crte == NULL) && IN_FASTRECOVERY(tp->t_flags) && (len > segsiz)) {
16473 * For prr=off, we need to send only 1 MSS
16474 * at a time. We do this because another sack could
16475 * be arriving that causes us to send retransmits and
16476 * we don't want to be on a long pace due to a larger send
16477 * that keeps us from sending out the retransmit.
16482 uint32_t outstanding;
16484 * We are inside of a Fast recovery episode, this
16485 * is caused by a SACK or 3 dup acks. At this point
16486 * we have sent all the retransmissions and we rely
16487 * on PRR to dictate what we will send in the form of
16491 outstanding = tp->snd_max - tp->snd_una;
16492 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) {
16493 if (tp->snd_wnd > outstanding) {
16494 len = tp->snd_wnd - outstanding;
16495 /* Check to see if we have the data */
16496 if ((sb_offset + len) > avail) {
16497 /* It does not all fit */
16498 if (avail > sb_offset)
16499 len = avail - sb_offset;
16506 } else if (avail > sb_offset) {
16507 len = avail - sb_offset;
16512 if (len > rack->r_ctl.rc_prr_sndcnt) {
16513 len = rack->r_ctl.rc_prr_sndcnt;
16517 counter_u64_add(rack_rtm_prr_newdata, 1);
16520 if (len > segsiz) {
16522 * We should never send more than a MSS when
16523 * retransmitting or sending new data in prr
16524 * mode unless the override flag is on. Most
16525 * likely the PRR algorithm is not going to
16526 * let us send a lot as well :-)
16528 if (rack->r_ctl.rc_prr_sendalot == 0) {
16531 } else if (len < segsiz) {
16533 * Do we send any? The idea here is if the
16534 * send empties the socket buffer we want to
16535 * do it. However, if not, then let's just wait
16536 * for our prr_sndcnt to get bigger.
16540 leftinsb = sbavail(sb) - sb_offset;
16541 if (leftinsb > len) {
16542 /* This send does not empty the sb */
16547 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) {
16549 * If you have not established
16550 * and are not doing FAST OPEN
16553 if ((sack_rxmit == 0) &&
16554 (!IS_FASTOPEN(tp->t_flags))){
16559 if (prefetch_so_done == 0) {
16560 kern_prefetch(so, &prefetch_so_done);
16561 prefetch_so_done = 1;
16564 * Lop off SYN bit if it has already been sent. However, if this is
16565 * SYN-SENT state and if segment contains data and if we don't know
16566 * that foreign host supports TAO, suppress sending segment.
16568 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) &&
16569 ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) {
16571 * When sending additional segments following a TFO SYN|ACK,
16572 * do not include the SYN bit.
16574 if (IS_FASTOPEN(tp->t_flags) &&
16575 (tp->t_state == TCPS_SYN_RECEIVED))
16579 * Be careful not to send data and/or FIN on SYN segments. This
16580 * measure is needed to prevent interoperability problems with not
16581 * fully conformant TCP implementations.
16583 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) {
16588 * On TFO sockets, ensure no data is sent in the following cases:
16590 * - When retransmitting SYN|ACK on a passively-created socket
16592 * - When retransmitting SYN on an actively created socket
16594 * - When sending a zero-length cookie (cookie request) on an
16595 * actively created socket
16597 * - When the socket is in the CLOSED state (RST is being sent)
16599 if (IS_FASTOPEN(tp->t_flags) &&
16600 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) ||
16601 ((tp->t_state == TCPS_SYN_SENT) &&
16602 (tp->t_tfo_client_cookie_len == 0)) ||
16603 (flags & TH_RST))) {
16607 /* Without fast-open there should never be data sent on a SYN */
16608 if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) {
16609 tp->snd_nxt = tp->iss;
16612 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) {
16613 /* We only send 1 MSS if we have a DSACK block */
16614 add_flag |= RACK_SENT_W_DSACK;
16620 * If FIN has been sent but not acked, but we haven't been
16621 * called to retransmit, len will be < 0. Otherwise, window
16622 * shrank after we sent into it. If window shrank to 0,
16623 * cancel pending retransmit, pull snd_nxt back to (closed)
16624 * window, and set the persist timer if it isn't already
16625 * going. If the window didn't close completely, just wait
16628 * We also do a general check here to ensure that we will
16629 * set the persist timer when we have data to send, but a
16630 * 0-byte window. This makes sure the persist timer is set
16631 * even if the packet hits one of the "goto send" lines
16635 if ((tp->snd_wnd == 0) &&
16636 (TCPS_HAVEESTABLISHED(tp->t_state)) &&
16637 (tp->snd_una == tp->snd_max) &&
16638 (sb_offset < (int)sbavail(sb))) {
16639 rack_enter_persist(tp, rack, cts);
16641 } else if ((rsm == NULL) &&
16642 (doing_tlp == 0) &&
16643 (len < pace_max_seg)) {
16645 * We are not sending a maximum sized segment for
16646 * some reason. Should we not send anything (think
16647 * sws or persists)?
16649 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) &&
16650 (TCPS_HAVEESTABLISHED(tp->t_state)) &&
16652 (len < (int)(sbavail(sb) - sb_offset))) {
16654 * Here the rwnd is less than
16655 * the minimum pacing size, this is not a retransmit,
16656 * we are established and
16657 * the send is not the last in the socket buffer
16658 * we send nothing, and we may enter persists
16659 * if nothing is outstanding.
16662 if (tp->snd_max == tp->snd_una) {
16664 * Nothing out we can
16665 * go into persists.
16667 rack_enter_persist(tp, rack, cts);
16669 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) &&
16670 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) &&
16671 (len < (int)(sbavail(sb) - sb_offset)) &&
16674 * Here we are not retransmitting, and
16675 * the cwnd is not so small that we could
16676 * not send at least a min size (rxt timer
16677 * not having gone off), We have 2 segments or
16678 * more already in flight, its not the tail end
16679 * of the socket buffer and the cwnd is blocking
16680 * us from sending out a minimum pacing segment size.
16681 * Lets not send anything.
16684 } else if (((tp->snd_wnd - ctf_outstanding(tp)) <
16685 min((rack->r_ctl.rc_high_rwnd/2), minseg)) &&
16686 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) &&
16687 (len < (int)(sbavail(sb) - sb_offset)) &&
16688 (TCPS_HAVEESTABLISHED(tp->t_state))) {
16690 * Here we have a send window but we have
16691 * filled it up and we can't send another pacing segment.
16692 * We also have in flight more than 2 segments
16693 * and we are not completing the sb i.e. we allow
16694 * the last bytes of the sb to go out even if
16695 * it's not a full pacing segment.
16698 } else if ((rack->r_ctl.crte != NULL) &&
16699 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) &&
16700 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) &&
16701 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) &&
16702 (len < (int)(sbavail(sb) - sb_offset))) {
16704 * Here we are doing hardware pacing, this is not a TLP,
16705 * we are not sending a pace max segment size, there is rwnd
16706 * room to send at least N pace_max_seg, the cwnd is greater
16707 * than or equal to a full pacing segment plus 4 mss and we have 2 or
16708 * more segments in flight and it's not the tail of the socket buffer.
16710 * We don't want to send; instead we need to get more acks in to
16711 * allow us to send a full pacing segment. Normally, if we are pacing
16712 * about the right speed, we should have finished our pacing
16713 * send as most of the acks have come back if we are at the
16714 * right rate. This is a bit fuzzy since return path delay
16715 * can delay the acks, which is why we want to make sure we
16716 * have cwnd space to have a bit more than a max pace segments in flight.
16718 * If we have not gotten our acks back we are pacing at too high a
16719 * rate; delaying will not hurt and will bring our GP estimate down by
16720 * injecting the delay. If we don't do this we will send
16721 * 2 MSS out in response to the acks being clocked in which
16722 * defeats the point of hw-pacing (i.e. to help us get
16723 * larger TSO's out).
16730 /* len will be >= 0 after this point. */
16731 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
16732 rack_sndbuf_autoscale(rack);
16734 * Decide if we can use TCP Segmentation Offloading (if supported by
16737 * TSO may only be used if we are in a pure bulk sending state. The
16738 * presence of TCP-MD5, SACK retransmits, SACK advertisements and IP
16739 * options prevent using TSO. With TSO the TCP header is the same
16740 * (except for the sequence number) for all generated packets. This
16741 * makes it impossible to transmit any options which vary per
16742 * generated segment or packet.
16744 * IPv4 handling has a clear separation of ip options and ip header
16745 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does
16746 * the right thing below to provide length of just ip options and thus
16747 * checking for ipoptlen is enough to decide if ip options are present.
16750 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
16752 * Pre-calculate here as we save another lookup into the darkness
16753 * of IPsec that way and can actually decide if TSO is ok.
16756 if (isipv6 && IPSEC_ENABLED(ipv6))
16757 ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb);
16763 if (IPSEC_ENABLED(ipv4))
16764 ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb);
16768 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
16769 ipoptlen += ipsec_optlen;
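/*
 * Illustrative summary (a sketch mirroring the checks just below):
 * TSO is only attempted when all of the following hold, otherwise we
 * fall back to emitting individual segments:
 *
 *	(tp->t_flags & TF_TSO) && V_tcp_do_tso &&
 *	len > segsiz &&				// more than one segment
 *	tp->t_port == 0 &&			// no UDP tunneling
 *	(tp->t_flags & TF_SIGNATURE) == 0 &&	// no TCP-MD5
 *	tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
 *	ipoptlen == 0				// no IP options (incl. IPsec)
 *
 * since any per-packet option or scoreboard retransmit would break
 * the identical-header assumption TSO relies on.
 */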
16771 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz &&
16772 (tp->t_port == 0) &&
16773 ((tp->t_flags & TF_SIGNATURE) == 0) &&
16774 tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
16778 uint32_t outstanding;
16780 outstanding = tp->snd_max - tp->snd_una;
16781 if (tp->t_flags & TF_SENTFIN) {
16783 * If we sent a fin, snd_max is 1 higher than
16789 if ((rsm->r_flags & RACK_HAS_FIN) == 0)
16792 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una +
16797 recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
16798 (long)TCP_MAXWIN << tp->rcv_scale);
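/*
 * Illustrative sketch (hypothetical numbers): recwin is the free
 * space in the receive buffer, clamped to what the window scale can
 * express. With rcv_scale = 7 the ceiling is
 *
 *	TCP_MAXWIN << 7 = 65535 * 128 = 8388480 bytes (just under 8 MB)
 *
 * so an even larger so_rcv buffer can only be advertised up to that
 * value, and a negative sbspace() result is clamped to zero.
 */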
16801 * Sender silly window avoidance. We transmit under the following
16802 * conditions when len is non-zero:
16804 * - We have a full segment (or more with TSO) - This is the last
16805 * buffer in a write()/send() and we are either idle or running
16806 * NODELAY - we've timed out (e.g. persist timer) - we have more
16807 * than 1/2 the maximum send window's worth of data (receiver may have
16808 * limited the window size) - we need to retransmit
16811 if (len >= segsiz) {
16815 * NOTE! on localhost connections an 'ack' from the remote
16816 * end may occur synchronously with the output and cause us
16817 * to flush a buffer queued with moretocome. XXX
16820 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */
16821 (idle || (tp->t_flags & TF_NODELAY)) &&
16822 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) &&
16823 (tp->t_flags & TF_NOPUSH) == 0) {
16827 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */
16831 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) {
16835 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */
16843 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) &&
16844 (ctf_outstanding(tp) < (segsiz * 2))) {
16846 * We have less than two MSS outstanding (delayed ack)
16847 * and our rwnd will not let us send a full sized
16848 * MSS. Let's go ahead and let this small segment
16849 * out because we want to try to have at least two
16850 * packets inflight to not be caught by delayed ack.
16857 * Sending of standalone window updates.
16859 * Window updates are important when we close our window due to a
16860 * full socket buffer and are opening it again after the application
16861 * reads data from it. Once the window has opened again and the
16862 * remote end starts to send again the ACK clock takes over and
16863 * provides the most current window information.
16865 * We must avoid the silly window syndrome where every read from
16866 * the receive buffer, no matter how small, causes a window update
16867 * to be sent. We also should avoid sending a flurry of window
16868 * updates when the socket buffer had queued a lot of data and the
16869 * application is doing small reads.
16871 * Prevent a flurry of pointless window updates by only sending an
16872 * update when we can increase the advertised window by more than
16873 * 1/4th of the socket buffer capacity. When the buffer is getting
16874 * full or is very small be more aggressive and send an update
16875 * whenever we can increase by two mss sized segments. In all other
16876 * situations the ACK's to new incoming data will carry further
16877 * window increases.
16879 * Don't send an independent window update if a delayed ACK is
16880 * pending (it will get piggy-backed on it) or the remote side
16881 * already has done a half-close and won't send more data. Skip
16882 * this if the connection is in T/TCP half-open state.
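/*
 * Illustrative sketch (hypothetical numbers) of the thresholds tested
 * below: with so_rcv.sb_hiwat = 262144 and segsiz = 1448, a
 * standalone window update is sent once the window can be opened by
 *
 *	adv >= 2 * segsiz = 2896 bytes, and additionally
 *	adv >= sb_hiwat / 4 = 65536 bytes, or
 *	recwin <= sb_hiwat / 8 = 32768 bytes (buffer nearly full), or
 *	sb_hiwat <= 8 * segsiz (a very small buffer);
 *
 * anything smaller waits to be piggy-backed on the next ACK.
 */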
16884 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
16885 !(tp->t_flags & TF_DELACK) &&
16886 !TCPS_HAVERCVDFIN(tp->t_state)) {
16888 * "adv" is the amount we could increase the window, taking
16889 * into account that we are limited by TCP_MAXWIN <<
16896 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
16897 oldwin = (tp->rcv_adv - tp->rcv_nxt);
16901 /* We can't increase the window */
16908 * If the new window size ends up being the same as or less
16909 * than the old size when it is scaled, then don't force
16912 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale)
16915 if (adv >= (int32_t)(2 * segsiz) &&
16916 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) ||
16917 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) ||
16918 so->so_rcv.sb_hiwat <= 8 * segsiz)) {
16922 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) {
16930 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW
16931 * is also a catch-all for the retransmit timer timeout case.
16933 if (tp->t_flags & TF_ACKNOW) {
16937 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) {
16942 * If our state indicates that FIN should be sent and we have not
16943 * yet done so, then we need to send.
16945 if ((flags & TH_FIN) &&
16946 (tp->snd_nxt == tp->snd_una)) {
16951 * No reason to send a segment, just return.
16954 SOCKBUF_UNLOCK(sb);
16955 just_return_nolock:
16957 int app_limited = CTF_JR_SENT_DATA;
16959 if (tot_len_this_send > 0) {
16960 /* Make sure snd_nxt is up to max */
16961 rack->r_ctl.fsb.recwin = recwin;
16962 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz);
16963 if ((error == 0) &&
16965 ((flags & (TH_SYN|TH_FIN)) == 0) &&
16967 (tp->snd_nxt == tp->snd_max) &&
16968 (tp->rcv_numsacks == 0) &&
16969 rack->r_fsb_inited &&
16970 TCPS_HAVEESTABLISHED(tp->t_state) &&
16971 (rack->r_must_retran == 0) &&
16972 ((tp->t_flags & TF_NEEDFIN) == 0) &&
16973 (len > 0) && (orig_len > 0) &&
16974 (orig_len > len) &&
16975 ((orig_len - len) >= segsiz) &&
16977 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) {
16978 /* We can send at least one more MSS using our fsb */
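/*
 * Note: the fsb ("fast send block") caches the prebuilt IP/TCP header
 * template plus the current send mbuf, offset and byte budget
 * (left_to_send), so that rack_fast_output() can emit the remaining
 * data without re-running this full output path.
 */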
16980 rack->r_fast_output = 1;
16981 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
16982 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
16983 rack->r_ctl.fsb.tcp_flags = flags;
16984 rack->r_ctl.fsb.left_to_send = orig_len - len;
16985 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))),
16986 ("rack:%p left_to_send:%u sbavail:%u out:%u",
16987 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb),
16988 (tp->snd_max - tp->snd_una)));
16989 if (rack->r_ctl.fsb.left_to_send < segsiz)
16990 rack->r_fast_output = 0;
16992 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una)))
16993 rack->r_ctl.fsb.rfo_apply_push = 1;
16995 rack->r_ctl.fsb.rfo_apply_push = 0;
16998 rack->r_fast_output = 0;
17001 rack_log_fsb(rack, tp, so, flags,
17002 ipoptlen, orig_len, len, 0,
17003 1, optlen, __LINE__, 1);
17004 if (SEQ_GT(tp->snd_max, tp->snd_nxt))
17005 tp->snd_nxt = tp->snd_max;
17007 int end_window = 0;
17008 uint32_t seq = tp->gput_ack;
17010 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
17013 * Mark the last rsm sent as just-returned (hinting
17014 * that a delayed ack may play a role in any rtt measurement).
17016 rsm->r_just_ret = 1;
17018 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1);
17019 rack->r_ctl.rc_agg_delayed = 0;
17022 rack->r_ctl.rc_agg_early = 0;
17023 if ((ctf_outstanding(tp) +
17024 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)),
17025 minseg)) >= tp->snd_wnd) {
17026 /* We are limited by the rwnd */
17027 app_limited = CTF_JR_RWND_LIMITED;
17028 if (IN_FASTRECOVERY(tp->t_flags))
17029 rack->r_ctl.rc_prr_sndcnt = 0;
17030 } else if (ctf_outstanding(tp) >= sbavail(sb)) {
17031 /* We are limited by what's available -- app limited */
17032 app_limited = CTF_JR_APP_LIMITED;
17033 if (IN_FASTRECOVERY(tp->t_flags))
17034 rack->r_ctl.rc_prr_sndcnt = 0;
17035 } else if ((idle == 0) &&
17036 ((tp->t_flags & TF_NODELAY) == 0) &&
17037 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) &&
17040 * Nodelay is not on and the
17041 * user is sending less than 1 MSS. This
17042 * brings out SWS avoidance so we
17043 * don't send. Another app-limited case.
17045 app_limited = CTF_JR_APP_LIMITED;
17046 } else if (tp->t_flags & TF_NOPUSH) {
17048 * The user has requested no push of
17049 * the last segment and we are
17050 * at the last segment. Another app
17053 app_limited = CTF_JR_APP_LIMITED;
17054 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) {
17056 app_limited = CTF_JR_CWND_LIMITED;
17057 } else if (IN_FASTRECOVERY(tp->t_flags) &&
17058 (rack->rack_no_prr == 0) &&
17059 (rack->r_ctl.rc_prr_sndcnt < segsiz)) {
17060 app_limited = CTF_JR_PRR;
17062 /* Now, why are we not sending here? */
17065 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use);
17068 app_limited = CTF_JR_ASSESSING;
17071 * App limited in some fashion; for our pacing GP
17072 * measurements we don't want any gap (even cwnd).
17073 * Close down the measurement window.
17075 if (rack_cwnd_block_ends_measure &&
17076 ((app_limited == CTF_JR_CWND_LIMITED) ||
17077 (app_limited == CTF_JR_PRR))) {
17079 * The reason we are not sending is
17080 * the cwnd (or prr). We have been configured
17081 * to end the measurement window in
17085 } else if (rack_rwnd_block_ends_measure &&
17086 (app_limited == CTF_JR_RWND_LIMITED)) {
17088 * We are rwnd limited and have been
17089 * configured to end the measurement
17090 * window in this case.
17093 } else if (app_limited == CTF_JR_APP_LIMITED) {
17095 * A true application limited period, we have
17099 } else if (app_limited == CTF_JR_ASSESSING) {
17101 * In the assessing case we hit the end of
17102 * the if/else and had no known reason.
17103 * This will panic us under INVARIANTS.
17105 * If we get this out in logs we need to
17106 * investigate which reason we missed.
17113 if ((tp->t_flags & TF_GPUTINPROG) &&
17114 SEQ_GT(tp->gput_ack, tp->snd_max)) {
17115 /* Mark the last packet as app limited */
17116 tp->gput_ack = tp->snd_max;
17119 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
17120 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
17121 if (rack->r_ctl.rc_app_limited_cnt == 0)
17122 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
17125 * Go out to the end app limited and mark
17126 * this new one as next and move the end_appl up
17129 if (rack->r_ctl.rc_end_appl)
17130 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start;
17131 rack->r_ctl.rc_end_appl = rsm;
17133 rsm->r_flags |= RACK_APP_LIMITED;
17134 rack->r_ctl.rc_app_limited_cnt++;
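/*
 * The rc_first_appl/rc_end_appl pointers and the r_nseq_appl links
 * chain the app-limited send map entries together, so later goodput
 * measurements can recognize (and discount) stretches where the
 * application, not the network, was the bottleneck.
 */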
17137 rack_log_pacing_delay_calc(rack,
17138 rack->r_ctl.rc_app_limited_cnt, seq,
17139 tp->gput_ack, 0, 0, 4, __LINE__, NULL);
17143 /* set the rack tcb into the slot N */
17144 counter_u64_add(rack_paced_segments, 1);
17145 } else if (tot_len_this_send) {
17146 counter_u64_add(rack_unpaced_segments, 1);
17148 /* Check if we need to go into persists or not */
17149 if ((tp->snd_max == tp->snd_una) &&
17150 TCPS_HAVEESTABLISHED(tp->t_state) &&
17152 (sbavail(sb) > tp->snd_wnd) &&
17153 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) {
17154 /* Yes, let's make sure to move to persist before the timer starts */
17155 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
17157 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack);
17158 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use);
17160 #ifdef NETFLIX_SHARED_CWND
17161 if ((sbavail(sb) == 0) &&
17162 rack->r_ctl.rc_scw) {
17163 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
17164 rack->rack_scwnd_is_idle = 1;
17167 #ifdef TCP_ACCOUNTING
17168 if (tot_len_this_send > 0) {
17169 crtsc = get_cyclecount();
17170 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17171 tp->tcp_cnt_counters[SND_OUT_DATA]++;
17173 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1);
17174 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17175 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
17177 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val));
17178 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17179 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz);
17181 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) / segsiz));
17183 crtsc = get_cyclecount();
17184 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17185 tp->tcp_cnt_counters[SND_LIMITED]++;
17187 counter_u64_add(tcp_cnt_counters[SND_LIMITED], 1);
17188 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17189 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val);
17191 counter_u64_add(tcp_proc_time[SND_LIMITED], (crtsc - ts_val));
17198 if (rsm || sack_rxmit)
17199 counter_u64_add(rack_nfto_resend, 1);
17201 counter_u64_add(rack_non_fto_send, 1);
17202 if ((flags & TH_FIN) &&
17205 * We do not transmit a FIN
17206 * with data outstanding. We
17207 * need to make it so all data
17212 /* Enforce stack imposed max seg size if we have one */
17213 if (rack->r_ctl.rc_pace_max_segs &&
17214 (len > rack->r_ctl.rc_pace_max_segs)) {
17216 len = rack->r_ctl.rc_pace_max_segs;
17218 SOCKBUF_LOCK_ASSERT(sb);
17221 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT;
17223 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT;
17226 * Before ESTABLISHED, force sending of initial options unless TCP
17227 * set not to do any options. NOTE: we assume that the IP/TCP header
17228 * plus TCP options always fit in a single mbuf, leaving room for a
17229 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr)
17230 * + optlen <= MCLBYTES
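 *
 * As a rough size check: sizeof(struct tcpiphdr) is 40 bytes and TCP
 * options are bounded at 40 bytes, so even with a generous link
 * header the whole thing stays far below MCLBYTES (typically 2048).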
17235 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
17238 hdrlen = sizeof(struct tcpiphdr);
17241 * Compute options for segment. We only have to care about SYN and
17242 * established connection segments. Options for SYN-ACK segments
17243 * are handled in TCP syncache.
17246 if ((tp->t_flags & TF_NOOPT) == 0) {
17247 /* Maximum segment size. */
17248 if (flags & TH_SYN) {
17249 tp->snd_nxt = tp->iss;
17250 to.to_mss = tcp_mssopt(&inp->inp_inc);
17252 to.to_mss -= V_tcp_udp_tunneling_overhead;
17253 to.to_flags |= TOF_MSS;
17256 * On SYN or SYN|ACK transmits on TFO connections,
17257 * only include the TFO option if it is not a
17258 * retransmit, as the presence of the TFO option may
17259 * have caused the original SYN or SYN|ACK to have
17260 * been dropped by a middlebox.
17262 if (IS_FASTOPEN(tp->t_flags) &&
17263 (tp->t_rxtshift == 0)) {
17264 if (tp->t_state == TCPS_SYN_RECEIVED) {
17265 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
17267 (u_int8_t *)&tp->t_tfo_cookie.server;
17268 to.to_flags |= TOF_FASTOPEN;
17270 } else if (tp->t_state == TCPS_SYN_SENT) {
17272 tp->t_tfo_client_cookie_len;
17274 tp->t_tfo_cookie.client;
17275 to.to_flags |= TOF_FASTOPEN;
17278 * If we wind up having more data to
17279 * send with the SYN than can fit in
17280 * one segment, don't send any more
17281 * until the SYN|ACK comes back from
17288 /* Window scaling. */
17289 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) {
17290 to.to_wscale = tp->request_r_scale;
17291 to.to_flags |= TOF_SCALE;
17294 if ((tp->t_flags & TF_RCVD_TSTMP) ||
17295 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
17296 to.to_tsval = ms_cts + tp->ts_offset;
17297 to.to_tsecr = tp->ts_recent;
17298 to.to_flags |= TOF_TS;
17300 /* Set receive buffer autosizing timestamp. */
17301 if (tp->rfbuf_ts == 0 &&
17302 (so->so_rcv.sb_flags & SB_AUTOSIZE))
17303 tp->rfbuf_ts = tcp_ts_getticks();
17304 /* Selective ACKs. */
17305 if (tp->t_flags & TF_SACK_PERMIT) {
17306 if (flags & TH_SYN)
17307 to.to_flags |= TOF_SACKPERM;
17308 else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
17309 tp->rcv_numsacks > 0) {
17310 to.to_flags |= TOF_SACK;
17311 to.to_nsacks = tp->rcv_numsacks;
17312 to.to_sacks = (u_char *)tp->sackblks;
17315 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
17316 /* TCP-MD5 (RFC2385). */
17317 if (tp->t_flags & TF_SIGNATURE)
17318 to.to_flags |= TOF_SIGNATURE;
17319 #endif /* TCP_SIGNATURE */
17321 /* Processing the options. */
17322 hdrlen += optlen = tcp_addoptions(&to, opt);
17324 * If we wanted a TFO option to be added, but it was unable
17325 * to fit, ensure no data is sent.
17327 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie &&
17328 !(to.to_flags & TOF_FASTOPEN))
17332 if (V_tcp_udp_tunneling_port == 0) {
17333 /* The port was removed?? */
17334 SOCKBUF_UNLOCK(&so->so_snd);
17335 #ifdef TCP_ACCOUNTING
17336 crtsc = get_cyclecount();
17337 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17338 tp->tcp_cnt_counters[SND_OUT_FAIL]++;
17340 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1);
17341 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
17342 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
17344 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val));
17347 return (EHOSTUNREACH);
17349 hdrlen += sizeof(struct udphdr);
17353 ipoptlen = ip6_optlen(tp->t_inpcb);
17356 if (tp->t_inpcb->inp_options)
17357 ipoptlen = tp->t_inpcb->inp_options->m_len -
17358 offsetof(struct ipoption, ipopt_list);
17361 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
17362 ipoptlen += ipsec_optlen;
17366 * Adjust data length if insertion of options will bump the packet
17367 * length beyond the t_maxseg length. Clear the FIN bit because we
17368 * cut off the tail of the segment.
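 *
 * For example, with t_maxseg = 1460 and a 12-byte timestamp option
 * (TCPOLEN_TSTAMP_APPA), a non-TSO send is trimmed to 1448 payload
 * bytes so the segment still fits in one MSS on the wire.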
17370 if (len + optlen + ipoptlen > tp->t_maxseg) {
17372 uint32_t if_hw_tsomax;
17376 /* extract TSO information */
17377 if_hw_tsomax = tp->t_tsomax;
17378 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
17379 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
17380 KASSERT(ipoptlen == 0,
17381 ("%s: TSO can't do IP options", __func__));
17384 * Check if we should limit by maximum payload
17387 if (if_hw_tsomax != 0) {
17388 /* compute maximum TSO length */
17389 max_len = (if_hw_tsomax - hdrlen -
17391 if (max_len <= 0) {
17393 } else if (len > max_len) {
17400 * Prevent the last segment from being fractional
17401 * unless the send sockbuf can be emptied:
17403 max_len = (tp->t_maxseg - optlen);
17404 if ((sb_offset + len) < sbavail(sb)) {
17405 moff = len % (u_int)max_len;
17412 * In case there are too many small fragments don't
17415 if (len <= segsiz) {
17420 * Send the FIN in a separate segment after the bulk
17421 * sending is done. We don't trust the TSO
17422 * implementations to clear the FIN flag on all but
17423 * the last segment.
17425 if (tp->t_flags & TF_NEEDFIN) {
17430 if (optlen + ipoptlen >= tp->t_maxseg) {
17432 * Since we don't have enough space to put
17433 * the IP header chain and the TCP header in
17434 * one packet as required by RFC 7112, don't
17435 * send it. Also ensure that at least one
17436 * byte of the payload can be put into the
17439 SOCKBUF_UNLOCK(&so->so_snd);
17444 len = tp->t_maxseg - optlen - ipoptlen;
17451 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET,
17452 ("%s: len > IP_MAXPACKET", __func__));
17455 if (max_linkhdr + hdrlen > MCLBYTES)
17457 if (max_linkhdr + hdrlen > MHLEN)
17459 panic("tcphdr too big");
17463 * This KASSERT is here to catch edge cases at a well defined place.
17464 * Before, those had triggered (random) panic conditions further
17467 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
17469 (flags & TH_FIN) &&
17472 * We have outstanding data; don't send a FIN by itself!
17477 * Grab a header mbuf, attaching a copy of data to be transmitted,
17478 * and initialize the header from the template for sends on this
17481 hw_tls = (sb->sb_flags & SB_TLS_IFNET) != 0;
17486 if (rack->r_ctl.rc_pace_max_segs)
17487 max_val = rack->r_ctl.rc_pace_max_segs;
17488 else if (rack->rc_user_set_max_segs)
17489 max_val = rack->rc_user_set_max_segs * segsiz;
17493 * We allow a limit on sending with hptsi.
17495 if (len > max_val) {
17500 if (MHLEN < hdrlen + max_linkhdr)
17501 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
17504 m = m_gethdr(M_NOWAIT, MT_DATA);
17507 SOCKBUF_UNLOCK(sb);
17512 m->m_data += max_linkhdr;
17516 * Start the m_copy functions from the closest mbuf to the
17517 * sb_offset in the socket buffer chain.
17519 mb = sbsndptr_noadv(sb, sb_offset, &moff);
17522 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) {
17523 m_copydata(mb, moff, (int)len,
17524 mtod(m, caddr_t)+hdrlen);
17525 if (SEQ_LT(tp->snd_nxt, tp->snd_max))
17526 sbsndptr_adv(sb, mb, len);
17529 struct sockbuf *msb;
17531 if (SEQ_LT(tp->snd_nxt, tp->snd_max))
17535 m->m_next = tcp_m_copym(
17537 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb,
17538 ((rsm == NULL) ? hw_tls : 0)
17539 #ifdef NETFLIX_COPY_ARGS
17543 if (len <= (tp->t_maxseg - optlen)) {
17545 * Must have run out of mbufs for the copy;
17546 * shorten it so we no longer need TSO. Let's
17547 * not put on sendalot since we are low on
17552 if (m->m_next == NULL) {
17553 SOCKBUF_UNLOCK(sb);
17560 if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
17561 if (rsm && (rsm->r_flags & RACK_TLP)) {
17563 * TLP should not count in retran count, but
17566 counter_u64_add(rack_tlp_retran, 1);
17567 counter_u64_add(rack_tlp_retran_bytes, len);
17569 tp->t_sndrexmitpack++;
17570 KMOD_TCPSTAT_INC(tcps_sndrexmitpack);
17571 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len);
17574 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
17578 KMOD_TCPSTAT_INC(tcps_sndpack);
17579 KMOD_TCPSTAT_ADD(tcps_sndbyte, len);
17581 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
17586 * If we're sending everything we've got, set PUSH. (This
17587 * will keep happy those implementations which only give
17588 * data to the user when a buffer fills or a PUSH comes in.)
17590 if (sb_offset + len == sbused(sb) &&
17592 !(flags & TH_SYN)) {
17594 add_flag |= RACK_HAD_PUSH;
17597 SOCKBUF_UNLOCK(sb);
17599 SOCKBUF_UNLOCK(sb);
17600 if (tp->t_flags & TF_ACKNOW)
17601 KMOD_TCPSTAT_INC(tcps_sndacks);
17602 else if (flags & (TH_SYN | TH_FIN | TH_RST))
17603 KMOD_TCPSTAT_INC(tcps_sndctrl);
17605 KMOD_TCPSTAT_INC(tcps_sndwinup);
17607 m = m_gethdr(M_NOWAIT, MT_DATA);
17614 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
17616 M_ALIGN(m, hdrlen);
17619 m->m_data += max_linkhdr;
17622 SOCKBUF_UNLOCK_ASSERT(sb);
17623 m->m_pkthdr.rcvif = (struct ifnet *)0;
17625 mac_inpcb_create_mbuf(inp, m);
17627 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) {
17630 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
17633 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
17634 th = rack->r_ctl.fsb.th;
17635 udp = rack->r_ctl.fsb.udp;
17638 ulen = hdrlen + len - sizeof(struct ip6_hdr);
17640 ulen = hdrlen + len - sizeof(struct ip);
17641 udp->uh_ulen = htons(ulen);
17646 ip6 = mtod(m, struct ip6_hdr *);
17648 udp = (struct udphdr *)((caddr_t)ip6 + ipoptlen + sizeof(struct ip6_hdr));
17649 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
17650 udp->uh_dport = tp->t_port;
17651 ulen = hdrlen + len - sizeof(struct ip6_hdr);
17652 udp->uh_ulen = htons(ulen);
17653 th = (struct tcphdr *)(udp + 1);
17655 th = (struct tcphdr *)(ip6 + 1);
17656 tcpip_fillheaders(inp, tp->t_port, ip6, th);
17660 ip = mtod(m, struct ip *);
17662 ipov = (struct ipovly *)ip;
17665 udp = (struct udphdr *)((caddr_t)ip + ipoptlen + sizeof(struct ip));
17666 udp->uh_sport = htons(V_tcp_udp_tunneling_port);
17667 udp->uh_dport = tp->t_port;
17668 ulen = hdrlen + len - sizeof(struct ip);
17669 udp->uh_ulen = htons(ulen);
17670 th = (struct tcphdr *)(udp + 1);
17672 th = (struct tcphdr *)(ip + 1);
17673 tcpip_fillheaders(inp, tp->t_port, ip, th);
17677 * Fill in fields, remembering maximum advertised window for use in
17678 * delaying messages about window sizes. If resending a FIN, be sure
17679 * not to use a new sequence number.
17681 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
17682 tp->snd_nxt == tp->snd_max)
17685 * If we are starting a connection, send ECN setup SYN packet. If we
17686 * are on a retransmit, we may resend those bits a number of times
17689 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn == 1) {
17690 if (tp->t_rxtshift >= 1) {
17691 if (tp->t_rxtshift <= V_tcp_ecn_maxretries)
17692 flags |= TH_ECE | TH_CWR;
17694 flags |= TH_ECE | TH_CWR;
17696 /* Handle parallel SYN for ECN */
17697 if ((tp->t_state == TCPS_SYN_RECEIVED) &&
17698 (tp->t_flags2 & TF2_ECN_SND_ECE)) {
17700 tp->t_flags2 &= ~TF2_ECN_SND_ECE;
17702 if (TCPS_HAVEESTABLISHED(tp->t_state) &&
17703 (tp->t_flags2 & TF2_ECN_PERMIT)) {
17705 * If the peer has ECN, mark data packets with ECN capable
17706 * transmission (ECT). Ignore pure ack packets,
17709 if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max) &&
17710 (sack_rxmit == 0)) {
17713 ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20);
17716 ip->ip_tos |= IPTOS_ECN_ECT0;
17717 KMOD_TCPSTAT_INC(tcps_ecn_ect0);
17719 * Reply with proper ECN notifications.
17720 * Only set CWR on new data segments.
17722 if (tp->t_flags2 & TF2_ECN_SND_CWR) {
17724 tp->t_flags2 &= ~TF2_ECN_SND_CWR;
17727 if (tp->t_flags2 & TF2_ECN_SND_ECE)
17731 * If we are doing retransmissions, then snd_nxt will not reflect
17732 * the first unsent octet. For ACK only packets, we do not want the
17733 * sequence number of the retransmitted packet, we want the sequence
17734 * number of the next unsent octet. So, if there is no data (and no
17735 * SYN or FIN), use snd_max instead of snd_nxt when filling in
17736 * ti_seq. But if we are in persist state, snd_max might reflect
17737 * one byte beyond the right edge of the window, so use snd_nxt in
17738 * that case, since we know we aren't doing a retransmission.
17739 * (retransmit and persist are mutually exclusive...)
17741 if (sack_rxmit == 0) {
17742 if (len || (flags & (TH_SYN | TH_FIN))) {
17743 th->th_seq = htonl(tp->snd_nxt);
17744 rack_seq = tp->snd_nxt;
17746 th->th_seq = htonl(tp->snd_max);
17747 rack_seq = tp->snd_max;
17750 th->th_seq = htonl(rsm->r_start);
17751 rack_seq = rsm->r_start;
17753 th->th_ack = htonl(tp->rcv_nxt);
17754 th->th_flags = flags;
17756 * Calculate receive window. Don't shrink window, but avoid silly window syndrome.
17758 * If a RST segment is sent, advertise a window of zero.
17760 if (flags & TH_RST) {
17763 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) &&
17764 recwin < (long)segsiz) {
17767 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
17768 recwin < (long)(tp->rcv_adv - tp->rcv_nxt))
17769 recwin = (long)(tp->rcv_adv - tp->rcv_nxt);
17773 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or
17774 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is
17775 * handled in syncache.
17777 if (flags & TH_SYN)
17778 th->th_win = htons((u_short)
17779 (min(sbspace(&so->so_rcv), TCP_MAXWIN)));
17781 /* Avoid shrinking window with window scaling. */
17782 recwin = roundup2(recwin, 1 << tp->rcv_scale);
17783 th->th_win = htons((u_short)(recwin >> tp->rcv_scale));
17786 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0
17787 * window. This may cause the remote transmitter to stall. This
17788 * flag tells soreceive() to disable delayed acknowledgements when
17789 * draining the buffer. This can occur if the receiver is
17790 * attempting to read more data than can be buffered prior to
17791 * transmitting on the connection.
17793 if (th->th_win == 0) {
17794 tp->t_sndzerowin++;
17795 tp->t_flags |= TF_RXWIN0SENT;
17797 tp->t_flags &= ~TF_RXWIN0SENT;
17798 tp->snd_up = tp->snd_una; /* drag it along, it's deprecated */
17799 /* Now, are we using the fsb? If so, copy the template data to the mbuf. */
17800 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) {
17803 cpto = mtod(m, uint8_t *);
17804 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
17806 * We have just copied in:
17808 * <optional udphdr>
17809 * tcphdr (no options)
17811 * We need to grab the correct pointers into the mbuf
17812 * for both the tcp header, and possibly the udp header (if tunneling).
17813 * We do this by using the offset in the copy buffer and adding it
17814 * to the mbuf base pointer (cpto).
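 *
 * For example, if the TCP header begins 40 bytes into the saved
 * template (an IPv6 header with no IP options), then after the copy
 * th ends up exactly 40 bytes past cpto in the new mbuf.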
17818 ip6 = mtod(m, struct ip6_hdr *);
17821 ip = mtod(m, struct ip *);
17822 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
17823 /* If we have a udp header let's set it into the mbuf as well */
17825 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr));
17827 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
17828 if (to.to_flags & TOF_SIGNATURE) {
17830 * Calculate MD5 signature and put it into the place
17831 * determined before.
17832 * NOTE: since TCP options buffer doesn't point into
17833 * mbuf's data, calculate offset and use it.
17835 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th,
17836 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
17838 * Do not send segment if the calculation of MD5
17839 * digest has failed.
17846 bcopy(opt, th + 1, optlen);
17847 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
17850 * Put TCP length in extended header, and then checksum extended
17853 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */
17857 * ip6_plen does not need to be filled now, and will be filled
17861 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
17862 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
17863 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
17864 th->th_sum = htons(0);
17865 UDPSTAT_INC(udps_opackets);
17867 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
17868 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
17869 th->th_sum = in6_cksum_pseudo(ip6,
17870 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
17875 #if defined(INET6) && defined(INET)
17881 m->m_pkthdr.csum_flags = CSUM_UDP;
17882 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
17883 udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
17884 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
17885 th->th_sum = htons(0);
17886 UDPSTAT_INC(udps_opackets);
17888 m->m_pkthdr.csum_flags = CSUM_TCP;
17889 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
17890 th->th_sum = in_pseudo(ip->ip_src.s_addr,
17891 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
17892 IPPROTO_TCP + len + optlen));
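/*
 * In both the IPv6 and IPv4 cases above, only the pseudo-header sum
 * is stored in th_sum (or uh_sum); csum_flags/csum_data tell the
 * driver or NIC where to deposit the final checksum when the packet
 * is handed down with checksum offload enabled.
 */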
17894 /* IP version must be set here for ipv4/ipv6 checking later */
17895 KASSERT(ip->ip_v == IPVERSION,
17896 ("%s: IP version incorrect: %d", __func__, ip->ip_v));
17900 * Enable TSO and specify the size of the segments. The TCP pseudo
17901 * header checksum is always provided. XXX: Fixme: This is currently
17902 * not the case for IPv6.
17905 KASSERT(len > tp->t_maxseg - optlen,
17906 ("%s: len <= tso_segsz", __func__));
17907 m->m_pkthdr.csum_flags |= CSUM_TSO;
17908 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
17910 KASSERT(len + hdrlen == m_length(m, NULL),
17911 ("%s: mbuf chain different than expected: %d + %u != %u",
17912 __func__, len, hdrlen, m_length(m, NULL)));
17915 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */
17916 hhook_run_tcp_est_out(tp, th, &to, len, tso);
17918 /* We're getting ready to send; log now. */
17919 if (tp->t_logstate != TCP_LOG_STATE_OFF) {
17920 union tcp_log_stackspecific log;
17922 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
17923 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
17924 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
17925 if (rack->rack_no_prr)
17926 log.u_bbr.flex1 = 0;
17928 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
17929 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
17930 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
17931 log.u_bbr.flex4 = orig_len;
17933 log.u_bbr.flex5 = 0x80000000;
17935 log.u_bbr.flex5 = 0;
17936 /* Save off the early/late values */
17937 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
17938 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
17939 log.u_bbr.bw_inuse = rack_get_bw(rack);
17940 if (rsm || sack_rxmit) {
17942 log.u_bbr.flex8 = 2;
17944 log.u_bbr.flex8 = 1;
17946 log.u_bbr.flex8 = 0;
17948 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm);
17949 log.u_bbr.flex7 = mark;
17950 log.u_bbr.flex7 <<= 8;
17951 log.u_bbr.flex7 |= pass;
17952 log.u_bbr.pkts_out = tp->t_maxseg;
17953 log.u_bbr.timeStamp = cts;
17954 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
17955 log.u_bbr.lt_epoch = cwnd_to_use;
17956 log.u_bbr.delivered = sendalot;
17957 lgb = tcp_log_event_(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK,
17958 len, &log, false, NULL, NULL, 0, &tv);
17963 * Fill in IP length and desired time to live and send to IP level.
17964 * There should be a better way to handle ttl and tos; we could keep
17965 * them in the template, but need a way to checksum without them.
17968 * m->m_pkthdr.len should have been set before cksum calculation,
17969 * because in6_cksum() needs it.
17974 * we separately set hoplimit for every segment, since the
17975 * user might want to change the value via setsockopt. Also,
17976 * desired default hop limit might be changed via Neighbor
17979 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL);
17982 * Set the packet size here for the benefit of DTrace
17983 * probes. ip6_output() will set it properly; it's supposed
17984 * to include the option header lengths as well.
17986 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
17988 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
17989 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
17991 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
17993 if (tp->t_state == TCPS_SYN_SENT)
17994 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th);
17996 TCP_PROBE5(send, NULL, tp, ip6, tp, th);
17997 /* TODO: IPv6 IP6TOS_ECT bit on */
17998 error = ip6_output(m,
17999 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
18000 inp->in6p_outputopts,
18005 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0),
18008 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL)
18009 mtu = inp->inp_route6.ro_nh->nh_mtu;
18012 #if defined(INET) && defined(INET6)
18017 ip->ip_len = htons(m->m_pkthdr.len);
18019 if (inp->inp_vflag & INP_IPV6PROTO)
18020 ip->ip_ttl = in6_selecthlim(inp, NULL);
18022 rack->r_ctl.fsb.hoplimit = ip->ip_ttl;
18024 * If we do path MTU discovery, then we set DF on every
18025 * packet. This might not be the best thing to do according
18026 * to RFC3390 Section 2. However, the tcp hostcache mitigates
18027 * the problem so it affects only the first tcp connection
18030 * NB: Don't set DF on small MTU/MSS to have a safe
18033 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
18034 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
18035 if (tp->t_port == 0 || len < V_tcp_minmss) {
18036 ip->ip_off |= htons(IP_DF);
18039 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
18042 if (tp->t_state == TCPS_SYN_SENT)
18043 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);
18045 TCP_PROBE5(send, NULL, tp, ip, tp, th);
18047 error = ip_output(m,
18048 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
18054 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0,
18056 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL)
18057 mtu = inp->inp_route.ro_nh->nh_mtu;
18063 lgb->tlb_errno = error;
18067 * In transmit state, time the transmission and arrange for the
18068 * retransmit. In persist state, just set snd_max.
18071 rack->forced_ack = 0; /* If we send something zap the FA flag */
18072 if (rsm && (doing_tlp == 0)) {
18073 /* Set we retransmitted */
18074 rack->rc_gp_saw_rec = 1;
18076 if (cwnd_to_use > tp->snd_ssthresh) {
18077 /* Set we sent in CA */
18078 rack->rc_gp_saw_ca = 1;
18080 /* Set we sent in SS */
18081 rack->rc_gp_saw_ss = 1;
18084 if (TCPS_HAVEESTABLISHED(tp->t_state) &&
18085 (tp->t_flags & TF_SACK_PERMIT) &&
18086 tp->rcv_numsacks > 0)
18087 tcp_clean_dsack_blocks(tp);
18088 tot_len_this_send += len;
18090 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1);
18091 else if (len == 1) {
18092 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1);
18093 } else if (len > 1) {
18096 idx = (len / segsiz) + 3;
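/*
 * Bucket the send by how many full segments it carried: a single
 * full-MSS send lands in rack_out_size[4], a two-segment send in
 * rack_out_size[5], and so on, with everything at or beyond the
 * table size collapsing into the last bucket (checked just below).
 */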
18097 if (idx >= TCP_MSS_ACCT_ATIMER)
18098 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
18100 counter_u64_add(rack_out_size[idx], 1);
18103 if ((rack->rack_no_prr == 0) &&
18106 if (rack->r_ctl.rc_prr_sndcnt >= len)
18107 rack->r_ctl.rc_prr_sndcnt -= len;
18109 rack->r_ctl.rc_prr_sndcnt = 0;
18112 if (doing_tlp && (rsm == NULL)) {
18113 /* New send doing a TLP */
18114 add_flag |= RACK_TLP;
18115 tp->t_sndtlppack++;
18116 tp->t_sndtlpbyte += len;
18118 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error,
18119 rack_to_usec_ts(&tv),
18120 rsm, add_flag, s_mb, s_moff);
18123 if ((error == 0) &&
18125 (tp->snd_una == tp->snd_max))
18126 rack->r_ctl.rc_tlp_rxt_last_time = cts;
18128 tcp_seq startseq = tp->snd_nxt;
18130 /* Track our lost count */
18131 if (rsm && (doing_tlp == 0))
18132 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start;
18134 * Advance snd_nxt over sequence space of this segment.
18137 /* We don't log or do anything with errors */
18139 if (doing_tlp == 0) {
18142 * Not a retransmission of some
18143 * sort, new data is going out so
18144 * clear our TLP count and flag.
18146 rack->rc_tlp_in_progress = 0;
18147 rack->r_ctl.rc_tlp_cnt_out = 0;
18151 * We have just sent a TLP, mark that it is true
18152 * and make sure our in progress is set so we
18153 * continue to check the count.
18155 rack->rc_tlp_in_progress = 1;
18156 rack->r_ctl.rc_tlp_cnt_out++;
18158 if (flags & (TH_SYN | TH_FIN)) {
18159 if (flags & TH_SYN)
18161 if (flags & TH_FIN) {
18163 tp->t_flags |= TF_SENTFIN;
18166 /* In the ENOBUFS case we do *not* update snd_max */
18170 tp->snd_nxt += len;
18171 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
18172 if (tp->snd_una == tp->snd_max) {
18174 * Update the time we just added data since
18175 * none was outstanding.
18177 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
18178 tp->t_acktime = ticks;
18180 tp->snd_max = tp->snd_nxt;
18182 * Time this transmission if not a retransmission and
18183 * not currently timing anything.
18184 * This is only relevant in case of switching back to
18187 if (tp->t_rtttime == 0) {
18188 tp->t_rtttime = ticks;
18189 tp->t_rtseq = startseq;
18190 KMOD_TCPSTAT_INC(tcps_segstimed);
18193 ((tp->t_flags & TF_GPUTINPROG) == 0))
18194 rack_start_gp_measurement(tp, rack, startseq, sb_offset);
18197 * If we are doing FO we need to update the mbuf position and subtract
18198 * what we sent. (This matters when the peer sends us duplicate
18199 * information and we thus want to send a DSACK.)
18201 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO
18202 * turned off? If not then we are going to echo multiple DSACK blocks
18203 * out (with the TSO), which we should not be doing.
18205 if (rack->r_fast_output && len) {
18206 if (rack->r_ctl.fsb.left_to_send > len)
18207 rack->r_ctl.fsb.left_to_send -= len;
18209 rack->r_ctl.fsb.left_to_send = 0;
18210 if (rack->r_ctl.fsb.left_to_send < segsiz)
18211 rack->r_fast_output = 0;
18212 if (rack->r_fast_output) {
18213 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
18214 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
18220 rack->r_ctl.rc_agg_delayed = 0;
18223 rack->r_ctl.rc_agg_early = 0;
18224 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */
18226 * Failures do not advance the seq counter above. For the
18227 * case of ENOBUFS we will fall out and retry in 1ms with
18228 * the hpts. Everything else will just have to retransmit
18231 * In any case, we do not want to loop around for another
18232 * send without a good reason.
18237 tp->t_softerror = error;
18238 #ifdef TCP_ACCOUNTING
18239 crtsc = get_cyclecount();
18240 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18241 tp->tcp_cnt_counters[SND_OUT_FAIL]++;
18243 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1);
18244 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18245 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
18247 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val));
18253 * Pace us right away to retry in some
18256 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
18257 if (rack->rc_enobuf < 0x7f)
18259 if (slot < (10 * HPTS_USEC_IN_MSEC))
18260 slot = 10 * HPTS_USEC_IN_MSEC;
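/*
 * Net effect: the retry delay is (1 + rc_enobuf) milliseconds with a
 * 10 ms floor, so the first several consecutive ENOBUFS failures all
 * retry after 10 ms, and the backoff only starts stretching once more
 * than nine have occurred in a row (rc_enobuf is capped at 0x7f).
 */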
18261 if (rack->r_ctl.crte != NULL) {
18262 counter_u64_add(rack_saw_enobuf_hw, 1);
18263 tcp_rl_log_enobuf(rack->r_ctl.crte);
18265 counter_u64_add(rack_saw_enobuf, 1);
18269 * For some reason the interface we used initially
18270 * to send segments changed to another or lowered
18271 * its MTU. If TSO was active we either got an
18272 * interface without TSO capabilities or TSO was
18273 * turned off. If we obtained mtu from ip_output()
18274 * then update it and try again.
18277 tp->t_flags &= ~TF_TSO;
18279 tcp_mss_update(tp, -1, mtu, NULL, NULL);
18282 slot = 10 * HPTS_USEC_IN_MSEC;
18283 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0);
18284 #ifdef TCP_ACCOUNTING
18285 crtsc = get_cyclecount();
18286 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18287 tp->tcp_cnt_counters[SND_OUT_FAIL]++;
18289 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1);
18290 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18291 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
18293 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val));
18298 counter_u64_add(rack_saw_enetunreach, 1);
18302 if (TCPS_HAVERCVDSYN(tp->t_state)) {
18303 tp->t_softerror = error;
18307 slot = 10 * HPTS_USEC_IN_MSEC;
18308 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0);
18309 #ifdef TCP_ACCOUNTING
18310 crtsc = get_cyclecount();
18311 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18312 tp->tcp_cnt_counters[SND_OUT_FAIL]++;
18314 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1);
18315 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18316 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
18318 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val));
18324 rack->rc_enobuf = 0;
18325 if (IN_FASTRECOVERY(tp->t_flags) && rsm)
18326 rack->r_ctl.retran_during_recovery += len;
18328 KMOD_TCPSTAT_INC(tcps_sndtotal);
18331 * Data sent (as far as we can tell). If this advertises a larger
18332 * window than any other segment, then remember the size of the
18333 * advertised window. Any pending ACK has now been sent.
18335 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
18336 tp->rcv_adv = tp->rcv_nxt + recwin;
18338 tp->last_ack_sent = tp->rcv_nxt;
18339 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
18342 /* Do we need to turn off sendalot? */
18343 if (rack->r_ctl.rc_pace_max_segs &&
18344 (tot_len_this_send >= rack->r_ctl.rc_pace_max_segs)) {
18345 /* We hit our max. */
18347 } else if ((rack->rc_user_set_max_segs) &&
18348 (tot_len_this_send >= (rack->rc_user_set_max_segs * segsiz))) {
18349 /* We hit the user defined max */
18353 if ((error == 0) && (flags & TH_FIN))
18354 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN);
18355 if (flags & TH_RST) {
18357 * We don't send again after sending a RST.
18362 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
18363 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) {
18365 * Get our pacing rate; if an error
18366 * occurred in sending (ENOBUFS) we would
18367 * hit the else if with slot preset. Other
18370 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz);
18373 (rsm->r_flags & RACK_HAS_SYN) == 0 &&
18374 rack->use_rack_rr) {
18375 /* It's a retransmit and we use the rack cheat? */
18377 (rack->rc_always_pace == 0) ||
18378 (rack->r_rr_config == 1)) {
18380 * We have no pacing set or we
18381 * are using old-style rack or
18382 * we are overridden to use the old 1ms pacing.
18384 slot = rack->r_ctl.rc_min_to;
18387 /* We have sent; clear the flag */
18388 rack->r_ent_rec_ns = 0;
18389 if (rack->r_must_retran) {
18391 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start);
18392 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) {
18394 * We have retransmitted all.
18396 rack->r_must_retran = 0;
18397 rack->r_ctl.rc_out_at_rto = 0;
18399 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) {
18401 * Sending new data will also kill
18404 rack->r_must_retran = 0;
18405 rack->r_ctl.rc_out_at_rto = 0;
18408 rack->r_ctl.fsb.recwin = recwin;
18409 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) &&
18410 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) {
18412 * We hit an RTO and have now passed snd_max at the RTO;
18413 * clear all the WAS flags.
18415 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY);
18418 /* set the rack tcb into the slot N */
18419 counter_u64_add(rack_paced_segments, 1);
18420 if ((error == 0) &&
18422 ((flags & (TH_SYN|TH_FIN)) == 0) &&
18424 (tp->snd_nxt == tp->snd_max) &&
18426 (tp->rcv_numsacks == 0) &&
18427 rack->r_fsb_inited &&
18428 TCPS_HAVEESTABLISHED(tp->t_state) &&
18429 (rack->r_must_retran == 0) &&
18430 ((tp->t_flags & TF_NEEDFIN) == 0) &&
18431 (len > 0) && (orig_len > 0) &&
18432 (orig_len > len) &&
18433 ((orig_len - len) >= segsiz) &&
18435 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) {
18436 /* We can send at least one more MSS using our fsb */
18438 rack->r_fast_output = 1;
18439 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
18440 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
18441 rack->r_ctl.fsb.tcp_flags = flags;
18442 rack->r_ctl.fsb.left_to_send = orig_len - len;
18443 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))),
18444 ("rack:%p left_to_send:%u sbavail:%u out:%u",
18445 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb),
18446 (tp->snd_max - tp->snd_una)));
18447 if (rack->r_ctl.fsb.left_to_send < segsiz)
18448 rack->r_fast_output = 0;
18450 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una)))
18451 rack->r_ctl.fsb.rfo_apply_push = 1;
18453 rack->r_ctl.fsb.rfo_apply_push = 0;
18456 rack->r_fast_output = 0;
18457 rack_log_fsb(rack, tp, so, flags,
18458 ipoptlen, orig_len, len, error,
18459 (rsm == NULL), optlen, __LINE__, 2);
18460 } else if (sendalot) {
18464 counter_u64_add(rack_unpaced_segments, 1);
18466 if ((error == 0) &&
18468 ((flags & (TH_SYN|TH_FIN)) == 0) &&
18471 (tp->rcv_numsacks == 0) &&
18472 (tp->snd_nxt == tp->snd_max) &&
18473 (rack->r_must_retran == 0) &&
18474 rack->r_fsb_inited &&
18475 TCPS_HAVEESTABLISHED(tp->t_state) &&
18476 ((tp->t_flags & TF_NEEDFIN) == 0) &&
18477 (len > 0) && (orig_len > 0) &&
18478 (orig_len > len) &&
18479 ((orig_len - len) >= segsiz) &&
18481 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) {
18482 /* we can use fast_output for more */
18484 rack->r_fast_output = 1;
18485 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
18486 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
18487 rack->r_ctl.fsb.tcp_flags = flags;
18488 rack->r_ctl.fsb.left_to_send = orig_len - len;
18489 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))),
18490 ("rack:%p left_to_send:%u sbavail:%u out:%u",
18491 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb),
18492 (tp->snd_max - tp->snd_una)));
18493 if (rack->r_ctl.fsb.left_to_send < segsiz) {
18494 rack->r_fast_output = 0;
18496 if (rack->r_fast_output) {
18497 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una)))
18498 rack->r_ctl.fsb.rfo_apply_push = 1;
18500 rack->r_ctl.fsb.rfo_apply_push = 0;
18501 rack_log_fsb(rack, tp, so, flags,
18502 ipoptlen, orig_len, len, error,
18503 (rsm == NULL), optlen, __LINE__, 3);
18505 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error);
18515 counter_u64_add(rack_unpaced_segments, 1);
18517 /* Ensure that when we leave, snd_nxt points to the top */
18518 if (SEQ_GT(tp->snd_max, tp->snd_nxt))
18519 tp->snd_nxt = tp->snd_max;
18520 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0);
18521 #ifdef TCP_ACCOUNTING
18522 crtsc = get_cyclecount() - ts_val;
18523 if (tot_len_this_send) {
18524 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18525 tp->tcp_cnt_counters[SND_OUT_DATA]++;
18527 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1);
18528 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18529 tp->tcp_proc_time[SND_OUT_DATA] += crtsc;
18531 counter_u64_add(tcp_proc_time[SND_OUT_DATA], crtsc);
18532 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18533 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz);
18535 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) /segsiz));
18537 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18538 tp->tcp_cnt_counters[SND_OUT_ACK]++;
18540 counter_u64_add(tcp_cnt_counters[SND_OUT_ACK], 1);
18541 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
18542 tp->tcp_proc_time[SND_OUT_ACK] += crtsc;
18544 counter_u64_add(tcp_proc_time[SND_OUT_ACK], crtsc);
18548 if (error == ENOBUFS)
18554 rack_update_seg(struct tcp_rack *rack)
18558 orig_val = rack->r_ctl.rc_pace_max_segs;
18559 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
18560 if (orig_val != rack->r_ctl.rc_pace_max_segs)
18561 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL);
18565 rack_mtu_change(struct tcpcb *tp)
18568 * The MSS may have changed
18570 struct tcp_rack *rack;
18572 rack = (struct tcp_rack *)tp->t_fb_ptr;
18573 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) {
18575 * The MTU has changed; we need to resend everything
18576 * since all we have sent is lost. We first fix
18577 * up the MTU, though.
18579 rack_set_pace_segments(tp, rack, __LINE__, NULL);
18580 /* We treat this like a full retransmit timeout without the cwnd adjustment */
18581 rack_remxt_tmr(tp);
18582 rack->r_fast_output = 0;
18583 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp,
18584 rack->r_ctl.rc_sacked);
18585 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max;
18586 rack->r_must_retran = 1;
18589 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
18590 /* We don't use snd_nxt to retransmit */
18591 tp->snd_nxt = tp->snd_max;
18595 rack_set_profile(struct tcp_rack *rack, int prof)
18599 /* pace_always=1 */
18600 if (rack->rc_always_pace == 0) {
18601 if (tcp_can_enable_pacing() == 0)
18604 rack->rc_always_pace = 1;
18605 if (rack->use_fixed_rate || rack->gp_ready)
18606 rack_set_cc_pacing(rack);
18607 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
18608 rack->rack_attempt_hdwr_pace = 0;
18610 if (rack_use_cmp_acks)
18611 rack->r_use_cmp_ack = 1;
18612 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) &&
18613 rack->r_use_cmp_ack)
18614 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
18616 rack->rack_enable_scwnd = 1;
18618 rack->rc_gp_dyn_mul = 1;
18620 rack->r_ctl.rack_per_of_gp_ca = 100;
18622 rack->r_rr_config = 3;
18624 rack->r_ctl.rc_no_push_at_mrtt = 2;
18626 rack->rc_pace_to_cwnd = 1;
18627 rack->rc_pace_fill_if_rttin_range = 0;
18628 rack->rtt_limit_mul = 0;
18630 rack->rack_no_prr = 1;
18632 rack->r_limit_scw = 1;
18634 rack->r_ctl.rack_per_of_gp_rec = 90;
18637 } else if (prof == 3) {
18638 /* Same as profile one except fill_cw becomes 2 (less aggressive set) */
18639 /* pace_always=1 */
18640 if (rack->rc_always_pace == 0) {
18641 if (tcp_can_enable_pacing() == 0)
18644 rack->rc_always_pace = 1;
18645 if (rack->use_fixed_rate || rack->gp_ready)
18646 rack_set_cc_pacing(rack);
18647 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
18648 rack->rack_attempt_hdwr_pace = 0;
18650 if (rack_use_cmp_acks)
18651 rack->r_use_cmp_ack = 1;
18652 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) &&
18653 rack->r_use_cmp_ack)
18654 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
18656 rack->rack_enable_scwnd = 1;
18658 rack->rc_gp_dyn_mul = 1;
18660 rack->r_ctl.rack_per_of_gp_ca = 100;
18662 rack->r_rr_config = 3;
18664 rack->r_ctl.rc_no_push_at_mrtt = 2;
18666 rack->rc_pace_to_cwnd = 1;
18667 rack->r_fill_less_agg = 1;
18668 rack->rc_pace_fill_if_rttin_range = 0;
18669 rack->rtt_limit_mul = 0;
18671 rack->rack_no_prr = 1;
18673 rack->r_limit_scw = 1;
18675 rack->r_ctl.rack_per_of_gp_rec = 90;
18679 } else if (prof == 2) {
18681 if (rack->rc_always_pace == 0) {
18682 if (tcp_can_enable_pacing() == 0)
18685 rack->rc_always_pace = 1;
18686 if (rack->use_fixed_rate || rack->gp_ready)
18687 rack_set_cc_pacing(rack);
18688 rack->r_use_cmp_ack = 1;
18689 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state))
18690 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
18691 /* pace_always=1 */
18692 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
18694 rack->rack_enable_scwnd = 1;
18696 rack->rc_gp_dyn_mul = 1;
18697 rack->r_ctl.rack_per_of_gp_ca = 100;
18699 rack->r_rr_config = 3;
18701 rack->r_ctl.rc_no_push_at_mrtt = 2;
18703 rack->rc_pace_to_cwnd = 1;
18704 rack->rc_pace_fill_if_rttin_range = 0;
18705 rack->rtt_limit_mul = 0;
18707 rack->rack_no_prr = 1;
18709 rack->r_limit_scw = 0;
18711 } else if (prof == 0) {
18712 /* This changes things back to the default settings */
18714 if (rack->rc_always_pace) {
18715 tcp_decrement_paced_conn();
18716 rack_undo_cc_pacing(rack);
18717 rack->rc_always_pace = 0;
18719 if (rack_pace_every_seg && tcp_can_enable_pacing()) {
18720 rack->rc_always_pace = 1;
18721 if (rack->use_fixed_rate || rack->gp_ready)
18722 rack_set_cc_pacing(rack);
18724 rack->rc_always_pace = 0;
18725 if (rack_use_cmp_acks)
18726 rack->r_use_cmp_ack = 1;
18728 rack->r_use_cmp_ack = 0;
18729 if (rack_disable_prr)
18730 rack->rack_no_prr = 1;
18732 rack->rack_no_prr = 0;
18733 if (rack_gp_no_rec_chg)
18734 rack->rc_gp_no_rec_chg = 1;
18736 rack->rc_gp_no_rec_chg = 0;
18737 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) {
18738 rack->r_mbuf_queue = 1;
18739 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state))
18740 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
18741 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
18743 rack->r_mbuf_queue = 0;
18744 rack->rc_inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
18746 if (rack_enable_shared_cwnd)
18747 rack->rack_enable_scwnd = 1;
18749 rack->rack_enable_scwnd = 0;
18750 if (rack_do_dyn_mul) {
18751 /* When dynamic adjustment is on, CA needs to start at 100% */
18752 rack->rc_gp_dyn_mul = 1;
18753 if (rack_do_dyn_mul >= 100)
18754 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul;
18756 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca;
18757 rack->rc_gp_dyn_mul = 0;
18759 rack->r_rr_config = 0;
18760 rack->r_ctl.rc_no_push_at_mrtt = 0;
18761 rack->rc_pace_to_cwnd = 0;
18762 rack->rc_pace_fill_if_rttin_range = 0;
18763 rack->rtt_limit_mul = 0;
18765 if (rack_enable_hw_pacing)
18766 rack->rack_hdw_pace_ena = 1;
18768 rack->rack_hdw_pace_ena = 0;
18769 if (rack_disable_prr)
18770 rack->rack_no_prr = 1;
18772 rack->rack_no_prr = 0;
18773 if (rack_limits_scwnd)
18774 rack->r_limit_scw = 1;
18776 rack->r_limit_scw = 0;
18783 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval)
18785 struct deferred_opt_list *dol;
18787 dol = malloc(sizeof(struct deferred_opt_list),
18788 M_TCPFSB, M_NOWAIT|M_ZERO);
18791 * No space, yikes -- fail out.
18795 dol->optname = sopt_name;
18796 dol->optval = loptval;
18797 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next);
18802 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name,
18803 uint32_t optval, uint64_t loptval)
18805 struct epoch_tracker et;
18806 struct sockopt sopt;
18807 struct cc_newreno_opts opt;
18812 switch (sopt_name) {
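	/*
	 * Note: optval carries the usual 32-bit socket-option value;
	 * loptval is the 64-bit form used by options such as
	 * TCP_PACING_RATE_CAP that need a full 64-bit quantity.
	 */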
18814 case TCP_RACK_PACING_BETA:
18815 RACK_OPTS_INC(tcp_rack_beta);
18816 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) {
18817 /* This only works for newreno. */
18821 if (rack->rc_pacing_cc_set) {
18823 * Set them into the real CC module
18824 * what's in the rack pcb are the old values
18825 * to be used on restoral.
18827 sopt.sopt_dir = SOPT_SET;
18828 opt.name = CC_NEWRENO_BETA;
18830 if (CC_ALGO(tp)->ctl_output != NULL)
18831 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
18838 * Not pacing yet so set it into our local
18839 * rack pcb storage.
18841 rack->r_ctl.rc_saved_beta.beta = optval;
18844 case TCP_RACK_PACING_BETA_ECN:
18845 RACK_OPTS_INC(tcp_rack_beta_ecn);
18846 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) {
18847 /* This only works for newreno. */
18851 if (rack->rc_pacing_cc_set) {
18853 * Set them into the real CC module
18854 * what's in the rack pcb are the old values
18855 * to be used on restoral.
18857 sopt.sopt_dir = SOPT_SET;
18858 opt.name = CC_NEWRENO_BETA_ECN;
18860 if (CC_ALGO(tp)->ctl_output != NULL)
18861 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
18866 * Not pacing yet so set it into our local
18867 * rack pcb storage.
18869 rack->r_ctl.rc_saved_beta.beta_ecn = optval;
18870 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN;
18873 case TCP_DEFER_OPTIONS:
18874 RACK_OPTS_INC(tcp_defer_opt);
18876 if (rack->gp_ready) {
18881 rack->defer_options = 1;
18883 rack->defer_options = 0;
18885 case TCP_RACK_MEASURE_CNT:
18886 RACK_OPTS_INC(tcp_rack_measure_cnt);
18887 if (optval && (optval <= 0xff)) {
18888 rack->r_ctl.req_measurements = optval;
18892 case TCP_REC_ABC_VAL:
18893 RACK_OPTS_INC(tcp_rec_abc_val);
18895 rack->r_use_labc_for_rec = 1;
18897 rack->r_use_labc_for_rec = 0;
18899 case TCP_RACK_ABC_VAL:
18900 RACK_OPTS_INC(tcp_rack_abc_val);
18901 if ((optval > 0) && (optval < 255))
18902 rack->rc_labc = optval;
18906 case TCP_HDWR_UP_ONLY:
18907 RACK_OPTS_INC(tcp_pacing_up_only);
18909 rack->r_up_only = 1;
18911 rack->r_up_only = 0;
18913 case TCP_PACING_RATE_CAP:
18914 RACK_OPTS_INC(tcp_pacing_rate_cap);
18915 rack->r_ctl.bw_rate_cap = loptval;
18917 case TCP_RACK_PROFILE:
18918 RACK_OPTS_INC(tcp_profile);
18919 error = rack_set_profile(rack, optval);
18921 case TCP_USE_CMP_ACKS:
18922 RACK_OPTS_INC(tcp_use_cmp_acks);
18923 if ((optval == 0) && (rack->rc_inp->inp_flags2 & INP_MBUF_ACKCMP)) {
18924 /* You can't turn it off once it's on! */
18926 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) {
18927 rack->r_use_cmp_ack = 1;
18928 rack->r_mbuf_queue = 1;
18929 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
18931 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
18932 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
18934 case TCP_SHARED_CWND_TIME_LIMIT:
18935 RACK_OPTS_INC(tcp_lscwnd);
18937 rack->r_limit_scw = 1;
18939 rack->r_limit_scw = 0;
18941 case TCP_RACK_PACE_TO_FILL:
18942 RACK_OPTS_INC(tcp_fillcw);
18944 rack->rc_pace_to_cwnd = 0;
18946 rack->rc_pace_to_cwnd = 1;
18948 rack->r_fill_less_agg = 1;
18950 if ((optval >= rack_gp_rtt_maxmul) &&
18951 rack_gp_rtt_maxmul &&
18953 rack->rc_pace_fill_if_rttin_range = 1;
18954 rack->rtt_limit_mul = optval;
18956 rack->rc_pace_fill_if_rttin_range = 0;
18957 rack->rtt_limit_mul = 0;
18960 case TCP_RACK_NO_PUSH_AT_MAX:
18961 RACK_OPTS_INC(tcp_npush);
18963 rack->r_ctl.rc_no_push_at_mrtt = 0;
18964 else if (optval < 0xff)
18965 rack->r_ctl.rc_no_push_at_mrtt = optval;
18969 case TCP_SHARED_CWND_ENABLE:
18970 RACK_OPTS_INC(tcp_rack_scwnd);
18972 rack->rack_enable_scwnd = 0;
18974 rack->rack_enable_scwnd = 1;
18976 case TCP_RACK_MBUF_QUEUE:
18977 /* Now do we use the LRO mbuf-queue feature */
18978 RACK_OPTS_INC(tcp_rack_mbufq);
18979 if (optval || rack->r_use_cmp_ack)
18980 rack->r_mbuf_queue = 1;
18982 rack->r_mbuf_queue = 0;
18983 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
18984 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
18986 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
18988 case TCP_RACK_NONRXT_CFG_RATE:
18989 RACK_OPTS_INC(tcp_rack_cfg_rate);
18991 rack->rack_rec_nonrxt_use_cr = 0;
18993 rack->rack_rec_nonrxt_use_cr = 1;
18996 RACK_OPTS_INC(tcp_rack_noprr);
18998 rack->rack_no_prr = 0;
18999 else if (optval == 1)
19000 rack->rack_no_prr = 1;
19001 else if (optval == 2)
19002 rack->no_prr_addback = 1;
19006 case TCP_TIMELY_DYN_ADJ:
19007 RACK_OPTS_INC(tcp_timely_dyn);
19009 rack->rc_gp_dyn_mul = 0;
19011 rack->rc_gp_dyn_mul = 1;
19012 if (optval >= 100) {
19014 * If the user sets something 100 or more
19015 * it's the gp_ca value.
19017 rack->r_ctl.rack_per_of_gp_ca = optval;
19021 case TCP_RACK_DO_DETECTION:
19022 RACK_OPTS_INC(tcp_rack_do_detection);
19024 rack->do_detection = 0;
19026 rack->do_detection = 1;
19028 case TCP_RACK_TLP_USE:
19029 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) {
19033 RACK_OPTS_INC(tcp_tlp_use);
19034 rack->rack_tlp_threshold_use = optval;
19036 case TCP_RACK_TLP_REDUCE:
19037 /* RACK TLP cwnd reduction (bool) */
19038 RACK_OPTS_INC(tcp_rack_tlp_reduce);
19039 rack->r_ctl.rc_tlp_cwnd_reduce = optval;
19041 /* Pacing related ones */
19042 case TCP_RACK_PACE_ALWAYS:
19044 * zero is old rack method, 1 is new
19045 * method using a pacing rate.
19047 RACK_OPTS_INC(tcp_rack_pace_always);
19049 if (rack->rc_always_pace) {
19052 } else if (tcp_can_enable_pacing()) {
19053 rack->rc_always_pace = 1;
19054 if (rack->use_fixed_rate || rack->gp_ready)
19055 rack_set_cc_pacing(rack);
19062 if (rack->rc_always_pace) {
19063 tcp_decrement_paced_conn();
19064 rack->rc_always_pace = 0;
19065 rack_undo_cc_pacing(rack);
19068 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
19069 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
19071 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
19072 /* A rate may be set (irate or other); if so, set the seg size */
19073 rack_update_seg(rack);
19075 case TCP_BBR_RACK_INIT_RATE:
19076 RACK_OPTS_INC(tcp_initial_rate);
19078 /* Change from kbits per second to bytes per second */
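/*
 * A value in kilobits per second converts to bytes per second by
 * multiplying by 1000 and dividing by 8 (i.e. times 125).
 */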
19081 rack->r_ctl.init_rate = val;
19082 if (rack->rc_init_win != rack_default_init_window) {
19086 * Options don't always get applied
19087 * in the order you think. So in order
19088 * to assure we update a cwnd we need
19089 * to check and see if we are still
19090 * where we should raise the cwnd.
19092 win = rc_init_window(rack);
19093 if (SEQ_GT(tp->snd_max, tp->iss))
19094 snt = tp->snd_max - tp->iss;
19098 (tp->snd_cwnd < win))
19099 tp->snd_cwnd = win;
19101 if (rack->rc_always_pace)
19102 rack_update_seg(rack);
19104 case TCP_BBR_IWINTSO:
19105 RACK_OPTS_INC(tcp_initial_win);
19106 if (optval && (optval <= 0xff)) {
19109 rack->rc_init_win = optval;
19110 win = rc_init_window(rack);
19111 if (SEQ_GT(tp->snd_max, tp->iss))
19112 snt = tp->snd_max - tp->iss;
19117 #ifdef NETFLIX_PEAKRATE
19118 tp->t_maxpeakrate |
19120 rack->r_ctl.init_rate)) {
19122 * We are not past the initial window
19123 * and we have some basis for pacing,
19124 * so we may need to adjust the cwnd
19125 * upward. Note that even if we don't set
19126 * the cwnd, it is still ok to raise rc_init_win,
19127 * which can be used coming out of idle when we
19128 * would have a rate.
19130 if (tp->snd_cwnd < win)
19131 tp->snd_cwnd = win;
19133 if (rack->rc_always_pace)
19134 rack_update_seg(rack);
19138 case TCP_RACK_FORCE_MSEG:
19139 RACK_OPTS_INC(tcp_rack_force_max_seg);
19141 rack->rc_force_max_seg = 1;
19143 rack->rc_force_max_seg = 0;
19145 case TCP_RACK_PACE_MAX_SEG:
19146 /* Max segment size in a pace, in bytes */
19147 RACK_OPTS_INC(tcp_rack_max_seg);
19148 rack->rc_user_set_max_segs = optval;
19149 rack_set_pace_segments(tp, rack, __LINE__, NULL);
19151 case TCP_RACK_PACE_RATE_REC:
19152 /* Set the fixed pacing rate in bytes per second for recovery */
19153 RACK_OPTS_INC(tcp_rack_pace_rate_rec);
19154 rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
19155 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0)
19156 rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
19157 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0)
19158 rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
19159 rack->use_fixed_rate = 1;
19160 if (rack->rc_always_pace)
19161 rack_set_cc_pacing(rack);
19162 rack_log_pacing_delay_calc(rack,
19163 rack->r_ctl.rc_fixed_pacing_rate_ss,
19164 rack->r_ctl.rc_fixed_pacing_rate_ca,
19165 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
19169 case TCP_RACK_PACE_RATE_SS:
19170 /* Set the fixed pacing rate in bytes per second for slow start */
19171 RACK_OPTS_INC(tcp_rack_pace_rate_ss);
19172 rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
19173 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0)
19174 rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
19175 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0)
19176 rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
19177 rack->use_fixed_rate = 1;
19178 if (rack->rc_always_pace)
19179 rack_set_cc_pacing(rack);
19180 rack_log_pacing_delay_calc(rack,
19181 rack->r_ctl.rc_fixed_pacing_rate_ss,
19182 rack->r_ctl.rc_fixed_pacing_rate_ca,
19183 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
19187 case TCP_RACK_PACE_RATE_CA:
19188 /* Set the fixed pacing rate in bytes per second for congestion avoidance */
19189 RACK_OPTS_INC(tcp_rack_pace_rate_ca);
19190 rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
19191 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0)
19192 rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
19193 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0)
19194 rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
19195 rack->use_fixed_rate = 1;
19196 if (rack->rc_always_pace)
19197 rack_set_cc_pacing(rack);
19198 rack_log_pacing_delay_calc(rack,
19199 rack->r_ctl.rc_fixed_pacing_rate_ss,
19200 rack->r_ctl.rc_fixed_pacing_rate_ca,
19201 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
19204 case TCP_RACK_GP_INCREASE_REC:
19205 RACK_OPTS_INC(tcp_gp_inc_rec);
19206 rack->r_ctl.rack_per_of_gp_rec = optval;
19207 rack_log_pacing_delay_calc(rack,
19208 rack->r_ctl.rack_per_of_gp_ss,
19209 rack->r_ctl.rack_per_of_gp_ca,
19210 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
19213 case TCP_RACK_GP_INCREASE_CA:
19214 RACK_OPTS_INC(tcp_gp_inc_ca);
19218 * We don't allow any reduction
19224 rack->r_ctl.rack_per_of_gp_ca = ca;
19225 rack_log_pacing_delay_calc(rack,
19226 rack->r_ctl.rack_per_of_gp_ss,
19227 rack->r_ctl.rack_per_of_gp_ca,
19228 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
19231 case TCP_RACK_GP_INCREASE_SS:
19232 RACK_OPTS_INC(tcp_gp_inc_ss);
19236 * We don't allow any reduction
19242 rack->r_ctl.rack_per_of_gp_ss = ss;
19243 rack_log_pacing_delay_calc(rack,
19244 rack->r_ctl.rack_per_of_gp_ss,
19245 rack->r_ctl.rack_per_of_gp_ca,
19246 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
19249 case TCP_RACK_RR_CONF:
19250 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate);
19251 if (optval && optval <= 3)
19252 rack->r_rr_config = optval;
19254 rack->r_rr_config = 0;
19256 case TCP_HDWR_RATE_CAP:
19257 RACK_OPTS_INC(tcp_hdwr_rate_cap);
19259 if (rack->r_rack_hw_rate_caps == 0)
19260 rack->r_rack_hw_rate_caps = 1;
19264 rack->r_rack_hw_rate_caps = 0;
19267 case TCP_BBR_HDWR_PACE:
19268 RACK_OPTS_INC(tcp_hdwr_pacing);
19270 if (rack->rack_hdrw_pacing == 0) {
19271 rack->rack_hdw_pace_ena = 1;
19272 rack->rack_attempt_hdwr_pace = 0;
19276 rack->rack_hdw_pace_ena = 0;
19278 if (rack->r_ctl.crte != NULL) {
19279 rack->rack_hdrw_pacing = 0;
19280 rack->rack_attempt_hdwr_pace = 0;
19281 tcp_rel_pacing_rate(rack->r_ctl.crte, tp);
19282 rack->r_ctl.crte = NULL;
19287 /* End Pacing related ones */
19288 case TCP_RACK_PRR_SENDALOT:
19289 /* Allow PRR to send more than one seg */
19290 RACK_OPTS_INC(tcp_rack_prr_sendalot);
19291 rack->r_ctl.rc_prr_sendalot = optval;
19293 case TCP_RACK_MIN_TO:
19294 /* Minimum time between rack t-o's in ms */
19295 RACK_OPTS_INC(tcp_rack_min_to);
19296 rack->r_ctl.rc_min_to = optval;
19298 case TCP_RACK_EARLY_SEG:
19299 /* Max segments in early recovery */
19300 RACK_OPTS_INC(tcp_rack_early_seg);
19301 rack->r_ctl.rc_early_recovery_segs = optval;
19303 case TCP_RACK_REORD_THRESH:
19304 /* RACK reorder threshold (shift amount) */
19305 RACK_OPTS_INC(tcp_rack_reord_thresh);
19306 if ((optval > 0) && (optval < 31))
19307 rack->r_ctl.rc_reorder_shift = optval;
19311 case TCP_RACK_REORD_FADE:
19312 /* Does reordering fade after ms time */
19313 RACK_OPTS_INC(tcp_rack_reord_fade);
19314 rack->r_ctl.rc_reorder_fade = optval;
19316 case TCP_RACK_TLP_THRESH:
19317 /* RACK TLP threshold i.e. srtt+(srtt/N) */
19318 RACK_OPTS_INC(tcp_rack_tlp_thresh);
19320 rack->r_ctl.rc_tlp_threshold = optval;
19324 case TCP_BBR_USE_RACK_RR:
19325 RACK_OPTS_INC(tcp_rack_rr);
19327 rack->use_rack_rr = 1;
19329 rack->use_rack_rr = 0;
19331 case TCP_FAST_RSM_HACK:
19332 RACK_OPTS_INC(tcp_rack_fastrsm_hack);
19334 rack->fast_rsm_hack = 1;
19336 rack->fast_rsm_hack = 0;
19338 case TCP_RACK_PKT_DELAY:
19339 /* RACK added ms i.e. rack-rtt + reord + N */
19340 RACK_OPTS_INC(tcp_rack_pkt_delay);
19341 rack->r_ctl.rc_pkt_delay = optval;
19344 RACK_OPTS_INC(tcp_rack_delayed_ack);
19346 tp->t_delayed_ack = 0;
19348 tp->t_delayed_ack = 1;
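/*
 * If a delayed ack is currently pending, convert it to an immediate
 * ACK so the peer is not left waiting once delack has been turned off.
 */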
19349 if (tp->t_flags & TF_DELACK) {
19350 tp->t_flags &= ~TF_DELACK;
19351 tp->t_flags |= TF_ACKNOW;
19352 NET_EPOCH_ENTER(et);
19354 NET_EPOCH_EXIT(et);
19358 case TCP_BBR_RACK_RTT_USE:
19359 RACK_OPTS_INC(tcp_rack_rtt_use);
19360 if ((optval != USE_RTT_HIGH) &&
19361 (optval != USE_RTT_LOW) &&
19362 (optval != USE_RTT_AVG))
19365 rack->r_ctl.rc_rate_sample_method = optval;
19367 case TCP_DATA_AFTER_CLOSE:
19368 RACK_OPTS_INC(tcp_data_after_close);
19370 rack->rc_allow_data_af_clo = 1;
19372 rack->rc_allow_data_af_clo = 0;
19377 #ifdef NETFLIX_STATS
19378 tcp_log_socket_option(tp, sopt_name, optval, error);
19385 rack_apply_deferred_options(struct tcp_rack *rack)
19387 struct deferred_opt_list *dol, *sdol;
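/*
 * Use the _SAFE variant of the iterator: each entry is removed from
 * the list and freed as it is processed.
 */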
19390 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) {
19391 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next);
19392 /* Disadvantage of deferral is that you lose the error return */
19393 s_optval = (uint32_t)dol->optval;
19394 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval);
19395 free(dol, M_TCPDO);
19400 * rack_ctloutput() must drop the inpcb lock before performing copyin on
19401 * socket option arguments. When it re-acquires the lock after the copy, it
19402 * has to revalidate that the connection is still valid for the socket
19406 rack_set_sockopt(struct socket *so, struct sockopt *sopt,
19407 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack)
19410 int32_t error = 0, optval;
19412 switch (sopt->sopt_name) {
19413 case TCP_RACK_TLP_REDUCE: /* URL:tlp_reduce */
19414 /* Pacing related ones */
19415 case TCP_RACK_PACE_ALWAYS: /* URL:pace_always */
19416 case TCP_BBR_RACK_INIT_RATE: /* URL:irate */
19417 case TCP_BBR_IWINTSO: /* URL:tso_iwin */
19418 case TCP_RACK_PACE_MAX_SEG: /* URL:pace_max_seg */
19419 case TCP_RACK_FORCE_MSEG: /* URL:force_max_seg */
19420 case TCP_RACK_PACE_RATE_CA: /* URL:pr_ca */
19421 case TCP_RACK_PACE_RATE_SS: /* URL:pr_ss*/
19422 case TCP_RACK_PACE_RATE_REC: /* URL:pr_rec */
19423 case TCP_RACK_GP_INCREASE_CA: /* URL:gp_inc_ca */
19424 case TCP_RACK_GP_INCREASE_SS: /* URL:gp_inc_ss */
19425 case TCP_RACK_GP_INCREASE_REC: /* URL:gp_inc_rec */
19426 case TCP_RACK_RR_CONF: /* URL:rrr_conf */
19427 case TCP_BBR_HDWR_PACE: /* URL:hdwrpace */
19428 case TCP_HDWR_RATE_CAP: /* URL:hdwrcap boolean */
19429 case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */
19430 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */
19431 /* End pacing related */
19432 case TCP_FAST_RSM_HACK: /* URL:frsm_hack */
19433 case TCP_DELACK: /* URL:delack (in base TCP i.e. tcp_hints along with cc etc ) */
19434 case TCP_RACK_PRR_SENDALOT: /* URL:prr_sendalot */
19435 case TCP_RACK_MIN_TO: /* URL:min_to */
19436 case TCP_RACK_EARLY_SEG: /* URL:early_seg */
19437 case TCP_RACK_REORD_THRESH: /* URL:reord_thresh */
19438 case TCP_RACK_REORD_FADE: /* URL:reord_fade */
19439 case TCP_RACK_TLP_THRESH: /* URL:tlp_thresh */
19440 case TCP_RACK_PKT_DELAY: /* URL:pkt_delay */
19441 case TCP_RACK_TLP_USE: /* URL:tlp_use */
19442 case TCP_BBR_RACK_RTT_USE: /* URL:rttuse */
19443 case TCP_BBR_USE_RACK_RR: /* URL:rackrr */
19444 case TCP_RACK_DO_DETECTION: /* URL:detect */
19445 case TCP_NO_PRR: /* URL:noprr */
19446 case TCP_TIMELY_DYN_ADJ: /* URL:dynamic */
19447 case TCP_DATA_AFTER_CLOSE: /* no URL */
19448 case TCP_RACK_NONRXT_CFG_RATE: /* URL:nonrxtcr */
19449 case TCP_SHARED_CWND_ENABLE: /* URL:scwnd */
19450 case TCP_RACK_MBUF_QUEUE: /* URL:mqueue */
19451 case TCP_RACK_NO_PUSH_AT_MAX: /* URL:npush */
19452 case TCP_RACK_PACE_TO_FILL: /* URL:fillcw */
19453 case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */
19454 case TCP_RACK_PROFILE: /* URL:profile */
19455 case TCP_USE_CMP_ACKS: /* URL:cmpack */
19456 case TCP_RACK_ABC_VAL: /* URL:labc */
19457 case TCP_REC_ABC_VAL: /* URL:reclabc */
19458 case TCP_RACK_MEASURE_CNT: /* URL:measurecnt */
19459 case TCP_DEFER_OPTIONS: /* URL:defer */
19460 case TCP_RACK_PACING_BETA: /* URL:pacing_beta */
19461 case TCP_RACK_PACING_BETA_ECN: /* URL:pacing_beta_ecn */
19464 /* Filter off all unknown options to the base stack */
19465 return (tcp_default_ctloutput(so, sopt, inp, tp));
19469 if (sopt->sopt_name == TCP_PACING_RATE_CAP) {
19470 error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval));
19472 * We truncate it down to 32 bits for the socket-option trace. This
19473 * means rates > ~34Gbps (2^32 bytes/sec) won't show right, but that's probably ok.
19475 optval = (uint32_t)loptval;
19477 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
19478 /* Save it in 64 bit form too */
19484 if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
19486 return (ECONNRESET);
19488 if (rack->defer_options && (rack->gp_ready == 0) &&
19489 (sopt->sopt_name != TCP_DEFER_OPTIONS) &&
19490 (sopt->sopt_name != TCP_RACK_PACING_BETA) &&
19491 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) &&
19492 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) {
19493 /* Options are being deferred */
19494 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) {
19498 /* No memory to defer, fail */
19503 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval);
19509 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti)
19512 INP_WLOCK_ASSERT(tp->t_inpcb);
19513 bzero(ti, sizeof(*ti));
19515 ti->tcpi_state = tp->t_state;
19516 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP))
19517 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
19518 if (tp->t_flags & TF_SACK_PERMIT)
19519 ti->tcpi_options |= TCPI_OPT_SACK;
19520 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) {
19521 ti->tcpi_options |= TCPI_OPT_WSCALE;
19522 ti->tcpi_snd_wscale = tp->snd_scale;
19523 ti->tcpi_rcv_wscale = tp->rcv_scale;
19525 if (tp->t_flags2 & TF2_ECN_PERMIT)
19526 ti->tcpi_options |= TCPI_OPT_ECN;
19527 if (tp->t_flags & TF_FASTOPEN)
19528 ti->tcpi_options |= TCPI_OPT_TFO;
19529 /* t_rcvtime is still kept in ticks */
19530 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick;
19531 /* Since we hold everything in precise useconds this is easy */
19532 ti->tcpi_rtt = tp->t_srtt;
19533 ti->tcpi_rttvar = tp->t_rttvar;
19534 ti->tcpi_rto = tp->t_rxtcur;
19535 ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
19536 ti->tcpi_snd_cwnd = tp->snd_cwnd;
19538 * FreeBSD-specific extension fields for tcp_info.
19540 ti->tcpi_rcv_space = tp->rcv_wnd;
19541 ti->tcpi_rcv_nxt = tp->rcv_nxt;
19542 ti->tcpi_snd_wnd = tp->snd_wnd;
19543 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. */
19544 ti->tcpi_snd_nxt = tp->snd_nxt;
19545 ti->tcpi_snd_mss = tp->t_maxseg;
19546 ti->tcpi_rcv_mss = tp->t_maxseg;
19547 ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack;
19548 ti->tcpi_rcv_ooopack = tp->t_rcvoopack;
19549 ti->tcpi_snd_zerowin = tp->t_sndzerowin;
19550 #ifdef NETFLIX_STATS
19551 ti->tcpi_total_tlp = tp->t_sndtlppack;
19552 ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte;
19553 memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo));
19556 if (tp->t_flags & TF_TOE) {
19557 ti->tcpi_options |= TCPI_OPT_TOE;
19558 tcp_offload_tcp_info(tp, ti);
19564 rack_get_sockopt(struct socket *so, struct sockopt *sopt,
19565 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack)
19567 int32_t error, optval;
19568 uint64_t val, loptval;
19569 struct tcp_info ti;
19571 * Because all our options are either boolean or an int, we can just
19572 * pull everything into optval and then unlock and copy. If we ever
19573 * add an option that is not an int, then this will have quite an
19574 * impact on this routine.
19577 switch (sopt->sopt_name) {
19579 /* First get the info filled */
19580 rack_fill_info(tp, &ti);
19581 /* Fix up the rtt related fields if needed */
19583 error = sooptcopyout(sopt, &ti, sizeof ti);
19586 * Beta is the congestion control value for NewReno that influences how
19587 * much of a backoff happens when loss is detected. It is normally set
19588 * to 50 for 50% i.e. the cwnd is reduced to 50% of its previous value
19589 * when you exit recovery.
19591 case TCP_RACK_PACING_BETA:
19592 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0)
19594 else if (rack->rc_pacing_cc_set == 0)
19595 optval = rack->r_ctl.rc_saved_beta.beta;
19598 * Reach out into the CC data and report back what
19599 * I have previously set. Yeah it looks hackish but
19600 * we don't want to report the saved values.
19602 if (tp->ccv->cc_data)
19603 optval = ((struct newreno *)tp->ccv->cc_data)->beta;
19609 * Beta_ecn is the congestion control value for NewReno that influences how
19610 * much of a backoff happens when a ECN mark is detected. It is normally set
19611 * to 80 for 80% i.e. the cwnd is reduced by 20% of its previous value when
19612 * you exit recovery. Note that classic ECN has a beta of 50, it is only
19613 * ABE ECN that uses this "less" value, but we do too with pacing :)
19616 case TCP_RACK_PACING_BETA_ECN:
19617 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0)
19619 else if (rack->rc_pacing_cc_set == 0)
19620 optval = rack->r_ctl.rc_saved_beta.beta_ecn;
19623 * Reach out into the CC data and report back what
19624 * I have previously set. Yeah it looks hackish but
19625 * we don't want to report the saved values.
19627 if (tp->ccv->cc_data)
19628 optval = ((struct newreno *)tp->ccv->cc_data)->beta_ecn;
19633 case TCP_FAST_RSM_HACK:
19634 optval = rack->fast_rsm_hack;
19636 case TCP_DEFER_OPTIONS:
19637 optval = rack->defer_options;
19639 case TCP_RACK_MEASURE_CNT:
19640 optval = rack->r_ctl.req_measurements;
19642 case TCP_REC_ABC_VAL:
19643 optval = rack->r_use_labc_for_rec;
19645 case TCP_RACK_ABC_VAL:
19646 optval = rack->rc_labc;
19648 case TCP_HDWR_UP_ONLY:
19649 optval = rack->r_up_only;
19651 case TCP_PACING_RATE_CAP:
19652 loptval = rack->r_ctl.bw_rate_cap;
19654 case TCP_RACK_PROFILE:
19655 /* You cannot retrieve a profile, it is write only */
19658 case TCP_USE_CMP_ACKS:
19659 optval = rack->r_use_cmp_ack;
19661 case TCP_RACK_PACE_TO_FILL:
19662 optval = rack->rc_pace_to_cwnd;
19663 if (optval && rack->r_fill_less_agg)
19666 case TCP_RACK_NO_PUSH_AT_MAX:
19667 optval = rack->r_ctl.rc_no_push_at_mrtt;
19669 case TCP_SHARED_CWND_ENABLE:
19670 optval = rack->rack_enable_scwnd;
19672 case TCP_RACK_NONRXT_CFG_RATE:
19673 optval = rack->rack_rec_nonrxt_use_cr;
19676 if (rack->rack_no_prr == 1)
19678 else if (rack->no_prr_addback == 1)
19683 case TCP_RACK_DO_DETECTION:
19684 optval = rack->do_detection;
19686 case TCP_RACK_MBUF_QUEUE:
19687 /* Now do we use the LRO mbuf-queue feature */
19688 optval = rack->r_mbuf_queue;
19690 case TCP_TIMELY_DYN_ADJ:
19691 optval = rack->rc_gp_dyn_mul;
19693 case TCP_BBR_IWINTSO:
19694 optval = rack->rc_init_win;
19696 case TCP_RACK_TLP_REDUCE:
19697 /* RACK TLP cwnd reduction (bool) */
19698 optval = rack->r_ctl.rc_tlp_cwnd_reduce;
19700 case TCP_BBR_RACK_INIT_RATE:
19701 val = rack->r_ctl.init_rate;
19702 /* convert to kbits per sec */
19705 optval = (uint32_t)val;
19707 case TCP_RACK_FORCE_MSEG:
19708 optval = rack->rc_force_max_seg;
19710 case TCP_RACK_PACE_MAX_SEG:
19711 /* Max segments in a pace */
19712 optval = rack->rc_user_set_max_segs;
19714 case TCP_RACK_PACE_ALWAYS:
19715 /* Use the always pace method */
19716 optval = rack->rc_always_pace;
19718 case TCP_RACK_PRR_SENDALOT:
19719 /* Allow PRR to send more than one seg */
19720 optval = rack->r_ctl.rc_prr_sendalot;
19722 case TCP_RACK_MIN_TO:
19723 /* Minimum time between rack t-o's in ms */
19724 optval = rack->r_ctl.rc_min_to;
19726 case TCP_RACK_EARLY_SEG:
19727 /* Max segments in early recovery */
19728 optval = rack->r_ctl.rc_early_recovery_segs;
19730 case TCP_RACK_REORD_THRESH:
19731 /* RACK reorder threshold (shift amount) */
19732 optval = rack->r_ctl.rc_reorder_shift;
19734 case TCP_RACK_REORD_FADE:
19735 /* Does reordering fade after ms time */
19736 optval = rack->r_ctl.rc_reorder_fade;
19738 case TCP_BBR_USE_RACK_RR:
19739 /* Do we use the rack cheat for rxt */
19740 optval = rack->use_rack_rr;
19742 case TCP_RACK_RR_CONF:
19743 optval = rack->r_rr_config;
19745 case TCP_HDWR_RATE_CAP:
19746 optval = rack->r_rack_hw_rate_caps;
19748 case TCP_BBR_HDWR_PACE:
19749 optval = rack->rack_hdw_pace_ena;
19751 case TCP_RACK_TLP_THRESH:
19752 /* RACK TLP threshold i.e. srtt+(srtt/N) */
19753 optval = rack->r_ctl.rc_tlp_threshold;
19755 case TCP_RACK_PKT_DELAY:
19756 /* RACK added ms i.e. rack-rtt + reord + N */
19757 optval = rack->r_ctl.rc_pkt_delay;
19759 case TCP_RACK_TLP_USE:
19760 optval = rack->rack_tlp_threshold_use;
19762 case TCP_RACK_PACE_RATE_CA:
19763 optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
19765 case TCP_RACK_PACE_RATE_SS:
19766 optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
19768 case TCP_RACK_PACE_RATE_REC:
19769 optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
19771 case TCP_RACK_GP_INCREASE_SS:
19772 optval = rack->r_ctl.rack_per_of_gp_ss;
19774 case TCP_RACK_GP_INCREASE_CA:
19775 optval = rack->r_ctl.rack_per_of_gp_ca;
19777 case TCP_BBR_RACK_RTT_USE:
19778 optval = rack->r_ctl.rc_rate_sample_method;
19781 optval = tp->t_delayed_ack;
19783 case TCP_DATA_AFTER_CLOSE:
19784 optval = rack->rc_allow_data_af_clo;
19786 case TCP_SHARED_CWND_TIME_LIMIT:
19787 optval = rack->r_limit_scw;
19790 return (tcp_default_ctloutput(so, sopt, inp, tp));
19795 if (sopt->sopt_name == TCP_PACING_RATE_CAP)
19796 error = sooptcopyout(sopt, &loptval, sizeof loptval);
19798 error = sooptcopyout(sopt, &optval, sizeof optval);
19804 rack_ctloutput(struct socket *so, struct sockopt *sopt, struct inpcb *inp, struct tcpcb *tp)
19806 int32_t error = EINVAL;
19807 struct tcp_rack *rack;
19809 rack = (struct tcp_rack *)tp->t_fb_ptr;
19810 if (rack == NULL) {
19814 if (sopt->sopt_dir == SOPT_SET) {
19815 return (rack_set_sockopt(so, sopt, inp, tp, rack));
19816 } else if (sopt->sopt_dir == SOPT_GET) {
19817 return (rack_get_sockopt(so, sopt, inp, tp, rack));
19825 rack_pru_options(struct tcpcb *tp, int flags)
19827 if (flags & PRUS_OOB)
19828 return (EOPNOTSUPP);
19832 static struct tcp_function_block __tcp_rack = {
19833 .tfb_tcp_block_name = __XSTRING(STACKNAME),
19834 .tfb_tcp_output = rack_output,
19835 .tfb_do_queued_segments = ctf_do_queued_segments,
19836 .tfb_do_segment_nounlock = rack_do_segment_nounlock,
19837 .tfb_tcp_do_segment = rack_do_segment,
19838 .tfb_tcp_ctloutput = rack_ctloutput,
19839 .tfb_tcp_fb_init = rack_init,
19840 .tfb_tcp_fb_fini = rack_fini,
19841 .tfb_tcp_timer_stop_all = rack_stopall,
19842 .tfb_tcp_timer_activate = rack_timer_activate,
19843 .tfb_tcp_timer_active = rack_timer_active,
19844 .tfb_tcp_timer_stop = rack_timer_stop,
19845 .tfb_tcp_rexmit_tmr = rack_remxt_tmr,
19846 .tfb_tcp_handoff_ok = rack_handoff_ok,
19847 .tfb_tcp_mtu_chg = rack_mtu_change,
19848 .tfb_pru_options = rack_pru_options,
19852 static const char *rack_stack_names[] = {
19853 __XSTRING(STACKNAME),
19855 __XSTRING(STACKALIAS),
19860 rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
19862 memset(mem, 0, size);
19867 rack_dtor(void *mem, int32_t size, void *arg)
19872 static bool rack_mod_inited = false;
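/*
 * Module event handler: on load, create the UMA zones for send-map
 * entries and rack control blocks, set up the sysctl tree, and register
 * the stack under its name and alias; on unload, tear all of that down.
 */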
19875 tcp_addrack(module_t mod, int32_t type, void *data)
19882 rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
19883 sizeof(struct rack_sendmap),
19884 rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);
19886 rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
19887 sizeof(struct tcp_rack),
19888 rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
19890 sysctl_ctx_init(&rack_sysctl_ctx);
19891 rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
19892 SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
19895 __XSTRING(STACKALIAS),
19897 __XSTRING(STACKNAME),
19899 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
19901 if (rack_sysctl_root == NULL) {
19902 printf("Failed to add sysctl node\n");
19906 rack_init_sysctls();
19907 num_stacks = nitems(rack_stack_names);
19908 err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
19909 rack_stack_names, &num_stacks);
19911 printf("Failed to register %s stack name for "
19912 "%s module\n", rack_stack_names[num_stacks],
19913 __XSTRING(MODNAME));
19914 sysctl_ctx_free(&rack_sysctl_ctx);
19916 uma_zdestroy(rack_zone);
19917 uma_zdestroy(rack_pcb_zone);
19918 rack_counter_destroy();
19919 printf("Failed to register rack module -- err:%d\n", err);
19922 tcp_lro_reg_mbufq();
19923 rack_mod_inited = true;
19926 err = deregister_tcp_functions(&__tcp_rack, true, false);
19929 err = deregister_tcp_functions(&__tcp_rack, false, true);
19932 if (rack_mod_inited) {
19933 uma_zdestroy(rack_zone);
19934 uma_zdestroy(rack_pcb_zone);
19935 sysctl_ctx_free(&rack_sysctl_ctx);
19936 rack_counter_destroy();
19937 rack_mod_inited = false;
19939 tcp_lro_dereg_mbufq();
19943 return (EOPNOTSUPP);
19948 static moduledata_t tcp_rack = {
19949 .name = __XSTRING(MODNAME),
19950 .evhand = tcp_addrack,
19954 MODULE_VERSION(MODNAME, 1);
19955 DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
19956 MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);
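/*
 * Illustrative userland sketch (an assumption for documentation, not part
 * of this module): a process can select this stack on a TCP socket via the
 * TCP_FUNCTION_BLK socket option and then tune one of the RACK options
 * handled in rack_set_sockopt() above, e.g. always-on pacing.  "s" is an
 * already-created TCP socket; exact option availability depends on the
 * kernel configuration.
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <string.h>
 *	#include <err.h>
 *
 *	struct tcp_function_set tfs;
 *	int on = 1;
 *
 *	memset(&tfs, 0, sizeof(tfs));
 *	strlcpy(tfs.function_set_name, "rack", sizeof(tfs.function_set_name));
 *	if (setsockopt(s, IPPROTO_TCP, TCP_FUNCTION_BLK, &tfs, sizeof(tfs)) == -1)
 *		err(1, "TCP_FUNCTION_BLK");
 *	if (setsockopt(s, IPPROTO_TCP, TCP_RACK_PACE_ALWAYS, &on, sizeof(on)) == -1)
 *		err(1, "TCP_RACK_PACE_ALWAYS");
 */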