1 /*-
2  * Copyright (c) 2016-2018
3  *      Netflix Inc.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33 #include "opt_ipsec.h"
34 #include "opt_tcpdebug.h"
35
36 #include <sys/param.h>
37 #include <sys/module.h>
38 #include <sys/kernel.h>
39 #ifdef TCP_HHOOK
40 #include <sys/hhook.h>
41 #endif
42 #include <sys/lock.h>
43 #include <sys/malloc.h>
45 #include <sys/mutex.h>
46 #include <sys/mbuf.h>
47 #include <sys/proc.h>           /* for proc0 declaration */
48 #include <sys/socket.h>
49 #include <sys/socketvar.h>
50 #include <sys/sysctl.h>
51 #include <sys/systm.h>
52 #ifdef NETFLIX_STATS
53 #include <sys/stats.h>
54 #endif
55 #include <sys/refcount.h>
56 #include <sys/queue.h>
57 #include <sys/smp.h>
58 #include <sys/kthread.h>
59 #include <sys/kern_prefetch.h>
60
61 #include <vm/uma.h>
62
63 #include <net/route.h>
64 #include <net/vnet.h>
65
66 #define TCPSTATES               /* for logging */
67
68 #include <netinet/in.h>
69 #include <netinet/in_kdtrace.h>
70 #include <netinet/in_pcb.h>
71 #include <netinet/ip.h>
72 #include <netinet/ip_icmp.h>    /* required for icmp_var.h */
73 #include <netinet/icmp_var.h>   /* for ICMP_BANDLIM */
74 #include <netinet/ip_var.h>
75 #include <netinet/ip6.h>
76 #include <netinet6/in6_pcb.h>
77 #include <netinet6/ip6_var.h>
78 #define TCPOUTFLAGS
79 #include <netinet/tcp.h>
80 #include <netinet/tcp_fsm.h>
81 #include <netinet/tcp_log_buf.h>
82 #include <netinet/tcp_seq.h>
83 #include <netinet/tcp_timer.h>
84 #include <netinet/tcp_var.h>
85 #include <netinet/tcp_hpts.h>
86 #include <netinet/tcpip.h>
87 #include <netinet/cc/cc.h>
88 #ifdef NETFLIX_CWV
89 #include <netinet/tcp_newcwv.h>
90 #endif
91 #include <netinet/tcp_fastopen.h>
92 #ifdef TCPDEBUG
93 #include <netinet/tcp_debug.h>
94 #endif                          /* TCPDEBUG */
95 #ifdef TCP_OFFLOAD
96 #include <netinet/tcp_offload.h>
97 #endif
98 #ifdef INET6
99 #include <netinet6/tcp6_var.h>
100 #endif
101
102 #include <netipsec/ipsec_support.h>
103
104 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
105 #include <netipsec/ipsec.h>
106 #include <netipsec/ipsec6.h>
107 #endif                          /* IPSEC */
108
109 #include <netinet/udp.h>
110 #include <netinet/udp_var.h>
111 #include <machine/in_cksum.h>
112
113 #ifdef MAC
114 #include <security/mac/mac_framework.h>
115 #endif
116 #include "sack_filter.h"
117 #include "tcp_rack.h"
118 #include "rack_bbr_common.h"
119
120 uma_zone_t rack_zone;
121 uma_zone_t rack_pcb_zone;
122
123 #ifndef TICKS2SBT
124 #define TICKS2SBT(__t)  (tick_sbt * ((sbintime_t)(__t)))
125 #endif
126
127 struct sysctl_ctx_list rack_sysctl_ctx;
128 struct sysctl_oid *rack_sysctl_root;
129
130 #define CUM_ACKED 1
131 #define SACKED 2
132
133 /*
134  * The RACK module incorporates a number of
135  * TCP ideas that have been put out into the IETF
136  * over the last few years:
137  * - Matt Mathis's Rate Halving which slowly drops
138  *    the congestion window so that the ack clock can
139  *    be maintained during a recovery.
140  * - Yuchung Cheng's RACK TCP (for which it's named) that
141  *    will stop us using the number of dup acks and instead
142  *    use time as the gauge of when we retransmit.
143  * - Reorder Detection of RFC 4737 and the Tail-Loss probe draft
144  *    of Dukkipati et al.
145  * RACK depends on SACK, so if an endpoint arrives that
146  * cannot do SACK the state machine below will shuttle the
147  * connection back to using the "default" TCP stack that is
148  * in FreeBSD.
149  *
150  * To implement RACK the original TCP stack was first decomposed
151  * into a functional state machine with individual states
152  * for each of the possible TCP connection states. The do_segment
153  * function's role in life is to mandate that the connection supports SACK
154  * initially and then ensure that the RACK state matches the connection
155  * state before calling the states do_segment function. Each
156  * state is simplified due to the fact that the original do_segment
157  * has been decomposed and we *know* what state we are in (no
158  * switches on the state) and all tests for SACK are gone. This
159  * greatly simplifies what each state does.
160  *
161  * TCP output is also overwritten with a new version since it
162  * must maintain the new rack scoreboard.
163  *
164  */
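/*
 * As a rough sketch (not a verbatim copy of the code below), the
 * per-state decomposition described above amounts to a dispatch of
 * roughly this shape, assuming the current handler is cached in a
 * function pointer (rack->r_substate) that rack_set_state() keeps in
 * sync with tp->t_state:
 *
 *	rack_set_state(tp, rack);
 *	retval = (*rack->r_substate)(m, th, so, tp, &to, drop_hdrlen,
 *	    tlen, &ti_locked, tiwin, thflags, nxt_pkt);
 *
 * Each handler (rack_do_established(), rack_do_syn_sent(), etc.) can
 * then assume SACK is in use and never switches on t_state itself.
 */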
165 static int32_t rack_precache = 1;
166 static int32_t rack_tlp_thresh = 1;
167 static int32_t rack_reorder_thresh = 2;
168 static int32_t rack_reorder_fade = 60000;       /* 0 - never fade, def 60,000
169                                                  * - 60 seconds */
170 static int32_t rack_pkt_delay = 1;
171 static int32_t rack_inc_var = 0;/* For TLP */
172 static int32_t rack_reduce_largest_on_idle = 0;
173 static int32_t rack_min_pace_time = 0;
174 static int32_t rack_min_pace_time_seg_req = 6;
175 static int32_t rack_early_recovery = 1;
176 static int32_t rack_early_recovery_max_seg = 6;
177 static int32_t rack_send_a_lot_in_prr = 1;
178 static int32_t rack_min_to = 1; /* Number of ms minimum timeout */
179 static int32_t rack_tlp_in_recovery = 1;        /* Can we do TLP in recovery? */
180 static int32_t rack_verbose_logging = 0;
181 static int32_t rack_ignore_data_after_close = 1;
182 /*
183  * Currently regular tcp has an rto_min of 30ms;
184  * the backoff doubles 12 times, so that ends up
185  * being a total of 30ms * (2^12 - 1), i.e. 122.850
186  * seconds, before a connection is killed.
187  */
188 static int32_t rack_tlp_min = 10;
189 static int32_t rack_rto_min = 30;       /* 30ms same as main freebsd */
190 static int32_t rack_rto_max = 30000;    /* 30 seconds */
191 static const int32_t rack_free_cache = 2;
192 static int32_t rack_hptsi_segments = 40;
193 static int32_t rack_rate_sample_method = USE_RTT_LOW;
194 static int32_t rack_pace_every_seg = 1;
195 static int32_t rack_delayed_ack_time = 200;     /* 200ms */
196 static int32_t rack_slot_reduction = 4;
197 static int32_t rack_lower_cwnd_at_tlp = 0;
198 static int32_t rack_use_proportional_reduce = 0;
199 static int32_t rack_proportional_rate = 10;
200 static int32_t rack_tlp_max_resend = 2;
201 static int32_t rack_limited_retran = 0;
202 static int32_t rack_always_send_oldest = 0;
203 static int32_t rack_sack_block_limit = 128;
204 static int32_t rack_use_sack_filter = 1;
205 static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;
206
207 /* Rack specific counters */
208 counter_u64_t rack_badfr;
209 counter_u64_t rack_badfr_bytes;
210 counter_u64_t rack_rtm_prr_retran;
211 counter_u64_t rack_rtm_prr_newdata;
212 counter_u64_t rack_timestamp_mismatch;
213 counter_u64_t rack_reorder_seen;
214 counter_u64_t rack_paced_segments;
215 counter_u64_t rack_unpaced_segments;
216 counter_u64_t rack_saw_enobuf;
217 counter_u64_t rack_saw_enetunreach;
218
219 /* Tail loss probe counters */
220 counter_u64_t rack_tlp_tot;
221 counter_u64_t rack_tlp_newdata;
222 counter_u64_t rack_tlp_retran;
223 counter_u64_t rack_tlp_retran_bytes;
224 counter_u64_t rack_tlp_retran_fail;
225 counter_u64_t rack_to_tot;
226 counter_u64_t rack_to_arm_rack;
227 counter_u64_t rack_to_arm_tlp;
228 counter_u64_t rack_to_alloc;
229 counter_u64_t rack_to_alloc_hard;
230 counter_u64_t rack_to_alloc_emerg;
231
232 counter_u64_t rack_sack_proc_all;
233 counter_u64_t rack_sack_proc_short;
234 counter_u64_t rack_sack_proc_restart;
235 counter_u64_t rack_runt_sacks;
236 counter_u64_t rack_used_tlpmethod;
237 counter_u64_t rack_used_tlpmethod2;
238 counter_u64_t rack_enter_tlp_calc;
239 counter_u64_t rack_input_idle_reduces;
240 counter_u64_t rack_tlp_does_nada;
241
242 /* Temp CPU counters */
243 counter_u64_t rack_find_high;
244
245 counter_u64_t rack_progress_drops;
246 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE];
247 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE];
248
249 static void
250 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick,  int event, int line);
251
252 static int
253 rack_process_ack(struct mbuf *m, struct tcphdr *th,
254     struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t * ti_locked,
255     uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val);
256 static int
257 rack_process_data(struct mbuf *m, struct tcphdr *th,
258     struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
259     int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
260 static void
261 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack,
262     struct tcphdr *th, uint16_t nsegs, uint16_t type, int32_t recovery);
263 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack);
264 static struct rack_sendmap *
265 rack_check_recovery_mode(struct tcpcb *tp,
266     uint32_t tsused);
267 static void
268 rack_cong_signal(struct tcpcb *tp, struct tcphdr *th,
269     uint32_t type);
270 static void rack_counter_destroy(void);
271 static int
272 rack_ctloutput(struct socket *so, struct sockopt *sopt,
273     struct inpcb *inp, struct tcpcb *tp);
274 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how);
275 static void
276 rack_do_segment(struct mbuf *m, struct tcphdr *th,
277     struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
278     uint8_t iptos, int32_t ti_locked);
279 static void rack_dtor(void *mem, int32_t size, void *arg);
280 static void
281 rack_earlier_retran(struct tcpcb *tp, struct rack_sendmap *rsm,
282     uint32_t t, uint32_t cts);
283 static struct rack_sendmap *
284 rack_find_high_nonack(struct tcp_rack *rack,
285     struct rack_sendmap *rsm);
286 static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack);
287 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm);
288 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged);
289 static int
290 rack_get_sockopt(struct socket *so, struct sockopt *sopt,
291     struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack);
292 static int32_t rack_handoff_ok(struct tcpcb *tp);
293 static int32_t rack_init(struct tcpcb *tp);
294 static void rack_init_sysctls(void);
295 static void
296 rack_log_ack(struct tcpcb *tp, struct tcpopt *to,
297     struct tcphdr *th);
298 static void
299 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
300     uint32_t seq_out, uint8_t th_flags, int32_t err, uint32_t ts,
301     uint8_t pass, struct rack_sendmap *hintrsm);
302 static void
303 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack,
304     struct rack_sendmap *rsm);
305 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num);
306 static int32_t rack_output(struct tcpcb *tp);
307 static void
308 rack_hpts_do_segment(struct mbuf *m, struct tcphdr *th,
309     struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
310     uint8_t iptos, int32_t ti_locked, int32_t nxt_pkt, struct timeval *tv);
311
312 static uint32_t
313 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack,
314     struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm,
315     uint32_t cts);
316 static void rack_post_recovery(struct tcpcb *tp, struct tcphdr *th);
317 static void rack_remxt_tmr(struct tcpcb *tp);
318 static int
319 rack_set_sockopt(struct socket *so, struct sockopt *sopt,
320     struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack);
321 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack);
322 static int32_t rack_stopall(struct tcpcb *tp);
323 static void
324 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type,
325     uint32_t delta);
326 static int32_t rack_timer_active(struct tcpcb *tp, uint32_t timer_type);
327 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line);
328 static void rack_timer_stop(struct tcpcb *tp, uint32_t timer_type);
329 static uint32_t
330 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
331     struct rack_sendmap *rsm, uint32_t ts, int32_t * lenp);
332 static void
333 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
334     struct rack_sendmap *rsm, uint32_t ts);
335 static int
336 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
337     struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type);
338 static int32_t tcp_addrack(module_t mod, int32_t type, void *data);
339 static void
340 rack_challenge_ack(struct mbuf *m, struct tcphdr *th,
341     struct tcpcb *tp, int32_t * ti_locked, int32_t * ret_val);
342 static int
343 rack_do_close_wait(struct mbuf *m, struct tcphdr *th,
344     struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
345     int32_t tlen, int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
346 static int
347 rack_do_closing(struct mbuf *m, struct tcphdr *th,
348     struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
349     int32_t tlen, int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
350 static void rack_do_drop(struct mbuf *m, struct tcpcb *tp, int32_t * ti_locked);
351 static void
352 rack_do_dropafterack(struct mbuf *m, struct tcpcb *tp,
353     struct tcphdr *th, int32_t * ti_locked, int32_t thflags, int32_t tlen, int32_t * ret_val);
354 static void
355 rack_do_dropwithreset(struct mbuf *m, struct tcpcb *tp,
356     struct tcphdr *th, int32_t * ti_locked, int32_t rstreason, int32_t tlen);
357 static int
358 rack_do_established(struct mbuf *m, struct tcphdr *th,
359     struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
360     int32_t tlen, int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
361 static int
362 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th,
363     struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
364     int32_t tlen, int32_t * ti_locked, uint32_t tiwin, int32_t nxt_pkt);
365 static int
366 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th,
367     struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
368     int32_t tlen, int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
369 static int
370 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th,
371     struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
372     int32_t tlen, int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
373 static int
374 rack_do_lastack(struct mbuf *m, struct tcphdr *th,
375     struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
376     int32_t tlen, int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
377 static int
378 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th,
379     struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
380     int32_t tlen, int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
381 static int
382 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th,
383     struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
384     int32_t tlen, int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
385 static int
386 rack_drop_checks(struct tcpopt *to, struct mbuf *m,
387     struct tcphdr *th, struct tcpcb *tp, int32_t * tlenp, int32_t * ti_locked, int32_t * thf,
388     int32_t * drop_hdrlen, int32_t * ret_val);
389 static int
390 rack_process_rst(struct mbuf *m, struct tcphdr *th,
391     struct socket *so, struct tcpcb *tp, int32_t * ti_locked);
392 struct rack_sendmap *
393 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack,
394     uint32_t tsused);
395 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt);
396 static void
397      tcp_rack_partialack(struct tcpcb *tp, struct tcphdr *th);
398
399 static int
400 rack_ts_check(struct mbuf *m, struct tcphdr *th,
401     struct tcpcb *tp, int32_t * ti_locked, int32_t tlen, int32_t thflags, int32_t * ret_val);
402
403 int32_t rack_clear_counter = 0;
404
405
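/*
 * Handler for the "clear" sysctl below: reading returns rack_clear_counter,
 * and writing the value 1 zeroes all of the RACK debug counters that are
 * registered in rack_init_sysctls().
 */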
406 static int
407 sysctl_rack_clear(SYSCTL_HANDLER_ARGS)
408 {
409         uint32_t stat;
410         int32_t error;
411
412         error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t));
413         if (error || req->newptr == NULL)
414                 return error;
415
416         error = SYSCTL_IN(req, &stat, sizeof(uint32_t));
417         if (error)
418                 return (error);
419         if (stat == 1) {
420 #ifdef INVARIANTS
421                 printf("Clearing RACK counters\n");
422 #endif
423                 counter_u64_zero(rack_badfr);
424                 counter_u64_zero(rack_badfr_bytes);
425                 counter_u64_zero(rack_rtm_prr_retran);
426                 counter_u64_zero(rack_rtm_prr_newdata);
427                 counter_u64_zero(rack_timestamp_mismatch);
428                 counter_u64_zero(rack_reorder_seen);
429                 counter_u64_zero(rack_tlp_tot);
430                 counter_u64_zero(rack_tlp_newdata);
431                 counter_u64_zero(rack_tlp_retran);
432                 counter_u64_zero(rack_tlp_retran_bytes);
433                 counter_u64_zero(rack_tlp_retran_fail);
434                 counter_u64_zero(rack_to_tot);
435                 counter_u64_zero(rack_to_arm_rack);
436                 counter_u64_zero(rack_to_arm_tlp);
437                 counter_u64_zero(rack_paced_segments);
438                 counter_u64_zero(rack_unpaced_segments);
439                 counter_u64_zero(rack_saw_enobuf);
440                 counter_u64_zero(rack_saw_enetunreach);
441                 counter_u64_zero(rack_to_alloc_hard);
442                 counter_u64_zero(rack_to_alloc_emerg);
443                 counter_u64_zero(rack_sack_proc_all);
444                 counter_u64_zero(rack_sack_proc_short);
445                 counter_u64_zero(rack_sack_proc_restart);
446                 counter_u64_zero(rack_to_alloc);
447                 counter_u64_zero(rack_find_high);
448                 counter_u64_zero(rack_runt_sacks);
449                 counter_u64_zero(rack_used_tlpmethod);
450                 counter_u64_zero(rack_used_tlpmethod2);
451                 counter_u64_zero(rack_enter_tlp_calc);
452                 counter_u64_zero(rack_progress_drops);
453                 counter_u64_zero(rack_tlp_does_nada);
454         }
455         rack_clear_counter = 0;
456         return (0);
457 }
458
459
460
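/*
 * Register the RACK tuning knobs under rack_sysctl_root and allocate the
 * debug counters (and counter arrays) that the stack updates at run time.
 */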
461 static void
462 rack_init_sysctls()
463 {
464         SYSCTL_ADD_S32(&rack_sysctl_ctx,
465             SYSCTL_CHILDREN(rack_sysctl_root),
466             OID_AUTO, "rate_sample_method", CTLFLAG_RW,
467             &rack_rate_sample_method , USE_RTT_LOW,
468             "What method should we use for rate sampling 0=high, 1=low ");
469         SYSCTL_ADD_S32(&rack_sysctl_ctx,
470             SYSCTL_CHILDREN(rack_sysctl_root),
471             OID_AUTO, "data_after_close", CTLFLAG_RW,
472             &rack_ignore_data_after_close, 0,
473             "Do we hold off sending a RST until all pending data is ack'd");
474         SYSCTL_ADD_S32(&rack_sysctl_ctx,
475             SYSCTL_CHILDREN(rack_sysctl_root),
476             OID_AUTO, "tlpmethod", CTLFLAG_RW,
477             &rack_tlp_threshold_use, TLP_USE_TWO_ONE,
478             "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2");
479         SYSCTL_ADD_S32(&rack_sysctl_ctx,
480             SYSCTL_CHILDREN(rack_sysctl_root),
481             OID_AUTO, "min_pace_time", CTLFLAG_RW,
482             &rack_min_pace_time, 0,
483             "Should we enforce a minimum pace time of 1ms");
484         SYSCTL_ADD_S32(&rack_sysctl_ctx,
485             SYSCTL_CHILDREN(rack_sysctl_root),
486             OID_AUTO, "min_pace_segs", CTLFLAG_RW,
487             &rack_min_pace_time_seg_req, 6,
488             "How many segments have to be in the len to enforce min-pace-time");
489         SYSCTL_ADD_S32(&rack_sysctl_ctx,
490             SYSCTL_CHILDREN(rack_sysctl_root),
491             OID_AUTO, "idle_reduce_high", CTLFLAG_RW,
492             &rack_reduce_largest_on_idle, 0,
493             "Should we reduce the largest cwnd seen to IW on idle reduction");
494         SYSCTL_ADD_S32(&rack_sysctl_ctx,
495             SYSCTL_CHILDREN(rack_sysctl_root),
496             OID_AUTO, "bb_verbose", CTLFLAG_RW,
497             &rack_verbose_logging, 0,
498             "Should RACK black box logging be verbose");
499         SYSCTL_ADD_S32(&rack_sysctl_ctx,
500             SYSCTL_CHILDREN(rack_sysctl_root),
501             OID_AUTO, "sackfiltering", CTLFLAG_RW,
502             &rack_use_sack_filter, 1,
503             "Do we use sack filtering?");
504         SYSCTL_ADD_S32(&rack_sysctl_ctx,
505             SYSCTL_CHILDREN(rack_sysctl_root),
506             OID_AUTO, "delayed_ack", CTLFLAG_RW,
507             &rack_delayed_ack_time, 200,
508             "Delayed ack time (200ms)");
509         SYSCTL_ADD_S32(&rack_sysctl_ctx,
510             SYSCTL_CHILDREN(rack_sysctl_root),
511             OID_AUTO, "tlpminto", CTLFLAG_RW,
512             &rack_tlp_min, 10,
513             "TLP minimum timeout per the specification (10ms)");
514         SYSCTL_ADD_S32(&rack_sysctl_ctx,
515             SYSCTL_CHILDREN(rack_sysctl_root),
516             OID_AUTO, "precache", CTLFLAG_RW,
517             &rack_precache, 0,
518             "Where should we precache the mcopy (0 is not at all)");
519         SYSCTL_ADD_S32(&rack_sysctl_ctx,
520             SYSCTL_CHILDREN(rack_sysctl_root),
521             OID_AUTO, "sblklimit", CTLFLAG_RW,
522             &rack_sack_block_limit, 128,
523             "When do we start paying attention to small sack blocks");
524         SYSCTL_ADD_S32(&rack_sysctl_ctx,
525             SYSCTL_CHILDREN(rack_sysctl_root),
526             OID_AUTO, "send_oldest", CTLFLAG_RW,
527             &rack_always_send_oldest, 1,
528             "Should we always send the oldest TLP and RACK-TLP");
529         SYSCTL_ADD_S32(&rack_sysctl_ctx,
530             SYSCTL_CHILDREN(rack_sysctl_root),
531             OID_AUTO, "rack_tlp_in_recovery", CTLFLAG_RW,
532             &rack_tlp_in_recovery, 1,
533             "Can we do a TLP during recovery?");
534         SYSCTL_ADD_S32(&rack_sysctl_ctx,
535             SYSCTL_CHILDREN(rack_sysctl_root),
536             OID_AUTO, "rack_tlimit", CTLFLAG_RW,
537             &rack_limited_retran, 0,
538             "How many times can a rack timeout drive out sends");
539         SYSCTL_ADD_S32(&rack_sysctl_ctx,
540             SYSCTL_CHILDREN(rack_sysctl_root),
541             OID_AUTO, "minrto", CTLFLAG_RW,
542             &rack_rto_min, 0,
543             "Minimum RTO in ms -- set with caution below 1000 due to TLP");
544         SYSCTL_ADD_S32(&rack_sysctl_ctx,
545             SYSCTL_CHILDREN(rack_sysctl_root),
546             OID_AUTO, "maxrto", CTLFLAG_RW,
547             &rack_rto_max, 0,
548             "Maximum RTO in ms -- should be at least as large as min_rto");
549         SYSCTL_ADD_S32(&rack_sysctl_ctx,
550             SYSCTL_CHILDREN(rack_sysctl_root),
551             OID_AUTO, "tlp_retry", CTLFLAG_RW,
552             &rack_tlp_max_resend, 2,
553             "How many times does TLP retry a single segment or multiple with no ACK");
554         SYSCTL_ADD_S32(&rack_sysctl_ctx,
555             SYSCTL_CHILDREN(rack_sysctl_root),
556             OID_AUTO, "recovery_loss_prop", CTLFLAG_RW,
557             &rack_use_proportional_reduce, 0,
558             "Should we proportionally reduce cwnd based on the number of losses");
559         SYSCTL_ADD_S32(&rack_sysctl_ctx,
560             SYSCTL_CHILDREN(rack_sysctl_root),
561             OID_AUTO, "recovery_prop", CTLFLAG_RW,
562             &rack_proportional_rate, 10,
563             "What percent reduction per loss");
564         SYSCTL_ADD_S32(&rack_sysctl_ctx,
565             SYSCTL_CHILDREN(rack_sysctl_root),
566             OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW,
567             &rack_lower_cwnd_at_tlp, 0,
568             "When a TLP completes a retran should we enter recovery?");
569         SYSCTL_ADD_S32(&rack_sysctl_ctx,
570             SYSCTL_CHILDREN(rack_sysctl_root),
571             OID_AUTO, "hptsi_reduces", CTLFLAG_RW,
572             &rack_slot_reduction, 4,
573             "When setting a slot should we reduce by divisor");
574         SYSCTL_ADD_S32(&rack_sysctl_ctx,
575             SYSCTL_CHILDREN(rack_sysctl_root),
576             OID_AUTO, "hptsi_every_seg", CTLFLAG_RW,
577             &rack_pace_every_seg, 1,
578             "Should we pace out every segment with hptsi");
579         SYSCTL_ADD_S32(&rack_sysctl_ctx,
580             SYSCTL_CHILDREN(rack_sysctl_root),
581             OID_AUTO, "hptsi_seg_max", CTLFLAG_RW,
582             &rack_hptsi_segments, 6,
583             "Should we pace out only a limited number of segments");
584         SYSCTL_ADD_S32(&rack_sysctl_ctx,
585             SYSCTL_CHILDREN(rack_sysctl_root),
586             OID_AUTO, "prr_sendalot", CTLFLAG_RW,
587             &rack_send_a_lot_in_prr, 1,
588             "Send a lot in prr");
589         SYSCTL_ADD_S32(&rack_sysctl_ctx,
590             SYSCTL_CHILDREN(rack_sysctl_root),
591             OID_AUTO, "minto", CTLFLAG_RW,
592             &rack_min_to, 1,
593             "Minimum rack timeout in milliseconds");
594         SYSCTL_ADD_S32(&rack_sysctl_ctx,
595             SYSCTL_CHILDREN(rack_sysctl_root),
596             OID_AUTO, "earlyrecoveryseg", CTLFLAG_RW,
597             &rack_early_recovery_max_seg, 6,
598             "Max segments in early recovery");
599         SYSCTL_ADD_S32(&rack_sysctl_ctx,
600             SYSCTL_CHILDREN(rack_sysctl_root),
601             OID_AUTO, "earlyrecovery", CTLFLAG_RW,
602             &rack_early_recovery, 1,
603             "Do we do early recovery with rack");
604         SYSCTL_ADD_S32(&rack_sysctl_ctx,
605             SYSCTL_CHILDREN(rack_sysctl_root),
606             OID_AUTO, "reorder_thresh", CTLFLAG_RW,
607             &rack_reorder_thresh, 2,
608             "What factor for rack will be added when seeing reordering (shift right)");
609         SYSCTL_ADD_S32(&rack_sysctl_ctx,
610             SYSCTL_CHILDREN(rack_sysctl_root),
611             OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW,
612             &rack_tlp_thresh, 1,
613             "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)");
614         SYSCTL_ADD_S32(&rack_sysctl_ctx,
615             SYSCTL_CHILDREN(rack_sysctl_root),
616             OID_AUTO, "reorder_fade", CTLFLAG_RW,
617             &rack_reorder_fade, 0,
618             "Does reorder detection fade, if so how many ms (0 means never)");
619         SYSCTL_ADD_S32(&rack_sysctl_ctx,
620             SYSCTL_CHILDREN(rack_sysctl_root),
621             OID_AUTO, "pktdelay", CTLFLAG_RW,
622             &rack_pkt_delay, 1,
623             "Extra RACK time (in ms) besides reordering thresh");
624         SYSCTL_ADD_S32(&rack_sysctl_ctx,
625             SYSCTL_CHILDREN(rack_sysctl_root),
626             OID_AUTO, "inc_var", CTLFLAG_RW,
627             &rack_inc_var, 0,
628             "Should rack add to the TLP timer the variance in rtt calculation");
629         rack_badfr = counter_u64_alloc(M_WAITOK);
630         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
631             SYSCTL_CHILDREN(rack_sysctl_root),
632             OID_AUTO, "badfr", CTLFLAG_RD,
633             &rack_badfr, "Total number of bad FRs");
634         rack_badfr_bytes = counter_u64_alloc(M_WAITOK);
635         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
636             SYSCTL_CHILDREN(rack_sysctl_root),
637             OID_AUTO, "badfr_bytes", CTLFLAG_RD,
638             &rack_badfr_bytes, "Total bytes of bad FRs");
639         rack_rtm_prr_retran = counter_u64_alloc(M_WAITOK);
640         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
641             SYSCTL_CHILDREN(rack_sysctl_root),
642             OID_AUTO, "prrsndret", CTLFLAG_RD,
643             &rack_rtm_prr_retran,
644             "Total number of prr based retransmits");
645         rack_rtm_prr_newdata = counter_u64_alloc(M_WAITOK);
646         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
647             SYSCTL_CHILDREN(rack_sysctl_root),
648             OID_AUTO, "prrsndnew", CTLFLAG_RD,
649             &rack_rtm_prr_newdata,
650             "Total number of prr based new transmits");
651         rack_timestamp_mismatch = counter_u64_alloc(M_WAITOK);
652         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
653             SYSCTL_CHILDREN(rack_sysctl_root),
654             OID_AUTO, "tsnf", CTLFLAG_RD,
655             &rack_timestamp_mismatch,
656             "Total number of times we could not find the reported timestamp");
657         rack_find_high = counter_u64_alloc(M_WAITOK);
658         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
659             SYSCTL_CHILDREN(rack_sysctl_root),
660             OID_AUTO, "findhigh", CTLFLAG_RD,
661             &rack_find_high,
662             "Total number of FINs causing find-high");
663         rack_reorder_seen = counter_u64_alloc(M_WAITOK);
664         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
665             SYSCTL_CHILDREN(rack_sysctl_root),
666             OID_AUTO, "reordering", CTLFLAG_RD,
667             &rack_reorder_seen,
668             "Total number of times we added delay due to reordering");
669         rack_tlp_tot = counter_u64_alloc(M_WAITOK);
670         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
671             SYSCTL_CHILDREN(rack_sysctl_root),
672             OID_AUTO, "tlp_to_total", CTLFLAG_RD,
673             &rack_tlp_tot,
674             "Total number of tail loss probe expirations");
675         rack_tlp_newdata = counter_u64_alloc(M_WAITOK);
676         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
677             SYSCTL_CHILDREN(rack_sysctl_root),
678             OID_AUTO, "tlp_new", CTLFLAG_RD,
679             &rack_tlp_newdata,
680             "Total number of tail loss probe sending new data");
681
682         rack_tlp_retran = counter_u64_alloc(M_WAITOK);
683         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
684             SYSCTL_CHILDREN(rack_sysctl_root),
685             OID_AUTO, "tlp_retran", CTLFLAG_RD,
686             &rack_tlp_retran,
687             "Total number of tail loss probe sending retransmitted data");
688         rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK);
689         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
690             SYSCTL_CHILDREN(rack_sysctl_root),
691             OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD,
692             &rack_tlp_retran_bytes,
693             "Total bytes of tail loss probe sending retransmitted data");
694         rack_tlp_retran_fail = counter_u64_alloc(M_WAITOK);
695         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
696             SYSCTL_CHILDREN(rack_sysctl_root),
697             OID_AUTO, "tlp_retran_fail", CTLFLAG_RD,
698             &rack_tlp_retran_fail,
699             "Total number of tail loss probe sending retransmitted data that failed (wait for t3)");
700         rack_to_tot = counter_u64_alloc(M_WAITOK);
701         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
702             SYSCTL_CHILDREN(rack_sysctl_root),
703             OID_AUTO, "rack_to_tot", CTLFLAG_RD,
704             &rack_to_tot,
705             "Total number of times the rack timeout expired");
706         rack_to_arm_rack = counter_u64_alloc(M_WAITOK);
707         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
708             SYSCTL_CHILDREN(rack_sysctl_root),
709             OID_AUTO, "arm_rack", CTLFLAG_RD,
710             &rack_to_arm_rack,
711             "Total number of times the rack timer was armed");
712         rack_to_arm_tlp = counter_u64_alloc(M_WAITOK);
713         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
714             SYSCTL_CHILDREN(rack_sysctl_root),
715             OID_AUTO, "arm_tlp", CTLFLAG_RD,
716             &rack_to_arm_tlp,
717             "Total number of times the tlp timer was armed");
718         rack_paced_segments = counter_u64_alloc(M_WAITOK);
719         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
720             SYSCTL_CHILDREN(rack_sysctl_root),
721             OID_AUTO, "paced", CTLFLAG_RD,
722             &rack_paced_segments,
723             "Total number of times a segment send caused hptsi");
724         rack_unpaced_segments = counter_u64_alloc(M_WAITOK);
725         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
726             SYSCTL_CHILDREN(rack_sysctl_root),
727             OID_AUTO, "unpaced", CTLFLAG_RD,
728             &rack_unpaced_segments,
729             "Total number of times a segment did not cause hptsi");
730         rack_saw_enobuf = counter_u64_alloc(M_WAITOK);
731         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
732             SYSCTL_CHILDREN(rack_sysctl_root),
733             OID_AUTO, "saw_enobufs", CTLFLAG_RD,
734             &rack_saw_enobuf,
735             "Total number of times a send saw ENOBUF");
736         rack_saw_enetunreach = counter_u64_alloc(M_WAITOK);
737         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
738             SYSCTL_CHILDREN(rack_sysctl_root),
739             OID_AUTO, "saw_enetunreach", CTLFLAG_RD,
740             &rack_saw_enetunreach,
741             "Total number of times a send saw ENETUNREACH");
742         rack_to_alloc = counter_u64_alloc(M_WAITOK);
743         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
744             SYSCTL_CHILDREN(rack_sysctl_root),
745             OID_AUTO, "allocs", CTLFLAG_RD,
746             &rack_to_alloc,
747             "Total allocations of tracking structures");
748         rack_to_alloc_hard = counter_u64_alloc(M_WAITOK);
749         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
750             SYSCTL_CHILDREN(rack_sysctl_root),
751             OID_AUTO, "allochard", CTLFLAG_RD,
752             &rack_to_alloc_hard,
753             "Total allocations done with sleeping the hard way");
754         rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK);
755         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
756             SYSCTL_CHILDREN(rack_sysctl_root),
757             OID_AUTO, "allocemerg", CTLFLAG_RD,
758             &rack_to_alloc_emerg,
759             "Total allocations done from emergency cache");
760         rack_sack_proc_all = counter_u64_alloc(M_WAITOK);
761         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
762             SYSCTL_CHILDREN(rack_sysctl_root),
763             OID_AUTO, "sack_long", CTLFLAG_RD,
764             &rack_sack_proc_all,
765             "Total times we had to walk whole list for sack processing");
766
767         rack_sack_proc_restart = counter_u64_alloc(M_WAITOK);
768         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
769             SYSCTL_CHILDREN(rack_sysctl_root),
770             OID_AUTO, "sack_restart", CTLFLAG_RD,
771             &rack_sack_proc_restart,
772             "Total times we had to walk whole list due to a restart");
773         rack_sack_proc_short = counter_u64_alloc(M_WAITOK);
774         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
775             SYSCTL_CHILDREN(rack_sysctl_root),
776             OID_AUTO, "sack_short", CTLFLAG_RD,
777             &rack_sack_proc_short,
778             "Total times we took shortcut for sack processing");
779         rack_enter_tlp_calc = counter_u64_alloc(M_WAITOK);
780         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
781             SYSCTL_CHILDREN(rack_sysctl_root),
782             OID_AUTO, "tlp_calc_entered", CTLFLAG_RD,
783             &rack_enter_tlp_calc,
784             "Total times we called calc-tlp");
785         rack_used_tlpmethod = counter_u64_alloc(M_WAITOK);
786         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
787             SYSCTL_CHILDREN(rack_sysctl_root),
788             OID_AUTO, "hit_tlp_method", CTLFLAG_RD,
789             &rack_used_tlpmethod,
790             "Total number of times we hit TLP method");
791         rack_used_tlpmethod2 = counter_u64_alloc(M_WAITOK);
792         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
793             SYSCTL_CHILDREN(rack_sysctl_root),
794             OID_AUTO, "hit_tlp_method2", CTLFLAG_RD,
795             &rack_used_tlpmethod2,
796             "Total number of times we hit TLP method 2");
797         rack_runt_sacks = counter_u64_alloc(M_WAITOK);
798         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
799             SYSCTL_CHILDREN(rack_sysctl_root),
800             OID_AUTO, "runtsacks", CTLFLAG_RD,
801             &rack_runt_sacks,
802             "Total number of runt sacks");
803         rack_progress_drops = counter_u64_alloc(M_WAITOK);
804         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
805             SYSCTL_CHILDREN(rack_sysctl_root),
806             OID_AUTO, "prog_drops", CTLFLAG_RD,
807             &rack_progress_drops,
808             "Total number of progress drops");
809         rack_input_idle_reduces = counter_u64_alloc(M_WAITOK);
810         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
811             SYSCTL_CHILDREN(rack_sysctl_root),
812             OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD,
813             &rack_input_idle_reduces,
814             "Total number of idle reductions on input");
815         rack_tlp_does_nada = counter_u64_alloc(M_WAITOK);
816         SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
817             SYSCTL_CHILDREN(rack_sysctl_root),
818             OID_AUTO, "tlp_nada", CTLFLAG_RD,
819             &rack_tlp_does_nada,
820             "Total number of nada tlp calls");
821         COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK);
822         SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
823             OID_AUTO, "outsize", CTLFLAG_RD,
824             rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes");
825         COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK);
826         SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
827             OID_AUTO, "opts", CTLFLAG_RD,
828             rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats");
829         SYSCTL_ADD_PROC(&rack_sysctl_ctx,
830             SYSCTL_CHILDREN(rack_sysctl_root),
831             OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
832             &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters");
833 }
834
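/*
 * Check whether the connection has gone longer than t_maxunacktime ticks
 * without forward progress on the ack front.  Returns 1 (and logs a
 * progress-drop event) if so; the caller is expected to drop the
 * connection.
 */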
835 static inline int32_t
836 rack_progress_timeout_check(struct tcpcb *tp)
837 {
838         if (tp->t_maxunacktime && tp->t_acktime && TSTMP_GT(ticks, tp->t_acktime)) {
839                 if ((ticks - tp->t_acktime) >= tp->t_maxunacktime) {
840                         /*
841                          * There is an assumption that the caller
842                          * will drop the connection so we will
843                          * increment the counters here.
844                          */
845                         struct tcp_rack *rack;
846                         rack = (struct tcp_rack *)tp->t_fb_ptr;
847                         counter_u64_add(rack_progress_drops, 1);
848 #ifdef NETFLIX_STATS
849                         TCPSTAT_INC(tcps_progdrops);
850 #endif
851                         rack_log_progress_event(rack, tp, ticks, PROGRESS_DROP, __LINE__);
852                         return (1);
853                 }
854         }
855         return (0);
856 }
857
858
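/*
 * The rack_log_*() helpers below feed stack-specific records into the TCP
 * black-box log (tcp_log_buf) when logging is enabled on the connection;
 * the flex1..flex8 fields carry per-event values.
 */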
859 static void
860 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which)
861 {
862         if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
863                 union tcp_log_stackspecific log;
864
865                 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
866                 log.u_bbr.flex1 = TICKS_2_MSEC(rack->rc_tp->t_srtt >> TCP_RTT_SHIFT);
867                 log.u_bbr.flex2 = to;
868                 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags;
869                 log.u_bbr.flex4 = slot;
870                 log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot;
871                 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
872                 log.u_bbr.flex8 = which;
873                 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
874                 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
875                 TCP_LOG_EVENT(rack->rc_tp, NULL,
876                     &rack->rc_inp->inp_socket->so_rcv,
877                     &rack->rc_inp->inp_socket->so_snd,
878                     BBR_LOG_TIMERSTAR, 0,
879                     0, &log, false);
880         }
881 }
882
883 static void
884 rack_log_to_event(struct tcp_rack *rack, int32_t to_num)
885 {
886         if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
887                 union tcp_log_stackspecific log;
888
889                 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
890                 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
891                 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
892                 log.u_bbr.flex8 = to_num;
893                 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt;
894                 log.u_bbr.flex2 = rack->rc_rack_rtt;
895                 TCP_LOG_EVENT(rack->rc_tp, NULL,
896                     &rack->rc_inp->inp_socket->so_rcv,
897                     &rack->rc_inp->inp_socket->so_snd,
898                     BBR_LOG_RTO, 0,
899                     0, &log, false);
900         }
901 }
902
903 static void
904 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, int32_t t,
905     uint32_t o_srtt, uint32_t o_var)
906 {
907         if (tp->t_logstate != TCP_LOG_STATE_OFF) {
908                 union tcp_log_stackspecific log;
909
910                 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
911                 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
912                 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
913                 log.u_bbr.flex1 = t;
914                 log.u_bbr.flex2 = o_srtt;
915                 log.u_bbr.flex3 = o_var;
916                 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest;
917                 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest;           
918                 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_rtt_cnt;
919                 log.u_bbr.rttProp = rack->r_ctl.rack_rs.rs_rtt_tot;
920                 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method;
921                 TCP_LOG_EVENT(tp, NULL,
922                     &rack->rc_inp->inp_socket->so_rcv,
923                     &rack->rc_inp->inp_socket->so_snd,
924                     BBR_LOG_BBRRTT, 0,
925                     0, &log, false);
926         }
927 }
928
929 static void
930 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt)
931 {
932         /* 
933          * Log the rtt sample we are
934          * applying to the srtt algorithm in
935          * useconds.
936          */
937         if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
938                 union tcp_log_stackspecific log;
939                 struct timeval tv;
940                 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
941                 /* Convert our ms to a microsecond */
942                 log.u_bbr.flex1 = rtt * 1000;
943                 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
944                 TCP_LOG_EVENTP(rack->rc_tp, NULL,
945                     &rack->rc_inp->inp_socket->so_rcv,
946                     &rack->rc_inp->inp_socket->so_snd,
947                     TCP_LOG_RTT, 0,
948                     0, &log, false, &tv);
949         }
950 }
951
952
953 static inline void
954 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick,  int event, int line)
955 {
956         if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) {
957                 union tcp_log_stackspecific log;
958
959                 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
960                 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
961                 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
962                 log.u_bbr.flex1 = line;
963                 log.u_bbr.flex2 = tick;
964                 log.u_bbr.flex3 = tp->t_maxunacktime;
965                 log.u_bbr.flex4 = tp->t_acktime;
966                 log.u_bbr.flex8 = event;
967                 TCP_LOG_EVENT(tp, NULL,
968                     &rack->rc_inp->inp_socket->so_rcv,
969                     &rack->rc_inp->inp_socket->so_snd,
970                     BBR_LOG_PROGRESS, 0,
971                     0, &log, false);
972         }
973 }
974
975 static void
976 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts)
977 {
978         if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
979                 union tcp_log_stackspecific log;
980
981                 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
982                 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
983                 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
984                 log.u_bbr.flex1 = slot;
985                 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags);
986                 log.u_bbr.flex8 = rack->rc_in_persist;
987                 TCP_LOG_EVENT(rack->rc_tp, NULL,
988                     &rack->rc_inp->inp_socket->so_rcv,
989                     &rack->rc_inp->inp_socket->so_snd,
990                     BBR_LOG_BBRSND, 0,
991                     0, &log, false);
992         }
993 }
994
995 static void
996 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out)
997 {
998         if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
999                 union tcp_log_stackspecific log;
1000                 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
                     log.u_bbr.flex1 = did_out;
1001                 log.u_bbr.flex2 = nxt_pkt;
1002                 log.u_bbr.flex3 = way_out;
1003                 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
1004                 log.u_bbr.flex7 = rack->r_wanted_output;
1005                 log.u_bbr.flex8 = rack->rc_in_persist;
1006                 TCP_LOG_EVENT(rack->rc_tp, NULL,
1007                     &rack->rc_inp->inp_socket->so_rcv,
1008                     &rack->rc_inp->inp_socket->so_snd,
1009                     BBR_LOG_DOSEG_DONE, 0,
1010                     0, &log, false);
1011         }
1012 }
1013
1014
1015 static void
1016 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, uint8_t hpts_calling)
1017 {
1018         if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
1019                 union tcp_log_stackspecific log;
1020
1021                 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
1022                 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
1023                 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
1024                 log.u_bbr.flex1 = slot;
1025                 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags;
1026                 log.u_bbr.flex7 = hpts_calling;
1027                 log.u_bbr.flex8 = rack->rc_in_persist;
1028                 TCP_LOG_EVENT(rack->rc_tp, NULL,
1029                     &rack->rc_inp->inp_socket->so_rcv,
1030                     &rack->rc_inp->inp_socket->so_snd,
1031                     BBR_LOG_JUSTRET, 0,
1032                     tlen, &log, false);
1033         }
1034 }
1035
1036 static void
1037 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line)
1038 {
1039         if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
1040                 union tcp_log_stackspecific log;
1041
1042                 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
1043                 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
1044                 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
1045                 log.u_bbr.flex1 = line;
1046                 log.u_bbr.flex2 = 0;
1047                 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags;
1048                 log.u_bbr.flex4 = 0;
1049                 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
1050                 log.u_bbr.flex8 = hpts_removed;
1051                 TCP_LOG_EVENT(rack->rc_tp, NULL,
1052                     &rack->rc_inp->inp_socket->so_rcv,
1053                     &rack->rc_inp->inp_socket->so_snd,
1054                     BBR_LOG_TIMERCANC, 0,
1055                     0, &log, false);
1056         }
1057 }
1058
1059 static void
1060 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers)
1061 {
1062         if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
1063                 union tcp_log_stackspecific log;
1064
1065                 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
1066                 log.u_bbr.flex1 = timers;
1067                 log.u_bbr.flex2 = ret;
1068                 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp;
1069                 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
1070                 log.u_bbr.flex5 = cts;
1071                 TCP_LOG_EVENT(rack->rc_tp, NULL,
1072                     &rack->rc_inp->inp_socket->so_rcv,
1073                     &rack->rc_inp->inp_socket->so_snd,
1074                     BBR_LOG_TO_PROCESS, 0,
1075                     0, &log, false);
1076         }
1077 }
1078
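/*
 * Tear down everything rack_init_sysctls() allocated: the individual
 * debug counters and the per-size counter arrays.
 */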
1079 static void
1080 rack_counter_destroy()
1081 {
1082         counter_u64_free(rack_badfr);
1083         counter_u64_free(rack_badfr_bytes);
1084         counter_u64_free(rack_rtm_prr_retran);
1085         counter_u64_free(rack_rtm_prr_newdata);
1086         counter_u64_free(rack_timestamp_mismatch);
1087         counter_u64_free(rack_reorder_seen);
1088         counter_u64_free(rack_tlp_tot);
1089         counter_u64_free(rack_tlp_newdata);
1090         counter_u64_free(rack_tlp_retran);
1091         counter_u64_free(rack_tlp_retran_bytes);
1092         counter_u64_free(rack_tlp_retran_fail);
1093         counter_u64_free(rack_to_tot);
1094         counter_u64_free(rack_to_arm_rack);
1095         counter_u64_free(rack_to_arm_tlp);
1096         counter_u64_free(rack_paced_segments);
1097         counter_u64_free(rack_unpaced_segments);
1098         counter_u64_free(rack_saw_enobuf);
1099         counter_u64_free(rack_saw_enetunreach);
1100         counter_u64_free(rack_to_alloc_hard);
1101         counter_u64_free(rack_to_alloc_emerg);
1102         counter_u64_free(rack_sack_proc_all);
1103         counter_u64_free(rack_sack_proc_short);
1104         counter_u64_free(rack_sack_proc_restart);
1105         counter_u64_free(rack_to_alloc);
1106         counter_u64_free(rack_find_high);
1107         counter_u64_free(rack_runt_sacks);
1108         counter_u64_free(rack_enter_tlp_calc);
1109         counter_u64_free(rack_used_tlpmethod);
1110         counter_u64_free(rack_used_tlpmethod2);
1111         counter_u64_free(rack_progress_drops);
1112         counter_u64_free(rack_input_idle_reduces);
1113         counter_u64_free(rack_tlp_does_nada);
1114         COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE);
1115         COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE);
1116 }
1117
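/*
 * Allocate a sendmap tracking entry.  We first try the UMA zone without
 * sleeping; if that fails we fall back to the small per-connection free
 * cache kept on rc_free, and only return NULL once both are exhausted.
 */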
1118 static struct rack_sendmap *
1119 rack_alloc(struct tcp_rack *rack)
1120 {
1121         struct rack_sendmap *rsm;
1122
1123         counter_u64_add(rack_to_alloc, 1);
1124         rack->r_ctl.rc_num_maps_alloced++;
1125         rsm = uma_zalloc(rack_zone, M_NOWAIT);
1126         if (rsm) {
1127                 return (rsm);
1128         }
1129         if (rack->rc_free_cnt) {
1130                 counter_u64_add(rack_to_alloc_emerg, 1);
1131                 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
1132                 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_next);
1133                 rack->rc_free_cnt--;
1134                 return (rsm);
1135         }
1136         return (NULL);
1137 }
1138
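/*
 * Return a sendmap entry, clearing any cached pointers to it first.  If
 * the per-connection free cache is not full, the entry is recycled there
 * instead of being handed back to the UMA zone.
 */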
1139 static void
1140 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm)
1141 {
1142         rack->r_ctl.rc_num_maps_alloced--;
1143         if (rack->r_ctl.rc_tlpsend == rsm)
1144                 rack->r_ctl.rc_tlpsend = NULL;
1145         if (rack->r_ctl.rc_next == rsm)
1146                 rack->r_ctl.rc_next = NULL;
1147         if (rack->r_ctl.rc_sacklast == rsm)
1148                 rack->r_ctl.rc_sacklast = NULL;
1149         if (rack->rc_free_cnt < rack_free_cache) {
1150                 memset(rsm, 0, sizeof(struct rack_sendmap));
1151                 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_next);
1152                 rack->rc_free_cnt++;
1153                 return;
1154         }
1155         uma_zfree(rack_zone, rsm);
1156 }
1157
1158 /*
1159  * CC wrapper hook functions
1160  */
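/*
 * rack_ack_received() feeds an ACK into the congestion-control module:
 * it clamps bytes_this_ack during early recovery, applies ABC accounting,
 * invokes the CC algorithm's ack_received hook, tracks the largest cwnd
 * seen, and enforces the peak-rate ceiling when one is configured.
 */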
1161 static void
1162 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, struct tcphdr *th, uint16_t nsegs,
1163     uint16_t type, int32_t recovery)
1164 {
1165 #ifdef NETFLIX_STATS
1166         int32_t gput;
1167 #endif
1168 #ifdef NETFLIX_CWV
1169         u_long old_cwnd = tp->snd_cwnd;
1170 #endif
1171
1172         INP_WLOCK_ASSERT(tp->t_inpcb);
1173         tp->ccv->nsegs = nsegs;
1174         tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
1175         if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) {
1176                 uint32_t max;
1177
1178                 max = rack->r_ctl.rc_early_recovery_segs * tp->t_maxseg;
1179                 if (tp->ccv->bytes_this_ack > max) {
1180                         tp->ccv->bytes_this_ack = max;
1181                 }
1182         }
1183         if (tp->snd_cwnd <= tp->snd_wnd)
1184                 tp->ccv->flags |= CCF_CWND_LIMITED;
1185         else
1186                 tp->ccv->flags &= ~CCF_CWND_LIMITED;
1187
1188         if (type == CC_ACK) {
1189 #ifdef NETFLIX_STATS
1190                 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF,
1191                     ((int32_t) tp->snd_cwnd) - tp->snd_wnd);
1192                 if ((tp->t_flags & TF_GPUTINPROG) &&
1193                     SEQ_GEQ(th->th_ack, tp->gput_ack)) {
1194                         gput = (((int64_t) (th->th_ack - tp->gput_seq)) << 3) /
1195                             max(1, tcp_ts_getticks() - tp->gput_ts);
1196                         stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT,
1197                             gput);
1198                         /*
1199                          * XXXLAS: This is a temporary hack, and should be
1200                          * chained off VOI_TCP_GPUT when stats(9) grows an
1201                          * API to deal with chained VOIs.
1202                          */
1203                         if (tp->t_stats_gput_prev > 0)
1204                                 stats_voi_update_abs_s32(tp->t_stats,
1205                                     VOI_TCP_GPUT_ND,
1206                                     ((gput - tp->t_stats_gput_prev) * 100) /
1207                                     tp->t_stats_gput_prev);
1208                         tp->t_flags &= ~TF_GPUTINPROG;
1209                         tp->t_stats_gput_prev = gput;
1210
1211                         if (tp->t_maxpeakrate) {
1212                                 /*
1213                                  * We update t_peakrate_thr. This gives us roughly
1214                                  * one update per round trip time.
1215                                  */
1216                                 tcp_update_peakrate_thr(tp);
1217                         }
1218                 }
1219 #endif
1220                 if (tp->snd_cwnd > tp->snd_ssthresh) {
1221                         tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
1222                             nsegs * V_tcp_abc_l_var * tp->t_maxseg);
1223                         if (tp->t_bytes_acked >= tp->snd_cwnd) {
1224                                 tp->t_bytes_acked -= tp->snd_cwnd;
1225                                 tp->ccv->flags |= CCF_ABC_SENTAWND;
1226                         }
1227                 } else {
1228                         tp->ccv->flags &= ~CCF_ABC_SENTAWND;
1229                         tp->t_bytes_acked = 0;
1230                 }
1231         }
1232         if (CC_ALGO(tp)->ack_received != NULL) {
1233                 /* XXXLAS: Find a way to live without this */
1234                 tp->ccv->curack = th->th_ack;
1235                 CC_ALGO(tp)->ack_received(tp->ccv, type);
1236         }
1237 #ifdef NETFLIX_STATS
1238         stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, tp->snd_cwnd);
1239 #endif
1240         if (rack->r_ctl.rc_rack_largest_cwnd < tp->snd_cwnd) {
1241                 rack->r_ctl.rc_rack_largest_cwnd = tp->snd_cwnd;
1242         }
1243 #ifdef NETFLIX_CWV
1244         if (tp->cwv_enabled) {
1245                 /*
1246                  * Per RFC 7661, the behaviour in the non-validated phase is:
1247                  * o A sender determines whether to increase the cwnd based
1248                  *   upon whether it is cwnd-limited (see Section 4.5.3):
1249                  *   * A sender that is cwnd-limited MAY use the standard
1250                  *     TCP method to increase cwnd (i.e., the standard method
1251                  *     permits a TCP sender that fully utilises the cwnd to
1252                  *     increase the cwnd each time it receives an ACK).
1253                  *   * A sender that is not cwnd-limited MUST NOT increase
1254                  *     the cwnd when ACK packets are received in this phase
1255                  *     (i.e., needs to avoid growing the cwnd when it has
1256                  *     not recently sent using the current size of cwnd).
1257                  */
1258                 if ((tp->snd_cwnd > old_cwnd) &&
1259                     (tp->cwv_cwnd_valid == 0) &&
1260                     (!(tp->ccv->flags & CCF_CWND_LIMITED))) {
1261                         tp->snd_cwnd = old_cwnd;
1262                 }
1263                 /* Try to update pipeAck and NCWV state */
1264                 if (TCPS_HAVEESTABLISHED(tp->t_state) &&
1265                     !IN_RECOVERY(tp->t_flags)) {
1266                         uint32_t data = sbavail(&(tp->t_inpcb->inp_socket->so_snd));
1267
1268                         tcp_newcwv_update_pipeack(tp, data);
1269                 }
1270         }
1271 #endif
1272         /* we enforce max peak rate if it is set. */
1273         if (tp->t_peakrate_thr && tp->snd_cwnd > tp->t_peakrate_thr) {
1274                 tp->snd_cwnd = tp->t_peakrate_thr;
1275         }
1276 }
1277
1278 static void
1279 tcp_rack_partialack(struct tcpcb *tp, struct tcphdr *th)
1280 {
1281         struct tcp_rack *rack;
1282
1283         rack = (struct tcp_rack *)tp->t_fb_ptr;
1284         INP_WLOCK_ASSERT(tp->t_inpcb);
1285         if (rack->r_ctl.rc_prr_sndcnt > 0)
1286                 rack->r_wanted_output++;
1287 }
1288
1289 static void
1290 rack_post_recovery(struct tcpcb *tp, struct tcphdr *th)
1291 {
1292         struct tcp_rack *rack;
1293
1294         INP_WLOCK_ASSERT(tp->t_inpcb);
1295         rack = (struct tcp_rack *)tp->t_fb_ptr;
1296         if (CC_ALGO(tp)->post_recovery != NULL) {
1297                 tp->ccv->curack = th->th_ack;
1298                 CC_ALGO(tp)->post_recovery(tp->ccv);
1299         }
1300         /*
1301          * Here we can in theory adjust cwnd to be based on the number of
1302          * losses in the window (rack->r_ctl.rc_loss_count). This is done
1303          * based on the rack_use_proportional flag.
1304          */
1305         if (rack->r_ctl.rc_prop_reduce && rack->r_ctl.rc_prop_rate) {
1306                 int32_t reduce;
1307
1308                 reduce = (rack->r_ctl.rc_loss_count * rack->r_ctl.rc_prop_rate);
1309                 if (reduce > 50) {
1310                         reduce = 50;
1311                 }
1312                 tp->snd_cwnd -= ((reduce * tp->snd_cwnd) / 100);
1313         } else {
1314                 if (tp->snd_cwnd > tp->snd_ssthresh) {
1315                         /* Drop us down to the ssthresh (1/2 cwnd at loss) */
1316                         tp->snd_cwnd = tp->snd_ssthresh;
1317                 }
1318         }
1319         if (rack->r_ctl.rc_prr_sndcnt > 0) {
1320                 /* Suck the next prr cnt back into cwnd */
1321                 tp->snd_cwnd += rack->r_ctl.rc_prr_sndcnt;
1322                 rack->r_ctl.rc_prr_sndcnt = 0;
1323         }
1324         EXIT_RECOVERY(tp->t_flags);
1325
1326
1327 #ifdef NETFLIX_CWV
1328         if (tp->cwv_enabled) {
1329                 if ((tp->cwv_cwnd_valid == 0) &&
1330                     (tp->snd_cwv.in_recovery))
1331                         tcp_newcwv_end_recovery(tp);
1332         }
1333 #endif
1334 }
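/*
 * A rough worked example of the proportional reduction above (the numbers
 * are illustrative only, not taken from this connection): with
 * rc_prop_rate = 10 and rc_loss_count = 3, reduce = 30 and cwnd is cut
 * by 30%.  With rc_loss_count = 7 the product would be 70, but it is
 * clamped to 50, so this path never cuts cwnd by more than half.  If the
 * proportional option is off, cwnd simply drops to ssthresh (the classic
 * halving taken at loss).
 */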
1335
1336 static void
1337 rack_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
1338 {
1339         struct tcp_rack *rack;
1340
1341         INP_WLOCK_ASSERT(tp->t_inpcb);
1342
1343         rack = (struct tcp_rack *)tp->t_fb_ptr;
1344         switch (type) {
1345         case CC_NDUPACK:
1346 /*              rack->r_ctl.rc_ssthresh_set = 1;*/
1347                 if (!IN_FASTRECOVERY(tp->t_flags)) {
1348                         rack->r_ctl.rc_tlp_rtx_out = 0;
1349                         rack->r_ctl.rc_prr_delivered = 0;
1350                         rack->r_ctl.rc_prr_out = 0;
1351                         rack->r_ctl.rc_loss_count = 0;
1352                         rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg;
1353                         rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una;
1354                         tp->snd_recover = tp->snd_max;
1355                         if (tp->t_flags & TF_ECN_PERMIT)
1356                                 tp->t_flags |= TF_ECN_SND_CWR;
1357                 }
1358                 break;
1359         case CC_ECN:
1360                 if (!IN_CONGRECOVERY(tp->t_flags)) {
1361                         TCPSTAT_INC(tcps_ecn_rcwnd);
1362                         tp->snd_recover = tp->snd_max;
1363                         if (tp->t_flags & TF_ECN_PERMIT)
1364                                 tp->t_flags |= TF_ECN_SND_CWR;
1365                 }
1366                 break;
1367         case CC_RTO:
1368                 tp->t_dupacks = 0;
1369                 tp->t_bytes_acked = 0;
1370                 EXIT_RECOVERY(tp->t_flags);
1371                 tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 /
1372                     tp->t_maxseg) * tp->t_maxseg;
1373                 tp->snd_cwnd = tp->t_maxseg;
1374                 break;
1375         case CC_RTO_ERR:
1376                 TCPSTAT_INC(tcps_sndrexmitbad);
1377                 /* RTO was unnecessary, so reset everything. */
1378                 tp->snd_cwnd = tp->snd_cwnd_prev;
1379                 tp->snd_ssthresh = tp->snd_ssthresh_prev;
1380                 tp->snd_recover = tp->snd_recover_prev;
1381                 if (tp->t_flags & TF_WASFRECOVERY)
1382                         ENTER_FASTRECOVERY(tp->t_flags);
1383                 if (tp->t_flags & TF_WASCRECOVERY)
1384                         ENTER_CONGRECOVERY(tp->t_flags);
1385                 tp->snd_nxt = tp->snd_max;
1386                 tp->t_badrxtwin = 0;
1387                 break;
1388         }
1389
1390         if (CC_ALGO(tp)->cong_signal != NULL) {
1391                 if (th != NULL)
1392                         tp->ccv->curack = th->th_ack;
1393                 CC_ALGO(tp)->cong_signal(tp->ccv, type);
1394         }
1395 #ifdef NETFLIX_CWV
1396         if (tp->cwv_enabled) {
1397                 if (tp->snd_cwv.in_recovery == 0 && IN_RECOVERY(tp->t_flags)) {
1398                         tcp_newcwv_enter_recovery(tp);
1399                 }
1400                 if (type == CC_RTO) {
1401                         tcp_newcwv_reset(tp);
1402                 }
1403         }
1404 #endif
1405 }
1406
1407
1408
1409 static inline void
1410 rack_cc_after_idle(struct tcpcb *tp, int reduce_largest)
1411 {
1412         uint32_t i_cwnd;
1413
1414         INP_WLOCK_ASSERT(tp->t_inpcb);
1415
1416 #ifdef NETFLIX_STATS
1417         TCPSTAT_INC(tcps_idle_restarts);
1418         if (tp->t_state == TCPS_ESTABLISHED)
1419                 TCPSTAT_INC(tcps_idle_estrestarts);
1420 #endif
1421         if (CC_ALGO(tp)->after_idle != NULL)
1422                 CC_ALGO(tp)->after_idle(tp->ccv);
1423
1424         if (tp->snd_cwnd == 1)
1425                 i_cwnd = tp->t_maxseg;          /* SYN(-ACK) lost */
1426         else if (V_tcp_initcwnd_segments)
1427                 i_cwnd = min((V_tcp_initcwnd_segments * tp->t_maxseg),
1428                     max(2 * tp->t_maxseg, V_tcp_initcwnd_segments * 1460));
1429         else if (V_tcp_do_rfc3390)
1430                 i_cwnd = min(4 * tp->t_maxseg,
1431                     max(2 * tp->t_maxseg, 4380));
1432         else {
1433                 /* Per RFC5681 Section 3.1 */
1434                 if (tp->t_maxseg > 2190)
1435                         i_cwnd = 2 * tp->t_maxseg;
1436                 else if (tp->t_maxseg > 1095)
1437                         i_cwnd = 3 * tp->t_maxseg;
1438                 else
1439                         i_cwnd = 4 * tp->t_maxseg;
1440         }
1441         if (reduce_largest) {
1442                 /*
1443                  * Do we reduce the largest cwnd to make rack
1444                  * play nicely, hptsi-wise, on restart?
1445                  */
1446                 if (((struct tcp_rack *)tp->t_fb_ptr)->r_ctl.rc_rack_largest_cwnd  > i_cwnd)
1447                         ((struct tcp_rack *)tp->t_fb_ptr)->r_ctl.rc_rack_largest_cwnd = i_cwnd;
1448         }
1449         /*
1450          * Being idle is no different than the initial window. If the cc
1451          * clamps it down below the initial window, raise it to the initial
1452          * window.
1453          */
1454         if (tp->snd_cwnd < i_cwnd) {
1455                 tp->snd_cwnd = i_cwnd;
1456         }
1457 }
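/*
 * A short illustration of the i_cwnd selection above (example values
 * only, not configuration advice): with t_maxseg = 1460 and
 * V_tcp_initcwnd_segments = 10, i_cwnd = min(10 * 1460,
 * max(2 * 1460, 10 * 1460)) = 14600 bytes.  With that sysctl at 0 and
 * RFC 3390 enabled, i_cwnd = min(4 * 1460, max(2 * 1460, 4380)) = 4380.
 * Otherwise the RFC 5681 table applies: 4 * MSS for an MSS up to 1095,
 * 3 * MSS up to 2190, and 2 * MSS above that.
 */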
1458
1459
1460 /*
1461  * Indicate whether this ack should be delayed.  We can delay the ack if
1462  * the following conditions are met:
1463  *      - There is no delayed ack timer in progress.
1464  *      - Our last ack wasn't a 0-sized window. We never want to delay
1465  *        the ack that opens up a 0-sized window.
1466  *      - LRO wasn't used for this segment. We make sure by checking that the
1467  *        segment size is not larger than the MSS.
1468  *      - Delayed acks are enabled or this is a half-synchronized T/TCP
1469  *        connection.
1470  */
1471 #define DELAY_ACK(tp, tlen)                      \
1472         (((tp->t_flags & TF_RXWIN0SENT) == 0) && \
1473         ((tp->t_flags & TF_DELACK) == 0) &&      \
1474         (tlen <= tp->t_maxseg) &&                \
1475         (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN)))
1476
1477 static inline void
1478 rack_calc_rwin(struct socket *so, struct tcpcb *tp)
1479 {
1480         int32_t win;
1481
1482         /*
1483          * Calculate amount of space in receive window, and then do TCP
1484          * input processing. Receive window is amount of space in rcv queue,
1485          * but not less than advertised window.
1486          */
1487         win = sbspace(&so->so_rcv);
1488         if (win < 0)
1489                 win = 0;
1490         tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
1491 }
1492
1493 static void
1494 rack_do_drop(struct mbuf *m, struct tcpcb *tp, int32_t * ti_locked)
1495 {
1496         if (*ti_locked == TI_RLOCKED) {
1497                 INP_INFO_RUNLOCK(&V_tcbinfo);
1498                 *ti_locked = TI_UNLOCKED;
1499         }
1500         /*
1501          * Drop space held by incoming segment and return.
1502          */
1503         if (tp != NULL)
1504                 INP_WUNLOCK(tp->t_inpcb);
1505         if (m)
1506                 m_freem(m);
1507 }
1508
1509 static void
1510 rack_do_dropwithreset(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th, int32_t * ti_locked, int32_t rstreason, int32_t tlen)
1511 {
1512         if (*ti_locked == TI_RLOCKED) {
1513                 INP_INFO_RUNLOCK(&V_tcbinfo);
1514                 *ti_locked = TI_UNLOCKED;
1515         }
1516         if (tp != NULL) {
1517                 tcp_dropwithreset(m, th, tp, tlen, rstreason);
1518                 INP_WUNLOCK(tp->t_inpcb);
1519         } else
1520                 tcp_dropwithreset(m, th, NULL, tlen, rstreason);
1521 }
1522
1523 /*
1524  * The value in ret_val informs the caller
1525  * if we dropped the tcb (and lock) or not.
1526  * 1 = we dropped it, 0 = the TCB is still locked
1527  * and valid.
1528  */
1529 static void
1530 rack_do_dropafterack(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th, int32_t * ti_locked, int32_t thflags, int32_t tlen, int32_t * ret_val)
1531 {
1532         /*
1533          * Generate an ACK dropping incoming segment if it occupies sequence
1534          * space, where the ACK reflects our state.
1535          *
1536          * We can now skip the test for the RST flag since all paths to this
1537          * code happen after packets containing RST have been dropped.
1538          *
1539          * In the SYN-RECEIVED state, don't send an ACK unless the segment
1540          * we received passes the SYN-RECEIVED ACK test. If it fails send a
1541          * RST.  This breaks the loop in the "LAND" DoS attack, and also
1542          * prevents an ACK storm between two listening ports that have been
1543          * sent forged SYN segments, each with the source address of the
1544          * other.
1545          */
1546         struct tcp_rack *rack;
1547
1548         if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
1549             (SEQ_GT(tp->snd_una, th->th_ack) ||
1550             SEQ_GT(th->th_ack, tp->snd_max))) {
1551                 *ret_val = 1;
1552                 rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_RST_OPENPORT, tlen);
1553                 return;
1554         } else
1555                 *ret_val = 0;
1556         if (*ti_locked == TI_RLOCKED) {
1557                 INP_INFO_RUNLOCK(&V_tcbinfo);
1558                 *ti_locked = TI_UNLOCKED;
1559         }
1560         rack = (struct tcp_rack *)tp->t_fb_ptr;
1561         rack->r_wanted_output++;
1562         tp->t_flags |= TF_ACKNOW;
1563         if (m)
1564                 m_freem(m);
1565 }
1566
1567
1568 static int
1569 rack_process_rst(struct mbuf *m, struct tcphdr *th, struct socket *so, struct tcpcb *tp, int32_t * ti_locked)
1570 {
1571         /*
1572          * RFC5961 Section 3.2
1573          *
1574          * - RST drops connection only if SEG.SEQ == RCV.NXT.
1575          * - If RST is in window, we send challenge ACK.
1576          *
1577          * Note: to take into account delayed ACKs, we should test against
1578          * last_ack_sent instead of rcv_nxt. Note 2: we handle special case
1579          * of closed window, not covered by the RFC.
1580          */
1581         int dropped = 0;
1582
1583         if ((SEQ_GEQ(th->th_seq, (tp->last_ack_sent - 1)) &&
1584             SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) ||
1585             (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq)) {
1586
1587                 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
1588                 KASSERT(*ti_locked == TI_RLOCKED,
1589                     ("%s: TH_RST ti_locked %d, th %p tp %p",
1590                     __func__, *ti_locked, th, tp));
1591                 KASSERT(tp->t_state != TCPS_SYN_SENT,
1592                     ("%s: TH_RST for TCPS_SYN_SENT th %p tp %p",
1593                     __func__, th, tp));
1594
1595                 if (V_tcp_insecure_rst ||
1596                     (tp->last_ack_sent == th->th_seq) ||
1597                     (tp->rcv_nxt == th->th_seq) ||
1598                     ((tp->last_ack_sent - 1) == th->th_seq)) {
1599                         TCPSTAT_INC(tcps_drops);
1600                         /* Drop the connection. */
1601                         switch (tp->t_state) {
1602                         case TCPS_SYN_RECEIVED:
1603                                 so->so_error = ECONNREFUSED;
1604                                 goto close;
1605                         case TCPS_ESTABLISHED:
1606                         case TCPS_FIN_WAIT_1:
1607                         case TCPS_FIN_WAIT_2:
1608                         case TCPS_CLOSE_WAIT:
1609                         case TCPS_CLOSING:
1610                         case TCPS_LAST_ACK:
1611                                 so->so_error = ECONNRESET;
1612                 close:
1613                                 tcp_state_change(tp, TCPS_CLOSED);
1614                                 /* FALLTHROUGH */
1615                         default:
1616                                 tp = tcp_close(tp);
1617                         }
1618                         dropped = 1;
1619                         rack_do_drop(m, tp, ti_locked);
1620                 } else {
1621                         TCPSTAT_INC(tcps_badrst);
1622                         /* Send challenge ACK. */
1623                         tcp_respond(tp, mtod(m, void *), th, m,
1624                             tp->rcv_nxt, tp->snd_nxt, TH_ACK);
1625                         tp->last_ack_sent = tp->rcv_nxt;
1626                 }
1627         } else {
1628                 m_freem(m);
1629         }
1630         return (dropped);
1631 }
1632
1633 /*
1634  * The value in ret_val informs the caller
1635  * if we dropped the tcb (and lock) or not.
1636  * 1 = we dropped it, 0 = the TCB is still locked
1637  * and valid.
1638  */
1639 static void
1640 rack_challenge_ack(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, int32_t * ti_locked, int32_t * ret_val)
1641 {
1642         KASSERT(*ti_locked == TI_RLOCKED,
1643             ("tcp_do_segment: TH_SYN ti_locked %d", *ti_locked));
1644         INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
1645
1646         TCPSTAT_INC(tcps_badsyn);
1647         if (V_tcp_insecure_syn &&
1648             SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
1649             SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
1650                 tp = tcp_drop(tp, ECONNRESET);
1651                 *ret_val = 1;
1652                 rack_do_drop(m, tp, ti_locked);
1653         } else {
1654                 /* Send challenge ACK. */
1655                 tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt,
1656                     tp->snd_nxt, TH_ACK);
1657                 tp->last_ack_sent = tp->rcv_nxt;
1658                 m = NULL;
1659                 *ret_val = 0;
1660                 rack_do_drop(m, NULL, ti_locked);
1661         }
1662 }
1663
1664 /*
1665  * rack_ts_check returns 1 if the caller should not proceed. It places
1666  * in ret_val what the caller should return (1/0). A 1 indicates
1667  * that the TCB is unlocked and probably dropped; a 0 indicates the
1668  * TCB is still valid and locked.
1669  */
1670 static int
1671 rack_ts_check(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, int32_t * ti_locked, int32_t tlen, int32_t thflags, int32_t * ret_val)
1672 {
1673
1674         /* Check to see if ts_recent is over 24 days old.  */
1675         if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) {
1676                 /*
1677                  * Invalidate ts_recent.  If this segment updates ts_recent,
1678                  * the age will be reset later and ts_recent will get a
1679                  * valid value.  If it does not, setting ts_recent to zero
1680                  * will at least satisfy the requirement that zero be placed
1681                  * in the timestamp echo reply when ts_recent isn't valid.
1682                  * The age isn't reset until we get a valid ts_recent
1683                  * because we don't want out-of-order segments to be dropped
1684                  * when ts_recent is old.
1685                  */
1686                 tp->ts_recent = 0;
1687         } else {
1688                 TCPSTAT_INC(tcps_rcvduppack);
1689                 TCPSTAT_ADD(tcps_rcvdupbyte, tlen);
1690                 TCPSTAT_INC(tcps_pawsdrop);
1691                 *ret_val = 0;
1692                 if (tlen) {
1693                         rack_do_dropafterack(m, tp, th, ti_locked, thflags, tlen, ret_val);
1694                 } else {
1695                         rack_do_drop(m, NULL, ti_locked);
1696                 }
1697                 return (1);
1698         }
1699         return (0);
1700 }
1701
1702 /*
1703  * rack_drop_checks returns 1 if the caller should not proceed. It places
1704  * in ret_val what the caller should return (1/0). A 1 indicates
1705  * that the TCB is unlocked and probably dropped; a 0 indicates the
1706  * TCB is still valid and locked.
1707  */
1708 static int
1709 rack_drop_checks(struct tcpopt *to, struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, int32_t * tlenp, int32_t * ti_locked, int32_t * thf, int32_t * drop_hdrlen, int32_t * ret_val)
1710 {
1711         int32_t todrop;
1712         int32_t thflags;
1713         int32_t tlen;
1714
1715         thflags = *thf;
1716         tlen = *tlenp;
1717         todrop = tp->rcv_nxt - th->th_seq;
1718         if (todrop > 0) {
1719                 if (thflags & TH_SYN) {
1720                         thflags &= ~TH_SYN;
1721                         th->th_seq++;
1722                         if (th->th_urp > 1)
1723                                 th->th_urp--;
1724                         else
1725                                 thflags &= ~TH_URG;
1726                         todrop--;
1727                 }
1728                 /*
1729                  * Following if statement from Stevens, vol. 2, p. 960.
1730                  */
1731                 if (todrop > tlen
1732                     || (todrop == tlen && (thflags & TH_FIN) == 0)) {
1733                         /*
1734                          * Any valid FIN must be to the left of the window.
1735                          * At this point the FIN must be a duplicate or out
1736                          * of sequence; drop it.
1737                          */
1738                         thflags &= ~TH_FIN;
1739                         /*
1740                          * Send an ACK to resynchronize and drop any data.
1741                          * But keep on processing for RST or ACK.
1742                          */
1743                         tp->t_flags |= TF_ACKNOW;
1744                         todrop = tlen;
1745                         TCPSTAT_INC(tcps_rcvduppack);
1746                         TCPSTAT_ADD(tcps_rcvdupbyte, todrop);
1747                 } else {
1748                         TCPSTAT_INC(tcps_rcvpartduppack);
1749                         TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop);
1750                 }
1751                 *drop_hdrlen += todrop; /* drop from the top afterwards */
1752                 th->th_seq += todrop;
1753                 tlen -= todrop;
1754                 if (th->th_urp > todrop)
1755                         th->th_urp -= todrop;
1756                 else {
1757                         thflags &= ~TH_URG;
1758                         th->th_urp = 0;
1759                 }
1760         }
1761         /*
1762          * If segment ends after window, drop trailing data (and PUSH and
1763          * FIN); if nothing left, just ACK.
1764          */
1765         todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
1766         if (todrop > 0) {
1767                 TCPSTAT_INC(tcps_rcvpackafterwin);
1768                 if (todrop >= tlen) {
1769                         TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen);
1770                         /*
1771                          * If window is closed can only take segments at
1772                          * window edge, and have to drop data and PUSH from
1773                          * incoming segments.  Continue processing, but
1774                          * remember to ack.  Otherwise, drop segment and
1775                          * ack.
1776                          */
1777                         if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
1778                                 tp->t_flags |= TF_ACKNOW;
1779                                 TCPSTAT_INC(tcps_rcvwinprobe);
1780                         } else {
1781                                 rack_do_dropafterack(m, tp, th, ti_locked, thflags, tlen, ret_val);
1782                                 return (1);
1783                         }
1784                 } else
1785                         TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
1786                 m_adj(m, -todrop);
1787                 tlen -= todrop;
1788                 thflags &= ~(TH_PUSH | TH_FIN);
1789         }
1790         *thf = thflags;
1791         *tlenp = tlen;
1792         return (0);
1793 }
1794
1795 static struct rack_sendmap *
1796 rack_find_lowest_rsm(struct tcp_rack *rack)
1797 {
1798         struct rack_sendmap *rsm;
1799
1800         /*
1801          * Walk the time-ordered transmit list looking for an rsm that is
1802          * not acked. This will be the one that was sent the longest time
1803          * ago that is still outstanding.
1804          */
1805         TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
1806                 if (rsm->r_flags & RACK_ACKED) {
1807                         continue;
1808                 }
1809                 goto finish;
1810         }
1811 finish:
1812         return (rsm);
1813 }
1814
1815 static struct rack_sendmap *
1816 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm)
1817 {
1818         struct rack_sendmap *prsm;
1819
1820         /*
1821          * Walk the sequence-ordered list backward until we arrive at
1822          * the highest seq not acked. In theory, when this is called, it
1823          * should be the last segment (but it was not, hence the walk).
1824          */
1825         counter_u64_add(rack_find_high, 1);
1826         prsm = rsm;
1827         TAILQ_FOREACH_REVERSE_FROM(prsm, &rack->r_ctl.rc_map, rack_head, r_next) {
1828                 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) {
1829                         continue;
1830                 }
1831                 return (prsm);
1832         }
1833         return (NULL);
1834 }
1835
1836
1837 static uint32_t
1838 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts)
1839 {
1840         int32_t lro;
1841         uint32_t thresh;
1842
1843         /*
1844          * lro is the flag we use to determine if we have seen reordering.
1845          * If it gets set we have seen reordering. The reorder logic
1846          * works in one of two ways:
1847          *
1848          * If reorder-fade is configured, then we track the last time we saw
1849          * re-ordering occur. If enough time has passed since then, we no
1850          * longer consider reordering to be occurring.
1851          *
1852          * Or if reorder-fade is 0, then once we see reordering we consider
1853          * the connection to always be subject to reordering and just set lro
1854          * to 1.
1855          *
1856          * In the end if lro is non-zero we add the extra time for
1857          * reordering in.
1858          */
1859         if (srtt == 0)
1860                 srtt = 1;
1861         if (rack->r_ctl.rc_reorder_ts) {
1862                 if (rack->r_ctl.rc_reorder_fade) {
1863                         if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) {
1864                                 lro = cts - rack->r_ctl.rc_reorder_ts;
1865                                 if (lro == 0) {
1866                                         /*
1867                                          * No time has passed since the last
1868                                          * reorder; mark it as reordering.
1869                                          */
1870                                         lro = 1;
1871                                 }
1872                         } else {
1873                                 /* Negative time? */
1874                                 lro = 0;
1875                         }
1876                         if (lro > rack->r_ctl.rc_reorder_fade) {
1877                                 /* Turn off reordering seen too */
1878                                 rack->r_ctl.rc_reorder_ts = 0;
1879                                 lro = 0;
1880                         }
1881                 } else {
1882                         /* Reordering does not fade */
1883                         lro = 1;
1884                 }
1885         } else {
1886                 lro = 0;
1887         }
1888         thresh = srtt + rack->r_ctl.rc_pkt_delay;
1889         if (lro) {
1890                 /* If the shift is not set, you get 1/4 rtt */
1891                 if (rack->r_ctl.rc_reorder_shift)
1892                         thresh += (srtt >> rack->r_ctl.rc_reorder_shift);
1893                 else
1894                         thresh += (srtt >> 2);
1895         } else {
1896                 thresh += 1;
1897         }
1898         /* We don't let the rack timeout be above an RTO */
1899         
1900         if (thresh > TICKS_2_MSEC(rack->rc_tp->t_rxtcur)) {
1901                 thresh = TICKS_2_MSEC(rack->rc_tp->t_rxtcur);
1902         }
1903         /* And we don't want it above the RTO max either */
1904         if (thresh > rack_rto_max) {
1905                 thresh = rack_rto_max;
1906         }
1907         return (thresh);
1908 }
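/*
 * A rough worked example of the rack threshold above (illustrative values
 * only): with srtt = 40 ms, rc_pkt_delay = 1 ms and no reordering seen,
 * thresh = 40 + 1 + 1 = 42 ms.  If reordering has been observed and
 * rc_reorder_shift = 2, thresh = 40 + 1 + (40 >> 2) = 51 ms.  The result
 * is then clamped so it never exceeds the current RTO or rack_rto_max.
 */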
1909
1910 static uint32_t
1911 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack,
1912                      struct rack_sendmap *rsm, uint32_t srtt)
1913 {
1914         struct rack_sendmap *prsm;
1915         uint32_t thresh, len;
1916         int maxseg;
1917         
1918         if (srtt == 0)
1919                 srtt = 1;
1920         if (rack->r_ctl.rc_tlp_threshold)
1921                 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold);
1922         else
1923                 thresh = (srtt * 2);
1924         
1925         /* Get the previous sent packet, if any  */
1926         maxseg = tcp_maxseg(tp);
1927         counter_u64_add(rack_enter_tlp_calc, 1);
1928         len = rsm->r_end - rsm->r_start;
1929         if (rack->rack_tlp_threshold_use == TLP_USE_ID) {
1930                 /* Exactly like the ID */
1931                 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= maxseg) {
1932                         uint32_t alt_thresh;
1933                         /*
1934                          * Compensate for delayed-ack with the d-ack time.
1935                          */
1936                         counter_u64_add(rack_used_tlpmethod, 1);
1937                         alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
1938                         if (alt_thresh > thresh)
1939                                 thresh = alt_thresh;
1940                 }
1941         } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) {
1942                 /* 2.1 behavior */
1943                 prsm = TAILQ_PREV(rsm, rack_head, r_tnext);
1944                 if (prsm && (len <= maxseg)) {
1945                         /*
1946                          * Two packets outstanding, thresh should be (2*srtt) +
1947                          * possible inter-packet delay (if any).
1948                          */
1949                         uint32_t inter_gap = 0;
1950                         int idx, nidx;
1951                         
1952                         counter_u64_add(rack_used_tlpmethod, 1);
1953                         idx = rsm->r_rtr_cnt - 1;
1954                         nidx = prsm->r_rtr_cnt - 1;
1955                         if (TSTMP_GEQ(rsm->r_tim_lastsent[idx], prsm->r_tim_lastsent[nidx])) {
1956                                 /* Yes it was sent later (or at the same time) */
1957                                 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx];
1958                         }
1959                         thresh += inter_gap;
1960                 } else  if (len <= maxseg) {
1961                         /*
1962                          * Possibly compensate for delayed-ack.
1963                          */
1964                         uint32_t alt_thresh;
1965                         
1966                         counter_u64_add(rack_used_tlpmethod2, 1);
1967                         alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
1968                         if (alt_thresh > thresh)
1969                                 thresh = alt_thresh;
1970                 }
1971         } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) {
1972                 /* 2.2 behavior */
1973                 if (len <= maxseg) {
1974                         uint32_t alt_thresh;
1975                         /*
1976                          * Compensate for delayed-ack with the d-ack time.
1977                          */
1978                         counter_u64_add(rack_used_tlpmethod, 1);
1979                         alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
1980                         if (alt_thresh > thresh)
1981                                 thresh = alt_thresh;
1982                 }
1983         }
1984         /* Not above an RTO */
1985         if (thresh > TICKS_2_MSEC(tp->t_rxtcur)) {
1986                 thresh = TICKS_2_MSEC(tp->t_rxtcur);
1987         }
1988         /* Not above a RTO max */
1989         if (thresh > rack_rto_max) {
1990                 thresh = rack_rto_max;
1991         }
1992         /* Apply user supplied min TLP */
1993         if (thresh < rack_tlp_min) {
1994                 thresh = rack_tlp_min;
1995         }
1996         return (thresh);
1997 }
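/*
 * A rough worked example of the TLP threshold above (illustrative values
 * only): with srtt = 40 ms and rc_tlp_threshold = 2, the base thresh is
 * 40 + 40 / 2 = 60 ms; with that knob at 0 it is simply 2 * srtt = 80 ms.
 * When only a small amount of data is outstanding, the delayed-ack
 * compensation raises thresh to at least srtt + srtt / 2 +
 * rack_delayed_ack_time, and the final value is clamped between
 * rack_tlp_min and the RTO / rack_rto_max ceilings.
 */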
1998
1999 static struct rack_sendmap *
2000 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused)
2001 {
2002         /*
2003          * Check to see whether we need to fall into recovery. We will
2004          * need to do so if our oldest transmission is past the time by
2005          * which we should have had an ack.
2006          */
2007         struct tcp_rack *rack;
2008         struct rack_sendmap *rsm;
2009         int32_t idx;
2010         uint32_t srtt_cur, srtt, thresh;
2011
2012         rack = (struct tcp_rack *)tp->t_fb_ptr;
2013         if (TAILQ_EMPTY(&rack->r_ctl.rc_map)) {
2014                 return (NULL);
2015         }
2016         srtt_cur = tp->t_srtt >> TCP_RTT_SHIFT;
2017         srtt = TICKS_2_MSEC(srtt_cur);
2018         if (rack->rc_rack_rtt && (srtt > rack->rc_rack_rtt))
2019                 srtt = rack->rc_rack_rtt;
2020
2021         rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
2022         if (rsm == NULL)
2023                 return (NULL);
2024
2025         if (rsm->r_flags & RACK_ACKED) {
2026                 rsm = rack_find_lowest_rsm(rack);
2027                 if (rsm == NULL)
2028                         return (NULL);
2029         }
2030         idx = rsm->r_rtr_cnt - 1;
2031         thresh = rack_calc_thresh_rack(rack, srtt, tsused);
2032         if (tsused < rsm->r_tim_lastsent[idx]) {
2033                 return (NULL);
2034         }
2035         if ((tsused - rsm->r_tim_lastsent[idx]) < thresh) {
2036                 return (NULL);
2037         }
2038         /* Ok if we reach here we are over-due */
2039         rack->r_ctl.rc_rsm_start = rsm->r_start;
2040         rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
2041         rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
2042         rack_cong_signal(tp, NULL, CC_NDUPACK);
2043         return (rsm);
2044 }
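/*
 * In other words (illustrative numbers only): if the oldest unacked rsm
 * was last sent at t = 1000 ms, the rack threshold works out to 50 ms,
 * and tsused = 1060 ms, then 1060 - 1000 >= 50, the send is overdue and
 * we signal CC_NDUPACK to enter recovery; at tsused = 1030 ms we would
 * instead return NULL and keep waiting.
 */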
2045
2046 static uint32_t
2047 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack)
2048 {
2049         int32_t t;
2050         int32_t tt;
2051         uint32_t ret_val;
2052
2053         t = TICKS_2_MSEC((tp->t_srtt >> TCP_RTT_SHIFT) + ((tp->t_rttvar * 4) >> TCP_RTT_SHIFT));
2054         TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift],
2055             tcp_persmin, tcp_persmax);
2056         if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
2057                 tp->t_rxtshift++;
2058         rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT;
2059         ret_val = (uint32_t)tt;
2060         return (ret_val);
2061 }
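/*
 * A sketch of the persist backoff above (illustrative values, assuming
 * the usual exponential tcp_backoff table): with srtt = 200 ms and
 * rttvar = 50 ms, t = 200 + 4 * 50 = 400 ms; at t_rxtshift = 3,
 * tcp_backoff[3] = 8 gives tt = 3200 ms, which TCPT_RANGESET then clamps
 * between tcp_persmin and tcp_persmax before it is returned.
 */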
2062
2063 static uint32_t
2064 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2065 {
2066         /*
2067          * Start the FR timer; we do this based on getting the first one in
2068          * the rc_tmap. Note that if it is NULL we must stop the timer. In all
2069          * events we need to stop the running timer (if it is running) before
2070          * starting the new one.
2071          */
2072         uint32_t thresh, exp, to, srtt, time_since_sent;
2073         uint32_t srtt_cur;
2074         int32_t idx;
2075         int32_t is_tlp_timer = 0;
2076         struct rack_sendmap *rsm;
2077         
2078         if (rack->t_timers_stopped) {
2079                 /* All timers have been stopped none are to run */
2080                 return (0);
2081         }
2082         if (rack->rc_in_persist) {
2083                 /* We can't start any timer in persists */
2084                 return (rack_get_persists_timer_val(tp, rack));
2085         }
2086         if (tp->t_state < TCPS_ESTABLISHED)
2087                 goto activate_rxt;
2088         rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
2089         if (rsm == NULL) {
2090                 /* Nothing on the send map */
2091 activate_rxt:
2092                 if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) {
2093                         rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT;
2094                         to = TICKS_2_MSEC(tp->t_rxtcur);
2095                         if (to == 0)
2096                                 to = 1;
2097                         return (to);
2098                 }
2099                 return (0);
2100         }
2101         if (rsm->r_flags & RACK_ACKED) {
2102                 rsm = rack_find_lowest_rsm(rack);
2103                 if (rsm == NULL) {
2104                         /* No lowest? */
2105                         goto activate_rxt;
2106                 }
2107         }
2108         /* Decide between a RACK timer and a TLP timer */
2109         if (rsm->r_flags & RACK_SACK_PASSED) {
2110                 if ((tp->t_flags & TF_SENTFIN) &&
2111                     ((tp->snd_max - tp->snd_una) == 1) &&
2112                     (rsm->r_flags & RACK_HAS_FIN)) {
2113                         /*
2114                          * We don't start a rack timer if all we have is a
2115                          * FIN outstanding.
2116                          */
2117                         goto activate_rxt;
2118                 }
2119                 if (tp->t_srtt) {
2120                         srtt_cur = (tp->t_srtt >> TCP_RTT_SHIFT);
2121                         srtt = TICKS_2_MSEC(srtt_cur);
2122                 } else
2123                         srtt = RACK_INITIAL_RTO;
2124
2125                 thresh = rack_calc_thresh_rack(rack, srtt, cts);
2126                 idx = rsm->r_rtr_cnt - 1;
2127                 exp = rsm->r_tim_lastsent[idx] + thresh;
2128                 if (SEQ_GEQ(exp, cts)) {
2129                         to = exp - cts;
2130                         if (to < rack->r_ctl.rc_min_to) {
2131                                 to = rack->r_ctl.rc_min_to;
2132                         }
2133                 } else {
2134                         to = rack->r_ctl.rc_min_to;
2135                 }
2136         } else {
2137                 /* Ok we need to do a TLP not RACK */
2138                 if ((rack->rc_tlp_in_progress != 0) ||
2139                     (rack->r_ctl.rc_tlp_rtx_out != 0)) {
2140                         /*
2141                          * The previous send was a TLP or a tlp_rtx is in
2142                          * process.
2143                          */
2144                         goto activate_rxt;
2145                 }
2146                 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
2147                 if (rsm == NULL) {
2148                         /* We found no rsm to TLP with. */
2149                         goto activate_rxt;
2150                 }
2151                 if (rsm->r_flags & RACK_HAS_FIN) {
2152                         /* If it's a FIN we don't do TLP */
2153                         rsm = NULL;
2154                         goto activate_rxt;
2155                 }
2156                 idx = rsm->r_rtr_cnt - 1;
2157                 if (TSTMP_GT(cts,  rsm->r_tim_lastsent[idx])) 
2158                         time_since_sent = cts - rsm->r_tim_lastsent[idx];
2159                 else
2160                         time_since_sent = 0;
2161                 is_tlp_timer = 1;
2162                 if (tp->t_srtt) {
2163                         srtt_cur = (tp->t_srtt >> TCP_RTT_SHIFT);
2164                         srtt = TICKS_2_MSEC(srtt_cur);
2165                 } else
2166                         srtt = RACK_INITIAL_RTO;
2167                 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt);
2168                 if (thresh > time_since_sent)
2169                         to = thresh - time_since_sent;
2170                 else
2171                         to = rack->r_ctl.rc_min_to;
2172                 if (to > TCPTV_REXMTMAX) {
2173                         /*
2174                          * If the TLP time works out to be larger than the max
2175                          * RTO, let's not do TLP; just RTO.
2176                          */
2177                         goto activate_rxt;
2178                 }
2179                 if (rsm->r_start != rack->r_ctl.rc_last_tlp_seq) {
2180                         /*
2181                          * The tail is no longer the last one I did a probe
2182                          * on
2183                          */
2184                         rack->r_ctl.rc_tlp_seg_send_cnt = 0;
2185                         rack->r_ctl.rc_last_tlp_seq = rsm->r_start;
2186                 }
2187         }
2188         if (is_tlp_timer == 0) {
2189                 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK;
2190         } else {
2191                 if ((rack->r_ctl.rc_tlp_send_cnt > rack_tlp_max_resend) ||
2192                     (rack->r_ctl.rc_tlp_seg_send_cnt > rack_tlp_max_resend)) {
2193                         /*
2194                          * We have exceeded how many times we can retransmit the
2195                          * current TLP, switch to the RTO timer.
2196                          */
2197                         goto activate_rxt;
2198                 } else {
2199                         rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP;
2200                 }
2201         }
2202         if (to == 0)
2203                 to = 1;
2204         return (to);
2205 }
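/*
 * To summarize the selection above with an illustrative case: if the
 * oldest outstanding rsm has been SACK-passed, a RACK timer is armed for
 * (last send time + rack threshold) - now, floored at rc_min_to.  If
 * nothing has been SACK-passed and no TLP (or TLP retransmit) is already
 * in flight, a TLP timer is armed for the TLP threshold minus the time
 * since the tail was sent.  Anything else, including a TLP that would
 * exceed TCPTV_REXMTMAX or too many TLP resends, falls back to the plain
 * retransmit (RXT) timer.
 */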
2206
2207 static void
2208 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2209 {
2210         if (rack->rc_in_persist == 0) {
2211                 if (((tp->t_flags & TF_SENTFIN) == 0) &&
2212                     (tp->snd_max - tp->snd_una) >= sbavail(&rack->rc_inp->inp_socket->so_snd))
2213                         /* Must need to send more data to enter persist */
2214                         return;
2215                 rack->r_ctl.rc_went_idle_time = cts;
2216                 rack_timer_cancel(tp, rack, cts, __LINE__);
2217                 tp->t_rxtshift = 0;
2218                 rack->rc_in_persist = 1;
2219         }
2220 }
2221
2222 static void
2223 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack)
2224 {
2225         if (rack->rc_inp->inp_in_hpts)  {
2226                 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
2227                 rack->r_ctl.rc_hpts_flags  = 0;
2228         }
2229         rack->rc_in_persist = 0;
2230         rack->r_ctl.rc_went_idle_time = 0;
2231         tp->t_flags &= ~TF_FORCEDATA;
2232         tp->t_rxtshift = 0;
2233 }
2234
2235 static void
2236 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, int32_t line,
2237     int32_t slot, uint32_t tot_len_this_send, int32_t frm_out_sbavail)
2238 {
2239         struct inpcb *inp;
2240         uint32_t delayed_ack = 0;
2241         uint32_t hpts_timeout;
2242         uint8_t stopped;
2243         uint32_t left = 0;
2244
2245         inp = tp->t_inpcb;
2246         if (inp->inp_in_hpts) {
2247                 /* A previous call is already set up */
2248                 return;
2249         }
2250         if (tp->t_state == TCPS_CLOSED) {
2251                 return;
2252         }
2253         stopped = rack->rc_tmr_stopped;
2254         if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) {
2255                 left = rack->r_ctl.rc_timer_exp - cts;
2256         }
2257         rack->r_ctl.rc_timer_exp = 0;
2258         if (rack->rc_inp->inp_in_hpts == 0) {
2259                 rack->r_ctl.rc_hpts_flags = 0;
2260         } 
2261         if (slot) {
2262                 /* We are hptsi too */
2263                 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT;
2264         } else if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
2265                 /* 
2266                  * We are still left on the hpts; when the t-o goes off
2267                  * it will be for output.
2268                  */
2269                 if (TSTMP_GT(cts, rack->r_ctl.rc_last_output_to))
2270                         slot = cts - rack->r_ctl.rc_last_output_to;
2271                 else
2272                         slot = 1;
2273         }
2274         if ((tp->snd_wnd == 0) && TCPS_HAVEESTABLISHED(tp->t_state)) {
2275                 /* No send window.. we must enter persist */
2276                 rack_enter_persist(tp, rack, cts);
2277         } else if ((frm_out_sbavail &&
2278                     (frm_out_sbavail > (tp->snd_max - tp->snd_una)) &&
2279                     (tp->snd_wnd < tp->t_maxseg)) &&
2280             TCPS_HAVEESTABLISHED(tp->t_state)) {
2281                 /*
2282                  * If we have no window or we can't send a segment (and have
2283                  * data to send; we cheat here and frm_out_sbavail is
2284                  * passed in with the sbavail(sb) only from bbr_output) and
2285                  * we are established, then we must enter persist (if not
2286                  * already in persist).
2287                  */
2288                 rack_enter_persist(tp, rack, cts);
2289         }
2290         hpts_timeout = rack_timer_start(tp, rack, cts);
2291         if (tp->t_flags & TF_DELACK) {
2292                 delayed_ack = tcp_delacktime;
2293                 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK;
2294         }
2295         if (delayed_ack && ((hpts_timeout == 0) ||
2296                             (delayed_ack < hpts_timeout)))
2297                 hpts_timeout = delayed_ack;
2298         else 
2299                 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
2300         /*
2301          * If no timers are going to run and we will fall off the hptsi
2302          * wheel, we resort to a keep-alive timer if it is configured.
2303          */
2304         if ((hpts_timeout == 0) &&
2305             (slot == 0)) {
2306                 if ((tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
2307                     (tp->t_state <= TCPS_CLOSING)) {
2308                         /*
2309                          * Ok we have no timer (persists, rack, tlp, rxt  or
2310                          * del-ack), we don't have segments being paced. So
2311                          * all that is left is the keepalive timer.
2312                          */
2313                         if (TCPS_HAVEESTABLISHED(tp->t_state)) {
2314                                 /* Get the established keep-alive time */
2315                                 hpts_timeout = TP_KEEPIDLE(tp);
2316                         } else {
2317                                 /* Get the initial setup keep-alive time */
2318                                 hpts_timeout = TP_KEEPINIT(tp);
2319                         }
2320                         rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP;
2321                 }
2322         }
2323         if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) ==
2324             (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) {
2325                 /*
2326                  * RACK, TLP, persist and RXT timers are all restartable
2327                  * based on input actions, i.e. we received a packet (ack
2328                  * or sack) and that changes things (the rwnd, or snd_una, etc.).
2329                  * Thus we can restart them with a new value. For
2330                  * keep-alive and delayed ack, we keep track of what was left
2331                  * and restart the timer with the smaller value.
2332                  */
2333                 if (left < hpts_timeout)
2334                         hpts_timeout = left;
2335         }
2336         if (hpts_timeout) {
2337                 /*
2338                  * Hack alert: for now we can't time-out over 2,147,483
2339                  * seconds (a bit more than 596 hours), which is probably ok
2340                  * :).
2341                  */
2342                 if (hpts_timeout > 0x7ffffffe)
2343                         hpts_timeout = 0x7ffffffe;
2344                 rack->r_ctl.rc_timer_exp = cts + hpts_timeout;
2345         }
2346         if (slot) {
2347                 rack->r_ctl.rc_last_output_to = cts + slot;
2348                 if ((hpts_timeout == 0) || (hpts_timeout > slot)) {
2349                         if (rack->rc_inp->inp_in_hpts == 0)
2350                                 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(slot));
2351                         rack_log_to_start(rack, cts, hpts_timeout, slot, 1);
2352                 } else {
2353                         /*
2354                          * Arrange for the hpts to kick back in after the
2355                          * t-o if the t-o does not cause a send.
2356                          */
2357                         if (rack->rc_inp->inp_in_hpts == 0)
2358                                 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(hpts_timeout));
2359                         rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
2360                 }
2361         } else if (hpts_timeout) {
2362                 if (rack->rc_inp->inp_in_hpts == 0)
2363                         tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(hpts_timeout));
2364                 rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
2365         } else {
2366                 /* No timer starting */
2367 #ifdef INVARIANTS
2368                 if (SEQ_GT(tp->snd_max, tp->snd_una)) {
2369                         panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?",
2370                             tp, rack, tot_len_this_send, cts, slot, hpts_timeout);
2371                 }
2372 #endif
2373         }
2374         rack->rc_tmr_stopped = 0;
2375         if (slot)
2376                 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, cts);
2377 }
2378
2379 /*
2380  * RACK Timer, here we simply do logging and housekeeping.
2381  * The normal rack_output() function will call the
2382  * appropriate thing to check if we need to do a RACK retransmit.
2383  * We return 1, saying don't proceed with rack_output, only
2384  * when all timers have been stopped (destroyed PCB?).
2385  */
2386 static int
2387 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2388 {
2389         /*
2390          * This timer simply provides an internal trigger to send out data.
2391          * The check_recovery_mode call will see if there are needed
2392          * retransmissions, if so we will enter fast-recovery. The output
2393          * call may or may not do the same thing depending on sysctl
2394          * settings.
2395          */
2396         struct rack_sendmap *rsm;
2397         int32_t recovery;
2398
2399         if (tp->t_timers->tt_flags & TT_STOPPED) {
2400                 return (1);
2401         }
2402         if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
2403                 /* Its not time yet */
2404                 return (0);
2405         }
2406         rack_log_to_event(rack, RACK_TO_FRM_RACK);
2407         recovery = IN_RECOVERY(tp->t_flags);
2408         counter_u64_add(rack_to_tot, 1);
2409         if (rack->r_state && (rack->r_state != tp->t_state))
2410                 rack_set_state(tp, rack);
2411         rsm = rack_check_recovery_mode(tp, cts);
2412         if (rsm) {
2413                 uint32_t rtt;
2414
2415                 rtt = rack->rc_rack_rtt;
2416                 if (rtt == 0)
2417                         rtt = 1;
2418                 if ((recovery == 0) &&
2419                     (rack->r_ctl.rc_prr_sndcnt < tp->t_maxseg)) {
2420                         /*
2421                          * The rack-timeout that enters us into recovery
2422                          * will force out one MSS and set us up so that we
2423                          * can do one more send in 2*rtt (transitioning the
2424                          * rack timeout into a rack-tlp).
2425                          */
2426                         rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg;
2427                 } else if ((rack->r_ctl.rc_prr_sndcnt < tp->t_maxseg) &&
2428                     ((rsm->r_end - rsm->r_start) > rack->r_ctl.rc_prr_sndcnt)) {
2429                         /*
2430                          * When a rack timer goes off, we have to send at
2431                          * least one segment. They will be paced a min of 1ms
2432                          * apart via the next rack timer (or further
2433                          * if the rack timer dictates it).
2434                          */
2435                         rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg;
2436                 }
2437         } else {
2438                 /* This is a case that should happen rarely if ever */
2439                 counter_u64_add(rack_tlp_does_nada, 1);
2440 #ifdef TCP_BLACKBOX
2441                 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
2442 #endif
2443                 rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
2444         }
2445         rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK;
2446         return (0);
2447 }
2448
2449 /*
2450  * TLP Timer, here we simply setup what segment we want to
2451  * have the TLP expire on, the normal rack_output() will then
2452  * send it out.
2453  *
2454  * We return 1, saying don't proceed with rack_output, only
2455  * when all timers have been stopped (destroyed PCB?).
2456  */
2457 static int
2458 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2459 {
2460         /*
2461          * Tail Loss Probe.
2462          */
2463         struct rack_sendmap *rsm = NULL;
2464         struct socket *so;
2465         uint32_t amm, old_prr_snd = 0;
2466         uint32_t out, avail;
2467
2468         if (tp->t_timers->tt_flags & TT_STOPPED) {
2469                 return (1);
2470         }
2471         if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
2472                 /* Its not time yet */
2473                 return (0);
2474         }
2475         if (rack_progress_timeout_check(tp)) {
2476                 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
2477                 return (1);
2478         }
2479         /*
2480          * A TLP timer has expired. We have been idle for 2 rtts. So we now
2481          * need to figure out how to force a full MSS segment out.
2482          */
2483         rack_log_to_event(rack, RACK_TO_FRM_TLP);
2484         counter_u64_add(rack_tlp_tot, 1);
2485         if (rack->r_state && (rack->r_state != tp->t_state))
2486                 rack_set_state(tp, rack);
2487         so = tp->t_inpcb->inp_socket;
2488         avail = sbavail(&so->so_snd);
2489         out = tp->snd_max - tp->snd_una;
2490         rack->rc_timer_up = 1;
2491         /*
2492          * If we are in recovery we can jazz out a segment if new data is
2493          * present simply by setting rc_prr_sndcnt to a segment.
2494          */
2495         if ((avail > out) &&
2496             ((rack_always_send_oldest == 0) || (TAILQ_EMPTY(&rack->r_ctl.rc_tmap)))) {
2497                 /* New data is available */
2498                 amm = avail - out;
2499                 if (amm > tp->t_maxseg) {
2500                         amm = tp->t_maxseg;
2501                 } else if ((amm < tp->t_maxseg) && ((tp->t_flags & TF_NODELAY) == 0)) {
2502                         /* not enough to fill an MTU and no-delay is off */
2503                         goto need_retran;
2504                 }
2505                 if (IN_RECOVERY(tp->t_flags)) {
2506                         /* Unlikely */
2507                         old_prr_snd = rack->r_ctl.rc_prr_sndcnt;
2508                         if (out + amm <= tp->snd_wnd)
2509                                 rack->r_ctl.rc_prr_sndcnt = amm;
2510                         else
2511                                 goto need_retran;
2512                 } else {
2513                         /* Set the send-new override */
2514                         if (out + amm <= tp->snd_wnd)
2515                                 rack->r_ctl.rc_tlp_new_data = amm;
2516                         else
2517                                 goto need_retran;
2518                 }
2519                 rack->r_ctl.rc_tlp_seg_send_cnt = 0;
2520                 rack->r_ctl.rc_last_tlp_seq = tp->snd_max;
2521                 rack->r_ctl.rc_tlpsend = NULL;
2522                 counter_u64_add(rack_tlp_newdata, 1);
2523                 goto send;
2524         }
2525 need_retran:
2526         /*
2527          * Ok we need to arrange the last un-acked segment to be re-sent, or
2528          * optionally the first un-acked segment.
2529          */
2530         if (rack_always_send_oldest)
2531                 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
2532         else {
2533                 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_map, rack_sendmap, r_next);
2534                 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) {
2535                         rsm = rack_find_high_nonack(rack, rsm);
2536                 }
2537         }
2538         if (rsm == NULL) {
2539                 counter_u64_add(rack_tlp_does_nada, 1);
2540 #ifdef TCP_BLACKBOX
2541                 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
2542 #endif
2543                 goto out;
2544         }
2545         if ((rsm->r_end - rsm->r_start) > tp->t_maxseg) {
2546                 /*
2547                  * We need to split this, the last segment, in two.
2548                  */
2549                 int32_t idx;
2550                 struct rack_sendmap *nrsm;
2551
2552                 nrsm = rack_alloc(rack);
2553                 if (nrsm == NULL) {
2554                         /*
2555                          * No memory to split, we will just exit and punt
2556                          * off to the RXT timer.
2557                          */
2558                         counter_u64_add(rack_tlp_does_nada, 1);
2559                         goto out;
2560                 }
2561                 nrsm->r_start = (rsm->r_end - tp->t_maxseg);
2562                 nrsm->r_end = rsm->r_end;
2563                 nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
2564                 nrsm->r_flags = rsm->r_flags;
2565                 nrsm->r_sndcnt = rsm->r_sndcnt;
2566                 nrsm->r_rtr_bytes = 0;
2567                 rsm->r_end = nrsm->r_start;
2568                 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
2569                         nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
2570                 }
2571                 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_map, rsm, nrsm, r_next);
2572                 if (rsm->r_in_tmap) {
2573                         TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
2574                         nrsm->r_in_tmap = 1;
2575                 }
2576                 rsm->r_flags &= (~RACK_HAS_FIN);
2577                 rsm = nrsm;
2578         }
2579         rack->r_ctl.rc_tlpsend = rsm;
2580         rack->r_ctl.rc_tlp_rtx_out = 1;
2581         if (rsm->r_start == rack->r_ctl.rc_last_tlp_seq) {
2582                 rack->r_ctl.rc_tlp_seg_send_cnt++;
2583                 tp->t_rxtshift++;
2584         } else {
2585                 rack->r_ctl.rc_last_tlp_seq = rsm->r_start;
2586                 rack->r_ctl.rc_tlp_seg_send_cnt = 1;
2587         }
2588 send:
2589         rack->r_ctl.rc_tlp_send_cnt++;
2590         if (rack->r_ctl.rc_tlp_send_cnt > rack_tlp_max_resend) {
2591                 /*
2592                  * Can't [re]transmit a segment more than the maximum
2593                  * number of times without hearing from the peer. We need
2594                  * the retransmit timer to take over.
2595                  */
2596 restore:
2597                 rack->r_ctl.rc_tlpsend = NULL;
2598                 if (rsm)
2599                         rsm->r_flags &= ~RACK_TLP;
2600                 rack->r_ctl.rc_prr_sndcnt = old_prr_snd;
2601                 counter_u64_add(rack_tlp_retran_fail, 1);
2602                 goto out;
2603         } else if (rsm) {
2604                 rsm->r_flags |= RACK_TLP;
2605         }
2606         if (rsm && (rsm->r_start == rack->r_ctl.rc_last_tlp_seq) &&
2607             (rack->r_ctl.rc_tlp_seg_send_cnt > rack_tlp_max_resend)) {
2608                 /*
2609                  * We don't want to send a single segment more than the max
2610                  * either.
2611                  */
2612                 goto restore;
2613         }
2614         rack->r_timer_override = 1;
2615         rack->r_tlp_running = 1;
2616         rack->rc_tlp_in_progress = 1;
2617         rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
2618         return (0);
2619 out:
2620         rack->rc_timer_up = 0;
2621         rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
2622         return (0);
2623 }
2624
2625 /*
2626  * Delayed ack timer, here we simply need to set the
2627  * ACK_NOW flag and remove the DELACK flag. From there
2628  * the output routine will send the ack out.
2629  *
2630  * We only return 1, saying don't proceed, if all timers
2631  * are stopped (destroyed PCB?).
2632  */
2633 static int
2634 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2635 {
2636         if (tp->t_timers->tt_flags & TT_STOPPED) {
2637                 return (1);
2638         }
2639         rack_log_to_event(rack, RACK_TO_FRM_DELACK);
2640         tp->t_flags &= ~TF_DELACK;
2641         tp->t_flags |= TF_ACKNOW;
2642         TCPSTAT_INC(tcps_delack);
2643         rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
2644         return (0);
2645 }
2646
2647 /*
2648  * Persist timer, here we simply need to set the
2649  * FORCE-DATA flag; the output routine will then send
2650  * the one byte probe.
2651  *
2652  * We only return 1, saying don't proceed, if all timers
2653  * are stopped (destroyed PCB?).
2654  */
2655 static int
2656 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2657 {
2658         struct inpcb *inp;
2659         int32_t retval = 0;
2660
2661         inp = tp->t_inpcb;
2662
2663         if (tp->t_timers->tt_flags & TT_STOPPED) {
2664                 return (1);
2665         }
2666         if (rack->rc_in_persist == 0)
2667                 return (0);
2668         if (rack_progress_timeout_check(tp)) {
2669                 tcp_set_inp_to_drop(inp, ETIMEDOUT);
2670                 return (1);
2671         }
2672         KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
2673         /*
2674          * Persistence timer into zero window. Force a byte to be output, if
2675          * possible.
2676          */
2677         TCPSTAT_INC(tcps_persisttimeo);
2678         /*
2679          * Hack: if the peer is dead/unreachable, we do not time out if the
2680          * window is closed.  After a full backoff, drop the connection if
2681          * the idle time (no responses to probes) reaches the maximum
2682          * backoff that we would use if retransmitting.
2683          */
2684         if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
2685             (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
2686             ticks - tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
2687                 TCPSTAT_INC(tcps_persistdrop);
2688                 retval = 1;
2689                 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
2690                 goto out;
2691         }
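             /* If nothing is left queued and everything is acked, we can exit persist. */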
2692         if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) &&
2693             tp->snd_una == tp->snd_max)
2694                 rack_exit_persist(tp, rack);
2695         rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT;
2696         /*
2697          * If the user has closed the socket then drop a persisting
2698          * connection after a much reduced timeout.
2699          */
2700         if (tp->t_state > TCPS_CLOSE_WAIT &&
2701             (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) {
2702                 retval = 1;
2703                 TCPSTAT_INC(tcps_persistdrop);
2704                 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
2705                 goto out;
2706         }
2707         tp->t_flags |= TF_FORCEDATA;
2708 out:
2709         rack_log_to_event(rack, RACK_TO_FRM_PERSIST);
2710         return (retval);
2711 }
2712
2713 /*
2714  * If a keepalive goes off, we had no other timers
2715  * happening. We always return 1 here since this
2716  * routine either drops the connection or sends out
2717  * a probe segment the peer is expected to respond to.
2718  */
2719 static int
2720 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2721 {
2722         struct tcptemp *t_template;
2723         struct inpcb *inp;
2724
2725         if (tp->t_timers->tt_flags & TT_STOPPED) {
2726                 return (1);
2727         }
2728         rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP;
2729         inp = tp->t_inpcb;
2730         rack_log_to_event(rack, RACK_TO_FRM_KEEP);
2731         /*
2732          * Keep-alive timer went off; send something or drop connection if
2733          * idle for too long.
2734          */
2735         TCPSTAT_INC(tcps_keeptimeo);
2736         if (tp->t_state < TCPS_ESTABLISHED)
2737                 goto dropit;
2738         if ((tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
2739             tp->t_state <= TCPS_CLOSING) {
2740                 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
2741                         goto dropit;
2742                 /*
2743                  * Send a packet designed to force a response if the peer is
2744                  * up and reachable: either an ACK if the connection is
2745                  * still alive, or an RST if the peer has closed the
2746                  * connection due to timeout or reboot. Using sequence
2747                  * number tp->snd_una-1 causes the transmitted zero-length
2748                  * segment to lie outside the receive window; by the
2749                  * protocol spec, this requires the correspondent TCP to
2750                  * respond.
2751                  */
2752                 TCPSTAT_INC(tcps_keepprobe);
2753                 t_template = tcpip_maketemplate(inp);
2754                 if (t_template) {
2755                         tcp_respond(tp, t_template->tt_ipgen,
2756                             &t_template->tt_t, (struct mbuf *)NULL,
2757                             tp->rcv_nxt, tp->snd_una - 1, 0);
2758                         free(t_template, M_TEMP);
2759                 }
2760         }
2761         rack_start_hpts_timer(rack, tp, cts, __LINE__, 0, 0, 0);
2762         return (1);
2763 dropit:
2764         TCPSTAT_INC(tcps_keepdrops);
2765         tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT);
2766         return (1);
2767 }
2768
2769 /*
2770  * Retransmit helper function, clear all the ack
2771  * flags and take care of important bookkeeping.
2772  */
2773 static void
2774 rack_remxt_tmr(struct tcpcb *tp)
2775 {
2776         /*
2777          * The retransmit timer went off, all sack'd blocks must be
2778          * un-acked.
2779          */
2780         struct rack_sendmap *rsm, *trsm = NULL;
2781         struct tcp_rack *rack;
2782         int32_t cnt = 0;
2783
2784         rack = (struct tcp_rack *)tp->t_fb_ptr;
2785         rack_timer_cancel(tp, rack, tcp_ts_getticks(), __LINE__);
2786         rack_log_to_event(rack, RACK_TO_FRM_TMR);
2787         if (rack->r_state && (rack->r_state != tp->t_state))
2788                 rack_set_state(tp, rack);
2789         /*
2790          * Ideally we would like to be able to
2791          * mark SACK-PASS on anything not acked here.
2792          * However, if we do that we would burst out
2793          * all that data 1ms apart. This would be unwise,
2794          * so for now we will just let the normal rxt timer
2795          * and tlp timer take care of it.
2796          */
2797         TAILQ_FOREACH(rsm, &rack->r_ctl.rc_map, r_next) {
2798                 if (rsm->r_flags & RACK_ACKED) {
2799                         cnt++;
2800                         rsm->r_sndcnt = 0;
2801                         if (rsm->r_in_tmap == 0) {
2802                                 /* We must re-add it back to the tlist */
2803                                 if (trsm == NULL) {
2804                                         TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
2805                                 } else {
2806                                         TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext);
2807                                 }
2808                                 rsm->r_in_tmap = 1;
2809                                 trsm = rsm;
2810                         }
2811                 }
2812                 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS);
2813         }
2814         /* Clear the count (we just un-acked them) */
2815         rack->r_ctl.rc_sacked = 0;
2816         /* Clear the tlp rtx mark */
2817         rack->r_ctl.rc_tlp_rtx_out = 0;
2818         rack->r_ctl.rc_tlp_seg_send_cnt = 0;
2819         rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_map);
2820         /* Setup so we send one segment */
2821         if (rack->r_ctl.rc_prr_sndcnt < tp->t_maxseg)
2822                 rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg;
2823         rack->r_timer_override = 1;
2824 }
2825
2826 /*
2827  * Re-transmit timeout! If we drop the PCB we will return 1, otherwise
2828  * we will setup to retransmit the lowest seq number outstanding.
2829  */
2830 static int
2831 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
2832 {
2833         int32_t rexmt;
2834         struct inpcb *inp;
2835         int32_t retval = 0;
2836
2837         inp = tp->t_inpcb;
2838         if (tp->t_timers->tt_flags & TT_STOPPED) {
2839                 return (1);
2840         }
2841         if (rack_progress_timeout_check(tp)) {
2842                 tcp_set_inp_to_drop(inp, ETIMEDOUT);
2843                 return (1);
2844         }
2845         rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT;
2846         if (TCPS_HAVEESTABLISHED(tp->t_state) &&
2847             (tp->snd_una == tp->snd_max)) {
2848                 /* Nothing outstanding .. nothing to do */
2849                 return (0);
2850         }
2851         /*
2852          * Retransmission timer went off.  Message has not been acked within
2853          * retransmit interval.  Back off to a longer retransmit interval
2854          * and retransmit one segment.
2855          */
2856         if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
2857                 tp->t_rxtshift = TCP_MAXRXTSHIFT;
2858                 TCPSTAT_INC(tcps_timeoutdrop);
2859                 retval = 1;
2860                 tcp_set_inp_to_drop(rack->rc_inp,
2861                     (tp->t_softerror ? (uint16_t) tp->t_softerror : ETIMEDOUT));
2862                 goto out;
2863         }
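             /*
              * Mark all previously SACKed data as un-acked and arrange to
              * resend from the lowest outstanding sequence (rack_remxt_tmr()).
              */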
2864         rack_remxt_tmr(tp);
2865         if (tp->t_state == TCPS_SYN_SENT) {
2866                 /*
2867                  * If the SYN was retransmitted, indicate CWND to be limited
2868                  * to 1 segment in cc_conn_init().
2869                  */
2870                 tp->snd_cwnd = 1;
2871         } else if (tp->t_rxtshift == 1) {
2872                 /*
2873                  * first retransmit; record ssthresh and cwnd so they can be
2874                  * recovered if this turns out to be a "bad" retransmit. A
2875                  * retransmit is considered "bad" if an ACK for this segment
2876                  * is received within RTT/2 interval; the assumption here is
2877                  * that the ACK was already in flight.  See "On Estimating
2878                  * End-to-End Network Path Properties" by Allman and Paxson
2879                  * for more details.
2880                  */
2881                 tp->snd_cwnd_prev = tp->snd_cwnd;
2882                 tp->snd_ssthresh_prev = tp->snd_ssthresh;
2883                 tp->snd_recover_prev = tp->snd_recover;
2884                 if (IN_FASTRECOVERY(tp->t_flags))
2885                         tp->t_flags |= TF_WASFRECOVERY;
2886                 else
2887                         tp->t_flags &= ~TF_WASFRECOVERY;
2888                 if (IN_CONGRECOVERY(tp->t_flags))
2889                         tp->t_flags |= TF_WASCRECOVERY;
2890                 else
2891                         tp->t_flags &= ~TF_WASCRECOVERY;
2892                 tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
2893                 tp->t_flags |= TF_PREVVALID;
2894         } else
2895                 tp->t_flags &= ~TF_PREVVALID;
2896         TCPSTAT_INC(tcps_rexmttimeo);
2897         if ((tp->t_state == TCPS_SYN_SENT) ||
2898             (tp->t_state == TCPS_SYN_RECEIVED))
2899                 rexmt = MSEC_2_TICKS(RACK_INITIAL_RTO * tcp_syn_backoff[tp->t_rxtshift]);
2900         else
2901                 rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
2902         TCPT_RANGESET(tp->t_rxtcur, rexmt,
2903            max(MSEC_2_TICKS(rack_rto_min), rexmt),
2904            MSEC_2_TICKS(rack_rto_max));
2905         /*
2906          * We enter the path for PLMTUD if the connection is established or
2907          * in FIN_WAIT_1 state; the reason for the latter is that if the
2908          * amount of data we send is very small, we could send it in a couple
2909          * of packets and proceed straight to FIN. In that case we won't
2910          * catch the ESTABLISHED state.
2911          */
2912         if (V_tcp_pmtud_blackhole_detect && (((tp->t_state == TCPS_ESTABLISHED))
2913             || (tp->t_state == TCPS_FIN_WAIT_1))) {
2914 #ifdef INET6
2915                 int32_t isipv6;
2916 #endif
2917
2918                 /*
2919                  * The idea here is that each stage of the mtu probe (usually
2920                  * 1448 -> 1188 -> 524) should be given 2 chances to recover
2921                  * before further clamping down. 'tp->t_rxtshift % 2 == 0'
2922                  * should take care of that.
2923                  */
2924                 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) ==
2925                     (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) &&
2926                     (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 &&
2927                     tp->t_rxtshift % 2 == 0)) {
2928                         /*
2929                          * Enter Path MTU Black-hole Detection mechanism:
2930                          * - Disable Path MTU Discovery (IP "DF" bit).
2931                          * - Reduce MTU to a lower value than what we
2932                          *   negotiated with the peer.
2933                          */
2934                         if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) {
2935                                 /* Record that we may have found a black hole. */
2936                                 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
2937                                 /* Keep track of previous MSS. */
2938                                 tp->t_pmtud_saved_maxseg = tp->t_maxseg;
2939                         }
2940
2941                         /*
2942                          * Reduce the MSS to blackhole value or to the
2943                          * default in an attempt to retransmit.
2944                          */
2945 #ifdef INET6
2946                         isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? 1 : 0;
2947                         if (isipv6 &&
2948                             tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) {
2949                                 /* Use the sysctl tuneable blackhole MSS. */
2950                                 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss;
2951                                 TCPSTAT_INC(tcps_pmtud_blackhole_activated);
2952                         } else if (isipv6) {
2953                                 /* Use the default MSS. */
2954                                 tp->t_maxseg = V_tcp_v6mssdflt;
2955                                 /*
2956                                  * Disable Path MTU Discovery when we switch
2957                                  * to minmss.
2958                                  */
2959                                 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
2960                                 TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
2961                         }
2962 #endif
2963 #if defined(INET6) && defined(INET)
2964                         else
2965 #endif
2966 #ifdef INET
2967                         if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) {
2968                                 /* Use the sysctl tuneable blackhole MSS. */
2969                                 tp->t_maxseg = V_tcp_pmtud_blackhole_mss;
2970                                 TCPSTAT_INC(tcps_pmtud_blackhole_activated);
2971                         } else {
2972                                 /* Use the default MSS. */
2973                                 tp->t_maxseg = V_tcp_mssdflt;
2974                                 /*
2975                                  * Disable Path MTU Discovery when we switch
2976                                  * to minmss.
2977                                  */
2978                                 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
2979                                 TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
2980                         }
2981 #endif
2982                 } else {
2983                         /*
2984                          * If further retransmissions are still unsuccessful
2985                          * with a lowered MTU, maybe this isn't a blackhole
2986                          * and we restore the previous MSS and blackhole
2987                          * detection flags. The limit '6' is determined by
2988                          * giving each probe stage (1448, 1188, 524) 2
2989                          * chances to recover.
2990                          */
2991                         if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
2992                             (tp->t_rxtshift >= 6)) {
2993                                 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
2994                                 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
2995                                 tp->t_maxseg = tp->t_pmtud_saved_maxseg;
2996                                 TCPSTAT_INC(tcps_pmtud_blackhole_failed);
2997                         }
2998                 }
2999         }
3000         /*
3001          * Disable RFC1323 and SACK if we haven't got any response to our
3002          * third SYN to work-around some broken terminal servers (most of
3003          * which have hopefully been retired) that have bad VJ header
3004          * compression code which trashes TCP segments containing
3005          * unknown-to-them TCP options.
3006          */
3007         if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) &&
3008             (tp->t_rxtshift == 3))
3009                 tp->t_flags &= ~(TF_REQ_SCALE | TF_REQ_TSTMP | TF_SACK_PERMIT);
3010         /*
3011          * If we backed off this far, our srtt estimate is probably bogus.
3012          * Clobber it so we'll take the next rtt measurement as our srtt;
3013          * move the current srtt into rttvar to keep the current retransmit
3014          * times until then.
3015          */
3016         if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
3017 #ifdef INET6
3018                 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
3019                         in6_losing(tp->t_inpcb);
3020                 else
3021 #endif
3022                         in_losing(tp->t_inpcb);
3023                 tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
3024                 tp->t_srtt = 0;
3025         }
3026         if (rack_use_sack_filter)
3027                 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
3028         tp->snd_recover = tp->snd_max;
3029         tp->t_flags |= TF_ACKNOW;
3030         tp->t_rtttime = 0;
3031         rack_cong_signal(tp, NULL, CC_RTO);
3032 out:
3033         return (retval);
3034 }
3035
3036 static int
3037 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling)
3038 {
3039         int32_t ret = 0;
3040         int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK);
3041
3042         if (timers == 0) {
3043                 return (0);
3044         }
3045         if (tp->t_state == TCPS_LISTEN) {
3046                 /* no timers on listen sockets */
3047                 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)
3048                         return (0);
3049                 return (1);
3050         }
3051         if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
3052                 uint32_t left;
3053
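                     /* The negative ret values below are only used for timer-processing logging. */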
3054                 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
3055                         ret = -1;
3056                         rack_log_to_processing(rack, cts, ret, 0);
3057                         return (0);
3058                 }
3059                 if (hpts_calling == 0) {
3060                         ret = -2;
3061                         rack_log_to_processing(rack, cts, ret, 0);
3062                         return (0);
3063                 }
3064                 /*
3065                  * Ok our timer went off early and we are not paced; false
3066                  * alarm, go back to sleep.
3067                  */
3068                 ret = -3;
3069                 left = rack->r_ctl.rc_timer_exp - cts;
3070                 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(left));
3071                 rack_log_to_processing(rack, cts, ret, left);
3072                 rack->rc_last_pto_set = 0;
3073                 return (1);
3074         }
3075         rack->rc_tmr_stopped = 0;
3076         rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK;
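             /*
              * Only one expired timer type is handled per call; the order of
              * the checks below also sets the priority (delack first).
              */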
3077         if (timers & PACE_TMR_DELACK) {
3078                 ret = rack_timeout_delack(tp, rack, cts);
3079         } else if (timers & PACE_TMR_RACK) {
3080                 ret = rack_timeout_rack(tp, rack, cts);
3081         } else if (timers & PACE_TMR_TLP) {
3082                 ret = rack_timeout_tlp(tp, rack, cts);
3083         } else if (timers & PACE_TMR_RXT) {
3084                 ret = rack_timeout_rxt(tp, rack, cts);
3085         } else if (timers & PACE_TMR_PERSIT) {
3086                 ret = rack_timeout_persist(tp, rack, cts);
3087         } else if (timers & PACE_TMR_KEEP) {
3088                 ret = rack_timeout_keepalive(tp, rack, cts);
3089         }
3090         rack_log_to_processing(rack, cts, ret, timers);
3091         return (ret);
3092 }
3093
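     /*
      * Cancel any pending rack timer and, when no paced output remains,
      * pull the connection off the hpts wheel as well.
      */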
3094 static void
3095 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line)
3096 {
3097         uint8_t hpts_removed = 0;
3098
3099         if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
3100             TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) {
3101                 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
3102                 hpts_removed = 1;
3103         }
3104         if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
3105                 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
3106                 if (rack->rc_inp->inp_in_hpts &&
3107                     ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) {
3108                         /*
3109                          * Canceling timers when we have no output being
3110                          * paced. We also must remove ourselves from the
3111                          * hpts.
3112                          */
3113                         tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
3114                         hpts_removed = 1;
3115                 }
3116                 rack_log_to_cancel(rack, hpts_removed, line);
3117                 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK);
3118         }
3119 }
3120
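     /*
      * The per-timer stop/activate/active hooks below are stubs; rack
      * tracks its timers through the PACE_TMR flags and hpts instead.
      */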
3121 static void
3122 rack_timer_stop(struct tcpcb *tp, uint32_t timer_type)
3123 {
3124         return;
3125 }
3126
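     /* Record that all timers have been stopped for this connection. */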
3127 static int
3128 rack_stopall(struct tcpcb *tp)
3129 {
3130         struct tcp_rack *rack;
3131         rack = (struct tcp_rack *)tp->t_fb_ptr;
3132         rack->t_timers_stopped = 1;
3133         return (0);
3134 }
3135
3136 static void
3137 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, uint32_t delta)
3138 {
3139         return;
3140 }
3141
3142 static int
3143 rack_timer_active(struct tcpcb *tp, uint32_t timer_type)
3144 {
3145         return (0);
3146 }
3147
3148 static void
3149 rack_stop_all_timers(struct tcpcb *tp)
3150 {
3151         struct tcp_rack *rack;
3152
3153         /*
3154          * Ensure no timers are running.
3155          */
3156         if (tcp_timer_active(tp, TT_PERSIST)) {
3157                 /* We enter in persists, set the flag appropriately */
3158                 rack = (struct tcp_rack *)tp->t_fb_ptr;
3159                 rack->rc_in_persist = 1;
3160         }
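             /* Suspend the default stack's timers; rack paces everything through hpts. */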
3161         tcp_timer_suspend(tp, TT_PERSIST);
3162         tcp_timer_suspend(tp, TT_REXMT);
3163         tcp_timer_suspend(tp, TT_KEEP);
3164         tcp_timer_suspend(tp, TT_DELACK);
3165 }
3166
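     /*
      * Note a (re)transmission of an existing sendmap entry: bump its
      * transmit counts, record the send time and move it to the tail of
      * the transmit queue (tmap).
      */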
3167 static void
3168 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
3169     struct rack_sendmap *rsm, uint32_t ts)
3170 {
3171         int32_t idx;
3172
3173         rsm->r_rtr_cnt++;
3174         rsm->r_sndcnt++;
3175         if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) {
3176                 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS;
3177                 rsm->r_flags |= RACK_OVERMAX;
3178         }
3179         if ((rsm->r_rtr_cnt > 1) && (rack->r_tlp_running == 0)) {
3180                 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start);
3181                 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start);
3182         }
3183         idx = rsm->r_rtr_cnt - 1;
3184         rsm->r_tim_lastsent[idx] = ts;
3185         if (rsm->r_flags & RACK_ACKED) {
3186                 /* Probably MTU discovery messing with us */
3187                 rsm->r_flags &= ~RACK_ACKED;
3188                 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
3189         }
3190         if (rsm->r_in_tmap) {
3191                 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
3192         }
3193         TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
3194         rsm->r_in_tmap = 1;
3195         if (rsm->r_flags & RACK_SACK_PASSED) {
3196                 /* We have retransmitted due to the SACK pass */
3197                 rsm->r_flags &= ~RACK_SACK_PASSED;
3198                 rsm->r_flags |= RACK_WAS_SACKPASS;
3199         }
3200         /* Update memory for next rtr */
3201         rack->r_ctl.rc_next = TAILQ_NEXT(rsm, r_next);
3202 }
3203
3204
3205 static uint32_t
3206 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
3207     struct rack_sendmap *rsm, uint32_t ts, int32_t * lenp)
3208 {
3209         /*
3210          * We (re-)transmitted starting at rsm->r_start for some length
3211          * (possibly ending before r_end).
3212          */
3213         struct rack_sendmap *nrsm;
3214         uint32_t c_end;
3215         int32_t len;
3216         int32_t idx;
3217
3218         len = *lenp;
3219         c_end = rsm->r_start + len;
3220         if (SEQ_GEQ(c_end, rsm->r_end)) {
3221                 /*
3222                  * We retransmitted the whole piece, or more than the whole
3223                  * piece, slopping over into the next rsm.
3224                  */
3225                 rack_update_rsm(tp, rack, rsm, ts);
3226                 if (c_end == rsm->r_end) {
3227                         *lenp = 0;
3228                         return (0);
3229                 } else {
3230                         int32_t act_len;
3231
3232                         /* Hangs over the end; return what's left */
3233                         act_len = rsm->r_end - rsm->r_start;
3234                         *lenp = (len - act_len);
3235                         return (rsm->r_end);
3236                 }
3237                 /* We don't get out of this block. */
3238         }
3239         /*
3240          * Here we retransmitted less than the whole thing which means we
3241          * have to split this into what was transmitted and what was not.
3242          */
3243         nrsm = rack_alloc(rack);
3244         if (nrsm == NULL) {
3245                 /*
3246                  * We can't get memory, so lets not proceed.
3247                  */
3248                 *lenp = 0;
3249                 return (0);
3250         }
3251         /*
3252          * So here we are going to take the original rsm and make it what we
3253          * retransmitted. nrsm will be the tail portion we did not
3254          * retransmit. For example say the chunk was 1, 11 (10 bytes). And
3255          * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to
3256          * 1, 6 and the new piece will be 6, 11.
3257          */
3258         nrsm->r_start = c_end;
3259         nrsm->r_end = rsm->r_end;
3260         nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
3261         nrsm->r_flags = rsm->r_flags;
3262         nrsm->r_sndcnt = rsm->r_sndcnt;
3263         nrsm->r_rtr_bytes = 0;
3264         rsm->r_end = c_end;
3265         for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
3266                 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
3267         }
3268         TAILQ_INSERT_AFTER(&rack->r_ctl.rc_map, rsm, nrsm, r_next);
3269         if (rsm->r_in_tmap) {
3270                 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
3271                 nrsm->r_in_tmap = 1;
3272         }
3273         rsm->r_flags &= (~RACK_HAS_FIN);
3274         rack_update_rsm(tp, rack, rsm, ts);
3275         *lenp = 0;
3276         return (0);
3277 }
3278
3279
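     /*
      * Record a transmission in the sendmap: new data gets a fresh entry,
      * while a retransmission updates (and possibly splits) the existing
      * entry or entries that cover the sequence range sent.
      */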
3280 static void
3281 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
3282     uint32_t seq_out, uint8_t th_flags, int32_t err, uint32_t ts,
3283     uint8_t pass, struct rack_sendmap *hintrsm)
3284 {
3285         struct tcp_rack *rack;
3286         struct rack_sendmap *rsm, *nrsm;
3287         register uint32_t snd_max, snd_una;
3288         int32_t idx;
3289
3290         /*
3291          * Add to the RACK log of packets in flight or retransmitted. If
3292          * there is a TS option we will use the TS echoed, if not we will
3293          * grab a TS.
3294          *
3295          * Retransmissions will increment the count and move the ts to its
3296          * proper place. Note that if options do not include TS's then we
3297          * won't be able to effectively use the ACK for an RTT on a retran.
3298          *
3299          * Notes about r_start and r_end. Let's consider a send starting at
3300          * sequence 1 for 10 bytes. In such an example the r_start would be
3301          * 1 (starting sequence) but the r_end would be r_start+len i.e. 11.
3302          * This means that r_end is actually the first sequence for the next
3303          * slot (11).
3304          *
3305          */
3306         /*
3307          * If err is set what do we do XXXrrs? should we not add the thing?
3308          * -- i.e. return if err != 0 or should we pretend we sent it? --
3309          * i.e. proceed with add ** do this for now.
3310          */
3311         INP_WLOCK_ASSERT(tp->t_inpcb);
3312         if (err)
3313                 /*
3314                  * We don't log errors -- we could but snd_max does not
3315                  * advance in this case either.
3316                  */
3317                 return;
3318
3319         if (th_flags & TH_RST) {
3320                 /*
3321                  * We don't log resets and we return immediately from
3322                  * sending
3323                  */
3324                 return;
3325         }
3326         rack = (struct tcp_rack *)tp->t_fb_ptr;
3327         snd_una = tp->snd_una;
3328         if (SEQ_LEQ((seq_out + len), snd_una)) {
3329                 /* Are we sending an old segment to induce an ack (keep-alive)? */
3330                 return;
3331         }
3332         if (SEQ_LT(seq_out, snd_una)) {
3333                 /* huh? should we panic? */
3334                 uint32_t end;
3335
3336                 end = seq_out + len;
3337                 seq_out = snd_una;
3338                 len = end - seq_out;
3339         }
3340         snd_max = tp->snd_max;
3341         if (th_flags & (TH_SYN | TH_FIN)) {
3342                 /*
3343                  * The call to rack_log_output is made before bumping
3344                  * snd_max. This means we can record one extra byte on a SYN
3345                  * or FIN if seq_out is adding more on and a FIN is present
3346                  * (and we are not resending).
3347                  */
3348                 if (th_flags & TH_SYN)
3349                         len++;
3350                 if (th_flags & TH_FIN)
3351                         len++;
3352                 if (SEQ_LT(snd_max, tp->snd_nxt)) {
3353                         /*
3354                          * The add/update has not been done for the FIN/SYN
3355                          * yet.
3356                          */
3357                         snd_max = tp->snd_nxt;
3358                 }
3359         }
3360         if (len == 0) {
3361                 /* We don't log zero window probes */
3362                 return;
3363         }
3364         rack->r_ctl.rc_time_last_sent = ts;
3365         if (IN_RECOVERY(tp->t_flags)) {
3366                 rack->r_ctl.rc_prr_out += len;
3367         }
3368         /* First question is it a retransmission? */
3369         if (seq_out == snd_max) {
3370 again:
3371                 rsm = rack_alloc(rack);
3372                 if (rsm == NULL) {
3373                         /*
3374                          * Hmm out of memory and the tcb got destroyed while
3375                          * we tried to wait.
3376                          */
3377 #ifdef INVARIANTS
3378                         panic("Out of memory when we should not be rack:%p", rack);
3379 #endif
3380                         return;
3381                 }
3382                 if (th_flags & TH_FIN) {
3383                         rsm->r_flags = RACK_HAS_FIN;
3384                 } else {
3385                         rsm->r_flags = 0;
3386                 }
3387                 rsm->r_tim_lastsent[0] = ts;
3388                 rsm->r_rtr_cnt = 1;
3389                 rsm->r_rtr_bytes = 0;
3390                 if (th_flags & TH_SYN) {
3391                         /* The data space is one beyond snd_una */
3392                         rsm->r_start = seq_out + 1;
3393                         rsm->r_end = rsm->r_start + (len - 1);
3394                 } else {
3395                         /* Normal case */
3396                         rsm->r_start = seq_out;
3397                         rsm->r_end = rsm->r_start + len;
3398                 }
3399                 rsm->r_sndcnt = 0;
3400                 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_map, rsm, r_next);
3401                 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
3402                 rsm->r_in_tmap = 1;
3403                 return;
3404         }
3405         /*
3406          * If we reach here it's a retransmission and we need to find it.
3407          */
3408 more:
3409         if (hintrsm && (hintrsm->r_start == seq_out)) {
3410                 rsm = hintrsm;
3411                 hintrsm = NULL;
3412         } else if (rack->r_ctl.rc_next) {
3413                 /* We have a hint from a previous run */
3414                 rsm = rack->r_ctl.rc_next;
3415         } else {
3416                 /* No hints sorry */
3417                 rsm = NULL;
3418         }
3419         if ((rsm) && (rsm->r_start == seq_out)) {
3420                 /*
3421                  * We used rc_next or hintrsm to retransmit, hopefully the
3422                  * likely case.
3423                  */
3424                 seq_out = rack_update_entry(tp, rack, rsm, ts, &len);
3425                 if (len == 0) {
3426                         return;
3427                 } else {
3428                         goto more;
3429                 }
3430         }
3431         /* Ok it was not the last pointer, go through the map the hard way. */
3432         TAILQ_FOREACH(rsm, &rack->r_ctl.rc_map, r_next) {
3433                 if (rsm->r_start == seq_out) {
3434                         seq_out = rack_update_entry(tp, rack, rsm, ts, &len);
3435                         rack->r_ctl.rc_next = TAILQ_NEXT(rsm, r_next);
3436                         if (len == 0) {
3437                                 return;
3438                         } else {
3439                                 continue;
3440                         }
3441                 }
3442                 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) {
3443                         /* Transmitted within this piece */
3444                         /*
3445                          * Ok we must split off the front and then let the
3446                          * update do the rest
3447                          */
3448                         nrsm = rack_alloc(rack);
3449                         if (nrsm == NULL) {
3450 #ifdef INVARIANTS
3451                                 panic("Ran out of memory that was preallocated? rack:%p", rack);
3452 #endif
3453                                 rack_update_rsm(tp, rack, rsm, ts);
3454                                 return;
3455                         }
3456                         /*
3457                          * Copy rsm to nrsm and then trim rsm so that it ends
3458                          * where this transmission starts (at seq_out).
3459                          */
3460                         nrsm->r_start = seq_out;
3461                         nrsm->r_end = rsm->r_end;
3462                         nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
3463                         nrsm->r_flags = rsm->r_flags;
3464                         nrsm->r_sndcnt = rsm->r_sndcnt;
3465                         nrsm->r_rtr_bytes = 0;
3466                         for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
3467                                 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
3468                         }
3469                         rsm->r_end = nrsm->r_start;
3470                         TAILQ_INSERT_AFTER(&rack->r_ctl.rc_map, rsm, nrsm, r_next);
3471                         if (rsm->r_in_tmap) {
3472                                 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
3473                                 nrsm->r_in_tmap = 1;
3474                         }
3475                         rsm->r_flags &= (~RACK_HAS_FIN);
3476                         seq_out = rack_update_entry(tp, rack, nrsm, ts, &len);
3477                         if (len == 0) {
3478                                 return;
3479                         }
3480                 }
3481         }
3482         /*
3483          * Hmm, not found in the map; did they retransmit both old data and
3484          * on into new data?
3485          */
3486         if (seq_out == tp->snd_max) {
3487                 goto again;
3488         } else if (SEQ_LT(seq_out, tp->snd_max)) {
3489 #ifdef INVARIANTS
3490                 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n",
3491                     seq_out, len, tp->snd_una, tp->snd_max);
3492                 printf("Starting Dump of all rack entries\n");
3493                 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_map, r_next) {
3494                         printf("rsm:%p start:%u end:%u\n",
3495                             rsm, rsm->r_start, rsm->r_end);
3496                 }
3497                 printf("Dump complete\n");
3498                 panic("seq_out not found rack:%p tp:%p",
3499                     rack, tp);
3500 #endif
3501         } else {
3502 #ifdef INVARIANTS
3503                 /*
3504                  * Hmm beyond sndmax? (only if we are using the new rtt-pack
3505                  * flag)
3506                  */
3507                 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p",
3508                     seq_out, len, tp->snd_max, tp);
3509 #endif
3510         }
3511 }
3512
3513 /*
3514  * Record one of the RTT updates from an ack into
3515  * our sample structure.
3516  */
3517 static void
3518 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt)
3519 {
3520         if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
3521             (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) {
3522                 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt;
3523         }
3524         if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
3525             (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) {
3526                 rack->r_ctl.rack_rs.rs_rtt_highest = rtt;
3527         }
3528         rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID;
3529         rack->r_ctl.rack_rs.rs_rtt_tot += rtt;
3530         rack->r_ctl.rack_rs.rs_rtt_cnt++;
3531 }
3532
3533 /*
3534  * Collect new round-trip time estimate
3535  * and update averages and current timeout.
3536  */
3537 static void
3538 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp)
3539 {
3540         int32_t delta;
3541         uint32_t o_srtt, o_var;
3542         int32_t rtt;
3543
3544         if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY)
3545                 /* No valid sample */
3546                 return;
3547         if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) {
3548                 /* We are to use the lowest RTT seen in a single ack */
3549                 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest;
3550         } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) {
3551                 /* We are to use the highest RTT seen in a single ack */
3552                 rtt = rack->r_ctl.rack_rs.rs_rtt_highest;
3553         } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) {
3554                 /* We are to use the average RTT seen in a single ack */
3555                 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot /
3556                                 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt);
3557         } else {
3558 #ifdef INVARIANTS
3559                 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method);
3560 #endif          
3561                 return;
3562         }
3563         if (rtt == 0)
3564                 rtt = 1;
3565         rack_log_rtt_sample(rack, rtt);
3566         o_srtt = tp->t_srtt;
3567         o_var = tp->t_rttvar;
3568         rack = (struct tcp_rack *)tp->t_fb_ptr;
3569         if (tp->t_srtt != 0) {
3570                 /*
3571                  * srtt is stored as fixed point with 5 bits after the
3572                  * binary point (i.e., scaled by 32).  The following magic is
3573                  * equivalent to the smoothing algorithm in rfc793 with an
3574                  * alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed point).
3575                  * Adjust rtt to origin 0.
3576                  */
3577                 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
3578                     - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
3579
3580                 tp->t_srtt += delta;
3581                 if (tp->t_srtt <= 0)
3582                         tp->t_srtt = 1;
3583
3584                 /*
3585                  * We accumulate a smoothed rtt variance (actually, a
3586                  * smoothed mean difference), then set the retransmit timer
3587                  * to smoothed rtt + 4 times the smoothed variance. rttvar
3588                  * is stored as fixed point with 4 bits after the binary
3589                  * point (scaled by 16).  The following is equivalent to
3590                  * rfc793 smoothing with an alpha of .75 (rttvar =
3591                  * rttvar*3/4 + |delta| / 4).  This replaces rfc793's
3592                  * wired-in beta.
3593                  */
3594                 if (delta < 0)
3595                         delta = -delta;
3596                 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
3597                 tp->t_rttvar += delta;
3598                 if (tp->t_rttvar <= 0)
3599                         tp->t_rttvar = 1;
3600                 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
3601                         tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3602         } else {
3603                 /*
3604                  * No rtt measurement yet - use the unsmoothed rtt. Set the
3605                  * variance to half the rtt (so our first retransmit happens
3606                  * at 3*rtt).
3607                  */
3608                 tp->t_srtt = rtt << TCP_RTT_SHIFT;
3609                 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
3610                 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
3611         }
3612         TCPSTAT_INC(tcps_rttupdated);
3613         rack_log_rtt_upd(tp, rack, rtt, o_srtt, o_var);
3614         tp->t_rttupdated++;
3615 #ifdef NETFLIX_STATS
3616         stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt));
3617 #endif
3618         tp->t_rxtshift = 0;
3619
3620         /*
3621          * the retransmit should happen at rtt + 4 * rttvar. Because of the
3622          * way we do the smoothing, srtt and rttvar will each average +1/2
3623          * tick of bias.  When we compute the retransmit timer, we want 1/2
3624          * tick of rounding and 1 extra tick because of +-1/2 tick
3625          * uncertainty in the firing of the timer.  The bias will give us
3626          * exactly the 1.5 tick we need.  But, because the bias is
3627          * statistical, we have to test that we don't drop below the minimum
3628          * feasible timer (which is 2 ticks).
3629          */
3630         TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
3631            max(MSEC_2_TICKS(rack_rto_min), rtt + 2), MSEC_2_TICKS(rack_rto_max));
3632         tp->t_softerror = 0;
3633 }
3634
3635 static void
3636 rack_earlier_retran(struct tcpcb *tp, struct rack_sendmap *rsm,
3637     uint32_t t, uint32_t cts)
3638 {
3639         /*
3640          * For this RSM, we acknowledged the data from a previous
3641          * transmission, not the last one we made. This means we did a false
3642          * retransmit.
3643          */
3644         struct tcp_rack *rack;
3645
3646         if (rsm->r_flags & RACK_HAS_FIN) {
3647                 /*
3648                  * The FIN is often sent multiple times when we
3649                  * have everything outstanding ack'd. We ignore this case
3650                  * since it's over now.
3651                  */
3652                 return;
3653         }
3654         if (rsm->r_flags & RACK_TLP) {
3655                 /*
3656                  * We expect TLPs to have this occur.
3657                  */
3658                 return;
3659         }
3660         rack = (struct tcp_rack *)tp->t_fb_ptr;
3661         /* should we undo cc changes and exit recovery? */
3662         if (IN_RECOVERY(tp->t_flags)) {
3663                 if (rack->r_ctl.rc_rsm_start == rsm->r_start) {
3664                         /*
3665                          * Undo what we ratcheted down and exit recovery if
3666                          * possible
3667                          */
3668                         EXIT_RECOVERY(tp->t_flags);
3669                         tp->snd_recover = tp->snd_una;
3670                         if (rack->r_ctl.rc_cwnd_at > tp->snd_cwnd)
3671                                 tp->snd_cwnd = rack->r_ctl.rc_cwnd_at;
3672                         if (rack->r_ctl.rc_ssthresh_at > tp->snd_ssthresh)
3673                                 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at;
3674                 }
3675         }
3676         if (rsm->r_flags & RACK_WAS_SACKPASS) {
3677                 /*
3678                  * We retransmitted based on a sack and the earlier
3679                  * retransmission ack'd it - re-ordering is occurring.
3680                  */
3681                 counter_u64_add(rack_reorder_seen, 1);
3682                 rack->r_ctl.rc_reorder_ts = cts;
3683         }
3684         counter_u64_add(rack_badfr, 1);
3685         counter_u64_add(rack_badfr_bytes, (rsm->r_end - rsm->r_start));
3686 }
3687
3688
3689 static int
3690 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
3691     struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type)
3692 {
3693         int32_t i;
3694         uint32_t t;
3695
3696         if (rsm->r_flags & RACK_ACKED)
3697                 /* Already done */
3698                 return (0);
3699
3700
3701         if ((rsm->r_rtr_cnt == 1) ||
3702             ((ack_type == CUM_ACKED) &&
3703             (to->to_flags & TOF_TS) &&
3704             (to->to_tsecr) &&
3705             (rsm->r_tim_lastsent[rsm->r_rtr_cnt - 1] == to->to_tsecr))
3706             ) {
3707                 /*
3708                  * We will only find a matching timestamp if it's cum-acked.
3709                  * But if it has only been transmitted once, it's for-sure
3710                  * matching :-)
3711                  */
3712                 t = cts - rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
3713                 if ((int)t <= 0)
3714                         t = 1;
3715                 if (!tp->t_rttlow || tp->t_rttlow > t)
3716                         tp->t_rttlow = t;
3717                 if (!rack->r_ctl.rc_rack_min_rtt ||
3718                     SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
3719                         rack->r_ctl.rc_rack_min_rtt = t;
3720                         if (rack->r_ctl.rc_rack_min_rtt == 0) {
3721                                 rack->r_ctl.rc_rack_min_rtt = 1;
3722                         }
3723                 }
3724                 tcp_rack_xmit_timer(rack, TCP_TS_TO_TICKS(t) + 1);
3725                 if ((rsm->r_flags & RACK_TLP) &&
3726                     (!IN_RECOVERY(tp->t_flags))) {
3727                         /* Segment was a TLP and our retrans matched */
3728                         if (rack->r_ctl.rc_tlp_cwnd_reduce) {
3729                                 rack->r_ctl.rc_rsm_start = tp->snd_max;
3730                                 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
3731                                 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
3732                                 rack_cong_signal(tp, NULL, CC_NDUPACK);
3733                                 /*
3734                                  * When we enter recovery we need to assure
3735                                  * we send one packet.
3736                                  */
3737                                 rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg;
3738                         } else
3739                                 rack->r_ctl.rc_tlp_rtx_out = 0;
3740                 }
3741                 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) {
3742                         /* New more recent rack_tmit_time */
3743                         rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
3744                         rack->rc_rack_rtt = t;
3745                 }
3746                 return (1);
3747         }
3748         /* 
3749          * We clear the soft/rxtshift since we got an ack. 
3750          * There is no assurance we will call the commit() function
3751          * so we need to clear these to avoid incorrect handling.
3752          */
3753         tp->t_rxtshift = 0;
3754         tp->t_softerror = 0;
3755         if ((to->to_flags & TOF_TS) &&
3756             (ack_type == CUM_ACKED) &&
3757             (to->to_tsecr) &&
3758             ((rsm->r_flags & (RACK_DEFERRED | RACK_OVERMAX)) == 0)) {
3759                 /*
3760                  * Now which timestamp does it match? In this block the ACK
3761                  * must be coming from a previous transmission.
3762                  */
3763                 for (i = 0; i < rsm->r_rtr_cnt; i++) {
3764                         if (rsm->r_tim_lastsent[i] == to->to_tsecr) {
3765                                 t = cts - rsm->r_tim_lastsent[i];
3766                                 if ((int)t <= 0)
3767                                         t = 1;
3768                                 if ((i + 1) < rsm->r_rtr_cnt) {
3769                                         /* Likely */
3770                                         rack_earlier_retran(tp, rsm, t, cts);
3771                                 }
3772                                 if (!tp->t_rttlow || tp->t_rttlow > t)
3773                                         tp->t_rttlow = t;
3774                                 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
3775                                         rack->r_ctl.rc_rack_min_rtt = t;
3776                                         if (rack->r_ctl.rc_rack_min_rtt == 0) {
3777                                                 rack->r_ctl.rc_rack_min_rtt = 1;
3778                                         }
3779                                 }
3780                                 /*
3781                                  * Note the following calls to
3782                                  * tcp_rack_xmit_timer() are being commented
3783                                  * out for now. They give us no more accuracy
3784                                  * and often lead to a wrong choice. We have
3785                                  * enough samples that have not been 
3786                                  * retransmitted. I leave the commented out
3787                                  * code in here in case in the future we
3788                                  * decide to add it back (though I can't foresee
3789                                  * doing that). That way we will easily see
3790                                  * where they need to be placed.
3791                                  */
3792                                 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time,
3793                                     rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) {
3794                                         /* New more recent rack_tmit_time */
3795                                         rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
3796                                         rack->rc_rack_rtt = t;
3797                                 }
3798                                 return (1);
3799                         }
3800                 }
3801                 goto ts_not_found;
3802         } else {
3803                  * Ok it's a SACK block that we retransmitted, or a Windows
3804                  * machine without timestamps. We can tell nothing from the
3805                  * time-stamp since either it's not there, or it reflects the time
3806                  * the peer last received a segment that moved its cum-ack point forward.
3807                  * recieved a segment that moved forward its cum-ack point.
3808                  */
3809 ts_not_found:
3810                 i = rsm->r_rtr_cnt - 1;
3811                 t = cts - rsm->r_tim_lastsent[i];
3812                 if ((int)t <= 0)
3813                         t = 1;
3814                 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
3815                         /*
3816                          * We retransmitted and the ack came back in less
3817                          * than the smallest rtt we have observed. We most
3818                          * likely did an improper retransmit as outlined in
3819                          * 4.2 Step 3 point 2 in the rack-draft.
3820                          */
3821                         i = rsm->r_rtr_cnt - 2;
3822                         t = cts - rsm->r_tim_lastsent[i];
3823                         rack_earlier_retran(tp, rsm, t, cts);
3824                 } else if (rack->r_ctl.rc_rack_min_rtt) {
3825                         /*
3826                          * We retransmitted it and the retransmit did the
3827                          * job.
3828                          */
3829                         if (!rack->r_ctl.rc_rack_min_rtt ||
3830                             SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
3831                                 rack->r_ctl.rc_rack_min_rtt = t;
3832                                 if (rack->r_ctl.rc_rack_min_rtt == 0) {
3833                                         rack->r_ctl.rc_rack_min_rtt = 1;
3834                                 }
3835                         }
3836                         if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, rsm->r_tim_lastsent[i])) {
3837                                 /* New more recent rack_tmit_time */
3838                                 rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[i];
3839                                 rack->rc_rack_rtt = t;
3840                         }
3841                         return (1);
3842                 }
3843         }
3844         return (0);
3845 }
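/*
 * Illustrative sketch only (hypothetical helper, not called by the stack):
 * how a single RTT sample is taken above.  We use the elapsed ticks since
 * the last (re)transmission, clamp the sample to at least 1 tick, and fold
 * it into the running rack minimum.  Wrap-aware SEQ_* comparisons are
 * replaced by plain arithmetic for brevity.
 */
static inline uint32_t
rack_example_rtt_sample(uint32_t cts, uint32_t lastsent, uint32_t *min_rtt)
{
	uint32_t t;

	t = cts - lastsent;
	if ((int32_t)t <= 0)
		t = 1;			/* Never report a zero or negative sample. */
	if (*min_rtt == 0 || t < *min_rtt)
		*min_rtt = t;		/* t >= 1, so the minimum stays non-zero. */
	return (t);
}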
3846
3847 /*
3848  * Mark the SACK_PASSED flag on all entries sent prior to rsm (in transmit order).
3849  */
3850 static void
3851 rack_log_sack_passed(struct tcpcb *tp,
3852     struct tcp_rack *rack, struct rack_sendmap *rsm)
3853 {
3854         struct rack_sendmap *nrsm;
3855         uint32_t ts;
3856         int32_t idx;
3857
3858         idx = rsm->r_rtr_cnt - 1;
3859         ts = rsm->r_tim_lastsent[idx];
3860         nrsm = rsm;
3861         TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap,
3862             rack_head, r_tnext) {
3863                 if (nrsm == rsm) {
3864                         /* Skip the original segment; it is acked */
3865                         continue;
3866                 }
3867                 if (nrsm->r_flags & RACK_ACKED) {
3868                         /* Skip ack'd segments */
3869                         continue;
3870                 }
3871                 idx = nrsm->r_rtr_cnt - 1;
3872                 if (ts == nrsm->r_tim_lastsent[idx]) {
3873                         /*
3874                          * For this case let's use the sequence number; if we sent in
3875                          * a big block (TSO) we would have a bunch of segments sent
3876                          * at the same time.
3877                          *
3878                          * We would only get a report if its SEQ is earlier. If we
3879                          * have done multiple retransmits the times would not be
3880                          * equal. (A minimal sketch of this rule follows the function.)
3881                          */
3882                         if (SEQ_LT(nrsm->r_start, rsm->r_start)) {
3883                                 nrsm->r_flags |= RACK_SACK_PASSED;
3884                                 nrsm->r_flags &= ~RACK_WAS_SACKPASS;
3885                         }
3886                 } else {
3887                         /*
3888                          * Here they were sent at different times, not a big
3889                          * block. Since we transmitted this one later and
3890                          * see it sack'd then this must also be missing (or
3891                          * we would have gotten a sack block for it)
3892                          */
3893                         nrsm->r_flags |= RACK_SACK_PASSED;
3894                         nrsm->r_flags &= ~RACK_WAS_SACKPASS;
3895                 }
3896         }
3897 }
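/*
 * Illustrative sketch only (hypothetical helper): the rule applied in
 * rack_log_sack_passed() for a candidate entry that was transmitted no
 * later than the newly SACKed entry.  If both carry the same last-send
 * time (same TSO burst) only a lower starting sequence counts as passed;
 * otherwise the earlier transmission is always considered passed.  Plain
 * comparisons stand in for the wrap-aware SEQ_LT() macro.
 */
static inline int
rack_example_sack_passed(uint32_t cand_lastsent, uint32_t cand_start,
    uint32_t sacked_lastsent, uint32_t sacked_start)
{
	if (cand_lastsent == sacked_lastsent)
		return (cand_start < sacked_start);	/* Same burst: compare sequence. */
	return (1);					/* Sent earlier in time: passed. */
}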
3898
3899 static uint32_t
3900 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack,
3901     struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts)
3902 {
3903         int32_t idx;
3904         int32_t times = 0;
3905         uint32_t start, end, changed = 0;
3906         struct rack_sendmap *rsm, *nrsm;
3907         int32_t used_ref = 1;
3908
3909         start = sack->start;
3910         end = sack->end;
3911         rsm = *prsm;
3912         if (rsm && SEQ_LT(start, rsm->r_start)) {
3913                 TAILQ_FOREACH_REVERSE_FROM(rsm, &rack->r_ctl.rc_map, rack_head, r_next) {
3914                         if (SEQ_GEQ(start, rsm->r_start) &&
3915                             SEQ_LT(start, rsm->r_end)) {
3916                                 goto do_rest_ofb;
3917                         }
3918                 }
3919         }
3920         if (rsm == NULL) {
3921 start_at_beginning:
3922                 rsm = NULL;
3923                 used_ref = 0;
3924         }
3925         /* First lets locate the block where this guy is */
3926         TAILQ_FOREACH_FROM(rsm, &rack->r_ctl.rc_map, r_next) {
3927                 if (SEQ_GEQ(start, rsm->r_start) &&
3928                     SEQ_LT(start, rsm->r_end)) {
3929                         break;
3930                 }
3931         }
3932 do_rest_ofb:
3933         if (rsm == NULL) {
3934                 /*
3935                  * This happens when we get duplicate sack blocks with the
3936                  * same end. For example SACK 4: 100, SACK 3: 100. The sort
3937                  * will not change their location so we would just start at
3938                  * the end of the first one and get lost.
3939                  */
3940                 if (tp->t_flags & TF_SENTFIN) {
3941                         /*
3942                          * Check to see if we have not logged the FIN that
3943                          * went out.
3944                          */
3945                         nrsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_map, rack_sendmap, r_next);
3946                         if (nrsm && (nrsm->r_end + 1) == tp->snd_max) {
3947                                 /*
3948                                  * Ok we did not get the FIN logged.
3949                                  */
3950                                 nrsm->r_end++;
3951                                 rsm = nrsm;
3952                                 goto do_rest_ofb;
3953                         }
3954                 }
3955                 if (times == 1) {
3956 #ifdef INVARIANTS
3957                         panic("tp:%p rack:%p sack:%p to:%p prsm:%p",
3958                             tp, rack, sack, to, prsm);
3959 #else
3960                         goto out;
3961 #endif
3962                 }
3963                 times++;
3964                 counter_u64_add(rack_sack_proc_restart, 1);
3965                 goto start_at_beginning;
3966         }
3967         /* Ok we have an ACK for some piece of rsm */
3968         if (rsm->r_start != start) {
3969                 /*
3970                  * Need to split this into two pieces, before and after (a sketch of the split follows the function).
3971                  */
3972                 nrsm = rack_alloc(rack);
3973                 if (nrsm == NULL) {
3974                         /*
3975                          * failed XXXrrs what can we do but lose the sack
3976                          * info?
3977                          */
3978                         goto out;
3979                 }
3980                 nrsm->r_start = start;
3981                 nrsm->r_rtr_bytes = 0;
3982                 nrsm->r_end = rsm->r_end;
3983                 nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
3984                 nrsm->r_flags = rsm->r_flags;
3985                 nrsm->r_sndcnt = rsm->r_sndcnt;
3986                 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
3987                         nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
3988                 }
3989                 rsm->r_end = nrsm->r_start;
3990                 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_map, rsm, nrsm, r_next);
3991                 if (rsm->r_in_tmap) {
3992                         TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
3993                         nrsm->r_in_tmap = 1;
3994                 }
3995                 rsm->r_flags &= (~RACK_HAS_FIN);
3996                 rsm = nrsm;
3997         }
3998         if (SEQ_GEQ(end, rsm->r_end)) {
3999                 /*
4000                  * The end of this block is either beyond this guy or right
4001                  * at this guy.
4002                  */
4003
4004                 if ((rsm->r_flags & RACK_ACKED) == 0) {
4005                         rack_update_rtt(tp, rack, rsm, to, cts, SACKED);
4006                         changed += (rsm->r_end - rsm->r_start);
4007                         rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
4008                         rack_log_sack_passed(tp, rack, rsm);
4009                         /* Is reordering occurring? */
4010                         if (rsm->r_flags & RACK_SACK_PASSED) {
4011                                 counter_u64_add(rack_reorder_seen, 1);
4012                                 rack->r_ctl.rc_reorder_ts = cts;
4013                         }
4014                         rsm->r_flags |= RACK_ACKED;
4015                         rsm->r_flags &= ~RACK_TLP;
4016                         if (rsm->r_in_tmap) {
4017                                 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
4018                                 rsm->r_in_tmap = 0;
4019                         }
4020                 }
4021                 if (end == rsm->r_end) {
4022                         /* This block only - done */
4023                         goto out;
4024                 }
4025                 /* There is more not covered by this rsm; move on */
4026                 start = rsm->r_end;
4027                 nrsm = TAILQ_NEXT(rsm, r_next);
4028                 rsm = nrsm;
4029                 times = 0;
4030                 goto do_rest_ofb;
4031         }
4032         /* Ok we need to split off this one at the tail */
4033         nrsm = rack_alloc(rack);
4034         if (nrsm == NULL) {
4035                 /* failed rrs what can we do but lose the sack info? */
4036                 goto out;
4037         }
4038         /* Clone it */
4039         nrsm->r_start = end;
4040         nrsm->r_end = rsm->r_end;
4041         nrsm->r_rtr_bytes = 0;
4042         nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
4043         nrsm->r_flags = rsm->r_flags;
4044         nrsm->r_sndcnt = rsm->r_sndcnt;
4045         for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
4046                 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
4047         }
4048         /* The sack block does not cover this guy fully */
4049         rsm->r_flags &= (~RACK_HAS_FIN);
4050         rsm->r_end = end;
4051         TAILQ_INSERT_AFTER(&rack->r_ctl.rc_map, rsm, nrsm, r_next);
4052         if (rsm->r_in_tmap) {
4053                 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
4054                 nrsm->r_in_tmap = 1;
4055         }
4056         if (rsm->r_flags & RACK_ACKED) {
4057                 /* Been here done that */
4058                 goto out;
4059         }
4060         rack_update_rtt(tp, rack, rsm, to, cts, SACKED);
4061         changed += (rsm->r_end - rsm->r_start);
4062         rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
4063         rack_log_sack_passed(tp, rack, rsm);
4064         /* Is reordering occurring? */
4065         if (rsm->r_flags & RACK_SACK_PASSED) {
4066                 counter_u64_add(rack_reorder_seen, 1);
4067                 rack->r_ctl.rc_reorder_ts = cts;
4068         }
4069         rsm->r_flags |= RACK_ACKED;
4070         rsm->r_flags &= ~RACK_TLP;
4071         if (rsm->r_in_tmap) {
4072                 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
4073                 rsm->r_in_tmap = 0;
4074         }
4075 out:
4076         if (used_ref == 0) {
4077                 counter_u64_add(rack_sack_proc_all, 1);
4078         } else {
4079                 counter_u64_add(rack_sack_proc_short, 1);
4080         }
4081         /* Save off where we last were */
4082         if (rsm)
4083                 rack->r_ctl.rc_sacklast = TAILQ_NEXT(rsm, r_next);
4084         else
4085                 rack->r_ctl.rc_sacklast = NULL;
4086         *prsm = rsm;
4087         return (changed);
4088 }
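/*
 * Illustrative sketch only (hypothetical types and helper): the split done
 * in rack_proc_sack_blk() when a SACK block begins in the middle of a map
 * entry.  The original entry is trimmed to end where the SACK starts and a
 * clone covers the remainder, which is what then gets marked ACKED.
 */
struct rack_example_blk {
	uint32_t start;		/* first sequence covered */
	uint32_t end;		/* one past the last sequence covered */
};

static inline void
rack_example_split(struct rack_example_blk *blk, uint32_t seq,
    struct rack_example_blk *tail)
{
	tail->start = seq;	/* clone covers [seq, old end) */
	tail->end = blk->end;
	blk->end = seq;		/* original now covers [start, seq) */
}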
4089
4090 static void inline 
4091 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack)
4092 {
4093         struct rack_sendmap *tmap;
4094
4095         tmap = NULL;
4096         while (rsm && (rsm->r_flags & RACK_ACKED)) {
4097                 /* It's no longer sacked, mark it so */
4098                 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
4099 #ifdef INVARIANTS
4100                 if (rsm->r_in_tmap) {
4101                         panic("rack:%p rsm:%p flags:0x%x in tmap?",
4102                               rack, rsm, rsm->r_flags);
4103                 }
4104 #endif
4105                 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS);
4106                 /* Rebuild it into our tmap */
4107                 if (tmap == NULL) {
4108                         TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
4109                         tmap = rsm;
4110                 } else {
4111                         TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext);
4112                         tmap = rsm;
4113                 }
4114                 tmap->r_in_tmap = 1;
4115                 rsm = TAILQ_NEXT(rsm, r_next);
4116         }
4117         /* 
4118          * Now let's possibly clear the sack filter so we start
4119          * recognizing sacks that cover this area.
4120          */
4121         if (rack_use_sack_filter)
4122                 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack);
4123
4124 }
4125
4126 static void
4127 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th)
4128 {
4129         uint32_t changed, last_seq, entered_recovery = 0;
4130         struct tcp_rack *rack;
4131         struct rack_sendmap *rsm;
4132         struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1];
4133         register uint32_t th_ack;
4134         int32_t i, j, k, num_sack_blks = 0;
4135         uint32_t cts, acked, ack_point, sack_changed = 0;
4136
4137         INP_WLOCK_ASSERT(tp->t_inpcb);
4138         if (th->th_flags & TH_RST) {
4139                 /* We don't log resets */
4140                 return;
4141         }
4142         rack = (struct tcp_rack *)tp->t_fb_ptr;
4143         cts = tcp_ts_getticks();
4144         rsm = TAILQ_FIRST(&rack->r_ctl.rc_map);
4145         changed = 0;
4146         th_ack = th->th_ack;
4147
4148         if (SEQ_GT(th_ack, tp->snd_una)) {
4149                 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__);
4150                 tp->t_acktime = ticks;
4151         }
4152         if (rsm && SEQ_GT(th_ack, rsm->r_start))
4153                 changed = th_ack - rsm->r_start;
4154         if (changed) {
4155                 /*
4156                  * The ACK point is advancing to th_ack, we must drop off
4157                  * the packets in the rack log and calculate any eligible
4158                  * RTT's.
4159                  */
4160                 rack->r_wanted_output++;
4161 more:
4162                 rsm = TAILQ_FIRST(&rack->r_ctl.rc_map);
4163                 if (rsm == NULL) {
4164                         if ((th_ack - 1) == tp->iss) {
4165                                 /*
4166                                  * For the SYN incoming case we will not
4167                                  * have called tcp_output for the sending of
4168                                  * the SYN, so there will be no map. All
4169                                  * other cases should probably be a panic.
4170                                  */
4171                                 goto proc_sack;
4172                         }
4173                         if (tp->t_flags & TF_SENTFIN) {
4174                         /* if we sent a FIN we will not have a map */
4175                                 goto proc_sack;
4176                         }
4177 #ifdef INVARIANTS
4178                         panic("No rack map tp:%p for th:%p state:%d rack:%p snd_una:%u snd_max:%u snd_nxt:%u chg:%d\n",
4179                             tp,
4180                             th, tp->t_state, rack,
4181                             tp->snd_una, tp->snd_max, tp->snd_nxt, changed);
4182 #endif
4183                         goto proc_sack;
4184                 }
4185                 if (SEQ_LT(th_ack, rsm->r_start)) {
4186                         /* Huh map is missing this */
4187 #ifdef INVARIANTS
4188                         printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n",
4189                             rsm->r_start,
4190                             th_ack, tp->t_state, rack->r_state);
4191 #endif
4192                         goto proc_sack;
4193                 }
4194                 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED);
4195                 /* Now do we consume the whole thing? */
4196                 if (SEQ_GEQ(th_ack, rsm->r_end)) {
4197                         /* Its all consumed. */
4198                         uint32_t left;
4199
4200                         rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes;
4201                         rsm->r_rtr_bytes = 0;
4202                         TAILQ_REMOVE(&rack->r_ctl.rc_map, rsm, r_next);
4203                         if (rsm->r_in_tmap) {
4204                                 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
4205                                 rsm->r_in_tmap = 0;
4206                         }
4207                         if (rack->r_ctl.rc_next == rsm) {
4208                                 /* scoot along the marker */
4209                                 rack->r_ctl.rc_next = TAILQ_FIRST(&rack->r_ctl.rc_map);
4210                         }
4211                         if (rsm->r_flags & RACK_ACKED) {
4212                                 /*
4213                                  * It was acked on the scoreboard -- remove
4214                                  * it from total
4215                                  */
4216                                 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
4217                         } else if (rsm->r_flags & RACK_SACK_PASSED) {
4218                                 /*
4219                                  * There are acked segments ACKED on the
4220                                  * scoreboard further up. We are seeing
4221                                  * reordering.
4222                                  */
4223                                 counter_u64_add(rack_reorder_seen, 1);
4224                                 rsm->r_flags |= RACK_ACKED;
4225                                 rack->r_ctl.rc_reorder_ts = cts;
4226                         }
4227                         left = th_ack - rsm->r_end;
4228                         if (rsm->r_rtr_cnt > 1) {
4229                                 /*
4230                                  * Technically we should make r_rtr_cnt be
4231                                  * monotonically increasing and just mod it to
4232                                  * the timestamp it is replacing... that way
4233                                  * we would have the last 3 retransmits. Now
4234                                  * rc_loss_count will be wrong if we
4235                                  * retransmit something more than 2 times in
4236                                  * recovery :(
4237                                  */
4238                                 rack->r_ctl.rc_loss_count += (rsm->r_rtr_cnt - 1);
4239                         }
4240                         /* Free back to zone */
4241                         rack_free(rack, rsm);
4242                         if (left) {
4243                                 goto more;
4244                         }
4245                         goto proc_sack;
4246                 }
4247                 if (rsm->r_flags & RACK_ACKED) {
4248                         /*
4249                          * It was acked on the scoreboard -- remove it from
4250                          * total for the part being cum-acked.
4251                          */
4252                         rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start);
4253                 }
4254                 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes;
4255                 rsm->r_rtr_bytes = 0;
4256                 rsm->r_start = th_ack;
4257         }
4258 proc_sack:
4259         /* Check for reneging */
4260         rsm = TAILQ_FIRST(&rack->r_ctl.rc_map);
4261         if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) {
4262                 /*
4263                  * The peer has moved snd_una up to
4264                  * the edge of this send, i.e. one
4265                  * that it had previously acked. The only
4266                  * way that can be true is if the peer threw
4267                  * away data (space issues) that it had
4268                  * previously sacked (else it would have
4269                  * given us snd_una up to rsm->r_end).
4270                  * We need to undo the acked markings here.
4271                  *
4272                  * Note we have to look to make sure th_ack is
4273                  * our rsm->r_start in case we get an old ack
4274                  * where th_ack is behind snd_una.
4275                  */
4276                 rack_peer_reneges(rack, rsm, th->th_ack);
4277         }
4278         if ((to->to_flags & TOF_SACK) == 0) {
4279                 /* We are done, nothing left to log */
4280                 goto out;
4281         }
4282         rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_map, rack_sendmap, r_next);
4283         if (rsm) {
4284                 last_seq = rsm->r_end;
4285         } else {
4286                 last_seq = tp->snd_max;
4287         }
4288         /* Sack block processing */
4289         if (SEQ_GT(th_ack, tp->snd_una))
4290                 ack_point = th_ack;
4291         else
4292                 ack_point = tp->snd_una;
4293         for (i = 0; i < to->to_nsacks; i++) {
4294                 bcopy((to->to_sacks + i * TCPOLEN_SACK),
4295                     &sack, sizeof(sack));
4296                 sack.start = ntohl(sack.start);
4297                 sack.end = ntohl(sack.end);
4298                 if (SEQ_GT(sack.end, sack.start) &&
4299                     SEQ_GT(sack.start, ack_point) &&
4300                     SEQ_LT(sack.start, tp->snd_max) &&
4301                     SEQ_GT(sack.end, ack_point) &&
4302                     SEQ_LEQ(sack.end, tp->snd_max)) {
4303                         if ((rack->r_ctl.rc_num_maps_alloced > rack_sack_block_limit) &&
4304                             (SEQ_LT(sack.end, last_seq)) &&
4305                             ((sack.end - sack.start) < (tp->t_maxseg / 8))) {
4306                                 /*
4307                                  * Not the last piece and it's smaller than
4308                                  * 1/8th of a MSS. We ignore this.
4309                                  */
4310                                 counter_u64_add(rack_runt_sacks, 1);
4311                                 continue;
4312                         }
4313                         sack_blocks[num_sack_blks] = sack;
4314                         num_sack_blks++;
4315 #ifdef NETFLIX_STATS
4316                 } else if (SEQ_LEQ(sack.start, th_ack) &&
4317                            SEQ_LEQ(sack.end, th_ack)) {
4318                         /*
4319                          * It's a D-SACK block.
4320                          */
4321                         tcp_record_dsack(sack.start, sack.end);
4322 #endif
4323                 }
4324
4325         }
4326         if (num_sack_blks == 0)
4327                 goto out;
4328         /*
4329          * Sort the SACK blocks so we can update the rack scoreboard with
4330          * just one pass.
4331          */
4332         if (rack_use_sack_filter) {
4333                 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks, num_sack_blks, th->th_ack);
4334         }
4335         if (num_sack_blks < 2) {
4336                 goto do_sack_work;
4337         }
4338         /* Sort the sacks */
4339         for (i = 0; i < num_sack_blks; i++) {
4340                 for (j = i + 1; j < num_sack_blks; j++) {
4341                         if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) {
4342                                 sack = sack_blocks[i];
4343                                 sack_blocks[i] = sack_blocks[j];
4344                                 sack_blocks[j] = sack;
4345                         }
4346                 }
4347         }
4348         /*
4349          * Now are any of the sack block ends the same (yes some
4350          * implementations send these)?
4351          */
4352 again:
4353         if (num_sack_blks > 1) {
4354                 for (i = 0; i < num_sack_blks; i++) {
4355                         for (j = i + 1; j < num_sack_blks; j++) {
4356                                 if (sack_blocks[i].end == sack_blocks[j].end) {
4357                                         /*
4358                                          * Ok these two have the same end; we
4359                                          * want the smallest end and then
4360                                          * throw away the larger and start
4361                                          * again.
4362                                          */
4363                                         if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) {
4364                                                 /*
4365                                                  * The second block covers
4366                                                  * more area; use that
4367                                                  */
4368                                                 sack_blocks[i].start = sack_blocks[j].start;
4369                                         }
4370                                         /*
4371                                          * Now collapse out the dup-sack and
4372                                          * lower the count
4373                                          */
4374                                         for (k = (j + 1); k < num_sack_blks; k++) {
4375                                                 sack_blocks[j].start = sack_blocks[k].start;
4376                                                 sack_blocks[j].end = sack_blocks[k].end;
4377                                                 j++;
4378                                         }
4379                                         num_sack_blks--;
4380                                         goto again;
4381                                 }
4382                         }
4383                 }
4384         }
4385 do_sack_work:
4386         rsm = rack->r_ctl.rc_sacklast;
4387         for (i = 0; i < num_sack_blks; i++) {
4388                 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts);
4389                 if (acked) {
4390                         rack->r_wanted_output++;
4391                         changed += acked;
4392                         sack_changed += acked;
4393                 }
4394         }
4395 out:
4396         if (changed) {
4397                 /* Something changed, cancel the rack timer */
4398                 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
4399         }
4400         if ((sack_changed) && (!IN_RECOVERY(tp->t_flags))) {
4401                 /*
4402                  * Ok we have a high probability that we need to go into
4403                  * recovery since we have data sack'd
4404                  */
4405                 struct rack_sendmap *rsm;
4406                 uint32_t tsused;
4407
4408                 tsused = tcp_ts_getticks();
4409                 rsm = tcp_rack_output(tp, rack, tsused);
4410                 if (rsm) {
4411                         /* Enter recovery */
4412                         rack->r_ctl.rc_rsm_start = rsm->r_start;
4413                         rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
4414                         rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
4415                         entered_recovery = 1;
4416                         rack_cong_signal(tp, NULL, CC_NDUPACK);
4417                         /*
4418                          * When we enter recovery we need to assure we send
4419                          * one packet.
4420                          */
4421                         rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg;
4422                         rack->r_timer_override = 1;
4423                 }
4424         }
4425         if (IN_RECOVERY(tp->t_flags) && (entered_recovery == 0)) {
4426                 /* Deal with changed and PRR here (in recovery only); a sketch of the PRR arithmetic follows this function */
4427                 uint32_t pipe, snd_una;
4428
4429                 rack->r_ctl.rc_prr_delivered += changed;
4430                 /* Compute prr_sndcnt */
4431                 if (SEQ_GT(tp->snd_una, th_ack)) {
4432                         snd_una = tp->snd_una;
4433                 } else {
4434                         snd_una = th_ack;
4435                 }
4436                 pipe = ((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt;
4437                 if (pipe > tp->snd_ssthresh) {
4438                         long sndcnt;
4439
4440                         sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh;
4441                         if (rack->r_ctl.rc_prr_recovery_fs > 0)
4442                                 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs;
4443                         else {
4444                                 rack->r_ctl.rc_prr_sndcnt = 0;
4445                                 sndcnt = 0;
4446                         }
4447                         sndcnt++;
4448                         if (sndcnt > (long)rack->r_ctl.rc_prr_out)
4449                                 sndcnt -= rack->r_ctl.rc_prr_out;
4450                         else
4451                                 sndcnt = 0;
4452                         rack->r_ctl.rc_prr_sndcnt = sndcnt;
4453                 } else {
4454                         uint32_t limit;
4455
4456                         if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out)
4457                                 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out);
4458                         else
4459                                 limit = 0;
4460                         if (changed > limit)
4461                                 limit = changed;
4462                         limit += tp->t_maxseg;
4463                         if (tp->snd_ssthresh > pipe) {
4464                                 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit);
4465                         } else {
4466                                 rack->r_ctl.rc_prr_sndcnt = min(0, limit);
4467                         }
4468                 }
4469                 if (rack->r_ctl.rc_prr_sndcnt >= tp->t_maxseg) {
4470                         rack->r_timer_override = 1;
4471                 }
4472         }
4473 }
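/*
 * Illustrative sketch only (hypothetical helper): the PRR send-count
 * computation performed above, in the style of RFC 6937.  While pipe is
 * above ssthresh we send in proportion to what has been newly delivered;
 * once pipe drops below ssthresh we fall back to a limited-transmit style
 * bound.  All parameters are plain values local to this sketch.
 */
static inline uint32_t
rack_example_prr_sndcnt(uint32_t prr_delivered, uint32_t prr_out,
    uint32_t recover_fs, uint32_t pipe, uint32_t ssthresh,
    uint32_t changed, uint32_t maxseg)
{
	uint64_t sndcnt;
	uint32_t limit;

	if (pipe > ssthresh) {
		/* Proportional part: delivered * ssthresh / flight size at recovery entry. */
		if (recover_fs == 0)
			return (0);
		sndcnt = ((uint64_t)prr_delivered * ssthresh) / recover_fs + 1;
		if (sndcnt <= prr_out)
			return (0);
		return ((uint32_t)(sndcnt - prr_out));
	}
	/* Reduction-bound part: never send more than ssthresh still allows. */
	limit = (prr_delivered > prr_out) ? (prr_delivered - prr_out) : 0;
	if (changed > limit)
		limit = changed;
	limit += maxseg;
	if (ssthresh <= pipe)
		return (0);
	return ((ssthresh - pipe) < limit ? (ssthresh - pipe) : limit);
}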
4474
4475 /*
4476  * Return value of 1, we do not need to call rack_process_data().
4477  * return value of 0, rack_process_data can be called.
4478  * For ret_val if its 0 the TCP is locked, if its non-zero
4479  * its unlocked and probably unsafe to touch the TCB.
4480  */
4481 static int
4482 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
4483     struct tcpcb *tp, struct tcpopt *to,
4484     int32_t * ti_locked, uint32_t tiwin, int32_t tlen,
4485     int32_t * ofia, int32_t thflags, int32_t * ret_val)
4486 {
4487         int32_t ourfinisacked = 0;
4488         int32_t nsegs, acked_amount;
4489         int32_t acked;
4490         struct mbuf *mfree;
4491         struct tcp_rack *rack;
4492         int32_t recovery = 0;
4493
4494         rack = (struct tcp_rack *)tp->t_fb_ptr;
4495         if (SEQ_GT(th->th_ack, tp->snd_max)) {
4496                 rack_do_dropafterack(m, tp, th, ti_locked, thflags, tlen, ret_val);
4497                 return (1);
4498         }
4499         if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) {
4500                 rack_log_ack(tp, to, th);
4501         }
4502         if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
4503                 /*
4504                  * Old ack, behind (or duplicate to) the last one rcv'd
4505                  * Note: Should mark that reordering is occurring! We should also
4506                  * look for sack blocks arriving e.g. ack 1, 4-4 then ack 1,
4507                  * 3-3, 4-4 would be reordering. As well as ack 1, 3-3 <no
4508                  * retran and> ack 3
4509                  */
4510                 return (0);
4511         }
4512         /*
4513          * If we reach this point, ACK is not a duplicate, i.e., it ACKs
4514          * something we sent.
4515          */
4516         if (tp->t_flags & TF_NEEDSYN) {
4517                 /*
4518                  * T/TCP: Connection was half-synchronized, and our SYN has
4519                  * been ACK'd (so connection is now fully synchronized).  Go
4520                  * to non-starred state, increment snd_una for ACK of SYN,
4521                  * and check if we can do window scaling.
4522                  */
4523                 tp->t_flags &= ~TF_NEEDSYN;
4524                 tp->snd_una++;
4525                 /* Do window scaling? */
4526                 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
4527                     (TF_RCVD_SCALE | TF_REQ_SCALE)) {
4528                         tp->rcv_scale = tp->request_r_scale;
4529                         /* Send window already scaled. */
4530                 }
4531         }
4532         nsegs = max(1, m->m_pkthdr.lro_nsegs);
4533         INP_WLOCK_ASSERT(tp->t_inpcb);
4534
4535         acked = BYTES_THIS_ACK(tp, th);
4536         TCPSTAT_ADD(tcps_rcvackpack, nsegs);
4537         TCPSTAT_ADD(tcps_rcvackbyte, acked);
4538
4539         /*
4540          * If we just performed our first retransmit, and the ACK arrives
4541          * within our recovery window, then it was a mistake to do the
4542          * retransmit in the first place.  Recover our original cwnd and
4543          * ssthresh, and proceed to transmit where we left off (a sketch of this check follows the function).
4544          */
4545         if (tp->t_flags & TF_PREVVALID) {
4546                 tp->t_flags &= ~TF_PREVVALID;
4547                 if (tp->t_rxtshift == 1 &&
4548                     (int)(ticks - tp->t_badrxtwin) < 0)
4549                         rack_cong_signal(tp, th, CC_RTO_ERR);
4550         }
4551         /*
4552          * If we have a timestamp reply, update smoothed round trip time. If
4553          * no timestamp is present but transmit timer is running and timed
4554          * sequence number was acked, update smoothed round trip time. Since
4555          * we now have an rtt measurement, cancel the timer backoff (cf.,
4556          * Phil Karn's retransmit alg.). Recompute the initial retransmit
4557          * timer.
4558          *
4559          * Some boxes send broken timestamp replies during the SYN+ACK
4560          * phase, ignore timestamps of 0 or we could calculate a huge RTT
4561          * and blow up the retransmit timer.
4562          */
4563         /*
4564          * If all outstanding data is acked, stop retransmit timer and
4565          * remember to restart (more output or persist). If there is more
4566          * data to be acked, restart retransmit timer, using current
4567          * (possibly backed-off) value.
4568          */
4569         if (th->th_ack == tp->snd_max) {
4570                 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
4571                 rack->r_wanted_output++;
4572         }
4573         /*
4574          * If no data (only SYN) was ACK'd, skip rest of ACK processing.
4575          */
4576         if (acked == 0) {
4577                 if (ofia)
4578                         *ofia = ourfinisacked;
4579                 return (0);
4580         }
4581         if (rack->r_ctl.rc_early_recovery) {
4582                 if (IN_FASTRECOVERY(tp->t_flags)) {
4583                         if (SEQ_LT(th->th_ack, tp->snd_recover)) {
4584                                 tcp_rack_partialack(tp, th);
4585                         } else {
4586                                 rack_post_recovery(tp, th);
4587                                 recovery = 1;
4588                         }
4589                 }
4590         }
4591         /*
4592          * Let the congestion control algorithm update congestion control
4593          * related information. This typically means increasing the
4594          * congestion window.
4595          */
4596         rack_ack_received(tp, rack, th, nsegs, CC_ACK, recovery);
4597         SOCKBUF_LOCK(&so->so_snd);
4598         acked_amount = min(acked, (int)sbavail(&so->so_snd));
4599         tp->snd_wnd -= acked_amount;
4600         mfree = sbcut_locked(&so->so_snd, acked_amount);
4601         if ((sbused(&so->so_snd) == 0) &&
4602             (acked > acked_amount) &&
4603             (tp->t_state >= TCPS_FIN_WAIT_1)) {
4604                 ourfinisacked = 1;
4605         }
4606         /* NB: sowwakeup_locked() does an implicit unlock. */
4607         sowwakeup_locked(so);
4608         m_freem(mfree);
4609         if (rack->r_ctl.rc_early_recovery == 0) {
4610                 if (IN_FASTRECOVERY(tp->t_flags)) {
4611                         if (SEQ_LT(th->th_ack, tp->snd_recover)) {
4612                                 tcp_rack_partialack(tp, th);
4613                         } else {
4614                                 rack_post_recovery(tp, th);
4615                         }
4616                 }
4617         }
4618         tp->snd_una = th->th_ack;
4619         if (SEQ_GT(tp->snd_una, tp->snd_recover))
4620                 tp->snd_recover = tp->snd_una;
4621
4622         if (SEQ_LT(tp->snd_nxt, tp->snd_una)) {
4623                 tp->snd_nxt = tp->snd_una;
4624         }
4625         if (tp->snd_una == tp->snd_max) {
4626                 /* Nothing left outstanding */
4627                 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
4628                 tp->t_acktime = 0;
4629                 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
4630                 /* Set need output so persist might get set */
4631                 rack->r_wanted_output++;
4632                 if (rack_use_sack_filter)
4633                         sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
4634                 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
4635                     (sbavail(&so->so_snd) == 0) &&
4636                     (tp->t_flags2 & TF2_DROP_AF_DATA)) {
4637                         /* 
4638                          * The socket was gone and the
4639                          * peer sent data, time to
4640                          * reset him.
4641                          */
4642                         *ret_val = 1;
4643                         tp = tcp_close(tp);
4644                         rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_UNLIMITED, tlen);
4645                         return (1);
4646                 }
4647         }
4648         if (ofia)
4649                 *ofia = ourfinisacked;
4650         return (0);
4651 }
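/*
 * Illustrative sketch only (hypothetical helper): the spurious-retransmit
 * test used above.  If exactly one retransmission has been done and the
 * ACK arrives while "ticks" is still before t_badrxtwin, the retransmit is
 * judged a mistake and cwnd/ssthresh are restored via CC_RTO_ERR.
 */
static inline int
rack_example_bad_rxt(int rxtshift, int now_ticks, int badrxtwin)
{
	return ((rxtshift == 1) && ((now_ticks - badrxtwin) < 0));
}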
4652
4653
4654 /*
4655  * Return value of 1, the TCB is unlocked and most
4656  * likely gone, return value of 0, the TCP is still
4657  * locked.
4658  */
4659 static int
4660 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so,
4661     struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
4662     int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
4663 {
4664         /*
4665          * Update window information. Don't look at window if no ACK: TAC's
4666          * send garbage on first SYN. (A sketch of the acceptance test follows this function.)
4667          */
4668         int32_t nsegs;
4669         int32_t tfo_syn;
4670         struct tcp_rack *rack;
4671
4672         rack = (struct tcp_rack *)tp->t_fb_ptr;
4673         INP_WLOCK_ASSERT(tp->t_inpcb);
4674
4675         nsegs = max(1, m->m_pkthdr.lro_nsegs);
4676         if ((thflags & TH_ACK) &&
4677             (SEQ_LT(tp->snd_wl1, th->th_seq) ||
4678             (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
4679             (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
4680                 /* keep track of pure window updates */
4681                 if (tlen == 0 &&
4682                     tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
4683                         TCPSTAT_INC(tcps_rcvwinupd);
4684                 tp->snd_wnd = tiwin;
4685                 tp->snd_wl1 = th->th_seq;
4686                 tp->snd_wl2 = th->th_ack;
4687                 if (tp->snd_wnd > tp->max_sndwnd)
4688                         tp->max_sndwnd = tp->snd_wnd;
4689                 rack->r_wanted_output++;
4690         } else if (thflags & TH_ACK) {
4691                 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) {
4692                         tp->snd_wnd = tiwin;
4693                         tp->snd_wl1 = th->th_seq;
4694                         tp->snd_wl2 = th->th_ack;
4695                 }
4696         }
4697         /* Was persist timer active and now we have window space? */
4698         if ((rack->rc_in_persist != 0) && tp->snd_wnd) {
4699                 rack_exit_persist(tp, rack);
4700                 tp->snd_nxt = tp->snd_max;
4701                 /* Make sure we output to start the timer */
4702                 rack->r_wanted_output++;
4703         }
4704         /*
4705          * Process segments with URG.
4706          */
4707         if ((thflags & TH_URG) && th->th_urp &&
4708             TCPS_HAVERCVDFIN(tp->t_state) == 0) {
4709                 /*
4710                  * This is a kludge, but if we receive and accept random
4711                  * urgent pointers, we'll crash in soreceive.  It's hard to
4712                  * imagine someone actually wanting to send this much urgent
4713                  * data.
4714                  */
4715                 SOCKBUF_LOCK(&so->so_rcv);
4716                 if (th->th_urp + sbavail(&so->so_rcv) > sb_max) {
4717                         th->th_urp = 0; /* XXX */
4718                         thflags &= ~TH_URG;     /* XXX */
4719                         SOCKBUF_UNLOCK(&so->so_rcv);    /* XXX */
4720                         goto dodata;    /* XXX */
4721                 }
4722                 /*
4723                  * If this segment advances the known urgent pointer, then
4724                  * mark the data stream.  This should not happen in
4725                  * CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since a
4726                  * FIN has been received from the remote side. In these
4727                  * states we ignore the URG.
4728                  *
4729                  * According to RFC961 (Assigned Protocols), the urgent
4730                  * pointer points to the last octet of urgent data.  We
4731                  * continue, however, to consider it to indicate the first
4732                  * octet of data past the urgent section as the original
4733                  * spec states (in one of two places).
4734                  */
4735                 if (SEQ_GT(th->th_seq + th->th_urp, tp->rcv_up)) {
4736                         tp->rcv_up = th->th_seq + th->th_urp;
4737                         so->so_oobmark = sbavail(&so->so_rcv) +
4738                             (tp->rcv_up - tp->rcv_nxt) - 1;
4739                         if (so->so_oobmark == 0)
4740                                 so->so_rcv.sb_state |= SBS_RCVATMARK;
4741                         sohasoutofband(so);
4742                         tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
4743                 }
4744                 SOCKBUF_UNLOCK(&so->so_rcv);
4745                 /*
4746                  * Remove out of band data so it doesn't get presented to the user.
4747                  * This can happen independent of advancing the URG pointer,
4748                  * but if two URG's are pending at once, some out-of-band
4749                  * data may creep in... ick.
4750                  */
4751                 if (th->th_urp <= (uint32_t) tlen &&
4752                     !(so->so_options & SO_OOBINLINE)) {
4753                         /* hdr drop is delayed */
4754                         tcp_pulloutofband(so, th, m, drop_hdrlen);
4755                 }
4756         } else {
4757                 /*
4758                  * If no out of band data is expected, pull receive urgent
4759                  * pointer along with the receive window.
4760                  */
4761                 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
4762                         tp->rcv_up = tp->rcv_nxt;
4763         }
4764 dodata:                         /* XXX */
4765         INP_WLOCK_ASSERT(tp->t_inpcb);
4766
4767         /*
4768          * Process the segment text, merging it into the TCP sequencing
4769          * queue, and arranging for acknowledgment of receipt if necessary.
4770          * This process logically involves adjusting tp->rcv_wnd as data is
4771          * presented to the user (this happens in tcp_usrreq.c, case
4772          * PRU_RCVD).  If a FIN has already been received on this connection
4773          * then we just ignore the text.
4774          */
4775         tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
4776                    IS_FASTOPEN(tp->t_flags));
4777         if ((tlen || (thflags & TH_FIN) || tfo_syn) &&
4778             TCPS_HAVERCVDFIN(tp->t_state) == 0) {
4779                 tcp_seq save_start = th->th_seq;
4780
4781                 m_adj(m, drop_hdrlen);  /* delayed header drop */
4782                 /*
4783                  * Insert segment which includes th into TCP reassembly
4784                  * queue with control block tp.  Set thflags to whether
4785                  * reassembly now includes a segment with FIN.  This handles
4786                  * the common case inline (segment is the next to be
4787                  * received on an established connection, and the queue is
4788                  * empty), avoiding linkage into and removal from the queue
4789                  * and repetition of various conversions. Set DELACK for
4790                  * segments received in order, but ack immediately when
4791                  * segments are out of order (so fast retransmit can work).
4792                  */
4793                 if (th->th_seq == tp->rcv_nxt &&
4794                     LIST_EMPTY(&tp->t_segq) &&
4795                     (TCPS_HAVEESTABLISHED(tp->t_state) ||
4796                     tfo_syn)) {
4797                         if (DELAY_ACK(tp, tlen) || tfo_syn) {
4798                                 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
4799                                 tp->t_flags |= TF_DELACK;
4800                         } else {
4801                                 rack->r_wanted_output++;
4802                                 tp->t_flags |= TF_ACKNOW;
4803                         }
4804                         tp->rcv_nxt += tlen;
4805                         thflags = th->th_flags & TH_FIN;
4806                         TCPSTAT_ADD(tcps_rcvpack, nsegs);
4807                         TCPSTAT_ADD(tcps_rcvbyte, tlen);
4808                         SOCKBUF_LOCK(&so->so_rcv);
4809                         if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
4810                                 m_freem(m);
4811                         else
4812                                 sbappendstream_locked(&so->so_rcv, m, 0);
4813                         /* NB: sorwakeup_locked() does an implicit unlock. */
4814                         sorwakeup_locked(so);
4815                 } else {
4816                         /*
4817                          * XXX: Due to the header drop above "th" is
4818                          * theoretically invalid by now.  Fortunately
4819                          * m_adj() doesn't actually free any mbufs when
4820                          * trimming from the head.
4821                          */
4822                         thflags = tcp_reass(tp, th, &tlen, m);
4823                         tp->t_flags |= TF_ACKNOW;
4824                 }
4825                 if (tlen > 0)
4826                         tcp_update_sack_list(tp, save_start, save_start + tlen);
4827         } else {
4828                 m_freem(m);
4829                 thflags &= ~TH_FIN;
4830         }
4831
4832         /*
4833          * If FIN is received ACK the FIN and let the user know that the
4834          * connection is closing.
4835          */
4836         if (thflags & TH_FIN) {
4837                 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
4838                         socantrcvmore(so);
4839                         /*
4840                          * If connection is half-synchronized (i.e. NEEDSYN
4841                          * flag on) then delay ACK, so it may be piggybacked
4842                          * when SYN is sent. Otherwise, since we received a
4843                          * FIN then no more input can be expected, send ACK
4844                          * now.
4845                          */
4846                         if (tp->t_flags & TF_NEEDSYN) {
4847                                 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
4848                                 tp->t_flags |= TF_DELACK;
4849                         } else {
4850                                 tp->t_flags |= TF_ACKNOW;
4851                         }
4852                         tp->rcv_nxt++;
4853                 }
4854                 switch (tp->t_state) {
4855
4856                         /*
4857                          * In SYN_RECEIVED and ESTABLISHED STATES enter the
4858                          * CLOSE_WAIT state.
4859                          */
4860                 case TCPS_SYN_RECEIVED:
4861                         tp->t_starttime = ticks;
4862                         /* FALLTHROUGH */
4863                 case TCPS_ESTABLISHED:
4864                         rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
4865                         tcp_state_change(tp, TCPS_CLOSE_WAIT);
4866                         break;
4867
4868                         /*
4869                          * If still in FIN_WAIT_1 STATE FIN has not been
4870                          * acked so enter the CLOSING state.
4871                          */
4872                 case TCPS_FIN_WAIT_1:
4873                         rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
4874                         tcp_state_change(tp, TCPS_CLOSING);
4875                         break;
4876
4877                         /*
4878                          * In FIN_WAIT_2 state enter the TIME_WAIT state,
4879                          * starting the time-wait timer, turning off the
4880                          * other standard timers.
4881                          */
4882                 case TCPS_FIN_WAIT_2:
4883                         rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
4884                         INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
4885                         KASSERT(*ti_locked == TI_RLOCKED, ("%s: dodata "
4886                             "TCP_FIN_WAIT_2 ti_locked: %d", __func__,
4887                             *ti_locked));
4888                         tcp_twstart(tp);
4889                         *ti_locked = TI_UNLOCKED;
4890                         INP_INFO_RUNLOCK(&V_tcbinfo);
4891                         return (1);
4892                 }
4893         }
4894         if (*ti_locked == TI_RLOCKED) {
4895                 INP_INFO_RUNLOCK(&V_tcbinfo);
4896                 *ti_locked = TI_UNLOCKED;
4897         }
4898         /*
4899          * Return any desired output.
4900          */
4901         if ((tp->t_flags & TF_ACKNOW) || (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) {
4902                 rack->r_wanted_output++;
4903         }
4904         KASSERT(*ti_locked == TI_UNLOCKED, ("%s: check_delack ti_locked %d",
4905             __func__, *ti_locked));
4906         INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
4907         INP_WLOCK_ASSERT(tp->t_inpcb);
4908         return (0);
4909 }
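/*
 * Illustrative sketch only (hypothetical helper): the classic test applied
 * above before accepting a window update.  A segment may update snd_wnd if
 * it carries a newer sequence, the same sequence with a newer ACK, or the
 * same sequence and ACK with a larger advertised window.  Plain comparisons
 * replace the wrap-aware SEQ_LT() macros.
 */
static inline int
rack_example_win_update_ok(uint32_t seq, uint32_t ack, uint32_t tiwin,
    uint32_t snd_wl1, uint32_t snd_wl2, uint32_t snd_wnd)
{
	if (snd_wl1 < seq)
		return (1);
	if ((snd_wl1 == seq) && (snd_wl2 < ack))
		return (1);
	if ((snd_wl1 == seq) && (snd_wl2 == ack) && (tiwin > snd_wnd))
		return (1);
	return (0);
}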
4910
4911 /*
4912  * Here nothing is really faster, it's just that we
4913  * have broken out the fast-data path separately, just like
4914  * the fast-ack path.
4915  */
4916 static int
4917 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so,
4918     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
4919     int32_t * ti_locked, uint32_t tiwin, int32_t nxt_pkt)
4920 {
4921         int32_t nsegs;
4922         int32_t newsize = 0;    /* automatic sockbuf scaling */
4923         struct tcp_rack *rack;
4924 #ifdef TCPDEBUG
4925         /*
4926          * The size of tcp_saveipgen must be the size of the max ip header,
4927          * now IPv6.
4928          */
4929         u_char tcp_saveipgen[IP6_HDR_LEN];
4930         struct tcphdr tcp_savetcp;
4931         short ostate = 0;
4932
4933 #endif
4934         /*
4935          * If last ACK falls within this segment's sequence numbers, record
4936          * the timestamp. NOTE that the test is modified according to the
4937          * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
4938          */
4939         if (__predict_false(th->th_seq != tp->rcv_nxt)) {
4940                 return (0);
4941         }
4942         if (__predict_false(tp->snd_nxt != tp->snd_max)) {
4943                 return (0);
4944         }
4945         if (tiwin && tiwin != tp->snd_wnd) {
4946                 return (0);
4947         }
4948         if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) {
4949                 return (0);
4950         }
4951         if (__predict_false((to->to_flags & TOF_TS) &&
4952             (TSTMP_LT(to->to_tsval, tp->ts_recent)))) {
4953                 return (0);
4954         }
4955         if (__predict_false((th->th_ack != tp->snd_una))) {
4956                 return (0);
4957         }
4958         if (__predict_false(tlen > sbspace(&so->so_rcv))) {
4959                 return (0);
4960         }
4961         if ((to->to_flags & TOF_TS) != 0 &&
4962             SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
4963                 tp->ts_recent_age = tcp_ts_getticks();
4964                 tp->ts_recent = to->to_tsval;
4965         }
4966         rack = (struct tcp_rack *)tp->t_fb_ptr;
4967         /*
4968          * This is a pure, in-sequence data packet with nothing on the
4969          * reassembly queue and we have enough buffer space to take it.
4970          */
4971         if (*ti_locked == TI_RLOCKED) {
4972                 INP_INFO_RUNLOCK(&V_tcbinfo);
4973                 *ti_locked = TI_UNLOCKED;
4974         }
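        /* An LRO-aggregated mbuf may stand for several wire segments. */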
4975         nsegs = max(1, m->m_pkthdr.lro_nsegs);
4976
4977
4978         /* Clean receiver SACK report if present */
4979         if (tp->rcv_numsacks)
4980                 tcp_clean_sackreport(tp);
4981         TCPSTAT_INC(tcps_preddat);
4982         tp->rcv_nxt += tlen;
4983         /*
4984          * Pull snd_wl1 up to prevent seq wrap relative to th_seq.
4985          */
4986         tp->snd_wl1 = th->th_seq;
4987         /*
4988          * Pull rcv_up up to prevent seq wrap relative to rcv_nxt.
4989          */
4990         tp->rcv_up = tp->rcv_nxt;
4991         TCPSTAT_ADD(tcps_rcvpack, nsegs);
4992         TCPSTAT_ADD(tcps_rcvbyte, tlen);
4993 #ifdef TCPDEBUG
4994         if (so->so_options & SO_DEBUG)
4995                 tcp_trace(TA_INPUT, ostate, tp,
4996                     (void *)tcp_saveipgen, &tcp_savetcp, 0);
4997 #endif
4998         newsize = tcp_autorcvbuf(m, th, so, tp, tlen);
4999
5000         /* Add data to socket buffer. */
5001         SOCKBUF_LOCK(&so->so_rcv);
5002         if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5003                 m_freem(m);
5004         } else {
5005                 /*
5006                  * Set new socket buffer size. Give up when limit is
5007                  * reached.
5008                  */
5009                 if (newsize)
5010                         if (!sbreserve_locked(&so->so_rcv,
5011                             newsize, so, NULL))
5012                                 so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
5013                 m_adj(m, drop_hdrlen);  /* delayed header drop */
5014                 sbappendstream_locked(&so->so_rcv, m, 0);
5015                 rack_calc_rwin(so, tp);
5016         }
5017         /* NB: sorwakeup_locked() does an implicit unlock. */
5018         sorwakeup_locked(so);
5019         if (DELAY_ACK(tp, tlen)) {
5020                 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
5021                 tp->t_flags |= TF_DELACK;
5022         } else {
5023                 tp->t_flags |= TF_ACKNOW;
5024                 rack->r_wanted_output++;
5025         }
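        /*
         * With everything acked (snd_una == snd_max) any state held by
         * the SACK filter is stale, so reset it.
         */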
5026         if ((tp->snd_una == tp->snd_max) && rack_use_sack_filter)
5027                 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
5028         return (1);
5029 }
5030
5031 /*
5032  * This subfunction is used to try to highly optimize the
5033  * fast path. We again allow window updates that are
5034  * in sequence to remain in the fast path. We also add
5035  * in the __predict's to attempt to help the compiler.
5036  * Note that if we return 0, then the segment can *not* be
5037  * processed here and the caller should push the packet
5038  * into the slow path.
5039  */
5040 static int
5041 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
5042     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
5043     int32_t * ti_locked, uint32_t tiwin, int32_t nxt_pkt, uint32_t cts)
5044 {
5045         int32_t acked;
5046         int32_t nsegs;
5047
5048 #ifdef TCPDEBUG
5049         /*
5050          * The size of tcp_saveipgen must be the size of the max ip header,
5051          * now IPv6.
5052          */
5053         u_char tcp_saveipgen[IP6_HDR_LEN];
5054         struct tcphdr tcp_savetcp;
5055         short ostate = 0;
5056
5057 #endif
5058         struct tcp_rack *rack;
5059
5060         if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
5061                 /* Old ack, behind (or duplicate to) the last one rcv'd */
5062                 return (0);
5063         }
5064         if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) {
5065                 /* Above what we have sent? */
5066                 return (0);
5067         }
5068         if (__predict_false(tp->snd_nxt != tp->snd_max)) {
5069                 /* We are retransmitting */
5070                 return (0);
5071         }
5072         if (__predict_false(tiwin == 0)) {
5073                 /* zero window */
5074                 return (0);
5075         }
5076         if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) {
5077                 /* We need a SYN or a FIN, unlikely.. */
5078                 return (0);
5079         }
5080         if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) {
5081                 /* Timestamp is behind .. old ack with seq wrap? */
5082                 return (0);
5083         }
5084         if (__predict_false(IN_RECOVERY(tp->t_flags))) {
5085                 /* Still recovering */
5086                 return (0);
5087         }
5088         rack = (struct tcp_rack *)tp->t_fb_ptr;
5089         if (rack->r_ctl.rc_sacked) {
5090                 /* We have sack holes on our scoreboard */
5091                 return (0);
5092         }
5093         /* Ok if we reach here, we can process a fast-ack */
5094         nsegs = max(1, m->m_pkthdr.lro_nsegs);
5095         rack_log_ack(tp, to, th);
5096         /* Did the window get updated? */
5097         if (tiwin != tp->snd_wnd) {
5098                 tp->snd_wnd = tiwin;
5099                 tp->snd_wl1 = th->th_seq;
5100                 if (tp->snd_wnd > tp->max_sndwnd)
5101                         tp->max_sndwnd = tp->snd_wnd;
5102         }
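        /* The peer's window now covers a full segment; leave persist mode. */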
5103         if ((rack->rc_in_persist != 0) && (tp->snd_wnd >= tp->t_maxseg)) {
5104                 rack_exit_persist(tp, rack);
5105         }
5106         /*
5107          * If last ACK falls within this segment's sequence numbers, record
5108          * the timestamp. NOTE that the test is modified according to the
5109          * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
5110          */
5111         if ((to->to_flags & TOF_TS) != 0 &&
5112             SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
5113                 tp->ts_recent_age = tcp_ts_getticks();
5114                 tp->ts_recent = to->to_tsval;
5115         }
5116         /*
5117          * This is a pure ack for outstanding data.
5118          */
5119         if (*ti_locked == TI_RLOCKED) {
5120                 INP_INFO_RUNLOCK(&V_tcbinfo);
5121                 *ti_locked = TI_UNLOCKED;
5122         }
5123         TCPSTAT_INC(tcps_predack);
5124
5125         /*
5126          * "bad retransmit" recovery.
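          * If this ACK arrives while t_badrxtwin still lies in the future
          * and only a single retransmit timeout has occurred, the earlier
          * retransmit was most likely spurious; signal CC_RTO_ERR so that
          * congestion control can undo its reaction.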
5127          */
5128         if (tp->t_flags & TF_PREVVALID) {
5129                 tp->t_flags &= ~TF_PREVVALID;
5130                 if (tp->t_rxtshift == 1 &&
5131                     (int)(ticks - tp->t_badrxtwin) < 0)
5132                         rack_cong_signal(tp, th, CC_RTO_ERR);
5133         }
5134         /*
5135          * Recalculate the transmit timer / rtt.
5136          *
5137          * Some boxes send broken timestamp replies during the SYN+ACK
5138          * phase; ignore timestamps of 0 or we could calculate a huge RTT
5139          * and blow up the retransmit timer.
5140          */
5141         acked = BYTES_THIS_ACK(tp, th);
5142
5143 #ifdef TCP_HHOOK
5144         /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
5145         hhook_run_tcp_est_in(tp, th, to);
5146 #endif
5147
5148         TCPSTAT_ADD(tcps_rcvackpack, nsegs);
5149         TCPSTAT_ADD(tcps_rcvackbyte, acked);
5150         sbdrop(&so->so_snd, acked);
5151         /*
5152          * Let the congestion control algorithm update congestion control
5153          * related information. This typically means increasing the
5154          * congestion window.
5155          */
5156         rack_ack_received(tp, rack, th, nsegs, CC_ACK, 0);
5157
5158         tp->snd_una = th->th_ack;
5159         /*
5160          * Pull snd_wl2 up to prevent seq wrap relative to th_ack.
5161          */
5162         tp->snd_wl2 = th->th_ack;
5163         tp->t_dupacks = 0;
5164         m_freem(m);
5165         /* ND6_HINT(tp);         *//* Some progress has been made. */
5166
5167         /*
5168          * If all outstanding data are acked, stop retransmit timer,
5169          * otherwise restart timer using current (possibly backed-off)
5170          * value. If process is waiting for space, wakeup/selwakeup/signal.
5171          * If data are ready to send, let tcp_output decide between more
5172          * output or persist.
5173          */
5174 #ifdef TCPDEBUG
5175         if (so->so_options & SO_DEBUG)
5176                 tcp_trace(TA_INPUT, ostate, tp,
5177                     (void *)tcp_saveipgen,
5178                     &tcp_savetcp, 0);
5179 #endif
5180         if (tp->snd_una == tp->snd_max) {
5181                 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
5182                 tp->t_acktime = 0;
5183                 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
5184         }
5185         /* Wake up the socket if we have room to write more */
5186         sowwakeup(so);
5187         if (sbavail(&so->so_snd)) {
5188                 rack->r_wanted_output++;
5189         }
5190         return (1);
5191 }
5192
5193 /*
5194  * Return value of 1: the TCB is unlocked and most
5195  * likely gone.  Return value of 0: the TCB is still
5196  * locked.
5197  */
5198 static int
5199 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
5200     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
5201     int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
5202 {
5203         int32_t ret_val = 0;
5204         int32_t todrop;
5205         int32_t ourfinisacked = 0;
5206
5207         rack_calc_rwin(so, tp);
5208         /*
5209          * If the state is SYN_SENT: if seg contains an ACK, but not for our
5210          * SYN, drop the input; if seg contains a RST, then drop the
5211          * connection; if seg does not contain SYN, then drop it.  Otherwise
5212          * this is an acceptable SYN segment: initialize tp->rcv_nxt and
5213          * tp->irs; if seg contains an ack then advance tp->snd_una; if seg
5214          * contains an ECE and ECN support is enabled, the stream is ECN
5215          * capable; if SYN has been acked change to ESTABLISHED, else to the
5216          * SYN_RCVD state; arrange for the segment to be acked (eventually);
5217          * continue processing the rest of data/controls, beginning with URG.
5218          */
5219         if ((thflags & TH_ACK) &&
5220             (SEQ_LEQ(th->th_ack, tp->iss) ||
5221             SEQ_GT(th->th_ack, tp->snd_max))) {
5222                 rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_RST_OPENPORT, tlen);
5223                 return (1);
5224         }
5225         if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) {
5226                 TCP_PROBE5(connect__refused, NULL, tp,
5227                     mtod(m, const char *), tp, th);
5228                 tp = tcp_drop(tp, ECONNREFUSED);
5229                 rack_do_drop(m, tp, ti_locked);
5230                 return (1);
5231         }
5232         if (thflags & TH_RST) {
5233                 rack_do_drop(m, tp, ti_locked);
5234                 return (1);
5235         }
5236         if (!(thflags & TH_SYN)) {
5237                 rack_do_drop(m, tp, ti_locked);
5238                 return (1);
5239         }
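        /*
         * Record the peer's initial sequence number and seed our receive
         * sequence variables from it.
         */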
5240         tp->irs = th->th_seq;
5241         tcp_rcvseqinit(tp);
5242         if (thflags & TH_ACK) {
5243                 int tfo_partial = 0;
5244                 
5245                 TCPSTAT_INC(tcps_connects);
5246                 soisconnected(so);
5247 #ifdef MAC
5248                 mac_socketpeer_set_from_mbuf(m, so);
5249 #endif
5250                 /* Do window scaling on this connection? */
5251                 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
5252                     (TF_RCVD_SCALE | TF_REQ_SCALE)) {
5253                         tp->rcv_scale = tp->request_r_scale;
5254                 }
5255                 tp->rcv_adv += min(tp->rcv_wnd,
5256                     TCP_MAXWIN << tp->rcv_scale);
5257                 /*
5258                  * If not all the data that was sent in the TFO SYN
5259                  * has been acked, resend the remainder right away.
5260                  */
5261                 if (IS_FASTOPEN(tp->t_flags) &&
5262                     (tp->snd_una != tp->snd_max)) {
5263                         tp->snd_nxt = th->th_ack;
5264                         tfo_partial = 1;
5265                 }
5266                 /*
5267                  * If there's data, delay ACK; if there's also a FIN, ACKNOW
5268                  * will be turned on later.
5269                  */
5270                 if (DELAY_ACK(tp, tlen) && tlen != 0 && (tfo_partial == 0)) {
5271                         rack_timer_cancel(tp, (struct tcp_rack *)tp->t_fb_ptr,
5272                                           ((struct tcp_rack *)tp->t_fb_ptr)->r_ctl.rc_rcvtime, __LINE__);
5273                         tp->t_flags |= TF_DELACK;
5274                 } else {
5275                         ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output++;
5276                         tp->t_flags |= TF_ACKNOW;
5277                 }
5278
5279                 if ((thflags & TH_ECE) && V_tcp_do_ecn) {
5280                         tp->t_flags |= TF_ECN_PERMIT;
5281                         TCPSTAT_INC(tcps_ecn_shs);
5282                 }
5283                 if (SEQ_GT(th->th_ack, tp->snd_una)) {
5284                         /*
5285                          * We advance snd_una for the
5286                          * fast open case. If th_ack is
5287                          * acknowledging data beyond
5288                          * snd_una we can't just call
5289                          * ack-processing since the
5290                          * data stream in our send-map
5291                          * will start at snd_una + 1 (one
5292                          * beyond the SYN). If it's just
5293                          * equal we don't need to do that
5294                          * and there is no send-map.
5295                          */
5296                         tp->snd_una++;
5297                 }
5298                 /*
5299                  * Received <SYN,ACK> in SYN_SENT[*] state. Transitions:
5300                  * SYN_SENT  --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1
5301                  */
5302                 tp->t_starttime = ticks;
5303                 if (tp->t_flags & TF_NEEDFIN) {
5304                         tcp_state_change(tp, TCPS_FIN_WAIT_1);
5305                         tp->t_flags &= ~TF_NEEDFIN;
5306                         thflags &= ~TH_SYN;
5307                 } else {
5308                         tcp_state_change(tp, TCPS_ESTABLISHED);
5309                         TCP_PROBE5(connect__established, NULL, tp,
5310                             mtod(m, const char *), tp, th);
5311                         cc_conn_init(tp);
5312                 }
5313         } else {
5314                 /*
5315                  * Received initial SYN in SYN-SENT[*] state => simultaneous
5316                  * open.  If segment contains CC option and there is a
5317                  * cached CC, apply TAO test. If it succeeds, connection is
5318                  * half-synchronized. Otherwise, do 3-way handshake:
5319                  * SYN-SENT -> SYN-RECEIVED, SYN-SENT* -> SYN-RECEIVED*.  If
5320                  * there was no CC option, clear cached CC value.
5321                  */
5322                 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
5323                 tcp_state_change(tp, TCPS_SYN_RECEIVED);
5324         }
5325         KASSERT(*ti_locked == TI_RLOCKED, ("%s: trimthenstep6: "
5326             "ti_locked %d", __func__, *ti_locked));
5327         INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
5328         INP_WLOCK_ASSERT(tp->t_inpcb);
5329         /*
5330          * Advance th->th_seq to correspond to first data byte. If data,
5331          * trim to stay within window, dropping FIN if necessary.
5332          */
5333         th->th_seq++;
5334         if (tlen > tp->rcv_wnd) {
5335                 todrop = tlen - tp->rcv_wnd;
5336                 m_adj(m, -todrop);
5337                 tlen = tp->rcv_wnd;
5338                 thflags &= ~TH_FIN;
5339                 TCPSTAT_INC(tcps_rcvpackafterwin);
5340                 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
5341         }
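        /*
         * Seed snd_wl1 for later window-update checks and pull the urgent
         * pointer up to the first byte of new data.
         */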
5342         tp->snd_wl1 = th->th_seq - 1;
5343         tp->rcv_up = th->th_seq;
5344         /*
5345          * Client side of transaction: already sent SYN and data. If the
5346          * remote host used T/TCP to validate the SYN, our data will be
5347          * ACK'd; if so, enter normal data segment processing in the middle
5348          * of step 5, ack processing. Otherwise, goto step 6.
5349          */
5350         if (thflags & TH_ACK) {
5351                 if (rack_process_ack(m, th, so, tp, to, ti_locked, tiwin, tlen, &ourfinisacked, thflags, &ret_val))
5352                         return (ret_val);
5353                 /* We may have changed to FIN_WAIT_1 above */
5354                 if (tp->t_state == TCPS_FIN_WAIT_1) {
5355                         /*
5356                          * In FIN_WAIT_1 STATE in addition to the processing
5357                          * for the ESTABLISHED state if our FIN is now
5358                          * acknowledged then enter FIN_WAIT_2.
5359                          */
5360                         if (ourfinisacked) {
5361                                 /*
5362                                  * If we can't receive any more data, then
5363                                  * closing user can proceed. Starting the
5364                                  * timer is contrary to the specification,
5365                                  * but if we don't get a FIN we'll hang
5366                                  * forever.
5367                                  *
5368                                  * XXXjl: we should release the tp also, and
5369                                  * use a compressed state.
5370                                  */
5371                                 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5372                                         soisdisconnected(so);
5373                                         tcp_timer_activate(tp, TT_2MSL,
5374                                             (tcp_fast_finwait2_recycle ?
5375                                             tcp_finwait2_timeout :
5376                                             TP_MAXIDLE(tp)));
5377                                 }
5378                                 tcp_state_change(tp, TCPS_FIN_WAIT_2);
5379                         }
5380                 }
5381         }
5382         return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
5383             ti_locked, tiwin, thflags, nxt_pkt));
5384 }
5385
5386 /*
5387  * Return value of 1: the TCB is unlocked and most
5388  * likely gone.  Return value of 0: the TCB is still
5389  * locked.
5390  */
5391 static int
5392 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
5393     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
5394     int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
5395 {
5396         int32_t ret_val = 0;
5397         int32_t ourfinisacked = 0;
5398
5399         rack_calc_rwin(so, tp);
5400
5401         if ((thflags & TH_ACK) &&
5402             (SEQ_LEQ(th->th_ack, tp->snd_una) ||
5403             SEQ_GT(th->th_ack, tp->snd_max))) {
5404                 rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_RST_OPENPORT, tlen);
5405                 return (1);
5406         }
5407         if (IS_FASTOPEN(tp->t_flags)) {
5408                 /*
5409                  * When a TFO connection is in SYN_RECEIVED, the
5410                  * only valid packets are the initial SYN, a
5411                  * retransmit/copy of the initial SYN (possibly with
5412                  * a subset of the original data), a valid ACK, a
5413                  * FIN, or a RST.
5414                  */
5415                 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) {
5416                         rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_RST_OPENPORT, tlen);
5417                         return (1);
5418                 } else if (thflags & TH_SYN) {
5419                         /* non-initial SYN is ignored */
5420                         struct tcp_rack *rack;
5421
5422                         rack = (struct tcp_rack *)tp->t_fb_ptr;
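                        /*
                         * A retransmit, TLP or RACK timer is already
                         * pending; just discard this duplicate SYN.
                         */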
5423                         if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) ||
5424                             (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) ||
5425                             (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) {
5426                                 rack_do_drop(m, NULL, ti_locked);
5427                                 return (0);
5428                         }
5429                 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) {
5430                         rack_do_drop(m, NULL, ti_locked);
5431                         return (0);
5432                 }
5433         }
5434         if (thflags & TH_RST)
5435                 return (rack_process_rst(m, th, so, tp, ti_locked));
5436         /*
5437          * RFC5961 Section 4.2 Send challenge ACK for any SYN in
5438          * synchronized state.
5439          */
5440         if (thflags & TH_SYN) {
5441                 rack_challenge_ack(m, th, tp, ti_locked, &ret_val);
5442                 return (ret_val);
5443         }
5444         /*
5445          * RFC 1323 PAWS: If we have a timestamp reply on this segment and
5446          * it's less than ts_recent, drop it.
5447          */
5448         if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
5449             TSTMP_LT(to->to_tsval, tp->ts_recent)) {
5450                 if (rack_ts_check(m, th, tp, ti_locked, tlen, thflags, &ret_val))
5451                         return (ret_val);
5452         }
5453         /*
5454          * In the SYN-RECEIVED state, validate that the packet belongs to
5455          * this connection before trimming the data to fit the receive
5456          * window.  Check the sequence number versus IRS since we know the
5457          * sequence numbers haven't wrapped.  This is a partial fix for the
5458          * "LAND" DoS attack.
5459          */
5460         if (SEQ_LT(th->th_seq, tp->irs)) {
5461                 rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_RST_OPENPORT, tlen);
5462                 return (1);
5463         }
5464         if (rack_drop_checks(to, m, th, tp, &tlen, ti_locked, &thflags, &drop_hdrlen, &ret_val)) {
5465                 return (ret_val);
5466         }
5467         /*
5468          * If last ACK falls within this segment's sequence numbers, record
5469          * its timestamp. NOTE: 1) That the test incorporates suggestions
5470          * from the latest proposal of the tcplw@cray.com list (Braden
5471          * 1993/04/26). 2) That updating only on newer timestamps interferes
5472          * with our earlier PAWS tests, so this check should be solely
5473          * predicated on the sequence space of this segment. 3) That we
5474          * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
5475          * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
5476          * SEG.Len.  This modified check allows us to overcome RFC1323's
5477          * limitations as described in Stevens TCP/IP Illustrated Vol. 2
5478          * p.869. In such cases, we can still calculate the RTT correctly
5479          * when RCV.NXT == Last.ACK.Sent.
5480          */
5481         if ((to->to_flags & TOF_TS) != 0 &&
5482             SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
5483             SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
5484             ((thflags & (TH_SYN | TH_FIN)) != 0))) {
5485                 tp->ts_recent_age = tcp_ts_getticks();
5486                 tp->ts_recent = to->to_tsval;
5487         }
5488         /*
5489          * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
5490          * is on (half-synchronized state), then queue data for later
5491          * processing; else drop segment and return.
5492          */
5493         if ((thflags & TH_ACK) == 0) {
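                /*
                 * A TFO server may need to send data before the handshake
                 * completes, so take the send window from this segment and
                 * initialize congestion control now.
                 */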
5494                 if (IS_FASTOPEN(tp->t_flags)) {
5495                         tp->snd_wnd = tiwin;
5496                         cc_conn_init(tp);
5497                 }
5498                 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
5499                     ti_locked, tiwin, thflags, nxt_pkt));
5500         }
5501         TCPSTAT_INC(tcps_connects);
5502         soisconnected(so);
5503         /* Do window scaling? */
5504         if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
5505             (TF_RCVD_SCALE | TF_REQ_SCALE)) {
5506                 tp->rcv_scale = tp->request_r_scale;
5507                 tp->snd_wnd = tiwin;
5508         }
5509         /*
5510          * Make transitions: SYN-RECEIVED  -> ESTABLISHED SYN-RECEIVED* ->
5511          * FIN-WAIT-1
5512          */
5513         tp->t_starttime = ticks;
5514         if (tp->t_flags & TF_NEEDFIN) {
5515                 tcp_state_change(tp, TCPS_FIN_WAIT_1);
5516                 tp->t_flags &= ~TF_NEEDFIN;
5517         } else {
5518                 tcp_state_change(tp, TCPS_ESTABLISHED);
5519                 TCP_PROBE5(accept__established, NULL, tp,
5520                     mtod(m, const char *), tp, th);
5521                 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) {
5522                         tcp_fastopen_decrement_counter(tp->t_tfo_pending);
5523                         tp->t_tfo_pending = NULL;
5524
5525                         /*
5526                          * Account for the ACK of our SYN prior to regular
5527                          * ACK processing below.
5528                          */
5529                         tp->snd_una++;
5530                 }
5531                 /*
5532                  * TFO connections call cc_conn_init() during SYN
5533                  * processing.  Calling it again here for such connections
5534                  * is not harmless as it would undo the snd_cwnd reduction
5535                  * that occurs when a TFO SYN|ACK is retransmitted.
5536                  */
5537                 if (!IS_FASTOPEN(tp->t_flags))
5538                         cc_conn_init(tp);
5539         }
5540         /*
5541          * If segment contains data or ACK, will call tcp_reass() later; if
5542          * not, do so now to pass queued data to user.
5543          */
5544         if (tlen == 0 && (thflags & TH_FIN) == 0)
5545                 (void)tcp_reass(tp, (struct tcphdr *)0, 0,
5546                     (struct mbuf *)0);
5547         tp->snd_wl1 = th->th_seq - 1;
5548         if (rack_process_ack(m, th, so, tp, to, ti_locked, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
5549                 return (ret_val);
5550         }
5551         if (tp->t_state == TCPS_FIN_WAIT_1) {
5552                 /* We could have gone to FIN_WAIT_1 (or EST) above */
5553                 /*
5554                  * In FIN_WAIT_1 STATE in addition to the processing for the
5555                  * ESTABLISHED state if our FIN is now acknowledged then
5556                  * enter FIN_WAIT_2.
5557                  */
5558                 if (ourfinisacked) {
5559                         /*
5560                          * If we can't receive any more data, then closing
5561                          * user can proceed. Starting the timer is contrary
5562                          * to the specification, but if we don't get a FIN
5563                          * we'll hang forever.
5564                          *
5565                          * XXXjl: we should release the tp also, and use a
5566                          * compressed state.
5567                          */
5568                         if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5569                                 soisdisconnected(so);
5570                                 tcp_timer_activate(tp, TT_2MSL,
5571                                     (tcp_fast_finwait2_recycle ?
5572                                     tcp_finwait2_timeout :
5573                                     TP_MAXIDLE(tp)));
5574                         }
5575                         tcp_state_change(tp, TCPS_FIN_WAIT_2);
5576                 }
5577         }
5578         return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
5579             ti_locked, tiwin, thflags, nxt_pkt));
5580 }
5581
5582 /*
5583  * Return value of 1: the TCB is unlocked and most
5584  * likely gone.  Return value of 0: the TCB is still
5585  * locked.
5586  */
5587 static int
5588 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so,
5589     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
5590     int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
5591 {
5592         int32_t ret_val = 0;
5593
5594         /*
5595          * Header prediction: check for the two common cases of a
5596          * uni-directional data xfer.  If the packet has no control flags,
5597          * is in-sequence, the window didn't change and we're not
5598          * retransmitting, it's a candidate.  If the length is zero and the
5599          * ack moved forward, we're the sender side of the xfer.  Just free
5600          * the data acked & wake any higher level process that was blocked
5601          * waiting for space.  If the length is non-zero and the ack didn't
5602          * move, we're the receiver side.  If we're getting packets in-order
5603          * (the reassembly queue is empty), add the data to the socket
5604          * buffer and note that we need a delayed ack. Make sure that the
5605          * hidden state-flags are also off. Since we check for
5606          * TCPS_ESTABLISHED first, it can only be TF_NEEDSYN.
5607          */
5608         if (__predict_true(((to->to_flags & TOF_SACK) == 0)) &&
5609             __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_URG | TH_ACK)) == TH_ACK) &&
5610             __predict_true(LIST_EMPTY(&tp->t_segq)) &&
5611             __predict_true(th->th_seq == tp->rcv_nxt)) {
5612                 struct tcp_rack *rack;
5613
5614                 rack = (struct tcp_rack *)tp->t_fb_ptr;
5615                 if (tlen == 0) {
5616                         if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen,
5617                             ti_locked, tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) {
5618                                 return (0);
5619                         }
5620                 } else {
5621                         if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen,
5622                             ti_locked, tiwin, nxt_pkt)) {
5623                                 return (0);
5624                         }
5625                 }
5626         }
5627         rack_calc_rwin(so, tp);
5628
5629         if (thflags & TH_RST)
5630                 return (rack_process_rst(m, th, so, tp, ti_locked));
5631
5632         /*
5633          * RFC5961 Section 4.2 Send challenge ACK for any SYN in
5634          * synchronized state.
5635          */
5636         if (thflags & TH_SYN) {
5637                 rack_challenge_ack(m, th, tp, ti_locked, &ret_val);
5638                 return (ret_val);
5639         }
5640         /*
5641          * RFC 1323 PAWS: If we have a timestamp reply on this segment and
5642          * it's less than ts_recent, drop it.
5643          */
5644         if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
5645             TSTMP_LT(to->to_tsval, tp->ts_recent)) {
5646                 if (rack_ts_check(m, th, tp, ti_locked, tlen, thflags, &ret_val))
5647                         return (ret_val);
5648         }
5649         if (rack_drop_checks(to, m, th, tp, &tlen, ti_locked, &thflags, &drop_hdrlen, &ret_val)) {
5650                 return (ret_val);
5651         }
5652         /*
5653          * If last ACK falls within this segment's sequence numbers, record
5654          * its timestamp. NOTE: 1) That the test incorporates suggestions
5655          * from the latest proposal of the tcplw@cray.com list (Braden
5656          * 1993/04/26). 2) That updating only on newer timestamps interferes
5657          * with our earlier PAWS tests, so this check should be solely
5658          * predicated on the sequence space of this segment. 3) That we
5659          * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
5660          * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
5661          * SEG.Len.  This modified check allows us to overcome RFC1323's
5662          * limitations as described in Stevens TCP/IP Illustrated Vol. 2
5663          * p.869. In such cases, we can still calculate the RTT correctly
5664          * when RCV.NXT == Last.ACK.Sent.
5665          */
5666         if ((to->to_flags & TOF_TS) != 0 &&
5667             SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
5668             SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
5669             ((thflags & (TH_SYN | TH_FIN)) != 0))) {
5670                 tp->ts_recent_age = tcp_ts_getticks();
5671                 tp->ts_recent = to->to_tsval;
5672         }
5673         /*
5674          * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
5675          * is on (half-synchronized state), then queue data for later
5676          * processing; else drop segment and return.
5677          */
5678         if ((thflags & TH_ACK) == 0) {
5679                 if (tp->t_flags & TF_NEEDSYN) {
5680
5681                         return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
5682                             ti_locked, tiwin, thflags, nxt_pkt));
5683
5684                 } else if (tp->t_flags & TF_ACKNOW) {
5685                         rack_do_dropafterack(m, tp, th, ti_locked, thflags, tlen, &ret_val);
5686                         return (ret_val);
5687                 } else {
5688                         rack_do_drop(m, NULL, ti_locked);
5689                         return (0);
5690                 }
5691         }
5692         /*
5693          * Ack processing.
5694          */
5695         if (rack_process_ack(m, th, so, tp, to, ti_locked, tiwin, tlen, NULL, thflags, &ret_val)) {
5696                 return (ret_val);
5697         }
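        /*
         * If data is still queued but the connection has made no forward
         * progress for too long, give up and reset the peer.
         */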
5698         if (sbavail(&so->so_snd)) {
5699                 if (rack_progress_timeout_check(tp)) {
5700                         tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
5701                         rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_RST_OPENPORT, tlen);
5702                         return (1);
5703                 }
5704         }
5705         /* State changes only happen in rack_process_data() */
5706         return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
5707             ti_locked, tiwin, thflags, nxt_pkt));
5708 }
5709
5710 /*
5711  * Return value of 1: the TCB is unlocked and most
5712  * likely gone.  Return value of 0: the TCB is still
5713  * locked.
5714  */
5715 static int
5716 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so,
5717     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
5718     int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
5719 {
5720         int32_t ret_val = 0;
5721
5722         rack_calc_rwin(so, tp);
5723         if (thflags & TH_RST)
5724                 return (rack_process_rst(m, th, so, tp, ti_locked));
5725         /*
5726          * RFC5961 Section 4.2 Send challenge ACK for any SYN in
5727          * synchronized state.
5728          */
5729         if (thflags & TH_SYN) {
5730                 rack_challenge_ack(m, th, tp, ti_locked, &ret_val);
5731                 return (ret_val);
5732         }
5733         /*
5734          * RFC 1323 PAWS: If we have a timestamp reply on this segment and
5735          * it's less than ts_recent, drop it.
5736          */
5737         if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
5738             TSTMP_LT(to->to_tsval, tp->ts_recent)) {
5739                 if (rack_ts_check(m, th, tp, ti_locked, tlen, thflags, &ret_val))
5740                         return (ret_val);
5741         }
5742         if (rack_drop_checks(to, m, th, tp, &tlen, ti_locked, &thflags, &drop_hdrlen, &ret_val)) {
5743                 return (ret_val);
5744         }
5745         /*
5746          * If last ACK falls within this segment's sequence numbers, record
5747          * its timestamp. NOTE: 1) That the test incorporates suggestions
5748          * from the latest proposal of the tcplw@cray.com list (Braden
5749          * 1993/04/26). 2) That updating only on newer timestamps interferes
5750          * with our earlier PAWS tests, so this check should be solely
5751          * predicated on the sequence space of this segment. 3) That we
5752          * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
5753          * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
5754          * SEG.Len.  This modified check allows us to overcome RFC1323's
5755          * limitations as described in Stevens TCP/IP Illustrated Vol. 2
5756          * p.869. In such cases, we can still calculate the RTT correctly
5757          * when RCV.NXT == Last.ACK.Sent.
5758          */
5759         if ((to->to_flags & TOF_TS) != 0 &&
5760             SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
5761             SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
5762             ((thflags & (TH_SYN | TH_FIN)) != 0))) {
5763                 tp->ts_recent_age = tcp_ts_getticks();
5764                 tp->ts_recent = to->to_tsval;
5765         }
5766         /*
5767          * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
5768          * is on (half-synchronized state), then queue data for later
5769          * processing; else drop segment and return.
5770          */
5771         if ((thflags & TH_ACK) == 0) {
5772                 if (tp->t_flags & TF_NEEDSYN) {
5773                         return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
5774                             ti_locked, tiwin, thflags, nxt_pkt));
5775
5776                 } else if (tp->t_flags & TF_ACKNOW) {
5777                         rack_do_dropafterack(m, tp, th, ti_locked, thflags, tlen, &ret_val);
5778                         return (ret_val);
5779                 } else {
5780                         rack_do_drop(m, NULL, ti_locked);
5781                         return (0);
5782                 }
5783         }
5784         /*
5785          * Ack processing.
5786          */
5787         if (rack_process_ack(m, th, so, tp, to, ti_locked, tiwin, tlen, NULL, thflags, &ret_val)) {
5788                 return (ret_val);
5789         }
5790         if (sbavail(&so->so_snd)) {
5791                 if (rack_progress_timeout_check(tp)) {
5792                         tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
5793                         rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_RST_OPENPORT, tlen);
5794                         return (1);
5795                 }
5796         }
5797         return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
5798             ti_locked, tiwin, thflags, nxt_pkt));
5799 }
5800
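/*
 * Handle a segment that carries data after the file descriptor has been
 * closed.  Unless this connection has been told to tolerate such data
 * (and our send buffer is not yet empty), close the connection and reset
 * the peer; otherwise the payload is ignored and a reset follows later.
 */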
5801 static int
5802 rack_check_data_after_close(struct mbuf *m, 
5803     struct tcpcb *tp, int32_t *ti_locked, int32_t *tlen, struct tcphdr *th, struct socket *so)
5804 {
5805         struct tcp_rack *rack; 
5806
5807         KASSERT(*ti_locked == TI_RLOCKED, ("%s: SS_NOFDEREF && "
5808                                            "CLOSE_WAIT && tlen ti_locked %d", __func__, *ti_locked));
5809         INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
5810         rack = (struct tcp_rack *)tp->t_fb_ptr;
5811         if (rack->rc_allow_data_af_clo == 0) {
5812         close_now:
5813                 tp = tcp_close(tp);
5814                 TCPSTAT_INC(tcps_rcvafterclose);
5815                 rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_UNLIMITED, (*tlen));
5816                 return (1);
5817         }
5818         if (sbavail(&so->so_snd) == 0)
5819                 goto close_now;
5820         /* Ok we allow data that is ignored and a followup reset */
5821         tp->rcv_nxt = th->th_seq + *tlen;
5822         tp->t_flags2 |= TF2_DROP_AF_DATA;
5823         rack->r_wanted_output = 1;
5824         *tlen = 0;
5825         return (0);
5826 }
5827
5828 /*
5829  * Return value of 1: the TCB is unlocked and most
5830  * likely gone.  Return value of 0: the TCB is still
5831  * locked.
5832  */
5833 static int
5834 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so,
5835     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
5836     int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
5837 {
5838         int32_t ret_val = 0;
5839         int32_t ourfinisacked = 0;
5840
5841         rack_calc_rwin(so, tp);
5842
5843         if (thflags & TH_RST)
5844                 return (rack_process_rst(m, th, so, tp, ti_locked));
5845         /*
5846          * RFC5961 Section 4.2 Send challenge ACK for any SYN in
5847          * synchronized state.
5848          */
5849         if (thflags & TH_SYN) {
5850                 rack_challenge_ack(m, th, tp, ti_locked, &ret_val);
5851                 return (ret_val);
5852         }
5853         /*
5854          * RFC 1323 PAWS: If we have a timestamp reply on this segment and
5855          * it's less than ts_recent, drop it.
5856          */
5857         if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
5858             TSTMP_LT(to->to_tsval, tp->ts_recent)) {
5859                 if (rack_ts_check(m, th, tp, ti_locked, tlen, thflags, &ret_val))
5860                         return (ret_val);
5861         }
5862         if (rack_drop_checks(to, m, th, tp, &tlen, ti_locked, &thflags, &drop_hdrlen, &ret_val)) {
5863                 return (ret_val);
5864         }
5865         /*
5866          * If new data are received on a connection after the user processes
5867          * are gone, then RST the other end.
5868          */
5869         if ((so->so_state & SS_NOFDREF) && tlen) {
5870                 if (rack_check_data_after_close(m, tp, ti_locked, &tlen, th, so))
5871                         return (1);
5872         }
5873         /*
5874          * If last ACK falls within this segment's sequence numbers, record
5875          * its timestamp. NOTE: 1) That the test incorporates suggestions
5876          * from the latest proposal of the tcplw@cray.com list (Braden
5877          * 1993/04/26). 2) That updating only on newer timestamps interferes
5878          * with our earlier PAWS tests, so this check should be solely
5879          * predicated on the sequence space of this segment. 3) That we
5880          * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
5881          * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
5882          * SEG.Len.  This modified check allows us to overcome RFC1323's
5883          * limitations as described in Stevens TCP/IP Illustrated Vol. 2
5884          * p.869. In such cases, we can still calculate the RTT correctly
5885          * when RCV.NXT == Last.ACK.Sent.
5886          */
5887         if ((to->to_flags & TOF_TS) != 0 &&
5888             SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
5889             SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
5890             ((thflags & (TH_SYN | TH_FIN)) != 0))) {
5891                 tp->ts_recent_age = tcp_ts_getticks();
5892                 tp->ts_recent = to->to_tsval;
5893         }
5894         /*
5895          * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
5896          * is on (half-synchronized state), then queue data for later
5897          * processing; else drop segment and return.
5898          */
5899         if ((thflags & TH_ACK) == 0) {
5900                 if (tp->t_flags & TF_NEEDSYN) {
5901                         return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
5902                             ti_locked, tiwin, thflags, nxt_pkt));
5903                 } else if (tp->t_flags & TF_ACKNOW) {
5904                         rack_do_dropafterack(m, tp, th, ti_locked, thflags, tlen, &ret_val);
5905                         return (ret_val);
5906                 } else {
5907                         rack_do_drop(m, NULL, ti_locked);
5908                         return (0);
5909                 }
5910         }
5911         /*
5912          * Ack processing.
5913          */
5914         if (rack_process_ack(m, th, so, tp, to, ti_locked, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
5915                 return (ret_val);
5916         }
5917         if (ourfinisacked) {
5918                 /*
5919                  * If we can't receive any more data, then closing user can
5920                  * proceed. Starting the timer is contrary to the
5921                  * specification, but if we don't get a FIN we'll hang
5922                  * forever.
5923                  *
5924                  * XXXjl: we should release the tp also, and use a
5925                  * compressed state.
5926                  */
5927                 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5928                         soisdisconnected(so);
5929                         tcp_timer_activate(tp, TT_2MSL,
5930                             (tcp_fast_finwait2_recycle ?
5931                             tcp_finwait2_timeout :
5932                             TP_MAXIDLE(tp)));
5933                 }
5934                 tcp_state_change(tp, TCPS_FIN_WAIT_2);
5935         }
5936         if (sbavail(&so->so_snd)) {
5937                 if (rack_progress_timeout_check(tp)) {
5938                         tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
5939                         rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_RST_OPENPORT, tlen);
5940                         return (1);
5941                 }
5942         }
5943         return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
5944             ti_locked, tiwin, thflags, nxt_pkt));
5945 }
5946
5947 /*
5948  * Return value of 1: the TCB is unlocked and most
5949  * likely gone.  Return value of 0: the TCB is still
5950  * locked.
5951  */
5952 static int
5953 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so,
5954     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
5955     int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
5956 {
5957         int32_t ret_val = 0;
5958         int32_t ourfinisacked = 0;
5959
5960         rack_calc_rwin(so, tp);
5961
5962         if (thflags & TH_RST)
5963                 return (rack_process_rst(m, th, so, tp, ti_locked));
5964         /*
5965          * RFC5961 Section 4.2 Send challenge ACK for any SYN in
5966          * synchronized state.
5967          */
5968         if (thflags & TH_SYN) {
5969                 rack_challenge_ack(m, th, tp, ti_locked, &ret_val);
5970                 return (ret_val);
5971         }
5972         /*
5973          * RFC 1323 PAWS: If we have a timestamp reply on this segment and
5974          * it's less than ts_recent, drop it.
5975          */
5976         if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
5977             TSTMP_LT(to->to_tsval, tp->ts_recent)) {
5978                 if (rack_ts_check(m, th, tp, ti_locked, tlen, thflags, &ret_val))
5979                         return (ret_val);
5980         }
5981         if (rack_drop_checks(to, m, th, tp, &tlen, ti_locked, &thflags, &drop_hdrlen, &ret_val)) {
5982                 return (ret_val);
5983         }
5984         /*
5985          * If new data are received on a connection after the user processes
5986          * are gone, then RST the other end.
5987          */
5988         if ((so->so_state & SS_NOFDREF) && tlen) {
5989                 if (rack_check_data_after_close(m, tp, ti_locked, &tlen, th, so))
5990                         return (1);
5991         }
5992         /*
5993          * If last ACK falls within this segment's sequence numbers, record
5994          * its timestamp. NOTE: 1) That the test incorporates suggestions
5995          * from the latest proposal of the tcplw@cray.com list (Braden
5996          * 1993/04/26). 2) That updating only on newer timestamps interferes
5997          * with our earlier PAWS tests, so this check should be solely
5998          * predicated on the sequence space of this segment. 3) That we
5999          * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
6000          * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
6001          * SEG.Len.  This modified check allows us to overcome RFC1323's
6002          * limitations as described in Stevens TCP/IP Illustrated Vol. 2
6003          * p.869. In such cases, we can still calculate the RTT correctly
6004          * when RCV.NXT == Last.ACK.Sent.
6005          */
6006         if ((to->to_flags & TOF_TS) != 0 &&
6007             SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
6008             SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
6009             ((thflags & (TH_SYN | TH_FIN)) != 0))) {
6010                 tp->ts_recent_age = tcp_ts_getticks();
6011                 tp->ts_recent = to->to_tsval;
6012         }
6013         /*
6014          * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
6015          * is on (half-synchronized state), then queue data for later
6016          * processing; else drop segment and return.
6017          */
6018         if ((thflags & TH_ACK) == 0) {
6019                 if (tp->t_flags & TF_NEEDSYN) {
6020                         return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6021                             ti_locked, tiwin, thflags, nxt_pkt));
6022                 } else if (tp->t_flags & TF_ACKNOW) {
6023                         rack_do_dropafterack(m, tp, th, ti_locked, thflags, tlen, &ret_val);
6024                         return (ret_val);
6025                 } else {
6026                         rack_do_drop(m, NULL, ti_locked);
6027                         return (0);
6028                 }
6029         }
6030         /*
6031          * Ack processing.
6032          */
6033         if (rack_process_ack(m, th, so, tp, to, ti_locked, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
6034                 return (ret_val);
6035         }
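        /*
         * The ACK of our FIN completes the close: tcp_twstart() moves the
         * connection into TIME_WAIT, after which the tcb is no longer
         * ours, so we return 1.
         */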
6036         if (ourfinisacked) {
6037                 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
6038                 tcp_twstart(tp);
6039                 INP_INFO_RUNLOCK(&V_tcbinfo);
6040                 *ti_locked = TI_UNLOCKED;
6041                 m_freem(m);
6042                 return (1);
6043         }
6044         if (sbavail(&so->so_snd)) {
6045                 if (rack_progress_timeout_check(tp)) {
6046                         tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
6047                         rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_RST_OPENPORT, tlen);
6048                         return (1);
6049                 }
6050         }
6051         return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6052             ti_locked, tiwin, thflags, nxt_pkt));
6053 }
6054
6055 /*
6056  * Return value of 1: the TCB is unlocked and most
6057  * likely gone.  Return value of 0: the TCB is still
6058  * locked.
6059  */
6060 static int
6061 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
6062     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
6063     int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
6064 {
6065         int32_t ret_val = 0;
6066         int32_t ourfinisacked = 0;
6067
6068         rack_calc_rwin(so, tp);
6069
6070         if (thflags & TH_RST)
6071                 return (rack_process_rst(m, th, so, tp, ti_locked));
6072         /*
6073          * RFC5961 Section 4.2 Send challenge ACK for any SYN in
6074          * synchronized state.
6075          */
6076         if (thflags & TH_SYN) {
6077                 rack_challenge_ack(m, th, tp, ti_locked, &ret_val);
6078                 return (ret_val);
6079         }
6080         /*
6081          * RFC 1323 PAWS: If we have a timestamp reply on this segment and
6082          * it's less than ts_recent, drop it.
6083          */
6084         if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
6085             TSTMP_LT(to->to_tsval, tp->ts_recent)) {
6086                 if (rack_ts_check(m, th, tp, ti_locked, tlen, thflags, &ret_val))
6087                         return (ret_val);
6088         }
6089         if (rack_drop_checks(to, m, th, tp, &tlen, ti_locked, &thflags, &drop_hdrlen, &ret_val)) {
6090                 return (ret_val);
6091         }
6092         /*
6093          * If new data are received on a connection after the user processes
6094          * are gone, then RST the other end.
6095          */
6096         if ((so->so_state & SS_NOFDREF) && tlen) {
6097                 if (rack_check_data_after_close(m, tp, ti_locked, &tlen, th, so))
6098                         return (1);
6099         }
6100         /*
6101          * If last ACK falls within this segment's sequence numbers, record
6102          * its timestamp. NOTE: 1) That the test incorporates suggestions
6103          * from the latest proposal of the tcplw@cray.com list (Braden
6104          * 1993/04/26). 2) That updating only on newer timestamps interferes
6105          * with our earlier PAWS tests, so this check should be solely
6106          * predicated on the sequence space of this segment. 3) That we
6107          * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
6108          * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
6109          * SEG.Len.  This modified check allows us to overcome RFC1323's
6110          * limitations as described in Stevens TCP/IP Illustrated Vol. 2
6111          * p.869. In such cases, we can still calculate the RTT correctly
6112          * when RCV.NXT == Last.ACK.Sent.
6113          */
6114         if ((to->to_flags & TOF_TS) != 0 &&
6115             SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
6116             SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
6117             ((thflags & (TH_SYN | TH_FIN)) != 0))) {
6118                 tp->ts_recent_age = tcp_ts_getticks();
6119                 tp->ts_recent = to->to_tsval;
6120         }
6121         /*
6122          * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
6123          * is on (half-synchronized state), then queue data for later
6124          * processing; else drop segment and return.
6125          */
6126         if ((thflags & TH_ACK) == 0) {
6127                 if (tp->t_flags & TF_NEEDSYN) {
6128                         return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6129                             ti_locked, tiwin, thflags, nxt_pkt));
6130                 } else if (tp->t_flags & TF_ACKNOW) {
6131                         rack_do_dropafterack(m, tp, th, ti_locked, thflags, tlen, &ret_val);
6132                         return (ret_val);
6133                 } else {
6134                         rack_do_drop(m, NULL, ti_locked);
6135                         return (0);
6136                 }
6137         }
6138         /*
6139          * case TCPS_LAST_ACK: Ack processing.
6140          */
6141         if (rack_process_ack(m, th, so, tp, to, ti_locked, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
6142                 return (ret_val);
6143         }
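        /*
         * In LAST_ACK the ACK of our FIN is all we were waiting for;
         * close the connection and drop the segment.
         */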
6144         if (ourfinisacked) {
6145                 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
6146                 tp = tcp_close(tp);
6147                 rack_do_drop(m, tp, ti_locked);
6148                 return (1);
6149         }
6150         if (sbavail(&so->so_snd)) {
6151                 if (rack_progress_timeout_check(tp)) {
6152                         tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
6153                         rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_RST_OPENPORT, tlen);
6154                         return (1);
6155                 }
6156         }
6157         return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6158             ti_locked, tiwin, thflags, nxt_pkt));
6159 }
6160
6161
6162 /*
6163  * Return value of 1, the TCB is unlocked and most
6164  * likely gone, return value of 0, the TCP is still
6165  * locked.
6166  */
6167 static int
6168 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
6169     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
6170     int32_t * ti_locked, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
6171 {
6172         int32_t ret_val = 0;
6173         int32_t ourfinisacked = 0;
6174
6175         rack_calc_rwin(so, tp);
6176
6177         /* Process an incoming RST before anything else. */
6178         if (thflags & TH_RST)
6179                 return (rack_process_rst(m, th, so, tp, ti_locked));
6180         /*
6181          * RFC5961 Section 4.2 Send challenge ACK for any SYN in
6182          * synchronized state.
6183          */
6184         if (thflags & TH_SYN) {
6185                 rack_challenge_ack(m, th, tp, ti_locked, &ret_val);
6186                 return (ret_val);
6187         }
6188         /*
6189          * RFC 1323 PAWS: If we have a timestamp reply on this segment and
6190          * it's less than ts_recent, drop it.
6191          */
6192         if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
6193             TSTMP_LT(to->to_tsval, tp->ts_recent)) {
6194                 if (rack_ts_check(m, th, tp, ti_locked, tlen, thflags, &ret_val))
6195                         return (ret_val);
6196         }
6197         if (rack_drop_checks(to, m, th, tp, &tlen, ti_locked, &thflags, &drop_hdrlen, &ret_val)) {
6198                 return (ret_val);
6199         }
6200         /*
6201          * If new data are received on a connection after the user processes
6202          * are gone, then RST the other end.
6203          */
6204         if ((so->so_state & SS_NOFDREF) &&
6205             tlen) {
6206                 if (rack_check_data_after_close(m, tp, ti_locked, &tlen, th, so))
6207                         return (1);
6208         }
6209         /*
6210          * If last ACK falls within this segment's sequence numbers, record
6211          * its timestamp. NOTE: 1) That the test incorporates suggestions
6212          * from the latest proposal of the tcplw@cray.com list (Braden
6213          * 1993/04/26). 2) That updating only on newer timestamps interferes
6214          * with our earlier PAWS tests, so this check should be solely
6215          * predicated on the sequence space of this segment. 3) That we
6216          * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
6217          * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
6218          * SEG.Len.  This modified check allows us to overcome RFC1323's
6219          * limitations as described in Stevens TCP/IP Illustrated Vol. 2
6220          * p.869. In such cases, we can still calculate the RTT correctly
6221          * when RCV.NXT == Last.ACK.Sent.
6222          */
6223         if ((to->to_flags & TOF_TS) != 0 &&
6224             SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
6225             SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
6226             ((thflags & (TH_SYN | TH_FIN)) != 0))) {
6227                 tp->ts_recent_age = tcp_ts_getticks();
6228                 tp->ts_recent = to->to_tsval;
6229         }
6230         /*
6231          * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
6232          * is on (half-synchronized state), then queue data for later
6233          * processing; else drop segment and return.
6234          */
6235         if ((thflags & TH_ACK) == 0) {
6236                 if (tp->t_flags & TF_NEEDSYN) {
6237                         return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6238                             ti_locked, tiwin, thflags, nxt_pkt));
6239                 } else if (tp->t_flags & TF_ACKNOW) {
6240                         rack_do_dropafterack(m, tp, th, ti_locked, thflags, tlen, &ret_val);
6241                         return (ret_val);
6242                 } else {
6243                         rack_do_drop(m, NULL, ti_locked);
6244                         return (0);
6245                 }
6246         }
6247         /*
6248          * Ack processing.
6249          */
6250         if (rack_process_ack(m, th, so, tp, to, ti_locked, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
6251                 return (ret_val);
6252         }
6253         if (sbavail(&so->so_snd)) {
6254                 if (rack_progress_timeout_check(tp)) {
6255                         tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
6256                         rack_do_dropwithreset(m, tp, th, ti_locked, BANDLIM_RST_OPENPORT, tlen);
6257                         return (1);
6258                 }
6259         }
6260         return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
6261             ti_locked, tiwin, thflags, nxt_pkt));
6262 }
6263
6264
6265 static inline void
6266 rack_clear_rate_sample(struct tcp_rack *rack)
6267 {
6268         rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY;
6269         rack->r_ctl.rack_rs.rs_rtt_cnt = 0;
6270         rack->r_ctl.rack_rs.rs_rtt_tot = 0;
6271 }
6272
6273 static int
6274 rack_init(struct tcpcb *tp)
6275 {
6276         struct tcp_rack *rack = NULL;
6277
6278         tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT);
6279         if (tp->t_fb_ptr == NULL) {
6280                 /*
6281                  * We need to allocate memory but can't.  The INP and INP_INFO
6282                  * locks are held and are recursive (this happens during setup),
6283                  * so a scheme that drops the locks and retries fails here.
6284                  * All we can do is return ENOMEM.
6285                  */
6286                 return (ENOMEM);
6287         }
6288         memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack));
6289
6290         rack = (struct tcp_rack *)tp->t_fb_ptr;
6291         TAILQ_INIT(&rack->r_ctl.rc_map);
6292         TAILQ_INIT(&rack->r_ctl.rc_free);
6293         TAILQ_INIT(&rack->r_ctl.rc_tmap);
6294         rack->rc_tp = tp;
6295         if (tp->t_inpcb) {
6296                 rack->rc_inp = tp->t_inpcb;
6297         }
6298         /* Probably not needed but let's be sure */
6299         rack_clear_rate_sample(rack);
6300         rack->r_cpu = 0;
6301         rack->r_ctl.rc_reorder_fade = rack_reorder_fade;
6302         rack->rc_allow_data_af_clo = rack_ignore_data_after_close;
6303         rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh;
6304         rack->rc_pace_reduce = rack_slot_reduction;
6305         if (V_tcp_delack_enabled)
6306                 tp->t_delayed_ack = 1;
6307         else
6308                 tp->t_delayed_ack = 0;
6309         rack->rc_pace_max_segs = rack_hptsi_segments;
6310         rack->r_ctl.rc_early_recovery_segs = rack_early_recovery_max_seg;
6311         rack->r_ctl.rc_reorder_shift = rack_reorder_thresh;
6312         rack->r_ctl.rc_pkt_delay = rack_pkt_delay;
6313         rack->r_ctl.rc_prop_reduce = rack_use_proportional_reduce;
6314         rack->r_idle_reduce_largest  = rack_reduce_largest_on_idle;
6315         rack->r_enforce_min_pace = rack_min_pace_time;
6316         rack->r_min_pace_seg_thresh = rack_min_pace_time_seg_req;
6317         rack->r_ctl.rc_prop_rate = rack_proportional_rate;
6318         rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp;
6319         rack->r_ctl.rc_early_recovery = rack_early_recovery;
6320         rack->rc_always_pace = rack_pace_every_seg;
6321         rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method;
6322         rack->rack_tlp_threshold_use = rack_tlp_threshold_use;
6323         rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr;
6324         rack->r_ctl.rc_min_to = rack_min_to;
6325         rack->r_ctl.rc_prr_inc_var = rack_inc_var;
6326         rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), __LINE__, 0, 0, 0);
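        /*
         * Note: if data is already outstanding (e.g. this stack is taking
         * over an established connection), the block below seeds the
         * sendmap so that every byte in [snd_una, snd_max) is covered by
         * an entry; RACK's loss detection depends on that coverage.
         */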
6327         if (tp->snd_una != tp->snd_max) {
6328                 /* Create a send map for the current outstanding data */
6329                 struct rack_sendmap *rsm;
6330
6331                 rsm = rack_alloc(rack);
6332                 if (rsm == NULL) {
6333                         uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
6334                         tp->t_fb_ptr = NULL;
6335                         return (ENOMEM);
6336                 }
6337                 rsm->r_flags = RACK_OVERMAX;
6338                 rsm->r_tim_lastsent[0] = tcp_ts_getticks();
6339                 rsm->r_rtr_cnt = 1;
6340                 rsm->r_rtr_bytes = 0;
6341                 rsm->r_start = tp->snd_una;
6342                 rsm->r_end = tp->snd_max;
6343                 rsm->r_sndcnt = 0;
6344                 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_map, rsm, r_next);
6345                 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
6346                 rsm->r_in_tmap = 1;
6347         }
6348         return (0);
6349 }
6350
6351 static int
6352 rack_handoff_ok(struct tcpcb *tp)
6353 {
6354         if ((tp->t_state == TCPS_CLOSED) ||
6355             (tp->t_state == TCPS_LISTEN)) {
6356                 /* Sure no problem though it may not stick */
6357                 return (0);
6358         }
6359         if ((tp->t_state == TCPS_SYN_SENT) ||
6360             (tp->t_state == TCPS_SYN_RECEIVED)) {
6361                 /*
6362                  * We really don't know yet; the connection has to reach
6363                  * ESTABLISHED or beyond before we can tell.
6364                  */
6365                 return (EAGAIN);
6366         }
6367         if (tp->t_flags & TF_SACK_PERMIT) {
6368                 return (0);
6369         }
6370         /*
6371          * If we reach here we don't do SACK on this connection so we can
6372          * never do rack.
6373          */
6374         return (EINVAL);
6375 }
6376
6377 static void
6378 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged)
6379 {
6380         if (tp->t_fb_ptr) {
6381                 struct tcp_rack *rack;
6382                 struct rack_sendmap *rsm;
6383
6384                 rack = (struct tcp_rack *)tp->t_fb_ptr;
6385 #ifdef TCP_BLACKBOX
6386                 tcp_log_flowend(tp);
6387 #endif
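                /*
                 * Free every entry on the active map and on the free cache,
                 * then release the rack control block itself.
                 */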
6388                 rsm = TAILQ_FIRST(&rack->r_ctl.rc_map);
6389                 while (rsm) {
6390                         TAILQ_REMOVE(&rack->r_ctl.rc_map, rsm, r_next);
6391                         uma_zfree(rack_zone, rsm);
6392                         rsm = TAILQ_FIRST(&rack->r_ctl.rc_map);
6393                 }
6394                 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
6395                 while (rsm) {
6396                         TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_next);
6397                         uma_zfree(rack_zone, rsm);
6398                         rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
6399                 }
6400                 rack->rc_free_cnt = 0;
6401                 uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
6402                 tp->t_fb_ptr = NULL;
6403         }
6404 }
6405
6406 static void
6407 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack)
6408 {
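        /*
         * r_substate is the per-state input handler; rack_hpts_do_segment()
         * dispatches each segment through (*rack->r_substate)() once the
         * state cached here matches tp->t_state.
         */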
6409         switch (tp->t_state) {
6410         case TCPS_SYN_SENT:
6411                 rack->r_state = TCPS_SYN_SENT;
6412                 rack->r_substate = rack_do_syn_sent;
6413                 break;
6414         case TCPS_SYN_RECEIVED:
6415                 rack->r_state = TCPS_SYN_RECEIVED;
6416                 rack->r_substate = rack_do_syn_recv;
6417                 break;
6418         case TCPS_ESTABLISHED:
6419                 rack->r_state = TCPS_ESTABLISHED;
6420                 rack->r_substate = rack_do_established;
6421                 break;
6422         case TCPS_CLOSE_WAIT:
6423                 rack->r_state = TCPS_CLOSE_WAIT;
6424                 rack->r_substate = rack_do_close_wait;
6425                 break;
6426         case TCPS_FIN_WAIT_1:
6427                 rack->r_state = TCPS_FIN_WAIT_1;
6428                 rack->r_substate = rack_do_fin_wait_1;
6429                 break;
6430         case TCPS_CLOSING:
6431                 rack->r_state = TCPS_CLOSING;
6432                 rack->r_substate = rack_do_closing;
6433                 break;
6434         case TCPS_LAST_ACK:
6435                 rack->r_state = TCPS_LAST_ACK;
6436                 rack->r_substate = rack_do_lastack;
6437                 break;
6438         case TCPS_FIN_WAIT_2:
6439                 rack->r_state = TCPS_FIN_WAIT_2;
6440                 rack->r_substate = rack_do_fin_wait_2;
6441                 break;
6442         case TCPS_LISTEN:
6443         case TCPS_CLOSED:
6444         case TCPS_TIME_WAIT:
6445         default:
6446 #ifdef INVARIANTS
6447                 panic("tcp tp:%p state:%d sees impossible state?", tp, tp->t_state);
6448 #endif
6449                 break;
6450         };
6451 }
6452
6453
6454 static void
6455 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb)
6456 {
6457         /*
6458          * We received an ack, and then either did not call send or
6459          * were bounced out because the hpts was running.  Now a timer
6460          * is up as well; is it the right one for the current state of
6461          * the connection?
6462          */
6463         struct rack_sendmap *rsm;
6464         int tmr_up;
6465         
6466         tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
6467         if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT))
6468                 return;
6469         rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
6470         if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) &&
6471             (tmr_up == PACE_TMR_RXT)) {
6472                 /* Should be an RXT */
6473                 return;
6474         }
6475         if (rsm == NULL) {
6476                 /* Nothing outstanding? */
6477                 if (tp->t_flags & TF_DELACK) {
6478                         if (tmr_up == PACE_TMR_DELACK)
6479                                 /* We are supposed to have delayed ack up and we do */
6480                                 return;
6481                 } else if (sbavail(&tp->t_inpcb->inp_socket->so_snd) && (tmr_up == PACE_TMR_RXT)) {
6482                         /* 
6483                          * If we hit ENOBUFS then we would expect the possibility
6484                          * of nothing outstanding and the RXT timer up (and the hptsi timer).
6485                          */
6486                         return;
6487                 } else if (((tcp_always_keepalive ||
6488                              rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
6489                             (tp->t_state <= TCPS_CLOSING)) &&
6490                            (tmr_up == PACE_TMR_KEEP) &&
6491                            (tp->snd_max == tp->snd_una)) {
6492                         /* We should have keep alive up and we do */
6493                         return;
6494                 }
6495         }
6496         if (rsm && (rsm->r_flags & RACK_SACK_PASSED)) {
6497                 if ((tp->t_flags & TF_SENTFIN) &&
6498                     ((tp->snd_max - tp->snd_una) == 1) &&
6499                     (rsm->r_flags & RACK_HAS_FIN)) {
6500                         /* needs to be a RXT */
6501                         if (tmr_up == PACE_TMR_RXT)
6502                                 return;
6503                 } else if (tmr_up == PACE_TMR_RACK)
6504                         return;
6505         } else if (SEQ_GT(tp->snd_max,tp->snd_una) &&
6506                    ((tmr_up == PACE_TMR_TLP) ||
6507                     (tmr_up == PACE_TMR_RXT))) {
6508                 /* 
6509                  * Either a TLP or RXT is fine if no SACK-passed segment
6510                  * is in place and data is outstanding.
6511                  */
6512                 return;
6513         } else if (tmr_up == PACE_TMR_DELACK) {
6514                 /*
6515                  * If the delayed ack was going to go off
6516                  * before the rtx/tlp/rack timer were going to
6517                  * expire, then that would be the timer in control.
6518                  * Note we don't check the time here trusting the
6519                  * code is correct.
6520                  */
6521                 return;
6522         }
6523         /* 
6524          * Ok, the timer that was originally started is not what we want
6525          * now.  Force the hpts to stop, if it is running, and restart
6526          * with the slot set to what was saved.
6527          */
6528         rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
6529         rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), __LINE__, 0, 0, 0);
6530 }
6531
6532 static void
6533 rack_hpts_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
6534     struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos,
6535     int32_t ti_locked, int32_t nxt_pkt, struct timeval *tv)
6536 {
6537         int32_t thflags, retval, did_out = 0;
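        /*
         * way_out records which exit path was taken (roughly: 1 = timer
         * (re)started, 2 = existing timer audited, 3 = entered persist
         * after a window shrink); it is only consumed by the
         * rack_log_doseg_done() trace below.
         */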
6538         int32_t way_out = 0;
6539         uint32_t cts;
6540         uint32_t tiwin;
6541         struct tcpopt to;
6542         struct tcp_rack *rack;
6543         struct rack_sendmap *rsm;
6544         int32_t prev_state = 0;
6545
6546         cts = tcp_tv_to_mssectick(tv);
6547         rack = (struct tcp_rack *)tp->t_fb_ptr;
6548
6549         kern_prefetch(rack, &prev_state);
6550         prev_state = 0;
6551         thflags = th->th_flags;
6552         /*
6553          * If this is either a state-changing packet or current state isn't
6554          * established, we require a read lock on tcbinfo.  Otherwise, we
6555          * allow the tcbinfo to be either locked or unlocked, as the
6556          * caller may have unnecessarily acquired a lock due to a race.
6557          */
6558         if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
6559             tp->t_state != TCPS_ESTABLISHED) {
6560                 KASSERT(ti_locked == TI_RLOCKED, ("%s ti_locked %d for "
6561                     "SYN/FIN/RST/!EST", __func__, ti_locked));
6562                 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
6563         } else {
6564 #ifdef INVARIANTS
6565                 if (ti_locked == TI_RLOCKED) {
6566                         INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
6567                 } else {
6568                         KASSERT(ti_locked == TI_UNLOCKED, ("%s: EST "
6569                             "ti_locked: %d", __func__, ti_locked));
6570                         INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
6571                 }
6572 #endif
6573         }
6574         INP_WLOCK_ASSERT(tp->t_inpcb);
6575         KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
6576             __func__));
6577         KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
6578             __func__));
6579         {
6580                 union tcp_log_stackspecific log;
6581
6582                 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
6583                 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
6584                 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
6585                 TCP_LOG_EVENT(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
6586                     tlen, &log, true);
6587         }
6588         /*
6589          * Segment received on connection. Reset idle time and keep-alive
6590          * timer. XXX: This should be done after segment validation to
6591          * ignore broken/spoofed segs.
6592          */
6593         if  (tp->t_idle_reduce && (tp->snd_max == tp->snd_una)) {
6594 #ifdef NETFLIX_CWV
6595                 if ((tp->cwv_enabled) &&
6596                     ((tp->cwv_cwnd_valid == 0) &&
6597                      TCPS_HAVEESTABLISHED(tp->t_state) &&
6598                      (tp->snd_cwnd > tp->snd_cwv.init_cwnd))) {
6599                         tcp_newcwv_nvp_closedown(tp);
6600                 } else 
6601 #endif
6602                        if ((ticks - tp->t_rcvtime) >= tp->t_rxtcur) {
6603                         counter_u64_add(rack_input_idle_reduces, 1);
6604                         rack_cc_after_idle(tp,
6605                             (rack->r_idle_reduce_largest ? 1 :0));
6606                 }
6607         }
6608         rack->r_ctl.rc_rcvtime = cts;
6609         tp->t_rcvtime = ticks;
6610
6611 #ifdef NETFLIX_CWV
6612         if (tp->cwv_enabled) {
6613                 if ((tp->cwv_cwnd_valid == 0) &&
6614                     TCPS_HAVEESTABLISHED(tp->t_state) &&
6615                     (tp->snd_cwnd > tp->snd_cwv.init_cwnd))
6616                         tcp_newcwv_nvp_closedown(tp);
6617         }
6618 #endif
6619         /*
6620          * Unscale the window into a 32-bit value. For the SYN_SENT state
6621          * the scale is zero.
6622          */
6623         tiwin = th->th_win << tp->snd_scale;
6624 #ifdef NETFLIX_STATS
6625         stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin);
6626 #endif
6627         /*
6628          * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move
6629          * this to occur after we've validated the segment.
6630          */
6631         if (tp->t_flags & TF_ECN_PERMIT) {
6632                 if (thflags & TH_CWR)
6633                         tp->t_flags &= ~TF_ECN_SND_ECE;
6634                 switch (iptos & IPTOS_ECN_MASK) {
6635                 case IPTOS_ECN_CE:
6636                         tp->t_flags |= TF_ECN_SND_ECE;
6637                         TCPSTAT_INC(tcps_ecn_ce);
6638                         break;
6639                 case IPTOS_ECN_ECT0:
6640                         TCPSTAT_INC(tcps_ecn_ect0);
6641                         break;
6642                 case IPTOS_ECN_ECT1:
6643                         TCPSTAT_INC(tcps_ecn_ect1);
6644                         break;
6645                 }
6646                 /* Congestion experienced. */
6647                 if (thflags & TH_ECE) {
6648                         rack_cong_signal(tp, th, CC_ECN);
6649                 }
6650         }
6651         /*
6652          * Parse options on any incoming segment.
6653          */
6654         tcp_dooptions(&to, (u_char *)(th + 1),
6655             (th->th_off << 2) - sizeof(struct tcphdr),
6656             (thflags & TH_SYN) ? TO_SYN : 0);
6657
6658         /*
6659          * If echoed timestamp is later than the current time, fall back to
6660          * non RFC1323 RTT calculation.  Normalize timestamp if syncookies
6661          * were used when this connection was established.
6662          */
6663         if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
6664                 to.to_tsecr -= tp->ts_offset;
6665                 if (TSTMP_GT(to.to_tsecr, cts))
6666                         to.to_tsecr = 0;
6667         }
6668         /*
6669          * If it's the first time in, we need to take care of options and
6670          * verify we can do SACK for rack!
6671          */
6672         if (rack->r_state == 0) {
6673                 /* Should be init'd by rack_init() */
6674                 KASSERT(rack->rc_inp != NULL,
6675                     ("%s: rack->rc_inp unexpectedly NULL", __func__));
6676                 if (rack->rc_inp == NULL) {
6677                         rack->rc_inp = tp->t_inpcb;
6678                 }
6679
6680                 /*
6681                  * Process options only when we get SYN/ACK back. The SYN
6682                  * case for incoming connections is handled in tcp_syncache.
6683                  * According to RFC1323 the window field in a SYN (i.e., a
6684                  * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX
6685                  * this is traditional behavior, may need to be cleaned up.
6686                  */
6687                 rack->r_cpu = inp_to_cpuid(tp->t_inpcb);
6688                 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
6689                         if ((to.to_flags & TOF_SCALE) &&
6690                             (tp->t_flags & TF_REQ_SCALE)) {
6691                                 tp->t_flags |= TF_RCVD_SCALE;
6692                                 tp->snd_scale = to.to_wscale;
6693                         }
6694                         /*
6695                          * Initial send window.  It will be updated with the
6696                          * next incoming segment to the scaled value.
6697                          */
6698                         tp->snd_wnd = th->th_win;
6699                         if (to.to_flags & TOF_TS) {
6700                                 tp->t_flags |= TF_RCVD_TSTMP;
6701                                 tp->ts_recent = to.to_tsval;
6702                                 tp->ts_recent_age = cts;
6703                         }
6704                         if (to.to_flags & TOF_MSS)
6705                                 tcp_mss(tp, to.to_mss);
6706                         if ((tp->t_flags & TF_SACK_PERMIT) &&
6707                             (to.to_flags & TOF_SACKPERM) == 0)
6708                                 tp->t_flags &= ~TF_SACK_PERMIT;
6709                 }
6710                 /*
6711                  * At this point we are at the initial call. Here we decide
6712                  * if we are doing RACK or not. We do this by seeing if
6713                  * TF_SACK_PERMIT is set, if not rack is *not* possible and
6714                  * we switch to the default code.
6715                  */
6716                 if ((tp->t_flags & TF_SACK_PERMIT) == 0) {
6717                         tcp_switch_back_to_default(tp);
6718                         (*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen,
6719                             tlen, iptos, ti_locked);
6720                         return;
6721                 }
6722                 /* Record whether this connection is IPv6. */
6723                 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
6724                 tcp_set_hpts(tp->t_inpcb);
6725                 rack_stop_all_timers(tp);
6726                 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack);
6727         }
6728         /*
6729          * This is the one exception case where we set the rack state
6730          * always. All other times (timers etc) we must have a rack-state
6731          * set (so we assure we have done the checks above for SACK).
6732          */
6733         if (rack->r_state != tp->t_state)
6734                 rack_set_state(tp, rack);
6735         if (SEQ_GT(th->th_ack, tp->snd_una) && (rsm = TAILQ_FIRST(&rack->r_ctl.rc_map)) != NULL)
6736                 kern_prefetch(rsm, &prev_state);
6737         prev_state = rack->r_state;
6738         rack->r_ctl.rc_tlp_send_cnt = 0;
6739         rack_clear_rate_sample(rack);
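        /*
         * The per-state handlers return 0 with the tcb still locked, or
         * non-zero when the tcb has been unlocked (and is possibly gone).
         */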
6740         retval = (*rack->r_substate) (m, th, so,
6741             tp, &to, drop_hdrlen,
6742             tlen, &ti_locked, tiwin, thflags, nxt_pkt);
6743 #ifdef INVARIANTS
6744         if ((retval == 0) &&
6745             (tp->t_inpcb == NULL)) {
6746                 panic("retval:%d tp:%p t_inpcb:NULL state:%d",
6747                     retval, tp, prev_state);
6748         }
6749 #endif
6750         if (ti_locked != TI_UNLOCKED) {
6751                 INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
6752                 INP_INFO_RUNLOCK(&V_tcbinfo);
6753                 ti_locked = TI_UNLOCKED;
6754         }
6755         if (retval == 0) {
6756                 /*
6757                  * If retval is 1 the tcb is unlocked and most likely the tp
6758                  * is gone.
6759                  */
6760                 INP_WLOCK_ASSERT(tp->t_inpcb);
6761                 tcp_rack_xmit_timer_commit(rack, tp);
6762                 if (((tp->snd_max - tp->snd_una) > tp->snd_wnd) &&
6763                     (rack->rc_in_persist == 0)){
6764                         /* 
6765                          * The peer shrunk its window on us to the point
6766                          * where we have sent too much. The only thing
6767                          * we can do here is stop any timers and
6768                          * enter persist. We most likely lost the last
6769                          * bytes we sent but oh well, we will have to
6770                          * retransmit them after the peer is caught up.
6771                          */
6772                         if (rack->rc_inp->inp_in_hpts)
6773                                 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
6774                         rack_timer_cancel(tp, rack, cts, __LINE__);
6775                         rack_enter_persist(tp, rack, cts);
6776                         rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), __LINE__, 0, 0, 0);
6777                         way_out = 3;
6778                         goto done_with_input;
6779                 }
6780                 if (nxt_pkt == 0) {
6781                         if (rack->r_wanted_output != 0) {
6782                                 did_out = 1;
6783                                 (void)tp->t_fb->tfb_tcp_output(tp);
6784                         }
6785                         rack_start_hpts_timer(rack, tp, cts, __LINE__, 0, 0, 0);
6786                 }
6787                 if (((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) &&
6788                     (SEQ_GT(tp->snd_max, tp->snd_una) ||
6789                      (tp->t_flags & TF_DELACK) ||
6790                      ((tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
6791                       (tp->t_state <= TCPS_CLOSING)))) {
6792                         /* We could not send (probably in the hpts but stopped the timer earlier)? */
6793                         if ((tp->snd_max == tp->snd_una) &&
6794                             ((tp->t_flags & TF_DELACK) == 0) &&
6795                             (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
6796                                 /* keepalive is not needed while hptsi output is still pending */
6797                                 ;
6798                         } else {
6799                                 if (rack->rc_inp->inp_in_hpts)
6800                                         tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
6801                                 rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), __LINE__, 0, 0, 0);
6802                         }
6803                         way_out = 1;
6804                 } else {
6805                         /* Do we have the correct timer running? */
6806                         rack_timer_audit(tp, rack, &so->so_snd);
6807                         way_out = 2;
6808                 }
6809         done_with_input:
6810                 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out);
6811                 if (did_out)
6812                         rack->r_wanted_output = 0;
6813 #ifdef INVARIANTS
6814                 if (tp->t_inpcb == NULL) {
6815                         panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d",
6816                               did_out,
6817                               retval, tp, prev_state);
6818                 }
6819 #endif
6820                 INP_WUNLOCK(tp->t_inpcb);
6821         }
6822 }
6823
6824 void
6825 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
6826     struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos,
6827     int32_t ti_locked)
6828 {
6829         struct timeval tv;
6830 #ifdef RSS
6831         struct tcp_function_block *tfb;
6832         struct tcp_rack *rack;
6833         struct inpcb *inp;
6834
6835         rack = (struct tcp_rack *)tp->t_fb_ptr;
6836         if (rack->r_state == 0) {
6837                 /*
6838                  * Initial input (ACK to SYN-ACK etc)lets go ahead and get
6839                  * Initial input (ACK to SYN-ACK etc.).  Let's go ahead and
6840                  * get it processed.
6841                 if (ti_locked != TI_RLOCKED && INP_INFO_TRY_RLOCK(&V_tcbinfo))
6842                         ti_locked = TI_RLOCKED;
6843                 if (ti_locked != TI_RLOCKED) {
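                        /*
                         * Lock order is tcbinfo before inpcb, so to pick up
                         * the tcbinfo read lock we must drop the inpcb lock,
                         * take tcbinfo, re-lock the inpcb and then re-check
                         * that the connection did not go away in the gap.
                         */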
6844                         inp = tp->t_inpcb;
6845                         tfb = tp->t_fb;
6846                         in_pcbref(inp);
6847                         INP_WUNLOCK(inp);
6848                         INP_INFO_RLOCK(&V_tcbinfo);
6849                         ti_locked = TI_RLOCKED;
6850                         INP_WLOCK(inp);
6851                         if (in_pcbrele_wlocked(inp))
6852                                 inp = NULL;
6853                         if (inp == NULL || (inp->inp_flags2 & INP_FREED) ||
6854                             (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED))) {
6855                                 /* The TCPCB went away. Free the packet. */
6856                                 INP_INFO_RUNLOCK(&V_tcbinfo);
6857                                 if (inp)
6858                                         INP_WUNLOCK(inp);
6859                                 m_freem(m);
6860                                 return;
6861                         }
6862                         /* If the stack changed, call the correct stack. */
6863                         if (tp->t_fb != tfb) {
6864                                 tp->t_fb->tfb_tcp_do_segment(m, th, so, tp,
6865                                     drop_hdrlen, tlen, iptos, ti_locked);
6866                                 return;
6867                         }
6868                 }
6869                 tcp_get_usecs(&tv);
6870                 rack_hpts_do_segment(m, th, so, tp, drop_hdrlen,
6871                     tlen, iptos, ti_locked, 0, &tv);
6872                 return;
6873         }
6874         if (ti_locked == TI_RLOCKED)
6875                 INP_INFO_RUNLOCK(&V_tcbinfo);
6876         tcp_queue_to_input(tp, m, th, tlen, drop_hdrlen, iptos, (uint8_t) ti_locked);
6877         INP_WUNLOCK(tp->t_inpcb);
6878 #else
6879         tcp_get_usecs(&tv);
6880         rack_hpts_do_segment(m, th, so, tp, drop_hdrlen,
6881             tlen, iptos, ti_locked, 0, &tv);
6882 #endif
6883 }
6884
6885 struct rack_sendmap *
6886 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused)
6887 {
6888         struct rack_sendmap *rsm = NULL;
6889         int32_t idx;
6890         uint32_t srtt_cur, srtt = 0, thresh = 0, ts_low = 0;
6891
6892         /* Return the next segment to be retransmitted, if any. */
6893         if (TAILQ_EMPTY(&rack->r_ctl.rc_map)) {
6894                 return (NULL);
6895         }
6896         if (tp->t_flags & TF_SENTFIN) {
6897                 /* A FIN has been sent; don't pick a retransmission from here. */
6898                 return (NULL);
6899         }
6900         /* Ok, let's look at this one. */
6901         rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
6902         if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) {
6903                 goto check_it;
6904         }
6905         rsm = rack_find_lowest_rsm(rack);
6906         if (rsm == NULL) {
6907                 return (NULL);
6908         }
6909 check_it:
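        /*
         * A segment is only eligible for a RACK retransmission when a
         * later SACK has passed over it (RACK_SACK_PASSED) and at least
         * the reordering threshold from rack_calc_thresh_rack() has
         * elapsed since its most recent (re)transmission.
         */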
6910         srtt_cur = tp->t_srtt >> TCP_RTT_SHIFT;
6911         srtt = TICKS_2_MSEC(srtt_cur);
6912         if (rack->rc_rack_rtt && (srtt > rack->rc_rack_rtt))
6913                 srtt = rack->rc_rack_rtt;
6914         if (rsm->r_flags & RACK_ACKED) {
6915                 return (NULL);
6916         }
6917         if ((rsm->r_flags & RACK_SACK_PASSED) == 0) {
6918                 /* It's not yet ready. */
6919                 return (NULL);
6920         }
6921         idx = rsm->r_rtr_cnt - 1;
6922         ts_low = rsm->r_tim_lastsent[idx];
6923         thresh = rack_calc_thresh_rack(rack, srtt, tsused);
6924         if (tsused <= ts_low) {
6925                 return (NULL);
6926         }
6927         if ((tsused - ts_low) >= thresh) {
6928                 return (rsm);
6929         }
6930         return (NULL);
6931 }
6932
6933 static int
6934 rack_output(struct tcpcb *tp)
6935 {
6936         struct socket *so;
6937         uint32_t recwin, sendwin;
6938         uint32_t sb_offset;
6939         int32_t len, flags, error = 0;
6940         struct mbuf *m;
6941         struct mbuf *mb;
6942         uint32_t if_hw_tsomaxsegcount = 0;
6943         uint32_t if_hw_tsomaxsegsize;
6944         long tot_len_this_send = 0;
6945         struct ip *ip = NULL;
6946 #ifdef TCPDEBUG
6947         struct ipovly *ipov = NULL;
6948 #endif
6949         struct udphdr *udp = NULL;
6950         struct tcp_rack *rack;
6951         struct tcphdr *th;
6952         uint8_t pass = 0;
6953         uint8_t wanted_cookie = 0;
6954         u_char opt[TCP_MAXOLEN];
6955         unsigned ipoptlen, optlen, hdrlen, ulen=0;
6956         uint32_t rack_seq;
6957
6958 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
6959         unsigned ipsec_optlen = 0;
6960
6961 #endif
6962         int32_t idle, sendalot;
6963         int32_t sub_from_prr = 0;
6964         volatile int32_t sack_rxmit;
6965         struct rack_sendmap *rsm = NULL;
6966         int32_t tso, mtu, would_have_fin = 0;
6967         struct tcpopt to;
6968         int32_t slot = 0;
6969         uint32_t cts;
6970         uint8_t hpts_calling, doing_tlp = 0;
6971         int32_t do_a_prefetch;
6972         int32_t prefetch_rsm = 0;
6973         int32_t prefetch_so_done = 0;
6974         struct tcp_log_buffer *lgb = NULL;
6975         struct inpcb *inp;
6976         struct sockbuf *sb;
6977 #ifdef INET6
6978         struct ip6_hdr *ip6 = NULL;
6979         int32_t isipv6;
6980 #endif
6981         /* setup and take the cache hits here */
6982         rack = (struct tcp_rack *)tp->t_fb_ptr;
6983         inp = rack->rc_inp;
6984         so = inp->inp_socket;
6985         sb = &so->so_snd;
6986         kern_prefetch(sb, &do_a_prefetch);
6987         do_a_prefetch = 1;
6988         
6989         INP_WLOCK_ASSERT(inp);
6990 #ifdef TCP_OFFLOAD
6991         if (tp->t_flags & TF_TOE)
6992                 return (tcp_offload_output(tp));
6993 #endif
6994
6995         /*
6996          * For TFO connections in SYN_RECEIVED, only allow the initial
6997          * SYN|ACK and those sent by the retransmit timer.
6998          */
6999         if (IS_FASTOPEN(tp->t_flags) &&
7000             (tp->t_state == TCPS_SYN_RECEIVED) &&
7001             SEQ_GT(tp->snd_max, tp->snd_una) &&    /* initial SYN|ACK sent */
7002             (rack->r_ctl.rc_resend == NULL))         /* not a retransmit */
7003                 return (0);
7004 #ifdef INET6
7005         if (rack->r_state) {
7006                 /* Use the cache line loaded if possible */
7007                 isipv6 = rack->r_is_v6;
7008         } else {
7009                 isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
7010         }
7011 #endif
7012         cts = tcp_ts_getticks();
7013         if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) &&
7014             inp->inp_in_hpts) {
7015                 /*
7016                  * We are on the hpts for some timer but not hptsi output.
7017                  * Remove from the hpts unconditionally.
7018                  */
7019                 rack_timer_cancel(tp, rack, cts, __LINE__);
7020         }
7021         /* Mark that we have called rack_output(). */
7022         if ((rack->r_timer_override) ||
7023             (tp->t_flags & TF_FORCEDATA) ||
7024             (tp->t_state < TCPS_ESTABLISHED)) {
7025                 if (tp->t_inpcb->inp_in_hpts)
7026                         tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT);
7027         } else if (tp->t_inpcb->inp_in_hpts) {
7028                 /*
7029                  * On the hpts you can't pass even if ACKNOW is on, we will
7030                  * On the hpts you can't pass; even if ACKNOW is on, we
7031                  * will send when the hpts fires.
7032                 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1);
7033                 return (0);
7034         }
7035         hpts_calling = inp->inp_hpts_calls;
7036         inp->inp_hpts_calls = 0;
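        /*
         * If a pacing timer is pending, let the timer code run first; when
         * rack_process_timers() fully handles this wakeup there is nothing
         * further for us to send right now.
         */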
7037         if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
7038                 if (rack_process_timers(tp, rack, cts, hpts_calling)) {
7039                         counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1);
7040                         return (0);
7041                 }
7042         }
7043         rack->r_wanted_output = 0;
7044         rack->r_timer_override = 0;
7045         /*
7046          * Determine length of data that should be transmitted, and flags
7047          * that will be used. If there is some data or critical controls
7048          * (SYN, RST) to send, then transmit; otherwise, investigate
7049          * further.
7050          */
7051         idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
7052 #ifdef NETFLIX_CWV
7053         if (tp->cwv_enabled) {
7054                 if ((tp->cwv_cwnd_valid == 0) &&
7055                     TCPS_HAVEESTABLISHED(tp->t_state) &&
7056                     (tp->snd_cwnd > tp->snd_cwv.init_cwnd))
7057                         tcp_newcwv_nvp_closedown(tp);
7058         } else
7059 #endif
7060         if (tp->t_idle_reduce) {
7061                 if (idle && ((ticks - tp->t_rcvtime) >= tp->t_rxtcur))
7062                         rack_cc_after_idle(tp,
7063                             (rack->r_idle_reduce_largest ? 1 :0));
7064         }
7065         tp->t_flags &= ~TF_LASTIDLE;
7066         if (idle) {
7067                 if (tp->t_flags & TF_MORETOCOME) {
7068                         tp->t_flags |= TF_LASTIDLE;
7069                         idle = 0;
7070                 }
7071         }
7072 again:
7073         /*
7074          * If we've recently taken a timeout, snd_max will be greater than
7075          * snd_nxt.  There may be SACK information that allows us to avoid
7076          * resending already delivered data.  Adjust snd_nxt accordingly.
7077          */
7078         sendalot = 0;
7079         cts = tcp_ts_getticks();
7080         tso = 0;
7081         mtu = 0;
7082         sb_offset = tp->snd_max - tp->snd_una;
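        /*
         * What we may send is bounded by both the peer's advertised window
         * and our congestion window.
         */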
7083         sendwin = min(tp->snd_wnd, tp->snd_cwnd);
7084
7085         flags = tcp_outflags[tp->t_state];
7086         /*
7087          * Send any SACK-generated retransmissions.  If we're explicitly
7088          * trying to send out new data (when sendalot is 1), bypass this
7089          * function. If we retransmit in fast recovery mode, decrement
7090          * snd_cwnd, since we're replacing a (future) new transmission with
7091          * a retransmission now, and we previously incremented snd_cwnd in
7092          * tcp_input().
7093          */
7094         /*
7095          * Make sure the free sendmap cache is topped up before we send.
7096          */
7097         while (rack->rc_free_cnt < rack_free_cache) {
7098                 rsm = rack_alloc(rack);
7099                 if (rsm == NULL) {
7100                         if (inp->inp_hpts_calls)
7101                                 /* Retry in a ms */
7102                                 slot = 1;
7103                         goto just_return_nolock;
7104                 }
7105                 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_next);
7106                 rack->rc_free_cnt++;
7107                 rsm = NULL;
7108         }
7109         if (inp->inp_hpts_calls)
7110                 inp->inp_hpts_calls = 0;
7111         sack_rxmit = 0;
7112         len = 0;
7113         rsm = NULL;
7114         if (flags & TH_RST) {
7115                 SOCKBUF_LOCK(sb);
7116                 goto send;
7117         }
7118         if (rack->r_ctl.rc_tlpsend) {
7119                 /* Tail loss probe */
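                /*
                 * A tail loss probe resends (at most one MSS of) the chosen
                 * segment to draw an ACK/SACK from the peer when the tail of
                 * the send is suspected lost, rather than waiting out a full
                 * retransmission timeout.
                 */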
7120                 long cwin;
7121                 long tlen;
7122
7123                 doing_tlp = 1;
7124                 rsm = rack->r_ctl.rc_tlpsend;
7125                 rack->r_ctl.rc_tlpsend = NULL;
7126                 sack_rxmit = 1;
7127                 tlen = rsm->r_end - rsm->r_start;
7128                 if (tlen > tp->t_maxseg)
7129                         tlen = tp->t_maxseg;
7130 #ifdef INVARIANTS
7131                 if (SEQ_GT(tp->snd_una, rsm->r_start)) {
7132                         panic("tp:%p rack:%p snd_una:%u rsm:%p r_start:%u",
7133                             tp, rack, tp->snd_una, rsm, rsm->r_start);
7134                 }
7135 #endif
7136                 sb_offset = rsm->r_start - tp->snd_una;
7137                 cwin = min(tp->snd_wnd, tlen);
7138                 len = cwin;
7139         } else if (rack->r_ctl.rc_resend) {
7140                 /* Retransmit timer */
7141                 rsm = rack->r_ctl.rc_resend;
7142                 rack->r_ctl.rc_resend = NULL;
7143                 len = rsm->r_end - rsm->r_start;
7144                 sack_rxmit = 1;
7145                 sendalot = 0;
7146                 sb_offset = rsm->r_start - tp->snd_una;
7147                 if (len >= tp->t_maxseg) {
7148                         len = tp->t_maxseg;
7149                 }
7150                 KASSERT(sb_offset >= 0, ("%s: sack block to the left of una : %d",
7151                     __func__, sb_offset));
7152         } else if ((rack->rc_in_persist == 0) &&
7153             ((rsm = tcp_rack_output(tp, rack, cts)) != NULL)) {
7154                 long tlen;
7155
7156                 if ((!IN_RECOVERY(tp->t_flags)) &&
7157                     ((tp->t_flags & (TF_WASFRECOVERY | TF_WASCRECOVERY)) == 0)) {
7158                         /* Enter recovery if not induced by a time-out */
7159                         rack->r_ctl.rc_rsm_start = rsm->r_start;
7160                         rack->r_ctl.rc_cwnd_at = tp->snd_cwnd;
7161                         rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh;
7162                         rack_cong_signal(tp, NULL, CC_NDUPACK);
7163                         /*
7164                          * When we enter recovery we need to assure we send
7165                          * one packet.
7166                          */
7167                         rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg;
7168                 }
7169 #ifdef INVARIANTS
7170                 if (SEQ_LT(rsm->r_start, tp->snd_una)) {
7171                         panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n",
7172                             tp, rack, rsm, rsm->r_start, tp->snd_una);
7173                 }
7174 #endif
7175                 tlen = rsm->r_end - rsm->r_start;
7176                 sb_offset = rsm->r_start - tp->snd_una;
7177                 if (tlen > rack->r_ctl.rc_prr_sndcnt) {
7178                         len = rack->r_ctl.rc_prr_sndcnt;
7179                 } else {
7180                         len = tlen;
7181                 }
7182                 if (len >= tp->t_maxseg) {
7183                         sendalot = 1;
7184                         len = tp->t_maxseg;
7185                 } else {
7186                         sendalot = 0;
7187                         if ((rack->rc_timer_up == 0) &&
7188                             (len < tlen)) {
7189                                 /*
7190                                  * If it's not a timer, don't send a partial
7191                                  * segment.
7192                                  */
7193                                 len = 0;
7194                                 goto just_return_nolock;
7195                         }
7196                 }
7197                 KASSERT(sb_offset >= 0, ("%s: sack block to the left of una : %d",
7198                     __func__, sb_offset));
7199                 if (len > 0) {
7200                         sub_from_prr = 1;
7201                         sack_rxmit = 1;
7202                         TCPSTAT_INC(tcps_sack_rexmits);
7203                         TCPSTAT_ADD(tcps_sack_rexmit_bytes,
7204                             min(len, tp->t_maxseg));
7205                         counter_u64_add(rack_rtm_prr_retran, 1);
7206                 }
7207         }
7208         if (rsm && (rsm->r_flags & RACK_HAS_FIN)) {
7209                 /* we are retransmitting the fin */
7210                 len--;
7211                 if (len) {
7212                         /*
7213                          * When retransmitting data do *not* include the
7214                          * FIN. This could happen from a TLP probe.
7215                          */
7216                         flags &= ~TH_FIN;
7217                 }
7218         }
7219 #ifdef INVARIANTS
7220         /* For debugging */
7221         rack->r_ctl.rc_rsm_at_retran = rsm;
7222 #endif
7223         /*
7224          * Get standard flags, and add SYN or FIN if requested by 'hidden'
7225          * state flags.
7226          */
7227         if (tp->t_flags & TF_NEEDFIN)
7228                 flags |= TH_FIN;
7229         if (tp->t_flags & TF_NEEDSYN)
7230                 flags |= TH_SYN;
7231         if ((sack_rxmit == 0) && (prefetch_rsm == 0)) {
7232                 void *end_rsm;
7233                 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
7234                 if (end_rsm)
7235                         kern_prefetch(end_rsm, &prefetch_rsm);
7236                 prefetch_rsm = 1;
7237         }
7238         SOCKBUF_LOCK(sb);
7239         /*
7240          * If in persist timeout with window of 0, send 1 byte. Otherwise,
7241          * if window is small but nonzero and the timer has expired, we
7242          * will send what we can and go to transmit state.
7243          */
7244         if (tp->t_flags & TF_FORCEDATA) {
7245                 if (sendwin == 0) {
7246                         /*
7247                          * If we still have some data to send, then clear
7248                          * the FIN bit.  Usually this would happen below
7249                          * when it realizes that we aren't sending all the
7250                          * data.  However, if we have exactly 1 byte of
7251                          * unsent data, then it won't clear the FIN bit
7252                          * below, and if we are in persist state, we wind up
7253                          * sending the packet without recording that we sent
7254                          * the FIN bit.
7255                          *
7256                          * We can't just blindly clear the FIN bit, because
7257                          * if we don't have any more data to send then the
7258                          * probe will be the FIN itself.
7259                          */
7260                         if (sb_offset < sbused(sb))
7261                                 flags &= ~TH_FIN;
7262                         sendwin = 1;
7263                 } else {
7264                         if (rack->rc_in_persist)
7265                                 rack_exit_persist(tp, rack);
7266                         /*
7267                          * If we are dropping persist mode then we need to
7268                          * correct snd_nxt/snd_max and off.
7269                          */
7270                         tp->snd_nxt = tp->snd_max;
7271                         sb_offset = tp->snd_nxt - tp->snd_una;
7272                 }
7273         }
7274         /*
7275          * If snd_nxt == snd_max and we have transmitted a FIN, the
7276          * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a
7277          * negative length.  This can also occur when TCP opens up its
7278          * congestion window while receiving additional duplicate acks after
7279          * fast-retransmit because TCP will reset snd_nxt to snd_max after
7280          * the fast-retransmit.
7281          *
7282          * In the normal retransmit-FIN-only case, however, snd_nxt will be
7283          * set to snd_una, the sb_offset will be 0, and the length may wind
7284          * up 0.
7285          *
7286          * If sack_rxmit is true we are retransmitting from the scoreboard
7287          * in which case len is already set.
7288          */
7289         if (sack_rxmit == 0) {
7290                 uint32_t avail;
7291
7292                 avail = sbavail(sb);
7293                 if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail)
7294                         sb_offset = tp->snd_nxt - tp->snd_una;
7295                 else
7296                         sb_offset = 0;
7297                 if (IN_RECOVERY(tp->t_flags) == 0) {
7298                         if (rack->r_ctl.rc_tlp_new_data) {
7299                                 /* TLP is forcing out new data */
7300                                 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) {
7301                                         rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset);
7302                                 }
7303                                 if (rack->r_ctl.rc_tlp_new_data > tp->snd_wnd)
7304                                         len = tp->snd_wnd;
7305                                 else
7306                                         len = rack->r_ctl.rc_tlp_new_data;
7307                                 rack->r_ctl.rc_tlp_new_data = 0;
7308                                 doing_tlp = 1;
7309                         } else {
7310                                 if (sendwin > avail) {
7311                                         /* use the available */
7312                                         if (avail > sb_offset) {
7313                                                 len = (int32_t)(avail - sb_offset);
7314                                         } else {
7315                                                 len = 0;
7316                                         }
7317                                 } else {
7318                                         if (sendwin > sb_offset) {
7319                                                 len = (int32_t)(sendwin - sb_offset);
7320                                         } else {
7321                                                 len = 0;
7322                                         }
7323                                 }
7324                         }
7325                 } else {
7326                         uint32_t outstanding;
7327
7328                         /*
7329                          * We are inside of a SACK recovery episode and are
7330                          * sending new data, having retransmitted all the
7331                          * data possible so far in the scoreboard.
7332                          */
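                        /*
                         * New data while in recovery is budgeted by PRR:
                         * what goes out here may exceed neither the peer's
                         * window nor rc_prr_sndcnt.
                         */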
7333                         outstanding = tp->snd_max - tp->snd_una;
7334                         if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd)
7335                                 len = 0;
7336                         else if (avail > sb_offset)
7337                                 len = avail - sb_offset;
7338                         else
7339                                 len = 0;
7340                         if (len > 0) {
7341                                 if (len > rack->r_ctl.rc_prr_sndcnt)
7342                                         len = rack->r_ctl.rc_prr_sndcnt;
7343
7344                                 if (len > 0) {
7345                                         sub_from_prr = 1;
7346                                         counter_u64_add(rack_rtm_prr_newdata, 1);
7347                                 }
7348                         }
7349                         if (len > tp->t_maxseg) {
7350                                 /*
7351                                  * We should never send more than a MSS when
7352                                  * retransmitting or sending new data in prr
7353                                  * mode unless the override flag is on. Most
7354                                  * likely the PRR algorithm is not going to
7355                                  * let us send a lot as well :-)
7356                                  */
7357                                 if (rack->r_ctl.rc_prr_sendalot == 0)
7358                                         len = tp->t_maxseg;
7359                         } else if (len < tp->t_maxseg) {
7360                                 /*
7361                                  * Do we send any? The idea here is if the
7362                                  * Do we send any? The idea here is if this
7363                                  * send empties the socket buffer we want to
7364                                  * do it. However, if not, then let's just wait
7365                                  */
7366                                 long leftinsb;
7367
7368                                 leftinsb = sbavail(sb) - sb_offset;
7369                                 if (leftinsb > len) {
7370                                         /* This send does not empty the sb */
7371                                         len = 0;
7372                                 }
7373                         }
7374                 }
7375         }
7376         if (prefetch_so_done == 0) {
7377                 kern_prefetch(so, &prefetch_so_done);
7378                 prefetch_so_done = 1;
7379         }
7380         /*
7381          * Lop off SYN bit if it has already been sent.  However, if this is
7382          * SYN-SENT state and if segment contains data and if we don't know
7383          * that foreign host supports TAO, suppress sending segment.
7384          */
7385         if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) &&
7386             ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) {
7387                 if (tp->t_state != TCPS_SYN_RECEIVED)
7388                         flags &= ~TH_SYN;
7389                 /*
7390                  * When sending additional segments following a TFO SYN|ACK,
7391                  * do not include the SYN bit.
7392                  */
7393                 if (IS_FASTOPEN(tp->t_flags) &&
7394                     (tp->t_state == TCPS_SYN_RECEIVED))
7395                         flags &= ~TH_SYN;
7396                 sb_offset--, len++;
7397         }
7398         /*
7399          * Be careful not to send data and/or FIN on SYN segments. This
7400          * measure is needed to prevent interoperability problems with not
7401          * fully conformant TCP implementations.
7402          */
7403         if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) {
7404                 len = 0;
7405                 flags &= ~TH_FIN;
7406         }
7407         /*
7408          * On TFO sockets, ensure no data is sent in the following cases:
7409          *
7410          *  - When retransmitting SYN|ACK on a passively-created socket
7411          *
7412          *  - When retransmitting SYN on an actively created socket
7413          *
7414          *  - When sending a zero-length cookie (cookie request) on an
7415          *    actively created socket
7416          *
7417          *  - When the socket is in the CLOSED state (RST is being sent)
7418          */
7419         if (IS_FASTOPEN(tp->t_flags) &&
7420             (((flags & TH_SYN) && (tp->t_rxtshift > 0)) ||
7421              ((tp->t_state == TCPS_SYN_SENT) &&
7422               (tp->t_tfo_client_cookie_len == 0)) ||
7423              (flags & TH_RST)))
7424                 len = 0;
7425         /* Without fast-open there should never be data sent on a SYN */
7426         if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags)))
7427                 len = 0;
7428         if (len <= 0) {
7429                 /*
7430                  * If FIN has been sent but not acked, but we haven't been
7431                  * called to retransmit, len will be < 0.  Otherwise, window
7432                  * shrank after we sent into it.  If window shrank to 0,
7433                  * cancel pending retransmit, pull snd_nxt back to (closed)
7434                  * window, and set the persist timer if it isn't already
7435                  * going.  If the window didn't close completely, just wait
7436                  * for an ACK.
7437                  *
7438                  * We also do a general check here to ensure that we will
7439                  * set the persist timer when we have data to send, but a
7440                  * 0-byte window. This makes sure the persist timer is set
7441                  * even if the packet hits one of the "goto send" lines
7442                  * below.
7443                  */
7444                 len = 0;
7445                 if ((tp->snd_wnd == 0) &&
7446                     (TCPS_HAVEESTABLISHED(tp->t_state)) &&
7447                     (sb_offset < (int)sbavail(sb))) {
7448                         tp->snd_nxt = tp->snd_una;
7449                         rack_enter_persist(tp, rack, cts);
7450                 }
7451         }
7452         /* len will be >= 0 after this point. */
7453         KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
7454         tcp_sndbuf_autoscale(tp, so, sendwin);
7455         /*
7456          * Decide if we can use TCP Segmentation Offloading (if supported by
7457          * hardware).
7458          *
7459          * TSO may only be used if we are in a pure bulk sending state.  The
7460          * presence of TCP-MD5, SACK retransmits, SACK advertisements and IP
7461          * options prevents using TSO.  With TSO the TCP header is the same
7462          * (except for the sequence number) for all generated packets.  This
7463          * makes it impossible to transmit any options which vary per
7464          * generated segment or packet.
7465          *
7466          * IPv4 handling has a clear separation of ip options and ip header
7467          * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does
7468          * the right thing below to provide length of just ip options and thus
7469          * checking for ipoptlen is enough to decide if ip options are present.
7470          */
7471
7472 #ifdef INET6
7473         if (isipv6)
7474                 ipoptlen = ip6_optlen(tp->t_inpcb);
7475         else
7476 #endif
7477                 if (tp->t_inpcb->inp_options)
7478                         ipoptlen = tp->t_inpcb->inp_options->m_len -
7479                             offsetof(struct ipoption, ipopt_list);
7480                 else
7481                         ipoptlen = 0;
7482 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
7483         /*
7484          * Pre-calculate here, as we save another lookup into the darkness
7485          * of IPsec that way and can actually decide if TSO is OK.
7486          */
7487 #ifdef INET6
7488         if (isipv6 && IPSEC_ENABLED(ipv6))
7489                 ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb);
7490 #ifdef INET
7491         else
7492 #endif
7493 #endif                          /* INET6 */
7494 #ifdef INET
7495         if (IPSEC_ENABLED(ipv4))
7496                 ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb);
7497 #endif                          /* INET */
7498 #endif
7499
7500 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
7501         ipoptlen += ipsec_optlen;
7502 #endif
7503         if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > tp->t_maxseg &&
7504             (tp->t_port == 0) &&
7505             ((tp->t_flags & TF_SIGNATURE) == 0) &&
7506             tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
7507             ipoptlen == 0)
7508                 tso = 1;
7509         {
7510                 uint32_t outstanding;
7511
7512                 outstanding = tp->snd_max - tp->snd_una;
7513                 if (tp->t_flags & TF_SENTFIN) {
7514                         /*
7515                          * If we sent a fin, snd_max is 1 higher than
7516                          * snd_una
7517                          */
7518                         outstanding--;
7519                 }
7520                 if (outstanding > 0) {
7521                         /*
7522                          * This is sub-optimal. We only send a standalone
7523                          * FIN on its own segment.
7524                          */
7525                         if (flags & TH_FIN) {
7526                                 flags &= ~TH_FIN;
7527                                 would_have_fin = 1;
7528                         }
7529                 } else if (sack_rxmit) {
7530                         if ((rsm->r_flags & RACK_HAS_FIN) == 0)
7531                                 flags &= ~TH_FIN;
7532                 } else {
7533                         if (SEQ_LT(tp->snd_nxt + len, tp->snd_una +
7534                             sbused(sb)))
7535                                 flags &= ~TH_FIN;
7536                 }
7537         }
7538         recwin = sbspace(&so->so_rcv);
7539
7540         /*
7541          * Sender silly window avoidance.  We transmit under the
7542          * following conditions when len is non-zero: we have a full
7543          * segment (or more with TSO); this is the last buffer in a
7544          * write()/send() and we are either idle or running NODELAY;
7545          * we've timed out (e.g. the persist timer fired); we have more
7546          * than 1/2 the maximum send window's worth of data (the
7547          * receiver may be limiting the window size); or we need to
7548          * retransmit.
7549          */
7550         if (len) {
7551                 if (len >= tp->t_maxseg) {
7552                         pass = 1;
7553                         goto send;
7554                 }
7555                 /*
7556                  * NOTE! on localhost connections an 'ack' from the remote
7557                  * end may occur synchronously with the output and cause us
7558                  * to flush a buffer queued with moretocome.  XXX
7559                  *
7560                  */
7561                 if (!(tp->t_flags & TF_MORETOCOME) &&   /* normal case */
7562                     (idle || (tp->t_flags & TF_NODELAY)) &&
7563                     ((uint32_t)len + (uint32_t)sb_offset >= sbavail(&so->so_snd)) && 
7564                     (tp->t_flags & TF_NOPUSH) == 0) {
7565                         pass = 2;
7566                         goto send;
7567                 }
7568                 if (tp->t_flags & TF_FORCEDATA) {       /* typ. timeout case */
7569                         pass = 3;
7570                         goto send;
7571                 }
7572                 if ((tp->snd_una == tp->snd_max) && len) {      /* Nothing outstanding */
7573                         goto send;
7574                 }
7575                 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) {
7576                         pass = 4;
7577                         goto send;
7578                 }
7579                 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */
7580                         pass = 5;
7581                         goto send;
7582                 }
7583                 if (sack_rxmit) {
7584                         pass = 6;
7585                         goto send;
7586                 }
7587         }
7588         /*
7589          * Sending of standalone window updates.
7590          *
7591          * Window updates are important when we close our window due to a
7592          * full socket buffer and are opening it again after the application
7593          * reads data from it.  Once the window has opened again and the
7594          * remote end starts to send again the ACK clock takes over and
7595          * provides the most current window information.
7596          *
7597          * We must avoid the silly window syndrome, where every read from
7598          * the receive buffer, no matter how small, causes a window update
7599          * to be sent.  We also should avoid sending a flurry of window
7600          * updates when the socket buffer had queued a lot of data and the
7601          * application is doing small reads.
7602          *
7603          * Prevent a flurry of pointless window updates by only sending an
7604          * update when we can increase the advertised window by more than
7605          * 1/4th of the socket buffer capacity.  When the buffer is getting
7606          * full or is very small be more aggressive and send an update
7607          * whenever we can increase by two MSS-sized segments. In all other
7608          * situations the ACKs to new incoming data will carry further
7609          * window increases.
7610          *
7611          * Don't send an independent window update if a delayed ACK is
7612          * pending (it will get piggy-backed on it) or the remote side
7613          * already has done a half-close and won't send more data.  Skip
7614          * this if the connection is in T/TCP half-open state.
7615          */
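        /*
         * Illustrative example (assumed values, not from this code): with
         * so_rcv.sb_hiwat = 65536 and t_maxseg = 1460 the check below only
         * sends a standalone update once adv >= max(2 * 1460, 65536 / 4) =
         * 16384 bytes, unless the remaining space (recwin) is at most
         * 65536 / 8 = 8192 bytes or the whole buffer is no larger than
         * 8 * 1460 bytes, in which case 2 * 1460 = 2920 bytes is enough.
         */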
7616         if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
7617             !(tp->t_flags & TF_DELACK) &&
7618             !TCPS_HAVERCVDFIN(tp->t_state)) {
7619                 /*
7620                  * "adv" is the amount we could increase the window, taking
7621                  * into account that we are limited by TCP_MAXWIN <<
7622                  * tp->rcv_scale.
7623                  */
7624                 int32_t adv;
7625                 int oldwin;
7626
7627                 adv = min(recwin, (long)TCP_MAXWIN << tp->rcv_scale);
7628                 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
7629                         oldwin = (tp->rcv_adv - tp->rcv_nxt);
7630                         adv -= oldwin;
7631                 } else
7632                         oldwin = 0;
7633
7634                 /*
7635                  * If the new window size ends up being the same as the old
7636                  * size when it is scaled, then don't force a window update.
7637                  */
7638                 if (oldwin >> tp->rcv_scale == (adv + oldwin) >> tp->rcv_scale)
7639                         goto dontupdate;
7640
7641                 if (adv >= (int32_t)(2 * tp->t_maxseg) &&
7642                     (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) ||
7643                     recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) ||
7644                     so->so_rcv.sb_hiwat <= 8 * tp->t_maxseg)) {
7645                         pass = 7;
7646                         goto send;
7647                 }
7648                 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat)
7649                         goto send;
7650         }
7651 dontupdate:
7652
7653         /*
7654          * Send if we owe the peer an ACK, RST, SYN, or urgent data.  ACKNOW
7655          * is also a catch-all for the retransmit timer timeout case.
7656          */
7657         if (tp->t_flags & TF_ACKNOW) {
7658                 pass = 8;
7659                 goto send;
7660         }
7661         if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) {
7662                 pass = 9;
7663                 goto send;
7664         }
7665         if (SEQ_GT(tp->snd_up, tp->snd_una)) {
7666                 pass = 10;
7667                 goto send;
7668         }
7669         /*
7670          * If our state indicates that FIN should be sent and we have not
7671          * yet done so, then we need to send.
7672          */
7673         if (flags & TH_FIN) {
7674                 if ((tp->t_flags & TF_SENTFIN) ||
7675                     (((tp->t_flags & TF_SENTFIN) == 0) &&
7676                      (tp->snd_nxt == tp->snd_una))) {
7677                         pass = 11;
7678                         goto send;
7679                 }
7680         }
7681         /*
7682          * No reason to send a segment, just return.
7683          */
7684 just_return:
7685         SOCKBUF_UNLOCK(sb);
7686 just_return_nolock:
7687         if (tot_len_this_send == 0)
7688                 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1);
7689         rack_start_hpts_timer(rack, tp, cts, __LINE__, slot, tot_len_this_send, 1);
7690         rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling);
7691         tp->t_flags &= ~TF_FORCEDATA;
7692         return (0);
7693
7694 send:
7695         if (doing_tlp == 0) {
7696                 /*
7697                  * This data is not a TLP, and it's not the RXT timer
7698                  * firing. If it is the RXT timer firing, we want to leave
7699                  * the tlp_in_progress flag on so we don't send another
7700                  * TLP. It has to be a rack timer or a normal send
7701                  * (response to acked data) to clear the tlp in progress flag.
7702                  */
7703                 rack->rc_tlp_in_progress = 0;
7704         }
7705         SOCKBUF_LOCK_ASSERT(sb);
7706         if (len > 0) {
7707                 if (len >= tp->t_maxseg)
7708                         tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT;
7709                 else
7710                         tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT;
7711         }
7712         /*
7713          * Before ESTABLISHED, force sending of initial options unless TCP
7714          * set not to do any options. NOTE: we assume that the IP/TCP header
7715          * plus TCP options always fit in a single mbuf, leaving room for a
7716          * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr)
7717          * + optlen <= MCLBYTES
7718          */
7719         optlen = 0;
7720 #ifdef INET6
7721         if (isipv6)
7722                 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
7723         else
7724 #endif
7725                 hdrlen = sizeof(struct tcpiphdr);
7726
7727         /*
7728          * Compute options for segment. We only have to care about SYN and
7729          * established connection segments.  Options for SYN-ACK segments
7730          * are handled in TCP syncache.
7731          */
7732         to.to_flags = 0;
7733         if ((tp->t_flags & TF_NOOPT) == 0) {
7734                 /* Maximum segment size. */
7735                 if (flags & TH_SYN) {
7736                         tp->snd_nxt = tp->iss;
7737                         to.to_mss = tcp_mssopt(&inp->inp_inc);
7738 #ifdef NETFLIX_TCPOUDP
7739                         if (tp->t_port)
7740                                 to.to_mss -= V_tcp_udp_tunneling_overhead;
7741 #endif
7742                         to.to_flags |= TOF_MSS;
7743
7744                         /*
7745                          * On SYN or SYN|ACK transmits on TFO connections,
7746                          * only include the TFO option if it is not a
7747                          * retransmit, as the presence of the TFO option may
7748                          * have caused the original SYN or SYN|ACK to have
7749                          * been dropped by a middlebox.
7750                          */
7751                         if (IS_FASTOPEN(tp->t_flags) &&
7752                             (tp->t_rxtshift == 0)) {
7753                                 if (tp->t_state == TCPS_SYN_RECEIVED) {
7754                                         to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
7755                                         to.to_tfo_cookie =
7756                                             (u_int8_t *)&tp->t_tfo_cookie.server;
7757                                         to.to_flags |= TOF_FASTOPEN;
7758                                         wanted_cookie = 1;
7759                                 } else if (tp->t_state == TCPS_SYN_SENT) {
7760                                         to.to_tfo_len =
7761                                             tp->t_tfo_client_cookie_len;
7762                                         to.to_tfo_cookie =
7763                                             tp->t_tfo_cookie.client;
7764                                         to.to_flags |= TOF_FASTOPEN;
7765                                         wanted_cookie = 1;
7766                                         /*
7767                                          * If we wind up having more data to
7768                                          * send with the SYN than can fit in
7769                                          * one segment, don't send any more
7770                                          * until the SYN|ACK comes back from
7771                                          * the other end.
7772                                          */
7773                                         sendalot = 0;
7774                                 }
7775                         }
7776                 }
7777                 /* Window scaling. */
7778                 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) {
7779                         to.to_wscale = tp->request_r_scale;
7780                         to.to_flags |= TOF_SCALE;
7781                 }
7782                 /* Timestamps. */
7783                 if ((tp->t_flags & TF_RCVD_TSTMP) ||
7784                     ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
7785                         to.to_tsval = cts + tp->ts_offset;
7786                         to.to_tsecr = tp->ts_recent;
7787                         to.to_flags |= TOF_TS;
7788                 }
7789                 /* Set receive buffer autosizing timestamp. */
7790                 if (tp->rfbuf_ts == 0 &&
7791                     (so->so_rcv.sb_flags & SB_AUTOSIZE))
7792                         tp->rfbuf_ts = tcp_ts_getticks();
7793                 /* Selective ACK's. */
7794                 if (flags & TH_SYN)
7795                         to.to_flags |= TOF_SACKPERM;
7796                 else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
7797                     tp->rcv_numsacks > 0) {
7798                         to.to_flags |= TOF_SACK;
7799                         to.to_nsacks = tp->rcv_numsacks;
7800                         to.to_sacks = (u_char *)tp->sackblks;
7801                 }
7802 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
7803                 /* TCP-MD5 (RFC2385). */
7804                 if (tp->t_flags & TF_SIGNATURE)
7805                         to.to_flags |= TOF_SIGNATURE;
7806 #endif                          /* TCP_SIGNATURE */
7807
7808                 /* Processing the options. */
7809                 hdrlen += optlen = tcp_addoptions(&to, opt);
7810                 /*
7811                  * If we wanted a TFO option to be added, but it was unable
7812                  * to fit, ensure no data is sent.
7813                  */
7814                 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie &&
7815                     !(to.to_flags & TOF_FASTOPEN))
7816                         len = 0;
7817         }
7818 #ifdef NETFLIX_TCPOUDP
7819         if (tp->t_port) {
7820                 if (V_tcp_udp_tunneling_port == 0) {
7821                         /* The port was removed?? */
7822                         SOCKBUF_UNLOCK(&so->so_snd);
7823                         return (EHOSTUNREACH);
7824                 }
7825                 hdrlen += sizeof(struct udphdr);
7826         }
7827 #endif
7828         ipoptlen = 0;
7829 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
7830         ipoptlen += ipsec_optlen;
7831 #endif
7832
7833         /*
7834          * Adjust data length if insertion of options will bump the packet
7835          * length beyond the t_maxseg length. Clear the FIN bit because we
7836          * cut off the tail of the segment.
7837          */
7838         if (len + optlen + ipoptlen > tp->t_maxseg) {
7839                 if (flags & TH_FIN) {
7840                         would_have_fin = 1;
7841                         flags &= ~TH_FIN;
7842                 }
7843                 if (tso) {
7844                         uint32_t if_hw_tsomax;
7845                         uint32_t moff;
7846                         int32_t max_len;
7847
7848                         /* extract TSO information */
7849                         if_hw_tsomax = tp->t_tsomax;
7850                         if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
7851                         if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
7852                         KASSERT(ipoptlen == 0,
7853                             ("%s: TSO can't do IP options", __func__));
7854
7855                         /*
7856                          * Check if we should limit by maximum payload
7857                          * length:
7858                          */
7859                         if (if_hw_tsomax != 0) {
7860                                 /* compute maximum TSO length */
7861                                 max_len = (if_hw_tsomax - hdrlen -
7862                                     max_linkhdr);
7863                                 if (max_len <= 0) {
7864                                         len = 0;
7865                                 } else if (len > max_len) {
7866                                         sendalot = 1;
7867                                         len = max_len;
7868                                 }
7869                         }
7870                         /*
7871                          * Prevent the last segment from being fractional
7872                          * unless the send sockbuf can be emptied:
7873                          */
7874                         max_len = (tp->t_maxseg - optlen);
7875                         if ((sb_offset + len) < sbavail(sb)) {
7876                                 moff = len % (u_int)max_len;
7877                                 if (moff != 0) {
7878                                         len -= moff;
7879                                         sendalot = 1;
7880                                 }
7881                         }
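                        /*
                         * Worked example (assumed values, not from this code):
                         * with max_len = 1448 and len = 10000 while more data
                         * remains in the socket buffer, moff = 10000 % 1448 =
                         * 1312, so len is trimmed to 8688 and sendalot is set
                         * to come back for the rest.
                         */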
7882                         /*
7883                          * In case there are too many small fragments don't
7884                          * use TSO:
7885                          */
7886                         if (len <= max_len) {
7887                                 len = max_len;
7888                                 sendalot = 1;
7889                                 tso = 0;
7890                         }
7891                         /*
7892                          * Send the FIN in a separate segment after the bulk
7893                          * sending is done. We don't trust the TSO
7894                          * implementations to clear the FIN flag on all but
7895                          * the last segment.
7896                          */
7897                         if (tp->t_flags & TF_NEEDFIN)
7898                                 sendalot = 1;
7899
7900                 } else {
7901                         len = tp->t_maxseg - optlen - ipoptlen;
7902                         sendalot = 1;
7903                 }
7904         } else
7905                 tso = 0;
7906         KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET,
7907             ("%s: len > IP_MAXPACKET", __func__));
7908 #ifdef DIAGNOSTIC
7909 #ifdef INET6
7910         if (max_linkhdr + hdrlen > MCLBYTES)
7911 #else
7912         if (max_linkhdr + hdrlen > MHLEN)
7913 #endif
7914                 panic("tcphdr too big");
7915 #endif
7916
7917         /*
7918          * This KASSERT is here to catch edge cases at a well defined place.
7919          * Before, those had triggered (random) panic conditions further
7920          * down.
7921          */
7922         KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
7923         if ((len == 0) &&
7924             (flags & TH_FIN) &&
7925             (sbused(sb))) {
7926                 /*
7927                  * We have outstanding data; don't send a FIN by itself!
7928                  */
7929                 goto just_return;
7930         }
7931         /*
7932          * Grab a header mbuf, attaching a copy of data to be transmitted,
7933          * and initialize the header from the template for sends on this
7934          * connection.
7935          */
7936         if (len) {
7937                 uint32_t max_val;
7938                 uint32_t moff;
7939
7940                 if (rack->rc_pace_max_segs)
7941                         max_val = rack->rc_pace_max_segs * tp->t_maxseg;
7942                 else
7943                         max_val = len;
7944                 /*
7945                  * We allow a limit on sending with hptsi.
7946                  */
7947                 if (len > max_val) {
7948                         len = max_val;
7949                 }
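                /*
                 * If the TCP/IP header plus the maximum link header will not
                 * fit in a standard header mbuf, grab a cluster instead.
                 */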
7950 #ifdef INET6
7951                 if (MHLEN < hdrlen + max_linkhdr)
7952                         m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
7953                 else
7954 #endif
7955                         m = m_gethdr(M_NOWAIT, MT_DATA);
7956
7957                 if (m == NULL) {
7958                         SOCKBUF_UNLOCK(sb);
7959                         error = ENOBUFS;
7960                         sack_rxmit = 0;
7961                         goto out;
7962                 }
7963                 m->m_data += max_linkhdr;
7964                 m->m_len = hdrlen;
7965
7966                 /*
7967                  * Start the m_copy functions from the closest mbuf to the
7968                  * sb_offset in the socket buffer chain.
7969                  */
7970                 mb = sbsndptr_noadv(sb, sb_offset, &moff);
7971                 if (len <= MHLEN - hdrlen - max_linkhdr) {
7972                         m_copydata(mb, moff, (int)len,
7973                             mtod(m, caddr_t)+hdrlen);
7974                         if (SEQ_LT(tp->snd_nxt, tp->snd_max))
7975                                 sbsndptr_adv(sb, mb, len);
7976                         m->m_len += len;
7977                 } else {
7978                         struct sockbuf *msb;
7979
7980                         if (SEQ_LT(tp->snd_nxt, tp->snd_max))
7981                                 msb = NULL;
7982                         else
7983                                 msb = sb;
7984                         m->m_next = tcp_m_copym(mb, moff, &len,
7985                             if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb);
7986                         if (len <= (tp->t_maxseg - optlen)) {
7987                                 /* 
7988                                  * We must have run out of mbufs for the
7989                                  * copy; it is now short enough that TSO is
7990                                  * no longer needed. Let's not set sendalot
7991                                  * since we are low on mbufs.
7992                                  */
7993                                 tso = 0;
7994                         }
7995                         if (m->m_next == NULL) {
7996                                 SOCKBUF_UNLOCK(sb);
7997                                 (void)m_free(m);
7998                                 error = ENOBUFS;
7999                                 sack_rxmit = 0;
8000                                 goto out;
8001                         }
8002                 }
8003                 if ((tp->t_flags & TF_FORCEDATA) && len == 1) {
8004                         TCPSTAT_INC(tcps_sndprobe);
8005 #ifdef NETFLIX_STATS
8006                         if (SEQ_LT(tp->snd_nxt, tp->snd_max))
8007                                 stats_voi_update_abs_u32(tp->t_stats,
8008                                     VOI_TCP_RETXPB, len);
8009                         else
8010                                 stats_voi_update_abs_u64(tp->t_stats,
8011                                     VOI_TCP_TXPB, len);
8012 #endif
8013                 } else if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
8014                         if (rsm && (rsm->r_flags & RACK_TLP)) {
8015                                 /*
8016                                  * A TLP should not count in the retransmit
8017                                  * count, but in its own bin.
8018                                  */
8019                                 counter_u64_add(rack_tlp_retran, 1);
8020                                 counter_u64_add(rack_tlp_retran_bytes, len);
8021                         } else {
8022                                 tp->t_sndrexmitpack++;
8023                                 TCPSTAT_INC(tcps_sndrexmitpack);
8024                                 TCPSTAT_ADD(tcps_sndrexmitbyte, len);
8025                         }
8026 #ifdef NETFLIX_STATS
8027                         stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
8028                             len);
8029 #endif
8030                 } else {
8031                         TCPSTAT_INC(tcps_sndpack);
8032                         TCPSTAT_ADD(tcps_sndbyte, len);
8033 #ifdef NETFLIX_STATS
8034                         stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
8035                             len);
8036 #endif
8037                 }
8038                 /*
8039                  * If we're sending everything we've got, set PUSH. (This
8040                  * will keep happy those implementations which only give
8041                  * data to the user when a buffer fills or a PUSH comes in.)
8042                  */
8043                 if (sb_offset + len == sbused(sb) &&
8044                     sbused(sb) &&
8045                     !(flags & TH_SYN))
8046                         flags |= TH_PUSH;
8047
8048                 /*
8049                  * Are we doing hptsi? If so we must calculate the slot. We
8050                  * only do hptsi in ESTABLISHED and with no RESET being
8051                  * sent where we have data to send.
8052                  */
8053                 if (((tp->t_state == TCPS_ESTABLISHED) ||
8054                     (tp->t_state == TCPS_CLOSE_WAIT) ||
8055                     ((tp->t_state == TCPS_FIN_WAIT_1) &&
8056                     ((tp->t_flags & TF_SENTFIN) == 0) &&
8057                     ((flags & TH_FIN) == 0))) &&
8058                     ((flags & TH_RST) == 0) &&
8059                     (rack->rc_always_pace)) {
8060                         /*
8061                          * We use the most optimistic possible cwnd/srtt
8062                          * for the sending calculations. This makes our
8063                          * calculation anticipate getting more through more
8064                          * quickly than is actually possible, but that's OK;
8065                          * we don't want the peer to see a gap in data sending.
8066                          */
8067                         uint32_t srtt, cwnd, tr_perms = 0;
8068         
8069                         if (rack->r_ctl.rc_rack_min_rtt)
8070                                 srtt = rack->r_ctl.rc_rack_min_rtt;
8071                         else
8072                                 srtt = TICKS_2_MSEC((tp->t_srtt >> TCP_RTT_SHIFT));
8073                         if (rack->r_ctl.rc_rack_largest_cwnd)
8074                                 cwnd = rack->r_ctl.rc_rack_largest_cwnd;
8075                         else
8076                                 cwnd = tp->snd_cwnd;
8077                         tr_perms = cwnd / srtt;
8078                         if (tr_perms == 0) {
8079                                 tr_perms = tp->t_maxseg;
8080                         }
8081                         tot_len_this_send += len;
8082                         /*
8083                          * Calculate how long this will take to drain. If
8084                          * the calculation comes out to zero, that's OK; we
8085                          * will use sendalot to possibly spin around for
8086                          * more, increasing tot_len_this_send to the point
8087                          * that it is going to require a pace, or until we
8088                          * hit the cwnd, in which case we are just waiting
8089                          * for an ACK.
8090                          */
8091                         slot = tot_len_this_send / tr_perms;
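                        /*
                         * Illustrative arithmetic (assumed values): with
                         * cwnd = 100000 bytes and srtt = 50 ms, tr_perms is
                         * 2000 bytes per ms; a 10000 byte send this pass
                         * gives slot = 10000 / 2000 = 5 in the same time
                         * units as srtt.  A rc_pace_reduce of 4 would then
                         * trim that slot by 5 / 4 = 1 below.
                         */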
8092                         /* Now do we reduce the time so we don't run dry? */
8093                         if (slot && rack->rc_pace_reduce) {
8094                                 int32_t reduce;
8095
8096                                 reduce = (slot / rack->rc_pace_reduce);
8097                                 if (reduce < slot) {
8098                                         slot -= reduce;
8099                                 } else
8100                                         slot = 0;
8101                         }
8102                         if (rack->r_enforce_min_pace &&
8103                             (slot == 0) &&
8104                             (tot_len_this_send >= (rack->r_min_pace_seg_thresh * tp->t_maxseg))) {
8105                                 /* We are enforcing a minimum pace time of 1ms */
8106                                 slot = rack->r_enforce_min_pace;
8107                         }
8108                 }
8109                 SOCKBUF_UNLOCK(sb);
8110         } else {
8111                 SOCKBUF_UNLOCK(sb);
8112                 if (tp->t_flags & TF_ACKNOW)
8113                         TCPSTAT_INC(tcps_sndacks);
8114                 else if (flags & (TH_SYN | TH_FIN | TH_RST))
8115                         TCPSTAT_INC(tcps_sndctrl);
8116                 else if (SEQ_GT(tp->snd_up, tp->snd_una))
8117                         TCPSTAT_INC(tcps_sndurg);
8118                 else
8119                         TCPSTAT_INC(tcps_sndwinup);
8120
8121                 m = m_gethdr(M_NOWAIT, MT_DATA);
8122                 if (m == NULL) {
8123                         error = ENOBUFS;
8124                         sack_rxmit = 0;
8125                         goto out;
8126                 }
8127 #ifdef INET6
8128                 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
8129                     MHLEN >= hdrlen) {
8130                         M_ALIGN(m, hdrlen);
8131                 } else
8132 #endif
8133                         m->m_data += max_linkhdr;
8134                 m->m_len = hdrlen;
8135         }
8136         SOCKBUF_UNLOCK_ASSERT(sb);
8137         m->m_pkthdr.rcvif = (struct ifnet *)0;
8138 #ifdef MAC
8139         mac_inpcb_create_mbuf(inp, m);
8140 #endif
8141 #ifdef INET6
8142         if (isipv6) {
8143                 ip6 = mtod(m, struct ip6_hdr *);
8144 #ifdef NETFLIX_TCPOUDP
8145                 if (tp->t_port) {
8146                         udp = (struct udphdr *)((caddr_t)ip6 + ipoptlen + sizeof(struct ip6_hdr));
8147                         udp->uh_sport = htons(V_tcp_udp_tunneling_port);
8148                         udp->uh_dport = tp->t_port;
8149                         ulen = hdrlen + len - sizeof(struct ip6_hdr);
8150                         udp->uh_ulen = htons(ulen);
8151                         th = (struct tcphdr *)(udp + 1);
8152                 } else
8153 #endif
8154                         th = (struct tcphdr *)(ip6 + 1);
8155                 tcpip_fillheaders(inp, ip6, th);
8156         } else
8157 #endif                          /* INET6 */
8158         {
8159                 ip = mtod(m, struct ip *);
8160 #ifdef TCPDEBUG
8161                 ipov = (struct ipovly *)ip;
8162 #endif
8163 #ifdef NETFLIX_TCPOUDP
8164                 if (tp->t_port) {
8165                         udp = (struct udphdr *)((caddr_t)ip + ipoptlen + sizeof(struct ip));
8166                         udp->uh_sport = htons(V_tcp_udp_tunneling_port);
8167                         udp->uh_dport = tp->t_port;
8168                         ulen = hdrlen + len - sizeof(struct ip);
8169                         udp->uh_ulen = htons(ulen);
8170                         th = (struct tcphdr *)(udp + 1);
8171                 } else
8172 #endif
8173                         th = (struct tcphdr *)(ip + 1);
8174                 tcpip_fillheaders(inp, ip, th);
8175         }
8176         /*
8177          * Fill in fields, remembering maximum advertised window for use in
8178          * delaying messages about window sizes. If resending a FIN, be sure
8179          * not to use a new sequence number.
8180          */
8181         if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
8182             tp->snd_nxt == tp->snd_max)
8183                 tp->snd_nxt--;
8184         /*
8185          * If we are starting a connection, send ECN setup SYN packet. If we
8186          * are on a retransmit, we may resend those bits a number of times
8187          * as per RFC 3168.
8188          */
8189         if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn == 1) {
8190                 if (tp->t_rxtshift >= 1) {
8191                         if (tp->t_rxtshift <= V_tcp_ecn_maxretries)
8192                                 flags |= TH_ECE | TH_CWR;
8193                 } else
8194                         flags |= TH_ECE | TH_CWR;
8195         }
8196         if (tp->t_state == TCPS_ESTABLISHED &&
8197             (tp->t_flags & TF_ECN_PERMIT)) {
8198                 /*
8199                  * If the peer has ECN, mark data packets with ECN capable
8200                  * transmission (ECT). Ignore pure ack packets,
8201                  * retransmissions and window probes.
8202                  */
8203                 if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max) &&
8204                     !((tp->t_flags & TF_FORCEDATA) && len == 1)) {
8205 #ifdef INET6
8206                         if (isipv6)
8207                                 ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20);
8208                         else
8209 #endif
8210                                 ip->ip_tos |= IPTOS_ECN_ECT0;
8211                         TCPSTAT_INC(tcps_ecn_ect0);
8212                 }
8213                 /*
8214                  * Reply with proper ECN notifications.
8215                  */
8216                 if (tp->t_flags & TF_ECN_SND_CWR) {
8217                         flags |= TH_CWR;
8218                         tp->t_flags &= ~TF_ECN_SND_CWR;
8219                 }
8220                 if (tp->t_flags & TF_ECN_SND_ECE)
8221                         flags |= TH_ECE;
8222         }
8223         /*
8224          * If we are doing retransmissions, then snd_nxt will not reflect
8225          * the first unsent octet.  For ACK only packets, we do not want the
8226          * sequence number of the retransmitted packet, we want the sequence
8227          * number of the next unsent octet.  So, if there is no data (and no
8228          * SYN or FIN), use snd_max instead of snd_nxt when filling in
8229          * ti_seq.  But if we are in persist state, snd_max might reflect
8230          * one byte beyond the right edge of the window, so use snd_nxt in
8231          * that case, since we know we aren't doing a retransmission.
8232          * (retransmit and persist are mutually exclusive...)
8233          */
8234         if (sack_rxmit == 0) {
8235                 if (len || (flags & (TH_SYN | TH_FIN)) ||
8236                     rack->rc_in_persist) {
8237                         th->th_seq = htonl(tp->snd_nxt);
8238                         rack_seq = tp->snd_nxt;
8239                 } else if (flags & TH_RST) {
8240                         /*
8241                          * For a Reset send the last cum ack in sequence
8242                          * (this like any other choice may still generate a
8243                          * challenge ack, if a ack-update packet is in
8244                          * flight).
8245                          */
8246                         th->th_seq = htonl(tp->snd_una);
8247                         rack_seq = tp->snd_una;
8248                 } else {
8249                         th->th_seq = htonl(tp->snd_max);
8250                         rack_seq = tp->snd_max;
8251                 }
8252         } else {
8253                 th->th_seq = htonl(rsm->r_start);
8254                 rack_seq = rsm->r_start;
8255         }
8256         th->th_ack = htonl(tp->rcv_nxt);
8257         if (optlen) {
8258                 bcopy(opt, th + 1, optlen);
8259                 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
8260         }
8261         th->th_flags = flags;
8262         /*
8263          * Calculate receive window.  Don't shrink window, but avoid silly
8264          * window syndrome.
8265          */
8266         if (recwin < (long)(so->so_rcv.sb_hiwat / 4) &&
8267             recwin < (long)tp->t_maxseg)
8268                 recwin = 0;
8269         if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
8270             recwin < (long)(tp->rcv_adv - tp->rcv_nxt))
8271                 recwin = (long)(tp->rcv_adv - tp->rcv_nxt);
8272         if (recwin > (long)TCP_MAXWIN << tp->rcv_scale)
8273                 recwin = (long)TCP_MAXWIN << tp->rcv_scale;
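        /*
         * For example (assumed values): with t_maxseg = 1460 and
         * sb_hiwat = 65536, a recwin below 1460 is rounded down to 0 above
         * to avoid a silly window, while recwin is never allowed to drop
         * below what was already advertised (rcv_adv - rcv_nxt) or to
         * exceed TCP_MAXWIN << rcv_scale.
         */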
8274
8275         /*
8276          * According to RFC1323 the window field in a SYN (i.e., a <SYN> or
8277          * <SYN,ACK>) segment itself is never scaled.  The <SYN,ACK> case is
8278          * handled in syncache.
8279          */
8280         if (flags & TH_SYN)
8281                 th->th_win = htons((u_short)
8282                     (min(sbspace(&so->so_rcv), TCP_MAXWIN)));
8283         else
8284                 th->th_win = htons((u_short)(recwin >> tp->rcv_scale));
8285         /*
8286          * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0
8287          * window.  This may cause the remote transmitter to stall.  This
8288          * flag tells soreceive() to disable delayed acknowledgements when
8289          * draining the buffer.  This can occur if the receiver is
8290          * attempting to read more data than can be buffered prior to
8291          * transmitting on the connection.
8292          */
8293         if (th->th_win == 0) {
8294                 tp->t_sndzerowin++;
8295                 tp->t_flags |= TF_RXWIN0SENT;
8296         } else
8297                 tp->t_flags &= ~TF_RXWIN0SENT;
8298         if (SEQ_GT(tp->snd_up, tp->snd_nxt)) {
8299                 th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt));
8300                 th->th_flags |= TH_URG;
8301         } else
8302                 /*
8303                  * If no urgent pointer to send, then we pull the urgent
8304                  * pointer to the left edge of the send window so that it
8305                  * doesn't drift into the send window on sequence number
8306                  * wraparound.
8307                  */
8308                 tp->snd_up = tp->snd_una;       /* drag it along */
8309
8310 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
8311         if (to.to_flags & TOF_SIGNATURE) {
8312                 /*
8313                  * Calculate MD5 signature and put it into the place
8314                  * determined before.
8315                  * NOTE: since TCP options buffer doesn't point into
8316                  * mbuf's data, calculate offset and use it.
8317                  */
8318                 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th,
8319                     (u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
8320                         /*
8321                          * Do not send segment if the calculation of MD5
8322                          * digest has failed.
8323                          */
8324                         goto out;
8325                 }
8326         }
8327 #endif
8328
8329         /*
8330          * Put TCP length in extended header, and then checksum extended
8331          * header and data.
8332          */
8333         m->m_pkthdr.len = hdrlen + len; /* in6_cksum() needs this */
8334 #ifdef INET6
8335         if (isipv6) {
8336                 /*
8337                  * ip6_plen does not need to be filled in now; it will be
8338                  * filled in by ip6_output.
8339                  */
8340                 if (tp->t_port) {
8341                         m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
8342                         m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
8343                         udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
8344                         th->th_sum = htons(0);
8345                 } else {
8346                         m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
8347                         m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
8348                         th->th_sum = in6_cksum_pseudo(ip6,
8349                             sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
8350                             0);
8351                 }
8352         }
8353 #endif
8354 #if defined(INET6) && defined(INET)
8355         else
8356 #endif
8357 #ifdef INET
8358         {
8359                 if (tp->t_port) {
8360                         m->m_pkthdr.csum_flags = CSUM_UDP;
8361                         m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
8362                         udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
8363                            ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
8364                         th->th_sum = htons(0);
8365                 } else {
8366                         m->m_pkthdr.csum_flags = CSUM_TCP;
8367                         m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
8368                         th->th_sum = in_pseudo(ip->ip_src.s_addr,
8369                             ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
8370                             IPPROTO_TCP + len + optlen));
8371                 }
8372                 /* IP version must be set here for ipv4/ipv6 checking later */
8373                 KASSERT(ip->ip_v == IPVERSION,
8374                     ("%s: IP version incorrect: %d", __func__, ip->ip_v));
8375         }
8376 #endif
8377
8378         /*
8379          * Enable TSO and specify the size of the segments. The TCP pseudo
8380          * header checksum is always provided. XXX: Fixme: This is currently
8381          * not the case for IPv6.
8382          */
8383         if (tso) {
8384                 KASSERT(len > tp->t_maxseg - optlen,
8385                     ("%s: len <= tso_segsz", __func__));
8386                 m->m_pkthdr.csum_flags |= CSUM_TSO;
8387                 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
8388         }
8389 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
8390         KASSERT(len + hdrlen + ipoptlen - ipsec_optlen == m_length(m, NULL),
8391             ("%s: mbuf chain shorter than expected: %d + %u + %u - %u != %u",
8392             __func__, len, hdrlen, ipoptlen, ipsec_optlen, m_length(m, NULL)));
8393 #else
8394         KASSERT(len + hdrlen + ipoptlen == m_length(m, NULL),
8395             ("%s: mbuf chain shorter than expected: %d + %u + %u != %u",
8396             __func__, len, hdrlen, ipoptlen, m_length(m, NULL)));
8397 #endif
8398
8399 #ifdef TCP_HHOOK
8400         /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */
8401         hhook_run_tcp_est_out(tp, th, &to, len, tso);
8402 #endif
8403
8404 #ifdef TCPDEBUG
8405         /*
8406          * Trace.
8407          */
8408         if (so->so_options & SO_DEBUG) {
8409                 u_short save = 0;
8410
8411 #ifdef INET6
8412                 if (!isipv6)
8413 #endif
8414                 {
8415                         save = ipov->ih_len;
8416                         ipov->ih_len = htons(m->m_pkthdr.len    /* - hdrlen +
8417                               * (th->th_off << 2) */ );
8418                 }
8419                 tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0);
8420 #ifdef INET6
8421                 if (!isipv6)
8422 #endif
8423                         ipov->ih_len = save;
8424         }
8425 #endif                          /* TCPDEBUG */
8426
8427         /* We're getting ready to send; log now. */
8428         if (tp->t_logstate != TCP_LOG_STATE_OFF) {
8429                 union tcp_log_stackspecific log;
8430
8431                 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
8432                 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
8433                 log.u_bbr.ininput = rack->rc_inp->inp_in_input;
8434                 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
8435                 if (rsm || sack_rxmit) {
8436                         log.u_bbr.flex8 = 1;
8437                 } else {
8438                         log.u_bbr.flex8 = 0;
8439                 }
8440                 lgb = tcp_log_event_(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK,
8441                     len, &log, false, NULL, NULL, 0, NULL);
8442         } else
8443                 lgb = NULL;
8444
8445         /*
8446          * Fill in IP length and desired time to live and send to IP level.
8447          * There should be a better way to handle ttl and tos; we could keep
8448          * them in the template, but need a way to checksum without them.
8449          */
8450         /*
8451          * m->m_pkthdr.len should have been set before the checksum
8452          * calculation, because in6_cksum() needs it.
8453          */
8454 #ifdef INET6
8455         if (isipv6) {
8456                 /*
8457                  * We set the hop limit separately for every segment, since
8458                  * the user might want to change the value via setsockopt.
8459                  * Also, the desired default hop limit might be changed via
8460                  * Neighbor Discovery.
8461                  */
8462                 ip6->ip6_hlim = in6_selecthlim(inp, NULL);
8463
8464                 /*
8465                  * Set the packet size here for the benefit of DTrace
8466                  * probes. ip6_output() will set it properly; it's supposed
8467                  * to include the option header lengths as well.
8468                  */
8469                 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
8470
8471                 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
8472                         tp->t_flags2 |= TF2_PLPMTU_PMTUD;
8473                 else
8474                         tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
8475
8476                 if (tp->t_state == TCPS_SYN_SENT)
8477                         TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th);
8478
8479                 TCP_PROBE5(send, NULL, tp, ip6, tp, th);
8480                 /* TODO: IPv6 IP6TOS_ECT bit on */
8481                 error = ip6_output(m, tp->t_inpcb->in6p_outputopts,
8482                     &inp->inp_route6,
8483                     ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
8484                     NULL, NULL, inp);
8485
8486                 if (error == EMSGSIZE && inp->inp_route6.ro_rt != NULL)
8487                         mtu = inp->inp_route6.ro_rt->rt_mtu;
8488         }
8489 #endif                          /* INET6 */
8490 #if defined(INET) && defined(INET6)
8491         else
8492 #endif
8493 #ifdef INET
8494         {
8495                 ip->ip_len = htons(m->m_pkthdr.len);
8496 #ifdef INET6
8497                 if (inp->inp_vflag & INP_IPV6PROTO)
8498                         ip->ip_ttl = in6_selecthlim(inp, NULL);
8499 #endif                          /* INET6 */
8500                 /*
8501                  * If we do path MTU discovery, then we set DF on every
8502                  * packet. This might not be the best thing to do according
8503                  * to RFC3390 Section 2. However, the TCP hostcache mitigates
8504                  * the problem so it affects only the first TCP connection
8505                  * with a host.
8506                  *
8507                  * NB: Don't set DF on small MTU/MSS to have a safe
8508                  * fallback.
8509                  */
8510                 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
8511                         tp->t_flags2 |= TF2_PLPMTU_PMTUD;
8512                         if (tp->t_port == 0 || len < V_tcp_minmss) {
8513                                 ip->ip_off |= htons(IP_DF);
8514                         }
8515                 } else {
8516                         tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
8517                 }
8518
8519                 if (tp->t_state == TCPS_SYN_SENT)
8520                         TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);
8521
8522                 TCP_PROBE5(send, NULL, tp, ip, tp, th);
8523
8524                 error = ip_output(m, tp->t_inpcb->inp_options, &inp->inp_route,
8525                     ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 0,
8526                     inp);
8527                 if (error == EMSGSIZE && inp->inp_route.ro_rt != NULL)
8528                         mtu = inp->inp_route.ro_rt->rt_mtu;
8529         }
8530 #endif                          /* INET */
8531
8532 out:
8533         if (lgb) {
8534                 lgb->tlb_errno = error;
8535                 lgb = NULL;
8536         }
8537         /*
8538          * In transmit state, time the transmission and arrange for the
8539          * retransmit.  In persist state, just set snd_max.
8540          */
8541         if (error == 0) {
8542                 if (len == 0)
8543                         counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1);
8544                 else if (len == 1) {
8545                         counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1);
8546                 } else if (len > 1) {
8547                         int idx;
8548
8549                         idx = (len / tp->t_maxseg) + 3;
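                        /*
                         * Example (assumed values): len = 4380 with t_maxseg =
                         * 1460 gives idx = 3 + 3 = 6; any idx at or beyond
                         * TCP_MSS_ACCT_ATIMER is counted in the
                         * TCP_MSS_ACCT_ATIMER - 1 bucket below.
                         */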
8550                         if (idx >= TCP_MSS_ACCT_ATIMER)
8551                                 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
8552                         else
8553                                 counter_u64_add(rack_out_size[idx], 1);
8554                 }
8555         }
8556         if (sub_from_prr && (error == 0)) {
8557                 rack->r_ctl.rc_prr_sndcnt -= len;
8558         }
8559         sub_from_prr = 0;
8560         rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, cts,
8561             pass, rsm);
8562         if ((tp->t_flags & TF_FORCEDATA) == 0 ||
8563             (rack->rc_in_persist == 0)) {
8564 #ifdef NETFLIX_STATS
8565                 tcp_seq startseq = tp->snd_nxt;
8566 #endif
8567
8568                 /*
8569                  * Advance snd_nxt over sequence space of this segment.
8570                  */
8571                 if (error)
8572                         /* We don't log or do anything with errors */
8573                         goto timer;
8574
8575                 if (flags & (TH_SYN | TH_FIN)) {
8576                         if (flags & TH_SYN)
8577                                 tp->snd_nxt++;
8578                         if (flags & TH_FIN) {
8579                                 tp->snd_nxt++;
8580                                 tp->t_flags |= TF_SENTFIN;
8581                         }
8582                 }
8583                 /* In the ENOBUFS case we do *not* update snd_max */
8584                 if (sack_rxmit)
8585                         goto timer;
8586
8587                 tp->snd_nxt += len;
8588                 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
8589                         if (tp->snd_una == tp->snd_max) {
8590                                 /*
8591                                  * Update the time we just added data since
8592                                  * none was outstanding.
8593                                  */
8594                                 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
8595                                 tp->t_acktime = ticks;
8596                         }
8597                         tp->snd_max = tp->snd_nxt;
8598 #ifdef NETFLIX_STATS
8599                         if (!(tp->t_flags & TF_GPUTINPROG) && len) {
8600                                 tp->t_flags |= TF_GPUTINPROG;
8601                                 tp->gput_seq = startseq;
8602                                 tp->gput_ack = startseq +
8603                                     ulmin(sbavail(sb) - sb_offset, sendwin);
8604                                 tp->gput_ts = tcp_ts_getticks();
8605                         }
8606 #endif
8607                 }
8608                 /*
8609                  * Set retransmit timer if not currently set, and not doing
8610                  * a pure ack or a keep-alive probe. Initial value for
8611                  * retransmit timer is smoothed round-trip time + 2 *
8612                  * round-trip time variance. Initialize shift counter which
8613                  * is used for backoff of retransmit time.
8614                  */
8615 timer:
8616                 if ((tp->snd_wnd == 0) &&
8617                     TCPS_HAVEESTABLISHED(tp->t_state)) {
8618                         /*
8619                          * If the persist timer was set up above (right before
8620                          * the goto send), it still needs to be on. Make sure
8621                          * everything else is canceled. If the persist timer
8622                          * is not running, we want to get it running now.
8623                          */
8624                         if (rack->rc_in_persist == 0) {
8625                                 rack_enter_persist(tp, rack, cts);
8626                         }
8627                 }
8628         } else {
8629                 /*
8630                  * Persist case, update snd_max but since we are in persist
8631                  * mode (no window) we do not update snd_nxt.
8632                  */
8633                 int32_t xlen = len;
8634
8635                 if (error)
8636                         goto nomore;
8637
8638                 if (flags & TH_SYN)
8639                         ++xlen;
8640                 if (flags & TH_FIN) {
8641                         ++xlen;
8642                         tp->t_flags |= TF_SENTFIN;
8643                 }
8644                 /* In the ENOBUFS case we do *not* update snd_max */
8645                 if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max)) {
8646                         if (tp->snd_una == tp->snd_max) {
8647                                 /*
8648                                  * Update the time we just added data since
8649                                  * none was outstanding.
8650                                  */
8651                                 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
8652                                 tp->t_acktime = ticks;
8653                         }
8654                         tp->snd_max = tp->snd_nxt + len;
8655                 }
8656         }
8657 nomore:
8658         if (error) {
8659                 SOCKBUF_UNLOCK_ASSERT(sb);      /* Check gotos. */
8660                 /*
8661                  * Failures do not advance the seq counter above. For the
8662                  * case of ENOBUFS we will fall out and retry in 1ms with
8663                  * the hpts. Everything else will just have to retransmit
8664                  * with the timer.
8665                  *
8666                  * In any case, we do not want to loop around for another
8667                  * send without a good reason.
8668                  */
8669                 sendalot = 0;
8670                 switch (error) {
8671                 case EPERM:
8672                         tp->t_flags &= ~TF_FORCEDATA;
8673                         tp->t_softerror = error;
8674                         return (error);
8675                 case ENOBUFS:
8676                         if (slot == 0) {
8677                                 /*
8678                                  * Pace us right away to retry in a short
8679                                  * time.
8680                                  */
8681                                 slot = 1 + rack->rc_enobuf;
8682                                 if (rack->rc_enobuf < 255)
8683                                         rack->rc_enobuf++;
8684                                 if (slot > (rack->rc_rack_rtt / 2)) {
8685                                         slot = rack->rc_rack_rtt / 2;
8686                                 }
8687                                 if (slot < 10)
8688                                         slot = 10;
8689                         }
8690                         counter_u64_add(rack_saw_enobuf, 1);
8691                         error = 0;
8692                         goto enobufs;
8693                 case EMSGSIZE:
8694                         /*
8695                          * For some reason the interface we used initially
8696                          * to send segments changed to another or lowered
8697                          * its MTU. If TSO was active we either got an
8698                          * interface without TSO capabilities or TSO was
8699                          * turned off. If we obtained mtu from ip_output()
8700                          * then update it and try again.
8701                          */
8702                         if (tso)
8703                                 tp->t_flags &= ~TF_TSO;
8704                         if (mtu != 0) {
8705                                 tcp_mss_update(tp, -1, mtu, NULL, NULL);
8706                                 goto again;
8707                         }
8708                         slot = 10;
8709                         rack_start_hpts_timer(rack, tp, cts, __LINE__, slot, 0, 1);
8710                         tp->t_flags &= ~TF_FORCEDATA;
8711                         return (error);
8712                 case ENETUNREACH:
8713                         counter_u64_add(rack_saw_enetunreach, 1);
8714                 case EHOSTDOWN:
8715                 case EHOSTUNREACH:
8716                 case ENETDOWN:
8717                         if (TCPS_HAVERCVDSYN(tp->t_state)) {
8718                                 tp->t_softerror = error;
8719                         }
8720                         /* FALLTHROUGH */
8721                 default:
8722                         slot = 10;
8723                         rack_start_hpts_timer(rack, tp, cts, __LINE__, slot, 0, 1);
8724                         tp->t_flags &= ~TF_FORCEDATA;
8725                         return (error);
8726                 }
8727         } else {
8728                 rack->rc_enobuf = 0;
8729         }
8730         TCPSTAT_INC(tcps_sndtotal);
8731
8732         /*
8733          * Data sent (as far as we can tell). If this advertises a larger
8734          * window than any other segment, then remember the size of the
8735          * advertised window. Any pending ACK has now been sent.
8736          */
8737         if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
8738                 tp->rcv_adv = tp->rcv_nxt + recwin;
8739         tp->last_ack_sent = tp->rcv_nxt;
8740         tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
8741 enobufs:
8742         rack->r_tlp_running = 0;
8743         if ((flags & TH_RST) || (would_have_fin == 1)) {
8744                 /*
8745                  * We don't send again after a RST. We also do *not* send
8746                  * again if we would have had a FIN, but now have
8747                  * outstanding data.
8748                  */
8749                 slot = 0;
8750                 sendalot = 0;
8751         }
8752         if (slot) {
8753                 /* set the rack tcb into the slot N */
8754                 counter_u64_add(rack_paced_segments, 1);
8755         } else if (sendalot) {
8756                 if (len)
8757                         counter_u64_add(rack_unpaced_segments, 1);
8758                 sack_rxmit = 0;
8759                 tp->t_flags &= ~TF_FORCEDATA;
8760                 goto again;
8761         } else if (len) {
8762                 counter_u64_add(rack_unpaced_segments, 1);
8763         }
8764         tp->t_flags &= ~TF_FORCEDATA;
8765         rack_start_hpts_timer(rack, tp, cts, __LINE__, slot, tot_len_this_send, 1);
8766         return (error);
8767 }
8768
8769 /*
8770  * rack_ctloutput() must drop the inpcb lock before performing copyin on
8771  * socket option arguments.  When it re-acquires the lock after the copy, it
8772  * has to revalidate that the connection is still valid for the socket
8773  * option.
8774  */
8775 static int
8776 rack_set_sockopt(struct socket *so, struct sockopt *sopt,
8777     struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack)
8778 {
8779         int32_t error = 0, optval;
8780
8781         switch (sopt->sopt_name) {
8782         case TCP_RACK_PROP_RATE:
8783         case TCP_RACK_PROP:
8784         case TCP_RACK_TLP_REDUCE:
8785         case TCP_RACK_EARLY_RECOV:
8786         case TCP_RACK_PACE_ALWAYS:
8787         case TCP_DELACK:
8788         case TCP_RACK_PACE_REDUCE:
8789         case TCP_RACK_PACE_MAX_SEG:
8790         case TCP_RACK_PRR_SENDALOT:
8791         case TCP_RACK_MIN_TO:
8792         case TCP_RACK_EARLY_SEG:
8793         case TCP_RACK_REORD_THRESH:
8794         case TCP_RACK_REORD_FADE:
8795         case TCP_RACK_TLP_THRESH:
8796         case TCP_RACK_PKT_DELAY:
8797         case TCP_RACK_TLP_USE:
8798         case TCP_RACK_TLP_INC_VAR:
8799         case TCP_RACK_IDLE_REDUCE_HIGH:
8800         case TCP_RACK_MIN_PACE:
8801         case TCP_RACK_MIN_PACE_SEG:
8802         case TCP_BBR_RACK_RTT_USE:
8803         case TCP_DATA_AFTER_CLOSE:
8804                 break;
8805         default:
8806                 return (tcp_default_ctloutput(so, sopt, inp, tp));
8807                 break;
8808         }
8809         INP_WUNLOCK(inp);
8810         error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
8811         if (error)
8812                 return (error);
8813         INP_WLOCK(inp);
8814         if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
8815                 INP_WUNLOCK(inp);
8816                 return (ECONNRESET);
8817         }
8818         tp = intotcpcb(inp);
8819         rack = (struct tcp_rack *)tp->t_fb_ptr;
8820         switch (sopt->sopt_name) {
8821         case TCP_RACK_PROP_RATE:
8822                 if ((optval <= 0) || (optval >= 100)) {
8823                         error = EINVAL;
8824                         break;
8825                 }
8826                 RACK_OPTS_INC(tcp_rack_prop_rate);
8827                 rack->r_ctl.rc_prop_rate = optval;
8828                 break;
8829         case TCP_RACK_TLP_USE:
8830                 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) {
8831                         error = EINVAL;
8832                         break;
8833                 }
8834                 RACK_OPTS_INC(tcp_tlp_use);
8835                 rack->rack_tlp_threshold_use = optval;
8836                 break;
8837         case TCP_RACK_PROP:
8838                 /* RACK proportional rate reduction (bool) */
8839                 RACK_OPTS_INC(tcp_rack_prop);
8840                 rack->r_ctl.rc_prop_reduce = optval;
8841                 break;
8842         case TCP_RACK_TLP_REDUCE:
8843                 /* RACK TLP cwnd reduction (bool) */
8844                 RACK_OPTS_INC(tcp_rack_tlp_reduce);
8845                 rack->r_ctl.rc_tlp_cwnd_reduce = optval;
8846                 break;
8847         case TCP_RACK_EARLY_RECOV:
8848                 /* Should recovery happen early (bool) */
8849                 RACK_OPTS_INC(tcp_rack_early_recov);
8850                 rack->r_ctl.rc_early_recovery = optval;
8851                 break;
8852         case TCP_RACK_PACE_ALWAYS:
8853                 /* Use the always pace method (bool)  */
8854                 RACK_OPTS_INC(tcp_rack_pace_always);
8855                 if (optval > 0)
8856                         rack->rc_always_pace = 1;
8857                 else
8858                         rack->rc_always_pace = 0;
8859                 break;
8860         case TCP_RACK_PACE_REDUCE:
8861                 /* RACK Hptsi reduction factor (divisor) */
8862                 RACK_OPTS_INC(tcp_rack_pace_reduce);
8863                 if (optval)
8864                         /* Must be non-zero */
8865                         rack->rc_pace_reduce = optval;
8866                 else
8867                         error = EINVAL;
8868                 break;
8869         case TCP_RACK_PACE_MAX_SEG:
8870                 /* Max segments in a pace */
8871                 RACK_OPTS_INC(tcp_rack_max_seg);
8872                 rack->rc_pace_max_segs = optval;
8873                 break;
8874         case TCP_RACK_PRR_SENDALOT:
8875                 /* Allow PRR to send more than one seg */
8876                 RACK_OPTS_INC(tcp_rack_prr_sendalot);
8877                 rack->r_ctl.rc_prr_sendalot = optval;
8878                 break;
8879         case TCP_RACK_MIN_TO:
8880                 /* Minimum time between rack t-o's in ms */
8881                 RACK_OPTS_INC(tcp_rack_min_to);
8882                 rack->r_ctl.rc_min_to = optval;
8883                 break;
8884         case TCP_RACK_EARLY_SEG:
8885                 /* Max segments to send in early recovery */
8886                 RACK_OPTS_INC(tcp_rack_early_seg);
8887                 rack->r_ctl.rc_early_recovery_segs = optval;
8888                 break;
8889         case TCP_RACK_REORD_THRESH:
8890                 /* RACK reorder threshold (shift amount) */
8891                 RACK_OPTS_INC(tcp_rack_reord_thresh);
8892                 if ((optval > 0) && (optval < 31))
8893                         rack->r_ctl.rc_reorder_shift = optval;
8894                 else
8895                         error = EINVAL;
8896                 break;
8897         case TCP_RACK_REORD_FADE:
8898                 /* Does reordering fade after ms time */
8899                 RACK_OPTS_INC(tcp_rack_reord_fade);
8900                 rack->r_ctl.rc_reorder_fade = optval;
8901                 break;
8902         case TCP_RACK_TLP_THRESH:
8903                 /* RACK TLP threshold i.e. srtt+(srtt/N) */
8904                 RACK_OPTS_INC(tcp_rack_tlp_thresh);
8905                 if (optval)
8906                         rack->r_ctl.rc_tlp_threshold = optval;
8907                 else
8908                         error = EINVAL;
8909                 break;
8910         case TCP_RACK_PKT_DELAY:
8911                 /* RACK added ms i.e. rack-rtt + reord + N */
8912                 RACK_OPTS_INC(tcp_rack_pkt_delay);
8913                 rack->r_ctl.rc_pkt_delay = optval;
8914                 break;
8915         case TCP_RACK_TLP_INC_VAR:
8916                 /* Does TLP include rtt variance in t-o */
8917                 RACK_OPTS_INC(tcp_rack_tlp_inc_var);
8918                 rack->r_ctl.rc_prr_inc_var = optval;
8919                 break;
8920         case TCP_RACK_IDLE_REDUCE_HIGH:
8921                 RACK_OPTS_INC(tcp_rack_idle_reduce_high);
8922                 if (optval)
8923                         rack->r_idle_reduce_largest = 1;
8924                 else
8925                         rack->r_idle_reduce_largest = 0;
8926                 break;
8927         case TCP_DELACK:
8928                 if (optval == 0)
8929                         tp->t_delayed_ack = 0;
8930                 else
8931                         tp->t_delayed_ack = 1;
8932                 if (tp->t_flags & TF_DELACK) {
8933                         tp->t_flags &= ~TF_DELACK;
8934                         tp->t_flags |= TF_ACKNOW;
8935                         rack_output(tp);
8936                 }
8937                 break;
8938         case TCP_RACK_MIN_PACE:
8939                 RACK_OPTS_INC(tcp_rack_min_pace);
8940                 if (optval > 3)
8941                         rack->r_enforce_min_pace = 3;
8942                 else
8943                         rack->r_enforce_min_pace = optval;
8944                 break;
8945         case TCP_RACK_MIN_PACE_SEG:
8946                 RACK_OPTS_INC(tcp_rack_min_pace_seg);
8947                 if (optval >= 16)
8948                         rack->r_min_pace_seg_thresh = 15;
8949                 else
8950                         rack->r_min_pace_seg_thresh = optval;
8951                 break;
8952         case TCP_BBR_RACK_RTT_USE:
8953                 if ((optval != USE_RTT_HIGH) &&
8954                     (optval != USE_RTT_LOW) &&
8955                     (optval != USE_RTT_AVG))
8956                         error = EINVAL;
8957                 else
8958                         rack->r_ctl.rc_rate_sample_method = optval;
8959                 break;
8960         case TCP_DATA_AFTER_CLOSE:
8961                 if (optval)
8962                         rack->rc_allow_data_af_clo = 1;
8963                 else
8964                         rack->rc_allow_data_af_clo = 0;
8965                 break;
8966         default:
8967                 return (tcp_default_ctloutput(so, sopt, inp, tp));
8968                 break;
8969         }
8970 #ifdef NETFLIX_STATS
8971         tcp_log_socket_option(tp, sopt->sopt_name, optval, error);
8972 #endif
8973         INP_WUNLOCK(inp);
8974         return (error);
8975 }
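
/*
 * Illustrative sketch, not part of the stack: once a connection is using
 * this stack, a userspace application could tune any of the integer/boolean
 * options handled above with an ordinary setsockopt() call at the
 * IPPROTO_TCP level (userland would need <sys/socket.h>, <netinet/tcp.h>
 * and <err.h>).  The descriptor "s" and the chosen value are hypothetical:
 *
 *      int thresh = 2;
 *
 *      if (setsockopt(s, IPPROTO_TCP, TCP_RACK_TLP_THRESH,
 *          &thresh, sizeof(thresh)) == -1)
 *              warn("setsockopt(TCP_RACK_TLP_THRESH)");
 *
 * The value reaches rack_set_sockopt() through sooptcopyin() after the
 * inpcb lock has been dropped, which is why the connection is revalidated
 * before the option is applied.
 */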
8976
8977 static int
8978 rack_get_sockopt(struct socket *so, struct sockopt *sopt,
8979     struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack)
8980 {
8981         int32_t error, optval;
8982
8983         /*
8984          * Because all our options are either boolean or an int, we can just
8985          * pull everything into optval and then unlock and copy. If we ever
8986          * add an option that is not an int, then this will have quite an
8987          * impact on this routine.
8988          */
8989         switch (sopt->sopt_name) {
8990         case TCP_RACK_PROP_RATE:
8991                 optval = rack->r_ctl.rc_prop_rate;
8992                 break;
8993         case TCP_RACK_PROP:
8994                 /* RACK proportional rate reduction (bool) */
8995                 optval = rack->r_ctl.rc_prop_reduce;
8996                 break;
8997         case TCP_RACK_TLP_REDUCE:
8998                 /* RACK TLP cwnd reduction (bool) */
8999                 optval = rack->r_ctl.rc_tlp_cwnd_reduce;
9000                 break;
9001         case TCP_RACK_EARLY_RECOV:
9002                 /* Should recovery happen early (bool) */
9003                 optval = rack->r_ctl.rc_early_recovery;
9004                 break;
9005         case TCP_RACK_PACE_REDUCE:
9006                 /* RACK Hptsi reduction factor (divisor) */
9007                 optval = rack->rc_pace_reduce;
9008                 break;
9009         case TCP_RACK_PACE_MAX_SEG:
9010                 /* Max segments in a pace */
9011                 optval = rack->rc_pace_max_segs;
9012                 break;
9013         case TCP_RACK_PACE_ALWAYS:
9014                 /* Use the always pace method */
9015                 optval = rack->rc_always_pace;
9016                 break;
9017         case TCP_RACK_PRR_SENDALOT:
9018                 /* Allow PRR to send more than one seg */
9019                 optval = rack->r_ctl.rc_prr_sendalot;
9020                 break;
9021         case TCP_RACK_MIN_TO:
9022                 /* Minimum time between rack t-o's in ms */
9023                 optval = rack->r_ctl.rc_min_to;
9024                 break;
9025         case TCP_RACK_EARLY_SEG:
9026         /* Max segments to send in early recovery */
9027                 optval = rack->r_ctl.rc_early_recovery_segs;
9028                 break;
9029         case TCP_RACK_REORD_THRESH:
9030                 /* RACK reorder threshold (shift amount) */
9031                 optval = rack->r_ctl.rc_reorder_shift;
9032                 break;
9033         case TCP_RACK_REORD_FADE:
9034                 /* Does reordering fade after ms time */
9035                 optval = rack->r_ctl.rc_reorder_fade;
9036                 break;
9037         case TCP_RACK_TLP_THRESH:
9038         /* RACK TLP threshold i.e. srtt+(srtt/N) */
9039                 optval = rack->r_ctl.rc_tlp_threshold;
9040                 break;
9041         case TCP_RACK_PKT_DELAY:
9042                 /* RACK added ms i.e. rack-rtt + reord + N */
9043                 optval = rack->r_ctl.rc_pkt_delay;
9044                 break;
9045         case TCP_RACK_TLP_USE:
9046                 optval = rack->rack_tlp_threshold_use;
9047                 break;
9048         case TCP_RACK_TLP_INC_VAR:
9049                 /* Does TLP include rtt variance in t-o */
9050                 optval = rack->r_ctl.rc_prr_inc_var;
9051                 break;
9052         case TCP_RACK_IDLE_REDUCE_HIGH:
9053                 optval = rack->r_idle_reduce_largest;
9054                 break;
9055         case TCP_RACK_MIN_PACE:
9056                 optval = rack->r_enforce_min_pace;
9057                 break;
9058         case TCP_RACK_MIN_PACE_SEG:
9059                 optval = rack->r_min_pace_seg_thresh;
9060                 break;
9061         case TCP_BBR_RACK_RTT_USE:
9062                 optval = rack->r_ctl.rc_rate_sample_method;
9063                 break;
9064         case TCP_DELACK:
9065                 optval = tp->t_delayed_ack;
9066                 break;
9067         case TCP_DATA_AFTER_CLOSE:
9068                 optval = rack->rc_allow_data_af_clo;
9069                 break;
9070         default:
9071                 return (tcp_default_ctloutput(so, sopt, inp, tp));
9072                 break;
9073         }
9074         INP_WUNLOCK(inp);
9075         error = sooptcopyout(sopt, &optval, sizeof optval);
9076         return (error);
9077 }
9078
9079 static int
9080 rack_ctloutput(struct socket *so, struct sockopt *sopt, struct inpcb *inp, struct tcpcb *tp)
9081 {
9082         int32_t error = EINVAL;
9083         struct tcp_rack *rack;
9084
9085         rack = (struct tcp_rack *)tp->t_fb_ptr;
9086         if (rack == NULL) {
9087                 /* Huh? */
9088                 goto out;
9089         }
9090         if (sopt->sopt_dir == SOPT_SET) {
9091                 return (rack_set_sockopt(so, sopt, inp, tp, rack));
9092         } else if (sopt->sopt_dir == SOPT_GET) {
9093                 return (rack_get_sockopt(so, sopt, inp, tp, rack));
9094         }
9095 out:
9096         INP_WUNLOCK(inp);
9097         return (error);
9098 }
9099
9100
9101 struct tcp_function_block __tcp_rack = {
9102         .tfb_tcp_block_name = __XSTRING(STACKNAME),
9103         .tfb_tcp_output = rack_output,
9104         .tfb_tcp_do_segment = rack_do_segment,
9105         .tfb_tcp_hpts_do_segment = rack_hpts_do_segment,
9106         .tfb_tcp_ctloutput = rack_ctloutput,
9107         .tfb_tcp_fb_init = rack_init,
9108         .tfb_tcp_fb_fini = rack_fini,
9109         .tfb_tcp_timer_stop_all = rack_stopall,
9110         .tfb_tcp_timer_activate = rack_timer_activate,
9111         .tfb_tcp_timer_active = rack_timer_active,
9112         .tfb_tcp_timer_stop = rack_timer_stop,
9113         .tfb_tcp_rexmit_tmr = rack_remxt_tmr,
9114         .tfb_tcp_handoff_ok = rack_handoff_ok
9115 };
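
/*
 * Illustrative sketch, not part of the stack: the function block above is
 * registered under the names in rack_stack_names below.  Assuming the
 * module is loaded, an application could opt a single socket into this
 * stack by name with the TCP_FUNCTION_BLK socket option; the descriptor
 * "s" is hypothetical and the stack name is assumed to expand to "rack":
 *
 *      struct tcp_function_set tfs;
 *
 *      memset(&tfs, 0, sizeof(tfs));
 *      strlcpy(tfs.function_set_name, "rack",
 *          sizeof(tfs.function_set_name));
 *      if (setsockopt(s, IPPROTO_TCP, TCP_FUNCTION_BLK,
 *          &tfs, sizeof(tfs)) == -1)
 *              warn("setsockopt(TCP_FUNCTION_BLK)");
 */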
9116
9117 static const char *rack_stack_names[] = {
9118         __XSTRING(STACKNAME),
9119 #ifdef STACKALIAS
9120         __XSTRING(STACKALIAS),
9121 #endif
9122 };
9123
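/*
 * UMA zone constructor used for both the sendmap and pcb zones: new items
 * are simply zeroed.
 */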
9124 static int
9125 rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
9126 {
9127         memset(mem, 0, size);
9128         return (0);
9129 }
9130
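/* UMA zone destructor for the sendmap zone; nothing to release per item. */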
9131 static void
9132 rack_dtor(void *mem, int32_t size, void *arg)
9133 {
9134
9135 }
9136
9137 static bool rack_mod_inited = false;
9138
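/*
 * Module event handler: on MOD_LOAD create the UMA zones and the sysctl
 * tree and register the stack name(s); on MOD_QUIESCE/MOD_UNLOAD deregister
 * the stack and, if it was fully initialized, tear the zones and sysctls
 * back down.
 */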
9139 static int
9140 tcp_addrack(module_t mod, int32_t type, void *data)
9141 {
9142         int32_t err = 0;
9143         int num_stacks;
9144
9145         switch (type) {
9146         case MOD_LOAD:
9147                 rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
9148                     sizeof(struct rack_sendmap),
9149                     rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);
9150
9151                 rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
9152                     sizeof(struct tcp_rack),
9153                     rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
9154
9155                 sysctl_ctx_init(&rack_sysctl_ctx);
9156                 rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
9157                     SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
9158                     OID_AUTO,
9159                     __XSTRING(STACKNAME),
9160                     CTLFLAG_RW, 0,
9161                     "");
9162                 if (rack_sysctl_root == NULL) {
9163                         printf("Failed to add sysctl node\n");
9164                         err = EFAULT;
9165                         goto free_uma;
9166                 }
9167                 rack_init_sysctls();
9168                 num_stacks = nitems(rack_stack_names);
9169                 err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
9170                     rack_stack_names, &num_stacks);
9171                 if (err) {
9172                         printf("Failed to register %s stack name for "
9173                             "%s module\n", rack_stack_names[num_stacks],
9174                             __XSTRING(MODNAME));
9175                         sysctl_ctx_free(&rack_sysctl_ctx);
9176 free_uma:
9177                         uma_zdestroy(rack_zone);
9178                         uma_zdestroy(rack_pcb_zone);
9179                         rack_counter_destroy();
9180                         printf("Failed to register rack module -- err:%d\n", err);
9181                         return (err);
9182                 }
9183                 rack_mod_inited = true;
9184                 break;
9185         case MOD_QUIESCE:
9186                 err = deregister_tcp_functions(&__tcp_rack, true, false);
9187                 break;
9188         case MOD_UNLOAD:
9189                 err = deregister_tcp_functions(&__tcp_rack, false, true);
9190                 if (err == EBUSY)
9191                         break;
9192                 if (rack_mod_inited) {
9193                         uma_zdestroy(rack_zone);
9194                         uma_zdestroy(rack_pcb_zone);
9195                         sysctl_ctx_free(&rack_sysctl_ctx);
9196                         rack_counter_destroy();
9197                         rack_mod_inited = false;
9198                 }
9199                 err = 0;
9200                 break;
9201         default:
9202                 return (EOPNOTSUPP);
9203         }
9204         return (err);
9205 }
9206
9207 static moduledata_t tcp_rack = {
9208         .name = __XSTRING(MODNAME),
9209         .evhand = tcp_addrack,
9210         .priv = 0
9211 };
9212
9213 MODULE_VERSION(MODNAME, 1);
9214 DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
9215 MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);
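
/*
 * Illustrative note, not part of the stack: with the module declarations
 * above, the stack is built as a loadable kernel module.  On a FreeBSD
 * system with a matching kernel it would typically be loaded and made the
 * system-wide default roughly as follows (exact module and stack names
 * depend on MODNAME/STACKNAME):
 *
 *      kldload tcp_rack
 *      sysctl net.inet.tcp.functions_default=rack
 *
 * Individual sockets can still be switched per connection with the
 * TCP_FUNCTION_BLK option as sketched near the function block definition
 * above.
 */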