 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Author: Randall Stewart <rrs@netflix.com>
 * This work is based on the ACM Queue paper
 * "BBR: Congestion-Based Congestion Control"
 * and also numerous discussions with Neal, Yuchung and Van.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
#include "opt_ratelimit.h"
#include "opt_kern_tls.h"
#include <sys/param.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/hhook.h>
#include <sys/malloc.h>
#include <sys/qmath.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/stats.h>	/* Must come after qmath.h and tree.h */
#include <sys/refcount.h>
#include <sys/queue.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/tim_filter.h>
#include <sys/kern_prefetch.h>

#include <net/route.h>
#include <net/ethernet.h>

#define TCPSTATES	/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_hpts.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_log_buf.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif				/* TCPDEBUG */
#include <netinet/tcp_offload.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcp_fastopen.h>
#include <netipsec/ipsec_support.h>
#include <net/if_var.h>
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>
#include <security/mac/mac_framework.h>

#include "rack_bbr_common.h"
/*
 * Common TCP Functions - These are shared by both
 * rack and BBR.
 */
#ifdef KERN_TLS
uint32_t
ctf_get_opt_tls_size(struct socket *so, uint32_t rwnd)
{
	struct ktls_session *tls;
	uint32_t len;

again:
	tls = so->so_snd.sb_tls_info;
	len = tls->params.max_frame_len;	/* max tls payload */
	len += tls->params.tls_hlen;		/* tls header len  */
	len += tls->params.tls_tlen;		/* tls trailer len */
	if ((len * 4) > rwnd) {
		/*
		 * The TLS record consumes more than a quarter of the
		 * receive window. Stroke a "this will suck" counter
		 * here? From the TCP perspective it is not clear what
		 * else should be done...
		 */
		if (tls->params.max_frame_len > 4096) {
			tls->params.max_frame_len -= 4096;
			if (tls->params.max_frame_len < 4096)
				tls->params.max_frame_len = 4096;
			goto again;
		}
	}
	return (len);
}
#endif
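/*
 * Illustrative arithmetic (not from the code above): with the common
 * 16KB TLS records, max_frame_len = 16384 plus a few dozen bytes of
 * header and trailer puts len a little over 16KB. Four such records
 * come to roughly 65KB, so with a 32KB receive window the check above
 * fires and max_frame_len steps down 4096 bytes per pass, never going
 * below 4096.
 */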
/*
 * The function ctf_process_inbound_raw() is used by
 * transport developers to do the steps needed to
 * support MBUF Queuing, i.e. the flags in
 * inp_flags2:
 *
 * - INP_SUPPORTS_MBUFQ
 * - INP_MBUF_QUEUE_READY
 * - INP_DONT_SACK_QUEUE
 *
 * These flags help control how LRO will deliver
 * packets to the transport. You first set in inp_flags2
 * the INP_SUPPORTS_MBUFQ to tell the LRO code that you
 * will gladly take a queue of packets instead of a compressed
 * single packet. You also set in your t_fb pointer the
 * tfb_do_queued_segments to point to ctf_process_inbound_raw.
 *
 * This then gets you lists of inbound ACKs/data instead
 * of a condensed compressed ACK/DATA packet. Why would you
 * want that? This will get you access to all the arrival
 * times, from at least LRO and possibly from the hardware (if
 * the interface card supports that), of the actual ACK/DATA.
 * In some transport designs this is important since knowing
 * the actual time we got the packet is useful information.
 *
 * Now there are some interesting caveats that the transport
 * designer needs to take into account when using this feature.
 *
 * 1) It is used with HPTS and pacing; when the pacing timer
 *    for output fires, it will first process the queued input.
 * 2) When you set INP_MBUF_QUEUE_READY this tells LRO to
 *    queue normal packets: I am busy pacing out data and
 *    will process the queued packets before my tfb_tcp_output
 *    call from pacing. If a non-normal packet arrives (e.g. a SACK),
 *    you will be awoken immediately.
 * 3) Finally you can add the INP_DONT_SACK_QUEUE to not even
 *    be awoken if a SACK has arrived. You would do this when
 *    you are not only running a pacing-for-output timer
 *    but a Rack timer as well, i.e. you know you are in recovery
 *    and are in the process (via the timers) of dealing with
 *    the loss.
 *
 * Now a critical thing you must be aware of here is that the
 * use of the flags has a far greater scope than just your
 * typical LRO. Why? Well, that's because in the normal compressed
 * LRO case, at the end of a driver interrupt all packets are going
 * to get presented to the transport no matter if there is one
 * or 100. With the MBUF_QUEUE model, this is not true. You will
 * only be awoken to process the queue of packets when:
 * a) The flags discussed above allow it.
 * b) You exceed an ack or data limit (by default the
 *    ack limit is effectively infinite (64k acks) and the data
 *    limit is 64k of new TCP data)
 * c) The push bit has been set by the peer.
 *
 * A sketch of the setup described above follows.
 */
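/*
 * Minimal setup sketch (illustrative only; rack_do_segment_nounlock is
 * a hypothetical handler name, and a real stack may wire this slightly
 * differently):
 *
 *	inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
 *	tp->t_fb->tfb_do_queued_segments = ctf_do_queued_segments;
 *	tp->t_fb->tfb_do_segment_nounlock = rack_do_segment_nounlock;
 *
 * While a pacing timer is armed the stack can additionally set
 * INP_MBUF_QUEUE_READY (and INP_DONT_SACK_QUEUE if it is also running
 * a Rack timer) to defer wakeups as described above.
 */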
int
ctf_process_inbound_raw(struct tcpcb *tp, struct socket *so, struct mbuf *m, int has_pkt)
{
	/*
	 * We are passed a raw chain of mbuf packets
	 * that arrived in LRO. They are linked via
	 * the m_nextpkt link in the pkt-headers.
	 *
	 * We process each one by:
	 * a) saving off the next
	 * b) stripping off the ether-header
	 * c) formulating the arguments for
	 *    the tfb_tcp_hpts_do_segment
	 * d) passing each mbuf to tfb_tcp_hpts_do_segment
	 *    after adjusting the time to match the arrival time.
	 * Note that the LRO code assures no IP options are present.
	 *
	 * The semantics for calling tfb_tcp_hpts_do_segment are the
	 * following:
	 * 1) It returns 0 if all went well and you (the caller) need
	 *    to release the lock.
	 * 2) If nxt_pkt is set, then the function will suppress calls
	 *    to tfb_tcp_output() since you are promising to call again
	 *    with another packet.
	 * 3) If it returns 1, then you must free all the packets being
	 *    shipped in; the tcb has been destroyed (or is about to be destroyed).
	 */
	struct ether_header *eh;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;	/* Keep compiler happy. */
#endif
#ifdef INET
	struct ip *ip = NULL;		/* Keep compiler happy. */
#endif
	struct mbuf *m_save;
	struct tcphdr *th;
	struct timeval tv;
	struct ifnet *ifp;
	int32_t retval, nxt_pkt, tlen, off;
	uint16_t etype;
	uint16_t drop_hdrlen;
	uint8_t iptos, no_vn=0, bpf_req=0;

	if (m && m->m_pkthdr.rcvif)
		ifp = m->m_pkthdr.rcvif;
	else
		ifp = NULL;
	if (ifp) {
		bpf_req = bpf_peers_present(ifp->if_bpf);
	} else {
		/*
		 * We should probably KASSERT here rather than
		 * work around it, since LRO always sets rcvif.
		 */
		no_vn = 1;
		goto skip_vnet;
	}
	CURVNET_SET(ifp->if_vnet);
skip_vnet:
	while (m) {
		m_save = m->m_nextpkt;
		m->m_nextpkt = NULL;
		/* Now lets get the ether header */
		eh = mtod(m, struct ether_header *);
		etype = ntohs(eh->ether_type);
		/* Let the BPF see the packet */
		if (bpf_req && ifp)
			ETHER_BPF_MTAP(ifp, m);
		/* Trim off the ethernet header */
		m_adj(m, sizeof(*eh));
		switch (etype) {
#ifdef INET6
		case ETHERTYPE_IPV6:
		{
			if (m->m_len < (sizeof(*ip6) + sizeof(*th))) {
				m = m_pullup(m, sizeof(*ip6) + sizeof(*th));
				if (m == NULL) {
					TCPSTAT_INC(tcps_rcvshort);
					goto skipped_pkt;
				}
			}
			ip6 = (struct ip6_hdr *)(eh + 1);
			th = (struct tcphdr *)(ip6 + 1);
			tlen = ntohs(ip6->ip6_plen);
			drop_hdrlen = sizeof(*ip6);
			if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) {
				if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
					th->th_sum = m->m_pkthdr.csum_data;
				else
					th->th_sum = in6_cksum_pseudo(ip6, tlen,
					    IPPROTO_TCP, m->m_pkthdr.csum_data);
				th->th_sum ^= 0xffff;
			} else
				th->th_sum = in6_cksum(m, IPPROTO_TCP, drop_hdrlen, tlen);
			if (th->th_sum) {
				TCPSTAT_INC(tcps_rcvbadsum);
				m_freem(m);
				goto skipped_pkt;
			}
			/*
			 * Be proactive about unspecified IPv6 address in source.
			 * As we use all-zero to indicate unbounded/unconnected pcb,
			 * unspecified IPv6 address can be used to confuse us.
			 *
			 * Note that packets with unspecified IPv6 destination are
			 * already dropped in ip6_input.
			 */
			if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
				m_freem(m);
				goto skipped_pkt;
			}
			iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
			break;
		}
#endif
#ifdef INET
		case ETHERTYPE_IP:
		{
			if (m->m_len < sizeof (struct tcpiphdr)) {
				if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
				    == NULL) {
					TCPSTAT_INC(tcps_rcvshort);
					goto skipped_pkt;
				}
			}
			ip = (struct ip *)(eh + 1);
			th = (struct tcphdr *)(ip + 1);
			drop_hdrlen = sizeof(*ip);
			iptos = ip->ip_tos;
			tlen = ntohs(ip->ip_len) - sizeof(struct ip);
			if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
				if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
					th->th_sum = m->m_pkthdr.csum_data;
				else
					th->th_sum = in_pseudo(ip->ip_src.s_addr,
					    ip->ip_dst.s_addr,
					    htonl(m->m_pkthdr.csum_data + tlen +
					    IPPROTO_TCP));
				th->th_sum ^= 0xffff;
			} else {
				int len;
				struct ipovly *ipov = (struct ipovly *)ip;

				/*
				 * Checksum extended TCP header and data.
				 */
				len = drop_hdrlen + tlen;
				bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
				ipov->ih_len = htons(tlen);
				th->th_sum = in_cksum(m, len);
				/* Reset length for SDT probes. */
				ip->ip_len = htons(len);
				/* Re-initialization for later version check */
				ip->ip_v = IPVERSION;
				ip->ip_hl = sizeof(*ip) >> 2;
			}
			if (th->th_sum) {
				TCPSTAT_INC(tcps_rcvbadsum);
				m_freem(m);
				goto skipped_pkt;
			}
			break;
		}
#endif
		}
		/*
		 * Convert TCP protocol specific fields to host format.
		 */
		tcp_fields_to_host(th);

		off = th->th_off << 2;
		if (off < sizeof (struct tcphdr) || off > tlen) {
			TCPSTAT_INC(tcps_rcvbadoff);
			m_freem(m);
			goto skipped_pkt;
		}
		tlen -= off;
		drop_hdrlen += off;
		/*
		 * Now lets setup the timeval to be when we should
		 * have been called (if we can).
		 */
		m->m_pkthdr.lro_nsegs = 1;
		if (m->m_flags & M_TSTMP_LRO) {
			tv.tv_sec = m->m_pkthdr.rcv_tstmp / 1000000000;
			tv.tv_usec = (m->m_pkthdr.rcv_tstmp % 1000000000) / 1000;
		} else {
			/* Should not happen -- should we KASSERT instead? */
			tcp_get_usecs(&tv);
		}
		/* Now what about next packet? */
		if (m_save || has_pkt)
			nxt_pkt = 1;
		else
			nxt_pkt = 0;
		retval = (*tp->t_fb->tfb_do_segment_nounlock)(m, th, so, tp, drop_hdrlen, tlen,
		    iptos, nxt_pkt, &tv);
		if (retval) {
			/* We lost the lock and tcb probably */
			m = m_save;
			while (m) {
				m_save = m->m_nextpkt;
				m->m_nextpkt = NULL;
				m_freem(m);
				m = m_save;
			}
			if (no_vn == 0)
				CURVNET_RESTORE();
			return (retval);
		}
skipped_pkt:
		/* Now lets get the next packet */
		m = m_save;
	}
	if (no_vn == 0)
		CURVNET_RESTORE();
	return (retval);
}
int
ctf_do_queued_segments(struct socket *so, struct tcpcb *tp, int have_pkt)
{
	struct mbuf *m;

	/* First lets see if we have old packets */
	if (tp->t_in_pkt) {
		m = tp->t_in_pkt;
		tp->t_in_pkt = NULL;
		tp->t_tail_pkt = NULL;
		if (ctf_process_inbound_raw(tp, so, m, have_pkt)) {
			/* We lost the tcpcb (maybe a RST came in)? */
			return (1);
		}
	}
	return (0);
}
uint32_t
ctf_outstanding(struct tcpcb *tp)
{
	return (tp->snd_max - tp->snd_una);
}
uint32_t
ctf_flight_size(struct tcpcb *tp, uint32_t rc_sacked)
{
	if (rc_sacked <= ctf_outstanding(tp))
		return (ctf_outstanding(tp) - rc_sacked);
	else {
		/* This should not happen */
#ifdef INVARIANTS
		panic("tp:%p rc_sacked:%d > out:%d",
		    tp, rc_sacked, ctf_outstanding(tp));
#endif
		return (0);
	}
}
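/*
 * Worked example (illustrative): with snd_una = 1000 and snd_max = 6000,
 * ctf_outstanding() is 5000 bytes; if 2000 of those bytes have been
 * SACKed, ctf_flight_size() reports 3000 bytes still in flight.
 */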
void
ctf_do_dropwithreset(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th,
    int32_t rstreason, int32_t tlen)
{
	if (tp != NULL) {
		tcp_dropwithreset(m, th, tp, tlen, rstreason);
		INP_WUNLOCK(tp->t_inpcb);
	} else
		tcp_dropwithreset(m, th, NULL, tlen, rstreason);
}
/*
 * ctf_drop_checks returns 1 when you should not proceed. It places
 * in ret_val what should be returned 1/0 by the caller. The 1 indicates
 * that the TCB is unlocked and probably dropped. The 0 indicates the
 * TCB is still valid and locked.
 */
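/*
 * Typical caller pattern (an illustrative sketch of the contract
 * described above, not code from this file):
 *
 *	if (ctf_drop_checks(&to, m, th, tp, &tlen, &thflags,
 *	    &drop_hdrlen, &ret_val))
 *		return (ret_val);
 */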
int
ctf_drop_checks(struct tcpopt *to, struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, int32_t * tlenp, int32_t * thf, int32_t * drop_hdrlen, int32_t * ret_val)
{
	int32_t todrop;
	int32_t thflags;
	int32_t tlen;

	thflags = *thf;
	tlen = *tlenp;
	todrop = tp->rcv_nxt - th->th_seq;
	if (todrop > 0) {
		if (thflags & TH_SYN) {
			thflags &= ~TH_SYN;
			th->th_seq++;
			if (th->th_urp > 1)
				th->th_urp--;
			else
				thflags &= ~TH_URG;
			todrop--;
		}
		/*
		 * Following if statement from Stevens, vol. 2, p. 960.
		 */
		if (todrop > tlen
		    || (todrop == tlen && (thflags & TH_FIN) == 0)) {
			/*
			 * Any valid FIN must be to the left of the window.
			 * At this point the FIN must be a duplicate or out
			 * of sequence; drop it.
			 */
			thflags &= ~TH_FIN;
			/*
			 * Send an ACK to resynchronize and drop any data.
			 * But keep on processing for RST or ACK.
			 */
			tp->t_flags |= TF_ACKNOW;
			todrop = tlen;
			TCPSTAT_INC(tcps_rcvduppack);
			TCPSTAT_ADD(tcps_rcvdupbyte, todrop);
		} else {
			TCPSTAT_INC(tcps_rcvpartduppack);
			TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop);
		}
		/*
		 * DSACK - add SACK block for dropped range
		 */
		if ((todrop > 0) && (tp->t_flags & TF_SACK_PERMIT)) {
			tcp_update_sack_list(tp, th->th_seq,
			    th->th_seq + todrop);
			/*
			 * ACK now, as the next in-sequence segment
			 * will clear the DSACK block again
			 */
			tp->t_flags |= TF_ACKNOW;
		}
		*drop_hdrlen += todrop;	/* drop from the top afterwards */
		th->th_seq += todrop;
		tlen -= todrop;
		if (th->th_urp > todrop)
			th->th_urp -= todrop;
		else {
			thflags &= ~TH_URG;
			th->th_urp = 0;
		}
	}
	/*
	 * If segment ends after window, drop trailing data (and PUSH and
	 * FIN); if nothing left, just ACK.
	 */
	todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
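	/*
	 * Example (illustrative): rcv_nxt = 80 and rcv_wnd = 40 put the
	 * right window edge at 120; a segment with th_seq = 100 and
	 * tlen = 50 ends at 150, so todrop is 30 trailing bytes that
	 * fall outside the window.
	 */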
	if (todrop > 0) {
		TCPSTAT_INC(tcps_rcvpackafterwin);
		if (todrop >= tlen) {
			TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen);
			/*
			 * If window is closed can only take segments at
			 * window edge, and have to drop data and PUSH from
			 * incoming segments. Continue processing, but
			 * remember to ack. Otherwise, drop segment and
			 * ack.
			 */
			if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
				tp->t_flags |= TF_ACKNOW;
				TCPSTAT_INC(tcps_rcvwinprobe);
			} else {
				ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val);
				return (1);
			}
		} else
			TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
		m_adj(m, -todrop);
		tlen -= todrop;
		thflags &= ~(TH_PUSH | TH_FIN);
	}
	*thf = thflags;
	*tlenp = tlen;
	return (0);
}
/*
 * The value in ret_val informs the caller
 * if we dropped the tcb (and lock) or not.
 * 1 = we dropped it, 0 = the TCB is still locked
 * and valid.
 */
void
ctf_do_dropafterack(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th, int32_t thflags, int32_t tlen, int32_t * ret_val)
{
	/*
	 * Generate an ACK dropping incoming segment if it occupies sequence
	 * space, where the ACK reflects our state.
	 *
	 * We can now skip the test for the RST flag since all paths to this
	 * code happen after packets containing RST have been dropped.
	 *
	 * In the SYN-RECEIVED state, don't send an ACK unless the segment
	 * we received passes the SYN-RECEIVED ACK test. If it fails send a
	 * RST. This breaks the loop in the "LAND" DoS attack, and also
	 * prevents an ACK storm between two listening ports that have been
	 * sent forged SYN segments, each with the source address of the
	 * other.
	 */
	if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
	    (SEQ_GT(tp->snd_una, th->th_ack) ||
	    SEQ_GT(th->th_ack, tp->snd_max))) {
		*ret_val = 1;
		ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
		return;
	} else
		*ret_val = 0;
	tp->t_flags |= TF_ACKNOW;
	if (m)
		m_freem(m);
}
void
ctf_do_drop(struct mbuf *m, struct tcpcb *tp)
{
	/*
	 * Drop space held by incoming segment and return.
	 */
	if (tp != NULL)
		INP_WUNLOCK(tp->t_inpcb);
	if (m)
		m_freem(m);
}
int
ctf_process_rst(struct mbuf *m, struct tcphdr *th, struct socket *so, struct tcpcb *tp)
{
	/*
	 * RFC5961 Section 3.2
	 *
	 * - RST drops connection only if SEG.SEQ == RCV.NXT. - If RST is in
	 * window, we send challenge ACK.
	 *
	 * Note: to take into account delayed ACKs, we should test against
	 * last_ack_sent instead of rcv_nxt. Note 2: we handle special case
	 * of closed window, not covered by the RFC.
	 */
	int dropped = 0;

	if ((SEQ_GEQ(th->th_seq, (tp->last_ack_sent - 1)) &&
	    SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) ||
	    (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq)) {
		KASSERT(tp->t_state != TCPS_SYN_SENT,
		    ("%s: TH_RST for TCPS_SYN_SENT th %p tp %p",
		    __func__, th, tp));

		if (V_tcp_insecure_rst ||
		    (tp->last_ack_sent == th->th_seq) ||
		    (tp->rcv_nxt == th->th_seq) ||
		    ((tp->last_ack_sent - 1) == th->th_seq)) {
			TCPSTAT_INC(tcps_drops);
			/* Drop the connection. */
			switch (tp->t_state) {
			case TCPS_SYN_RECEIVED:
				so->so_error = ECONNREFUSED;
				goto close;
			case TCPS_ESTABLISHED:
			case TCPS_FIN_WAIT_1:
			case TCPS_FIN_WAIT_2:
			case TCPS_CLOSE_WAIT:
			case TCPS_CLOSING:
			case TCPS_LAST_ACK:
				so->so_error = ECONNRESET;
		close:
				tcp_state_change(tp, TCPS_CLOSED);
				/* FALLTHROUGH */
			default:
				tp = tcp_close(tp);
			}
			dropped = 1;
			ctf_do_drop(m, tp);
		} else {
			TCPSTAT_INC(tcps_badrst);
			/* Send challenge ACK. */
			tcp_respond(tp, mtod(m, void *), th, m,
			    tp->rcv_nxt, tp->snd_nxt, TH_ACK);
			tp->last_ack_sent = tp->rcv_nxt;
		}
	} else {
		m_freem(m);
	}
	return (dropped);
}
/*
 * The value in ret_val informs the caller
 * if we dropped the tcb (and lock) or not.
 * 1 = we dropped it, 0 = the TCB is still locked
 * and valid.
 */
void
ctf_challenge_ack(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, int32_t * ret_val)
{
	TCPSTAT_INC(tcps_badsyn);
	if (V_tcp_insecure_syn &&
	    SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
	    SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
		tp = tcp_drop(tp, ECONNRESET);
		*ret_val = 1;
		ctf_do_drop(m, tp);
	} else {
		/* Send challenge ACK. */
		tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt,
		    tp->snd_nxt, TH_ACK);
		tp->last_ack_sent = tp->rcv_nxt;
		m = NULL;	/* tcp_respond() consumed the mbuf */
		*ret_val = 0;
		ctf_do_drop(m, NULL);
	}
}
/*
 * ctf_ts_check returns 1 when you should not proceed and the state
 * machine should return. It places in ret_val what should
 * be returned 1/0 by the caller (hpts_do_segment). The 1 indicates
 * that the TCB is unlocked and probably dropped. The 0 indicates the
 * TCB is still valid and locked.
 */
int
ctf_ts_check(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
    int32_t tlen, int32_t thflags, int32_t * ret_val)
{
	if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) {
		/*
		 * Invalidate ts_recent. If this segment updates ts_recent,
		 * the age will be reset later and ts_recent will get a
		 * valid value. If it does not, setting ts_recent to zero
		 * will at least satisfy the requirement that zero be placed
		 * in the timestamp echo reply when ts_recent isn't valid.
		 * The age isn't reset until we get a valid ts_recent
		 * because we don't want out-of-order segments to be dropped
		 * when ts_recent is old.
		 */
		tp->ts_recent = 0;
	} else {
		TCPSTAT_INC(tcps_rcvduppack);
		TCPSTAT_ADD(tcps_rcvdupbyte, tlen);
		TCPSTAT_INC(tcps_pawsdrop);
		*ret_val = 0;
		if (tlen) {
			ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val);
		} else {
			ctf_do_drop(m, NULL);
		}
		return (1);
	}
	return (0);
}
void
ctf_calc_rwin(struct socket *so, struct tcpcb *tp)
{
	int32_t win;

	/*
	 * Calculate amount of space in receive window, and then do TCP
	 * input processing. Receive window is amount of space in rcv queue,
	 * but not less than advertised window.
	 */
	win = sbspace(&so->so_rcv);
	if (win < 0)
		win = 0;
	tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
}
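/*
 * Example (illustrative): if sbspace() reports 8192 bytes free but we
 * have already advertised up to rcv_adv = rcv_nxt + 12000, the imax()
 * above keeps rcv_wnd at 12000 so the offered window never shrinks.
 */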
void
ctf_do_dropwithreset_conn(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th,
    int32_t rstreason, int32_t tlen)
{
	if (tp->t_inpcb) {
		tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
	}
	tcp_dropwithreset(m, th, tp, tlen, rstreason);
	INP_WUNLOCK(tp->t_inpcb);
}
uint32_t
ctf_fixed_maxseg(struct tcpcb *tp)
{
	int optlen;

	if (tp->t_flags & TF_NOOPT)
		return (tp->t_maxseg);

	/*
	 * Here we have a simplified code from tcp_addoptions(),
	 * without a proper loop, and having most of paddings hardcoded.
	 * We only consider fixed options that we would send every
	 * time, i.e. SACK is not considered.
	 */
#define	PAD(len)	((((len) / 4) + !!((len) % 4)) * 4)
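	/*
	 * PAD() rounds an option length up to a multiple of 4, e.g.
	 * PAD(10) = 12 and PAD(12) = 12; TCPOLEN_SACK_PERMITTED is 2,
	 * so it pads out to 4.
	 */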
	if (TCPS_HAVEESTABLISHED(tp->t_state)) {
		if (tp->t_flags & TF_RCVD_TSTMP)
			optlen = TCPOLEN_TSTAMP_APPA;
		else
			optlen = 0;
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
		if (tp->t_flags & TF_SIGNATURE)
			optlen += PAD(TCPOLEN_SIGNATURE);
#endif
	} else {
		if (tp->t_flags & TF_REQ_TSTMP)
			optlen = TCPOLEN_TSTAMP_APPA;
		else
			optlen = PAD(TCPOLEN_MAXSEG);
		if (tp->t_flags & TF_REQ_SCALE)
			optlen += PAD(TCPOLEN_WINDOW);
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
		if (tp->t_flags & TF_SIGNATURE)
			optlen += PAD(TCPOLEN_SIGNATURE);
#endif
		if (tp->t_flags & TF_SACK_PERMIT)
			optlen += PAD(TCPOLEN_SACK_PERMITTED);
	}
#undef PAD
	optlen = min(optlen, TCP_MAXOLEN);
	return (tp->t_maxseg - optlen);
}
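/*
 * Worked example (illustrative): an established connection that
 * negotiated timestamps has optlen = TCPOLEN_TSTAMP_APPA (12), so with
 * t_maxseg = 1460 this returns 1448 usable payload bytes per segment.
 */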
void
ctf_log_sack_filter(struct tcpcb *tp, int num_sack_blks, struct sackblk *sack_blocks)
{
	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		memset(&log, 0, sizeof(log));
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.flex8 = num_sack_blks;
		if (num_sack_blks > 0) {
			log.u_bbr.flex1 = sack_blocks[0].start;
			log.u_bbr.flex2 = sack_blocks[0].end;
		}
		if (num_sack_blks > 1) {
			log.u_bbr.flex3 = sack_blocks[1].start;
			log.u_bbr.flex4 = sack_blocks[1].end;
		}
		if (num_sack_blks > 2) {
			log.u_bbr.flex5 = sack_blocks[2].start;
			log.u_bbr.flex6 = sack_blocks[2].end;
		}
		if (num_sack_blks > 3) {
			log.u_bbr.applimited = sack_blocks[3].start;
			log.u_bbr.pkts_out = sack_blocks[3].end;
		}
		TCP_LOG_EVENTP(tp, NULL,
		    &tp->t_inpcb->inp_socket->so_rcv,
		    &tp->t_inpcb->inp_socket->so_snd,
		    TCP_SACK_FILTER_RES, 0,
		    0, &log, false, &tv);
	}
}
uint32_t
ctf_decay_count(uint32_t count, uint32_t decay)
{
	/*
	 * Given a count, decay it by a set percentage. The
	 * percentage is in thousands, i.e. 100% = 1000,
	 * 19.3% = 193.
	 */
	uint64_t perc_count, decay_per;
	uint32_t decayed_count;

	if (decay > 1000) {
		/* We don't raise it */
		return (count);
	}
	perc_count = count;
	decay_per = decay;
	perc_count *= decay_per;
	perc_count /= 1000;
	/*
	 * So now perc_count holds the
	 * count decay value.
	 */
	decayed_count = count - (uint32_t)perc_count;
	return (decayed_count);
}
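/*
 * Worked example (illustrative): ctf_decay_count(1000, 250) computes
 * perc_count = 1000 * 250 / 1000 = 250 and returns 750, i.e. the
 * count decayed by 25%.
 */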