/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"
#include "opt_tcpdebug.h"

/* For debugging we want counters and BB logging */
/* #define TCP_REASS_COUNTERS 1 */
/* #define TCP_REASS_LOGGING 1 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <vm/uma.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef TCP_REASS_LOGGING
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_hpts.h>
#endif
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif

#define TCP_R_LOG_ADD		1
#define TCP_R_LOG_LIMIT_REACHED	2
#define TCP_R_LOG_APPEND	3
#define TCP_R_LOG_PREPEND	4
#define TCP_R_LOG_REPLACE	5
#define TCP_R_LOG_MERGE_INTO	6
#define TCP_R_LOG_NEW_ENTRY	7
#define TCP_R_LOG_READ		8
#define TCP_R_LOG_ZERO		9
#define TCP_R_LOG_DUMP		10
#define TCP_R_LOG_TRIM		11

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP Segment Reassembly Queue");

static SYSCTL_NODE(_net_inet_tcp_reass, OID_AUTO, stats,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP Segment Reassembly stats");

static int tcp_reass_maxseg = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RDTUN,
    &tcp_reass_maxseg, 0,
    "Global maximum number of TCP Segments in Reassembly Queue");

static uma_zone_t tcp_reass_zone;
SYSCTL_UMA_CUR(_net_inet_tcp_reass, OID_AUTO, cursegments, 0,
    &tcp_reass_zone,
    "Global number of TCP Segments currently in Reassembly Queue");

static u_int tcp_reass_maxqueuelen = 100;
SYSCTL_UINT(_net_inet_tcp_reass, OID_AUTO, maxqueuelen, CTLFLAG_RWTUN,
    &tcp_reass_maxqueuelen, 0,
    "Maximum number of TCP Segments per Reassembly Queue");

static int tcp_new_limits = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, new_limit, CTLFLAG_RWTUN,
    &tcp_new_limits, 0,
    "Do we use the new limit method we are discussing?");

static u_int tcp_reass_queue_guard = 16;
SYSCTL_UINT(_net_inet_tcp_reass, OID_AUTO, queueguard, CTLFLAG_RWTUN,
    &tcp_reass_queue_guard, 16,
    "Number of TCP Segments in Reassembly Queue where we flip over to guard mode");

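/*
 * Note: tcp_reass_queue_guard is only consulted when the experimental
 * net.inet.tcp.reass.new_limit method is enabled; once a connection's
 * queue grows past the guard, small (sub-MSIZE) out-of-order segments
 * that cannot be merged into an existing entry are dropped (see
 * tcp_reass() below).
 */
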
#ifdef TCP_REASS_COUNTERS

counter_u64_t reass_entry;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, entry, CTLFLAG_RD,
    &reass_entry, "A segment entered reassembly");

counter_u64_t reass_path1;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path1, CTLFLAG_RD,
    &reass_path1, "Took path 1");

counter_u64_t reass_path2;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path2, CTLFLAG_RD,
    &reass_path2, "Took path 2");

counter_u64_t reass_path3;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path3, CTLFLAG_RD,
    &reass_path3, "Took path 3");

counter_u64_t reass_path4;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path4, CTLFLAG_RD,
    &reass_path4, "Took path 4");

counter_u64_t reass_path5;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path5, CTLFLAG_RD,
    &reass_path5, "Took path 5");

counter_u64_t reass_path6;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path6, CTLFLAG_RD,
    &reass_path6, "Took path 6");

counter_u64_t reass_path7;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path7, CTLFLAG_RD,
    &reass_path7, "Took path 7");

counter_u64_t reass_fullwalk;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, fullwalk, CTLFLAG_RD,
    &reass_fullwalk, "Took a full walk");

counter_u64_t reass_nospace;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, nospace, CTLFLAG_RD,
    &reass_nospace, "Had no mbuf capacity");

counter_u64_t merge_fwd;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, merge_fwd, CTLFLAG_RD,
    &merge_fwd, "Ran merge fwd");

counter_u64_t merge_into;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, merge_into, CTLFLAG_RD,
    &merge_into, "Ran merge into");

counter_u64_t tcp_zero_input;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, zero_input, CTLFLAG_RD,
    &tcp_zero_input, "The reassembly buffer saw a zero len segment etc");

#endif

/* Initialize TCP reassembly queue */
static void
tcp_reass_zone_change(void *tag)
{

    /* Set the zone limit and read back the effective value. */
    tcp_reass_maxseg = nmbclusters / 16;
    tcp_reass_maxseg = uma_zone_set_max(tcp_reass_zone,
        tcp_reass_maxseg);
}

#ifdef TCP_REASS_LOGGING

static void
tcp_log_reassm(struct tcpcb *tp, struct tseg_qent *q, struct tseg_qent *p,
    tcp_seq seq, int len, uint8_t action, int instance)
{
    uint32_t cts;
    struct timeval tv;

    if (tp->t_logstate != TCP_LOG_STATE_OFF) {
        union tcp_log_stackspecific log;

        memset(&log, 0, sizeof(log));
        cts = tcp_get_usecs(&tv);
        log.u_bbr.flex1 = seq;
        log.u_bbr.cur_del_rate = (uint64_t)q;
        log.u_bbr.delRate = (uint64_t)p;
        if (q != NULL) {
            log.u_bbr.flex2 = q->tqe_start;
            log.u_bbr.flex3 = q->tqe_len;
            log.u_bbr.flex4 = q->tqe_mbuf_cnt;
            log.u_bbr.hptsi_gain = q->tqe_flags;
        }
        if (p != NULL) {
            log.u_bbr.flex5 = p->tqe_start;
            log.u_bbr.pkts_out = p->tqe_len;
            log.u_bbr.epoch = p->tqe_mbuf_cnt;
            log.u_bbr.cwnd_gain = p->tqe_flags;
        }
        log.u_bbr.flex6 = tp->t_segqmbuflen;
        log.u_bbr.flex7 = instance;
        log.u_bbr.flex8 = action;
        log.u_bbr.timeStamp = cts;
        TCP_LOG_EVENTP(tp, NULL,
            &tp->t_inpcb->inp_socket->so_rcv,
            &tp->t_inpcb->inp_socket->so_snd,
            TCP_LOG_REASS, 0,
            len, &log, false, &tv);
    }
}

static void
tcp_reass_log_dump(struct tcpcb *tp)
{
    struct tseg_qent *q;

    if (tp->t_logstate != TCP_LOG_STATE_OFF) {
        TAILQ_FOREACH(q, &tp->t_segq, tqe_q) {
            tcp_log_reassm(tp, q, NULL, q->tqe_start, q->tqe_len, TCP_R_LOG_DUMP, 0);
        }
    }
}

static void
tcp_reass_log_new_in(struct tcpcb *tp, tcp_seq seq, int len, struct mbuf *m,
    int logval, struct tseg_qent *q)
{
    int cnt;
    struct mbuf *t;

    cnt = 0;
    t = m;
    while (t) {
        cnt += t->m_len;
        t = t->m_next;
    }
    tcp_log_reassm(tp, q, NULL, seq, len, logval, cnt);
}

#endif

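/*
 * Global initialization: size the reassembly zone off nmbclusters (the
 * default cap is nmbclusters / 16, overridable via the
 * net.inet.tcp.reass.maxsegments tunable) and re-apply the limit whenever
 * nmbclusters changes at runtime.
 */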
void
tcp_reass_global_init(void)
{

    tcp_reass_maxseg = nmbclusters / 16;
    TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments",
        &tcp_reass_maxseg);
    tcp_reass_zone = uma_zcreate("tcpreass", sizeof (struct tseg_qent),
        NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
    /* Set the zone limit and read back the effective value. */
    tcp_reass_maxseg = uma_zone_set_max(tcp_reass_zone,
        tcp_reass_maxseg);
#ifdef TCP_REASS_COUNTERS
    reass_path1 = counter_u64_alloc(M_WAITOK);
    reass_path2 = counter_u64_alloc(M_WAITOK);
    reass_path3 = counter_u64_alloc(M_WAITOK);
    reass_path4 = counter_u64_alloc(M_WAITOK);
    reass_path5 = counter_u64_alloc(M_WAITOK);
    reass_path6 = counter_u64_alloc(M_WAITOK);
    reass_path7 = counter_u64_alloc(M_WAITOK);
    reass_fullwalk = counter_u64_alloc(M_WAITOK);
    reass_nospace = counter_u64_alloc(M_WAITOK);
    reass_entry = counter_u64_alloc(M_WAITOK);
    merge_fwd = counter_u64_alloc(M_WAITOK);
    merge_into = counter_u64_alloc(M_WAITOK);
    tcp_zero_input = counter_u64_alloc(M_WAITOK);
#endif
    EVENTHANDLER_REGISTER(nmbclusters_change,
        tcp_reass_zone_change, NULL, EVENTHANDLER_PRI_ANY);
}

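/*
 * Drop everything still held for reassembly on this connection: free each
 * queued mbuf chain and its queue entry, and reset the byte and segment
 * accounting back to zero.
 */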
void
tcp_reass_flush(struct tcpcb *tp)
{
    struct tseg_qent *qe;

    INP_WLOCK_ASSERT(tp->t_inpcb);

    while ((qe = TAILQ_FIRST(&tp->t_segq)) != NULL) {
        TAILQ_REMOVE(&tp->t_segq, qe, tqe_q);
        m_freem(qe->tqe_m);
        uma_zfree(tcp_reass_zone, qe);
        tp->t_segqlen--;
    }
    tp->t_segqmbuflen = 0;
    KASSERT((tp->t_segqlen == 0),
        ("TCP reass queue %p segment count is %d instead of 0 after flush.",
        tp, tp->t_segqlen));
}

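/*
 * Append an out-of-order segment to the last entry in the queue: the new
 * mbuf chain is linked after the entry's current tail, the entry's length
 * and overhead accounting grow accordingly, and a FIN on the new segment
 * is carried over onto the entry.
 */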
static void
tcp_reass_append(struct tcpcb *tp, struct tseg_qent *last,
    struct mbuf *m, struct tcphdr *th, int tlen,
    struct mbuf *mlast, int lenofoh)
{

#ifdef TCP_REASS_LOGGING
    tcp_log_reassm(tp, last, NULL, th->th_seq, tlen, TCP_R_LOG_APPEND, 0);
#endif
    last->tqe_len += tlen;
    last->tqe_m->m_pkthdr.len += tlen;
    /* Preserve the FIN bit if it's there */
    last->tqe_flags |= (th->th_flags & TH_FIN);
    last->tqe_last->m_next = m;
    last->tqe_last = mlast;
    last->tqe_mbuf_cnt += lenofoh;

    TCPSTAT_INC(tcps_rcvoopack);
    TCPSTAT_ADD(tcps_rcvoobyte, tlen);
#ifdef TCP_REASS_LOGGING
    tcp_reass_log_new_in(tp, last->tqe_start, lenofoh, last->tqe_m,
        TCP_R_LOG_APPEND, last);
#endif
}

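/*
 * Prepend a segment to an existing entry. If the new data runs into the
 * old, the front of the old entry is trimmed (m_adj with a positive count)
 * so the two dovetail; the new chain then becomes the head of the entry
 * and the entry's start sequence moves back to th_seq.
 */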
static void
tcp_reass_prepend(struct tcpcb *tp, struct tseg_qent *first, struct mbuf *m, struct tcphdr *th,
    int tlen, struct mbuf *mlast, int lenofoh)
{
    int i;

#ifdef TCP_REASS_LOGGING
    tcp_log_reassm(tp, first, NULL, th->th_seq, tlen, TCP_R_LOG_PREPEND, 0);
#endif
    if (SEQ_GT((th->th_seq + tlen), first->tqe_start)) {
        /* The new data overlaps into the old */
        i = (th->th_seq + tlen) - first->tqe_start;
#ifdef TCP_REASS_LOGGING
        tcp_log_reassm(tp, first, NULL, 0, i, TCP_R_LOG_TRIM, 1);
#endif
        m_adj(first->tqe_m, i);
        first->tqe_len -= i;
        first->tqe_start += i;
    }
    /* OK, now set up our chain to point to the old first */
    mlast->m_next = first->tqe_m;
    first->tqe_m = m;
    first->tqe_len += tlen;
    first->tqe_start = th->th_seq;
    first->tqe_m->m_pkthdr.len = first->tqe_len;
    first->tqe_mbuf_cnt += lenofoh;

    TCPSTAT_INC(tcps_rcvoopack);
    TCPSTAT_ADD(tcps_rcvoobyte, tlen);
#ifdef TCP_REASS_LOGGING
    tcp_reass_log_new_in(tp, first->tqe_start, lenofoh, first->tqe_m,
        TCP_R_LOG_PREPEND, first);
#endif
}

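/*
 * Replace the contents of queue entry q with the new segment: the old mbuf
 * chain is freed, the entry takes over the new chain, start, length and FIN
 * flag, and t_segqmbuflen is re-charged with the new overhead.
 */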
static void
tcp_reass_replace(struct tcpcb *tp, struct tseg_qent *q, struct mbuf *m,
    tcp_seq seq, int len, struct mbuf *mlast, int mbufoh, uint8_t flags)
{
    /*
     * Free the data in q, and replace
     * it with the new segment.
     */
    int len_dif;

#ifdef TCP_REASS_LOGGING
    tcp_log_reassm(tp, q, NULL, seq, len, TCP_R_LOG_REPLACE, 0);
#endif
    m_freem(q->tqe_m);
    KASSERT(tp->t_segqmbuflen >= q->tqe_mbuf_cnt,
        ("Tp:%p seg queue goes negative", tp));
    tp->t_segqmbuflen -= q->tqe_mbuf_cnt;
    q->tqe_mbuf_cnt = mbufoh;
    q->tqe_m = m;
    q->tqe_last = mlast;
    q->tqe_start = seq;
    if (len > q->tqe_len)
        len_dif = len - q->tqe_len;
    else
        len_dif = 0;

    TCPSTAT_INC(tcps_rcvoopack);
    TCPSTAT_ADD(tcps_rcvoobyte, len_dif);
    q->tqe_len = len;
    q->tqe_flags = (flags & TH_FIN);
    q->tqe_m->m_pkthdr.len = q->tqe_len;
    tp->t_segqmbuflen += mbufoh;
}

static void
tcp_reass_merge_into(struct tcpcb *tp, struct tseg_qent *ent,
    struct tseg_qent *q)
{
    /*
     * Merge q into ent and free q from the list.
     */
#ifdef TCP_REASS_LOGGING
    tcp_log_reassm(tp, q, ent, 0, 0, TCP_R_LOG_MERGE_INTO, 0);
#endif
#ifdef TCP_REASS_COUNTERS
    counter_u64_add(merge_into, 1);
#endif
    ent->tqe_last->m_next = q->tqe_m;
    ent->tqe_last = q->tqe_last;
    ent->tqe_len += q->tqe_len;
    ent->tqe_mbuf_cnt += q->tqe_mbuf_cnt;
    ent->tqe_m->m_pkthdr.len += q->tqe_len;
    ent->tqe_flags |= (q->tqe_flags & TH_FIN);
    TAILQ_REMOVE(&tp->t_segq, q, tqe_q);
    uma_zfree(tcp_reass_zone, q);
    tp->t_segqlen--;
}

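/*
 * Walk forward from ent, absorbing any later entries it now covers:
 * entries that are completely overlapped are freed outright, while a
 * partially overlapped entry is trimmed at its front and merged into ent.
 */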
static void
tcp_reass_merge_forward(struct tcpcb *tp, struct tseg_qent *ent)
{
    struct tseg_qent *q, *qtmp;
    int i;
    tcp_seq max;
    /*
     * Given an entry merge forward anyplace
     * that ent overlaps forward.
     */
    max = ent->tqe_start + ent->tqe_len;
    q = TAILQ_NEXT(ent, tqe_q);
    if (q == NULL)
        return;
    TAILQ_FOREACH_FROM_SAFE(q, &tp->t_segq, tqe_q, qtmp) {
        if (SEQ_GT(q->tqe_start, max))
            break;
        /* We have some or all that are overlapping */
        if (SEQ_GEQ(max, (q->tqe_start + q->tqe_len))) {
            /* It consumes it all */
            tp->t_segqmbuflen -= q->tqe_mbuf_cnt;
            m_freem(q->tqe_m);
            TAILQ_REMOVE(&tp->t_segq, q, tqe_q);
            uma_zfree(tcp_reass_zone, q);
            tp->t_segqlen--;
            continue;
        }
        /*
         * Trim the q entry to dovetail to this one
         * and then merge q into ent updating max
         * in the process.
         */
        i = max - q->tqe_start;
#ifdef TCP_REASS_LOGGING
        tcp_log_reassm(tp, q, NULL, 0, i, TCP_R_LOG_TRIM, 2);
#endif
        m_adj(q->tqe_m, i);
        q->tqe_len -= i;
        q->tqe_start += i;
        tcp_reass_merge_into(tp, ent, q);
        max = ent->tqe_start + ent->tqe_len;
    }
#ifdef TCP_REASS_COUNTERS
    counter_u64_add(merge_fwd, 1);
#endif
}

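/*
 * Compute the memory overhead of an mbuf chain (MSIZE per mbuf plus the
 * size of any attached external/cluster storage) and return the last mbuf
 * via *mlast so callers can keep a tail pointer for cheap appends. This
 * overhead is what gets charged against sb_mbmax and t_segqmbuflen.
 */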
static int
tcp_reass_overhead_of_chain(struct mbuf *m, struct mbuf **mlast)
{
    int len = MSIZE;

    if (m->m_flags & M_EXT)
        len += m->m_ext.ext_size;
    while (m->m_next != NULL) {
        m = m->m_next;
        len += MSIZE;
        if (m->m_flags & M_EXT)
            len += m->m_ext.ext_size;
    }
    *mlast = m;
    return (len);
}

/*
 * NOTE!!! the new tcp-reassembly code *must not* use
 * m_adj() with a negative index. That alters the chain
 * of mbufs (by possibly chopping trailing mbufs). At
 * the front of tcp_reass we count the mbuf overhead
 * and set up the tail pointer. If we use m_adj(m, -5)
 * we could corrupt the tail pointer. Currently the
 * code only uses m_adj(m, positive-num). If this
 * changes, appropriate changes to update mlast would
 * be needed.
 */
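/*
 * tcp_reass() takes the segment in m (described by th and *tlenp), queues
 * or merges it with what is already held, and hands any now in-order data
 * to the socket receive buffer. On return, *seq_start and *tlenp reflect
 * the queue entry the data landed in (useful to the caller, e.g. for SACK
 * generation), and the return value carries TH_FIN once data ending in a
 * FIN has been made available.
 */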
int
tcp_reass(struct tcpcb *tp, struct tcphdr *th, tcp_seq *seq_start,
    int *tlenp, struct mbuf *m)
{
    struct tseg_qent *q, *last, *first;
    struct tseg_qent *p = NULL;
    struct tseg_qent *nq = NULL;
    struct tseg_qent *te = NULL;
    struct mbuf *mlast = NULL;
    struct sockbuf *sb;
    struct socket *so = tp->t_inpcb->inp_socket;
    char *s = NULL;
    int flags, i, lenofoh;

    INP_WLOCK_ASSERT(tp->t_inpcb);
    /*
     * XXX: tcp_reass() is rather inefficient with its data structures
     * and should be rewritten (see NetBSD for optimizations).
     */

    KASSERT(th == NULL || (seq_start != NULL && tlenp != NULL),
        ("tcp_reass called with illegal parameter combination "
         "(tp=%p, th=%p, seq_start=%p, tlenp=%p, m=%p)",
         tp, th, seq_start, tlenp, m));
    /*
     * Call with th==NULL after becoming established to
     * force pre-ESTABLISHED data up to the user socket.
     */
    if (th == NULL)
        goto present;
    KASSERT(SEQ_GEQ(th->th_seq, tp->rcv_nxt),
        ("Attempt to add old entry to reassembly queue (th=%p, tp=%p)",
         th, tp));
#ifdef TCP_REASS_LOGGING
    tcp_reass_log_new_in(tp, th->th_seq, *tlenp, m, TCP_R_LOG_ADD, NULL);
#endif
#ifdef TCP_REASS_COUNTERS
    counter_u64_add(reass_entry, 1);
#endif
    /*
     * Check for zero length data.
     */
    if ((*tlenp == 0) && ((th->th_flags & TH_FIN) == 0)) {
        /*
         * A zero length segment does no
         * one any good. We could check
         * the rcv_nxt <-> rcv_wnd but that's
         * already done for us by the caller.
         */
#ifdef TCP_REASS_COUNTERS
        counter_u64_add(tcp_zero_input, 1);
#endif
        m_freem(m);
#ifdef TCP_REASS_LOGGING
        tcp_reass_log_dump(tp);
#endif
        return (0);
    }
    lenofoh = tcp_reass_overhead_of_chain(m, &mlast);
    sb = &tp->t_inpcb->inp_socket->so_rcv;
    if ((th->th_seq != tp->rcv_nxt || !TCPS_HAVEESTABLISHED(tp->t_state)) &&
        (sb->sb_mbcnt + tp->t_segqmbuflen + lenofoh) > sb->sb_mbmax) {
        /* No room */
        TCPSTAT_INC(tcps_rcvreassfull);
#ifdef TCP_REASS_COUNTERS
        counter_u64_add(reass_nospace, 1);
#endif
#ifdef TCP_REASS_LOGGING
        tcp_log_reassm(tp, NULL, NULL, th->th_seq, lenofoh, TCP_R_LOG_LIMIT_REACHED, 0);
#endif
        if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL))) {
            log(LOG_DEBUG, "%s; %s: mbuf count limit reached, "
                "segment dropped\n", s, __func__);
            free(s, M_TCPLOG);
        }
        m_freem(m);
        *tlenp = 0;
#ifdef TCP_REASS_LOGGING
        tcp_reass_log_dump(tp);
#endif
        return (0);
    }
    /*
     * First let's deal with two common cases, the
     * segment appends to the back of our collected
     * segments, or the segment is the next in line.
     */
    last = TAILQ_LAST_FAST(&tp->t_segq, tseg_qent, tqe_q);
    if (last != NULL) {
        if ((th->th_flags & TH_FIN) &&
            SEQ_LT((th->th_seq + *tlenp), (last->tqe_start + last->tqe_len))) {
            /*
             * Someone is trying to game us, dump
             * the segment.
             */
            *tlenp = 0;
            m_freem(m);
            return (0);
        }
        if ((SEQ_GEQ(th->th_seq, last->tqe_start)) &&
            (SEQ_GEQ((last->tqe_start + last->tqe_len), th->th_seq))) {
            /* Common case, trailing segment is added */
            /**
             *                                 +--last
             *                                 v
             *  reassembly buffer |---|  |---| |---|
             *  new segment                       |---|
             */
#ifdef TCP_REASS_COUNTERS
            counter_u64_add(reass_path1, 1);
#endif
            if (SEQ_GT((last->tqe_start + last->tqe_len), th->th_seq)) {
                i = (last->tqe_start + last->tqe_len) - th->th_seq;
                if (i < *tlenp) {
#ifdef TCP_REASS_LOGGING
                    tcp_log_reassm(tp, last, NULL, 0, i, TCP_R_LOG_TRIM, 3);
#endif
                    m_adj(m, i);
                    *tlenp -= i;
                    th->th_seq += i;
                } else {
                    /* Complete overlap */
                    TCPSTAT_INC(tcps_rcvduppack);
                    TCPSTAT_ADD(tcps_rcvdupbyte, *tlenp);
                    m_freem(m);
                    *tlenp = last->tqe_len;
                    *seq_start = last->tqe_start;
                    return (0);
                }
            }
            if (last->tqe_flags & TH_FIN) {
                /*
                 * We have data after the FIN on the last?
                 */
                *tlenp = 0;
                m_freem(m);
                return (0);
            }
            tcp_reass_append(tp, last, m, th, *tlenp, mlast, lenofoh);
            tp->t_segqmbuflen += lenofoh;
            *seq_start = last->tqe_start;
            *tlenp = last->tqe_len;
            return (0);
        } else if (SEQ_GT(th->th_seq, (last->tqe_start + last->tqe_len))) {
            /*
             * Second common case, we missed
             * another one and have something more
             * for the end.
             */
            /**
             *                                 +--last
             *                                 v
             *  reassembly buffer |---|  |---| |---|
             *  new segment                           |---|
             */
            if (last->tqe_flags & TH_FIN) {
                /*
                 * We have data after the FIN on the last?
                 */
                *tlenp = 0;
                m_freem(m);
                return (0);
            }
#ifdef TCP_REASS_COUNTERS
            counter_u64_add(reass_path2, 1);
#endif
            p = last;
            goto new_entry;
        }
    } else {
        /* First segment (it's NULL). */
        goto new_entry;
    }
    first = TAILQ_FIRST(&tp->t_segq);
    if (SEQ_LT(th->th_seq, first->tqe_start) &&
        SEQ_GEQ((th->th_seq + *tlenp), first->tqe_start) &&
        SEQ_LT((th->th_seq + *tlenp), (first->tqe_start + first->tqe_len))) {
        /*
         * The head of the queue is prepended by this and
         * it may be the one I want most.
         */
        /**
         *       first-------+
         *                   v
         *  rea:             |---|  |---| |---|
         *  new:         |---|
         *  Note the case we do not deal with here is:
         *  rea=     |---|   |---| |---|
         *  new=  |----|
         *  Due to the fact that it could be
         *     new   |--------------------|
         *  And we might need to merge forward.
         */
#ifdef INVARIANTS
        struct mbuf *firstmbuf;
#endif

#ifdef TCP_REASS_COUNTERS
        counter_u64_add(reass_path3, 1);
#endif
        if (SEQ_LT(th->th_seq, tp->rcv_nxt)) {
            /*
             * The resend was even before
             * what we have. We need to trim it.
             * Note TSNH (it should be trimmed
             * before the call to tcp_reass()).
             */
#ifdef INVARIANTS
            panic("th->th_seq:%u rcv_nxt:%u tp:%p not pre-trimmed",
                  th->th_seq, tp->rcv_nxt, tp);
#else
            i = tp->rcv_nxt - th->th_seq;
#ifdef TCP_REASS_LOGGING
            tcp_log_reassm(tp, first, NULL, 0, i, TCP_R_LOG_TRIM, 4);
#endif
            m_adj(m, i);
            th->th_seq += i;
            *tlenp -= i;
#endif
        }
#ifdef INVARIANTS
        firstmbuf = first->tqe_m;
#endif
        tcp_reass_prepend(tp, first, m, th, *tlenp, mlast, lenofoh);
#ifdef INVARIANTS
        if (firstmbuf == first->tqe_m) {
            panic("First stayed same m:%p foobar:%p first->tqe_m:%p tp:%p first:%p",
                  m, firstmbuf, first->tqe_m, tp, first);
        } else if (first->tqe_m != m) {
            panic("First did not change to m:%p foobar:%p first->tqe_m:%p tp:%p first:%p",
                  m, firstmbuf, first->tqe_m, tp, first);
        }
#endif
        tp->t_segqmbuflen += lenofoh;
        *seq_start = first->tqe_start;
        *tlenp = first->tqe_len;
        goto present;
    } else if (SEQ_LT((th->th_seq + *tlenp), first->tqe_start)) {
        /* New segment is before our earliest segment. */
        /**
         *           first---->+
         *                      v
         *  rea=                |---| ....
         *  new=         |---|
         */
        goto new_entry;
    }
    /*
     * Find a segment which begins after this one does.
     */
#ifdef TCP_REASS_COUNTERS
    counter_u64_add(reass_fullwalk, 1);
#endif
    TAILQ_FOREACH(q, &tp->t_segq, tqe_q) {
        if (SEQ_GT(q->tqe_start, th->th_seq))
            break;
    }
    p = TAILQ_PREV(q, tsegqe_head, tqe_q);
    /**
     * Now is this fit just in-between only?
     * i.e.:
     *      p---+        +----q
     *          v        v
     *     res= |--|     |--|    |--|
     *     new=      |-|
     */
    if (SEQ_LT((th->th_seq + *tlenp), q->tqe_start) &&
        ((p == NULL) || (SEQ_GT(th->th_seq, (p->tqe_start + p->tqe_len))))) {
        /* Yep, no overlap */
        goto new_entry;
    }
    /**
     * If we reach here we have some (possibly all) overlap
     * such as:
     *     res=     |--|     |--|    |--|
     *     new=  |----|
     *     or  new=  |-----------------|
     *     or  new=            |-----------|
     */
    if ((p != NULL) &&
        (SEQ_LEQ(th->th_seq, (p->tqe_start + p->tqe_len)))) {
        /* conversion to int (in i) handles seq wraparound */

#ifdef TCP_REASS_COUNTERS
        counter_u64_add(reass_path4, 1);
#endif
        i = p->tqe_start + p->tqe_len - th->th_seq;
        if (i >= *tlenp) {
            /**
             * The previous segment completely includes this one:
             *        p-----+
             *              v
             *  reassembly buffer |---|
             *  new segment        |-|
             */
            TCPSTAT_INC(tcps_rcvduppack);
            TCPSTAT_ADD(tcps_rcvdupbyte, *tlenp);
            *tlenp = p->tqe_len;
            *seq_start = p->tqe_start;
            m_freem(m);
            /*
             * Try to present any queued data
             * at the left window edge to the user.
             * This is needed after the 3-WHS
             * completes. Note this probably
             * will not work and we will return.
             */
            return (0);
        }
        if (i > 0) {
            /**
             * The previous segment overlaps the start of this one;
             * trim the front of the new data.
             *        p-----+
             *              v
             *  reassembly buffer |---|
             *  new segment         |-----|
             */
#ifdef TCP_REASS_COUNTERS
            counter_u64_add(reass_path5, 1);
#endif
#ifdef TCP_REASS_LOGGING
            tcp_log_reassm(tp, p, NULL, 0, i, TCP_R_LOG_TRIM, 5);
#endif
            m_adj(m, i);
            *tlenp -= i;
            th->th_seq += i;
        }
        if (th->th_seq == (p->tqe_start + p->tqe_len)) {
            /*
             * If it dovetails in with this one
             * append it.
             */
            /**
             *              p->+        +->q
             *                 v        v
             *  reassembly buffer |--|     |---|
             *  new segment          |--|
             * (note: it was trimmed above if it overlapped)
             */
            tcp_reass_append(tp, p, m, th, *tlenp, mlast, lenofoh);
            tp->t_segqmbuflen += lenofoh;
        } else {
#ifdef INVARIANTS
            panic("Impossible cut th_seq:%u p->seq:%u(%d) p:%p tp:%p",
                  th->th_seq, p->tqe_start, p->tqe_len,
                  p, tp);
#endif
            *tlenp = 0;
            m_freem(m);
            return (0);
        }
        q = p;
    } else {
        /*
         * The new data runs over the
         * top of previously sack'd data (in q).
         * It may be partially overlapping, or
         * it may overlap the entire segment.
         */
#ifdef TCP_REASS_COUNTERS
        counter_u64_add(reass_path6, 1);
#endif
        if (SEQ_GEQ((th->th_seq + *tlenp), (q->tqe_start + q->tqe_len))) {
            /* It consumes it all */
            /**
             *              q---+
             *                  v
             *  reassembly buffer |--|     |---|
             *  new segment          |----------|
             */
#ifdef TCP_REASS_COUNTERS
            counter_u64_add(reass_path7, 1);
#endif
            tcp_reass_replace(tp, q, m, th->th_seq, *tlenp, mlast, lenofoh, th->th_flags);
        } else {
            /*
             * We just need to prepend the data
             * to this. It does not overrun
             * the end.
             */
            /**
             *              q---+
             *                  v
             *  reassembly buffer |--|     |---|
             *  new segment          |----------|
             */
            tcp_reass_prepend(tp, q, m, th, *tlenp, mlast, lenofoh);
            tp->t_segqmbuflen += lenofoh;
        }
    }
    /* Now does it go further than that? */
    tcp_reass_merge_forward(tp, q);
    *seq_start = q->tqe_start;
    *tlenp = q->tqe_len;
    goto present;

    /*
     * When we reach here we can't combine it
     * with any existing segment.
     *
     * Limit the number of segments that can be queued to reduce the
     * potential for mbuf exhaustion. For best performance, we want to be
     * able to queue a full window's worth of segments. The size of the
     * socket receive buffer determines our advertised window and grows
     * automatically when socket buffer autotuning is enabled. Use it as the
     * basis for our queue limit.
     *
     * However, allow the user to specify a ceiling for the number of
     * segments in each queue.
     *
     * Always let through the missing segment that caused this queue.
     * NB: Access to the socket buffer is left intentionally unlocked as we
     * can tolerate stale information here.
     *
     * XXXLAS: Using sbspace(so->so_rcv) instead of so->so_rcv.sb_hiwat
     * should work but causes packets to be dropped when they shouldn't.
     * Investigate why and re-evaluate the below limit after the behaviour
     * is understood.
     */
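    /*
     * In-order fast path: if this segment is exactly the next expected
     * data (th_seq == rcv_nxt) on an established connection, it bypasses
     * the queue limits and entries below and is appended directly to the
     * socket receive buffer.
     */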
new_entry:
    if (th->th_seq == tp->rcv_nxt && TCPS_HAVEESTABLISHED(tp->t_state)) {
        tp->rcv_nxt += *tlenp;
        flags = th->th_flags & TH_FIN;
        TCPSTAT_INC(tcps_rcvoopack);
        TCPSTAT_ADD(tcps_rcvoobyte, *tlenp);
        SOCKBUF_LOCK(&so->so_rcv);
        if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
            m_freem(m);
        } else {
            sbappendstream_locked(&so->so_rcv, m, 0);
        }
        SOCKBUF_UNLOCK(&so->so_rcv);
        tp->t_flags |= TF_WAKESOR;
        return (flags);
    }
    if (tcp_new_limits) {
        if ((tp->t_segqlen > tcp_reass_queue_guard) &&
            (*tlenp < MSIZE)) {
            /*
             * This is really a lie, we are not full but
             * are getting a segment that is above the
             * guard threshold. If it is, and it's below
             * an mbuf size (256) we drop it if it
             * can't fill in some place.
             */
            TCPSTAT_INC(tcps_rcvreassfull);
            *tlenp = 0;
            if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL))) {
                log(LOG_DEBUG, "%s; %s: queue limit reached, "
                    "segment dropped\n", s, __func__);
                free(s, M_TCPLOG);
            }
            m_freem(m);
#ifdef TCP_REASS_LOGGING
            tcp_reass_log_dump(tp);
#endif
            return (0);
        }
    } else {
        if (tp->t_segqlen >= min((so->so_rcv.sb_hiwat / tp->t_maxseg) + 1,
            tcp_reass_maxqueuelen)) {
            TCPSTAT_INC(tcps_rcvreassfull);
            *tlenp = 0;
            if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL))) {
                log(LOG_DEBUG, "%s; %s: queue limit reached, "
                    "segment dropped\n", s, __func__);
                free(s, M_TCPLOG);
            }
            m_freem(m);
#ifdef TCP_REASS_LOGGING
            tcp_reass_log_dump(tp);
#endif
            return (0);
        }
    }
    /*
     * Allocate a new queue entry. If we can't, or hit the zone limit,
     * just drop the pkt.
     */
    te = uma_zalloc(tcp_reass_zone, M_NOWAIT);
    if (te == NULL) {
        TCPSTAT_INC(tcps_rcvmemdrop);
        m_freem(m);
        *tlenp = 0;
        if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL,
            NULL))) {
            log(LOG_DEBUG, "%s; %s: global zone limit "
                "reached, segment dropped\n", s, __func__);
            free(s, M_TCPLOG);
        }
        return (0);
    }
    tp->t_segqlen++;
    TCPSTAT_INC(tcps_rcvoopack);
    TCPSTAT_ADD(tcps_rcvoobyte, *tlenp);
    /* Insert the new segment queue entry into place. */
    te->tqe_m = m;
    te->tqe_flags = th->th_flags;
    te->tqe_len = *tlenp;
    te->tqe_start = th->th_seq;
    te->tqe_last = mlast;
    te->tqe_mbuf_cnt = lenofoh;
    tp->t_segqmbuflen += te->tqe_mbuf_cnt;
    if (p == NULL) {
        TAILQ_INSERT_HEAD(&tp->t_segq, te, tqe_q);
    } else {
        TAILQ_INSERT_AFTER(&tp->t_segq, p, te, tqe_q);
    }
#ifdef TCP_REASS_LOGGING
    tcp_reass_log_new_in(tp, th->th_seq, *tlenp, m, TCP_R_LOG_NEW_ENTRY, te);
#endif
present:
    /*
     * Present data to user, advancing rcv_nxt through
     * completed sequence space.
     */
    if (!TCPS_HAVEESTABLISHED(tp->t_state))
        return (0);
    q = TAILQ_FIRST(&tp->t_segq);
    KASSERT(q == NULL || SEQ_GEQ(q->tqe_start, tp->rcv_nxt),
        ("Reassembly queue for %p has stale entry at head", tp));
    if (!q || q->tqe_start != tp->rcv_nxt) {
#ifdef TCP_REASS_LOGGING
        tcp_reass_log_dump(tp);
#endif
        return (0);
    }
    SOCKBUF_LOCK(&so->so_rcv);
    do {
        tp->rcv_nxt += q->tqe_len;
        flags = q->tqe_flags & TH_FIN;
        nq = TAILQ_NEXT(q, tqe_q);
        TAILQ_REMOVE(&tp->t_segq, q, tqe_q);
        if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
            m_freem(q->tqe_m);
        } else {
#ifdef TCP_REASS_LOGGING
            tcp_reass_log_new_in(tp, q->tqe_start, q->tqe_len, q->tqe_m, TCP_R_LOG_READ, q);
            if (th != NULL) {
                tcp_log_reassm(tp, q, NULL, th->th_seq, *tlenp, TCP_R_LOG_READ, 1);
            } else {
                tcp_log_reassm(tp, q, NULL, 0, 0, TCP_R_LOG_READ, 1);
            }
#endif
            sbappendstream_locked(&so->so_rcv, q->tqe_m, 0);
        }
#ifdef TCP_REASS_LOGGING
        if (th != NULL) {
            tcp_log_reassm(tp, q, NULL, th->th_seq, *tlenp, TCP_R_LOG_READ, 2);
        } else {
            tcp_log_reassm(tp, q, NULL, 0, 0, TCP_R_LOG_READ, 2);
        }
#endif
        KASSERT(tp->t_segqmbuflen >= q->tqe_mbuf_cnt,
            ("tp:%p seg queue goes negative", tp));
        tp->t_segqmbuflen -= q->tqe_mbuf_cnt;
        uma_zfree(tcp_reass_zone, q);
        tp->t_segqlen--;
        q = nq;
    } while (q && q->tqe_start == tp->rcv_nxt);
    if (TAILQ_EMPTY(&tp->t_segq) &&
        (tp->t_segqmbuflen != 0)) {
#ifdef INVARIANTS
        panic("tp:%p segq:%p len:%d queue empty",
              tp, &tp->t_segq, tp->t_segqmbuflen);
#else
#ifdef TCP_REASS_LOGGING
        if (th != NULL) {
            tcp_log_reassm(tp, NULL, NULL, th->th_seq, *tlenp, TCP_R_LOG_ZERO, 0);
        } else {
            tcp_log_reassm(tp, NULL, NULL, 0, 0, TCP_R_LOG_ZERO, 0);
        }
#endif
        tp->t_segqmbuflen = 0;
#endif
    }
#ifdef TCP_REASS_LOGGING
    tcp_reass_log_dump(tp);
#endif
    SOCKBUF_UNLOCK(&so->so_rcv);
    tp->t_flags |= TF_WAKESOR;
    return (flags);
}