/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <vm/uma.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_hpts.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */
#define TCP_R_LOG_ADD		1
#define TCP_R_LOG_LIMIT_REACHED 2
#define TCP_R_LOG_APPEND	3
#define TCP_R_LOG_PREPEND	4
#define TCP_R_LOG_REPLACE	5
#define TCP_R_LOG_MERGE_INTO	6
#define TCP_R_LOG_NEW_ENTRY	7
#define TCP_R_LOG_READ		8
#define TCP_R_LOG_ZERO		9
#define TCP_R_LOG_DUMP		10
#define TCP_R_LOG_TRIM		11
/* For debugging we want counters and BB logging */
/* #define TCP_REASS_COUNTERS 1 */
/* #define TCP_REASS_LOGGING 1 */
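/*
 * Note for illustration (not part of the original build glue): these are
 * compile-time knobs, so one would enable them by uncommenting the defines
 * above or by passing the equivalent -D flags to the kernel build, e.g.
 *
 *	makeoptions	CFLAGS+=-DTCP_REASS_COUNTERS -DTCP_REASS_LOGGING
 *
 * The exact kernel-config spelling is an assumption; the defines themselves
 * are what the #ifdef blocks below test.
 */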
static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW, 0,
    "TCP Segment Reassembly Queue");

static SYSCTL_NODE(_net_inet_tcp_reass, OID_AUTO, stats, CTLFLAG_RW, 0,
    "TCP Segment Reassembly stats");
static int tcp_reass_maxseg = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RDTUN,
    &tcp_reass_maxseg, 0,
    "Global maximum number of TCP Segments in Reassembly Queue");

static uma_zone_t tcp_reass_zone;
SYSCTL_UMA_CUR(_net_inet_tcp_reass, OID_AUTO, cursegments, 0,
    &tcp_reass_zone,
    "Global number of TCP Segments currently in Reassembly Queue");

static u_int tcp_reass_maxqueuelen = 100;
SYSCTL_UINT(_net_inet_tcp_reass, OID_AUTO, maxqueuelen, CTLFLAG_RWTUN,
    &tcp_reass_maxqueuelen, 0,
    "Maximum number of TCP Segments per Reassembly Queue");

static int tcp_new_limits = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, new_limit, CTLFLAG_RWTUN,
    &tcp_new_limits, 0,
    "Do we use the new limit method we are discussing?");

static u_int tcp_reass_queue_guard = 16;
SYSCTL_UINT(_net_inet_tcp_reass, OID_AUTO, queueguard, CTLFLAG_RWTUN,
    &tcp_reass_queue_guard, 16,
    "Number of TCP Segments in Reassembly Queue where we flip over to guard mode");
#ifdef TCP_REASS_COUNTERS

counter_u64_t reass_entry;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, entry, CTLFLAG_RD,
    &reass_entry, "A segment entered reassembly");

counter_u64_t reass_path1;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path1, CTLFLAG_RD,
    &reass_path1, "Took path 1");

counter_u64_t reass_path2;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path2, CTLFLAG_RD,
    &reass_path2, "Took path 2");

counter_u64_t reass_path3;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path3, CTLFLAG_RD,
    &reass_path3, "Took path 3");

counter_u64_t reass_path4;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path4, CTLFLAG_RD,
    &reass_path4, "Took path 4");

counter_u64_t reass_path5;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path5, CTLFLAG_RD,
    &reass_path5, "Took path 5");

counter_u64_t reass_path6;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path6, CTLFLAG_RD,
    &reass_path6, "Took path 6");

counter_u64_t reass_path7;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path7, CTLFLAG_RD,
    &reass_path7, "Took path 7");

counter_u64_t reass_fullwalk;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, fullwalk, CTLFLAG_RD,
    &reass_fullwalk, "Took a full walk");

counter_u64_t reass_nospace;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, nospace, CTLFLAG_RD,
    &reass_nospace, "Had no mbuf capacity");

counter_u64_t merge_fwd;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, merge_fwd, CTLFLAG_RD,
    &merge_fwd, "Ran merge fwd");

counter_u64_t merge_into;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, merge_into, CTLFLAG_RD,
    &merge_into, "Ran merge into");

counter_u64_t tcp_zero_input;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, zero_input, CTLFLAG_RD,
    &tcp_zero_input, "The reassembly buffer saw a zero len segment etc");
#endif

/* Initialize TCP reassembly queue */
static void
tcp_reass_zone_change(void *tag)
{

	/* Set the zone limit and read back the effective value. */
	tcp_reass_maxseg = nmbclusters / 16;
	tcp_reass_maxseg = uma_zone_set_max(tcp_reass_zone,
	    tcp_reass_maxseg);
}
#ifdef TCP_REASS_LOGGING

static void
tcp_log_reassm(struct tcpcb *tp, struct tseg_qent *q, struct tseg_qent *p,
    tcp_seq seq, int len, uint8_t action, int instance)
{
	uint32_t cts;
	struct timeval tv;

	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;

		memset(&log, 0, sizeof(log));
		cts = tcp_get_usecs(&tv);
		log.u_bbr.flex1 = seq;
		log.u_bbr.cur_del_rate = (uint64_t)q;
		log.u_bbr.delRate = (uint64_t)p;
		if (q != NULL) {
			log.u_bbr.flex2 = q->tqe_start;
			log.u_bbr.flex3 = q->tqe_len;
			log.u_bbr.flex4 = q->tqe_mbuf_cnt;
			log.u_bbr.hptsi_gain = q->tqe_flags;
		}
		if (p != NULL) {
			log.u_bbr.flex5 = p->tqe_start;
			log.u_bbr.pkts_out = p->tqe_len;
			log.u_bbr.epoch = p->tqe_mbuf_cnt;
			log.u_bbr.cwnd_gain = p->tqe_flags;
		}
		log.u_bbr.flex6 = tp->t_segqmbuflen;
		log.u_bbr.flex7 = instance;
		log.u_bbr.flex8 = action;
		log.u_bbr.timeStamp = cts;
		TCP_LOG_EVENTP(tp, NULL,
		    &tp->t_inpcb->inp_socket->so_rcv,
		    &tp->t_inpcb->inp_socket->so_snd,
		    TCP_LOG_REASS, 0,
		    len, &log, false, &tv);
	}
}
static void
tcp_reass_log_dump(struct tcpcb *tp)
{
	struct tseg_qent *q;

	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
		TAILQ_FOREACH(q, &tp->t_segq, tqe_q) {
			tcp_log_reassm(tp, q, NULL, q->tqe_start, q->tqe_len, TCP_R_LOG_DUMP, 0);
		}
	}
}

static void
tcp_reass_log_new_in(struct tcpcb *tp, tcp_seq seq, int len, struct mbuf *m,
    int logval, struct tseg_qent *q)
{
	int cnt;
	struct mbuf *t;

	cnt = 0;
	t = m;
	while (t) {
		cnt += t->m_len;
		t = t->m_next;
	}
	tcp_log_reassm(tp, q, NULL, seq, len, logval, cnt);
}
#endif
void
tcp_reass_global_init(void)
{

	tcp_reass_maxseg = nmbclusters / 16;
	TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments",
	    &tcp_reass_maxseg);
	tcp_reass_zone = uma_zcreate("tcpreass", sizeof (struct tseg_qent),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	/* Set the zone limit and read back the effective value. */
	tcp_reass_maxseg = uma_zone_set_max(tcp_reass_zone,
	    tcp_reass_maxseg);
#ifdef TCP_REASS_COUNTERS
	reass_path1 = counter_u64_alloc(M_WAITOK);
	reass_path2 = counter_u64_alloc(M_WAITOK);
	reass_path3 = counter_u64_alloc(M_WAITOK);
	reass_path4 = counter_u64_alloc(M_WAITOK);
	reass_path5 = counter_u64_alloc(M_WAITOK);
	reass_path6 = counter_u64_alloc(M_WAITOK);
	reass_path7 = counter_u64_alloc(M_WAITOK);
	reass_fullwalk = counter_u64_alloc(M_WAITOK);
	reass_nospace = counter_u64_alloc(M_WAITOK);
	reass_entry = counter_u64_alloc(M_WAITOK);
	merge_fwd = counter_u64_alloc(M_WAITOK);
	merge_into = counter_u64_alloc(M_WAITOK);
	tcp_zero_input = counter_u64_alloc(M_WAITOK);
#endif
	EVENTHANDLER_REGISTER(nmbclusters_change,
	    tcp_reass_zone_change, NULL, EVENTHANDLER_PRI_ANY);
}
void
tcp_reass_flush(struct tcpcb *tp)
{
	struct tseg_qent *qe;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	while ((qe = TAILQ_FIRST(&tp->t_segq)) != NULL) {
		TAILQ_REMOVE(&tp->t_segq, qe, tqe_q);
		m_freem(qe->tqe_m);
		uma_zfree(tcp_reass_zone, qe);
		tp->t_segqlen--;
	}
	tp->t_segqmbuflen = 0;
	KASSERT((tp->t_segqlen == 0),
	    ("TCP reass queue %p segment count is %d instead of 0 after flush.",
	    tp, tp->t_segqlen));
}
static void
tcp_reass_append(struct tcpcb *tp, struct tseg_qent *last,
    struct mbuf *m, struct tcphdr *th, int tlen,
    struct mbuf *mlast, int lenofoh)
{

#ifdef TCP_REASS_LOGGING
	tcp_log_reassm(tp, last, NULL, th->th_seq, tlen, TCP_R_LOG_APPEND, 0);
#endif
	last->tqe_len += tlen;
	last->tqe_m->m_pkthdr.len += tlen;
	/* Preserve the FIN bit if it's there */
	last->tqe_flags |= (th->th_flags & TH_FIN);
	last->tqe_last->m_next = m;
	last->tqe_last = mlast;
	last->tqe_mbuf_cnt += lenofoh;
	tp->t_rcvoopack++;
	TCPSTAT_INC(tcps_rcvoopack);
	TCPSTAT_ADD(tcps_rcvoobyte, tlen);
#ifdef TCP_REASS_LOGGING
	tcp_reass_log_new_in(tp, last->tqe_start, lenofoh, last->tqe_m,
	    TCP_R_LOG_APPEND, last);
#endif
}
static void
tcp_reass_prepend(struct tcpcb *tp, struct tseg_qent *first, struct mbuf *m, struct tcphdr *th,
    int tlen, struct mbuf *mlast, int lenofoh)
{
	int i;

#ifdef TCP_REASS_LOGGING
	tcp_log_reassm(tp, first, NULL, th->th_seq, tlen, TCP_R_LOG_PREPEND, 0);
#endif
	if (SEQ_GT((th->th_seq + tlen), first->tqe_start)) {
		/* The new data overlaps into the old */
		i = (th->th_seq + tlen) - first->tqe_start;
#ifdef TCP_REASS_LOGGING
		tcp_log_reassm(tp, first, NULL, 0, i, TCP_R_LOG_TRIM, 1);
#endif
		m_adj(first->tqe_m, i);
		first->tqe_len -= i;
		first->tqe_start += i;
	}
	/* Ok now setup our chain to point to the old first */
	mlast->m_next = first->tqe_m;
	first->tqe_m = m;
	first->tqe_len += tlen;
	first->tqe_start = th->th_seq;
	first->tqe_m->m_pkthdr.len = first->tqe_len;
	first->tqe_mbuf_cnt += lenofoh;
	tp->t_rcvoopack++;
	TCPSTAT_INC(tcps_rcvoopack);
	TCPSTAT_ADD(tcps_rcvoobyte, tlen);
#ifdef TCP_REASS_LOGGING
	tcp_reass_log_new_in(tp, first->tqe_start, lenofoh, first->tqe_m,
	    TCP_R_LOG_PREPEND, first);
#endif
}
static void
tcp_reass_replace(struct tcpcb *tp, struct tseg_qent *q, struct mbuf *m,
    tcp_seq seq, int len, struct mbuf *mlast, int mbufoh, uint8_t flags)
{
	/*
	 * Free the data in q, and replace
	 * it with the new segment.
	 */
	int len_dif;

#ifdef TCP_REASS_LOGGING
	tcp_log_reassm(tp, q, NULL, seq, len, TCP_R_LOG_REPLACE, 0);
#endif
	m_freem(q->tqe_m);
	KASSERT(tp->t_segqmbuflen >= q->tqe_mbuf_cnt,
	    ("Tp:%p seg queue goes negative", tp));
	tp->t_segqmbuflen -= q->tqe_mbuf_cnt;
	q->tqe_mbuf_cnt = mbufoh;
	q->tqe_m = m;
	q->tqe_last = mlast;
	q->tqe_start = seq;
	if (len > q->tqe_len)
		len_dif = len - q->tqe_len;
	else
		len_dif = 0;
	tp->t_rcvoopack++;
	TCPSTAT_INC(tcps_rcvoopack);
	TCPSTAT_ADD(tcps_rcvoobyte, len_dif);
	q->tqe_len = len;
	q->tqe_flags = (flags & TH_FIN);
	q->tqe_m->m_pkthdr.len = q->tqe_len;
	tp->t_segqmbuflen += mbufoh;
}
static void
tcp_reass_merge_into(struct tcpcb *tp, struct tseg_qent *ent,
    struct tseg_qent *q)
{
	/*
	 * Merge q into ent and free q from the list.
	 */
#ifdef TCP_REASS_LOGGING
	tcp_log_reassm(tp, q, ent, 0, 0, TCP_R_LOG_MERGE_INTO, 0);
#endif
#ifdef TCP_REASS_COUNTERS
	counter_u64_add(merge_into, 1);
#endif
	ent->tqe_last->m_next = q->tqe_m;
	ent->tqe_last = q->tqe_last;
	ent->tqe_len += q->tqe_len;
	ent->tqe_mbuf_cnt += q->tqe_mbuf_cnt;
	ent->tqe_m->m_pkthdr.len += q->tqe_len;
	ent->tqe_flags |= (q->tqe_flags & TH_FIN);
	TAILQ_REMOVE(&tp->t_segq, q, tqe_q);
	uma_zfree(tcp_reass_zone, q);
	tp->t_segqlen--;
}
static void
tcp_reass_merge_forward(struct tcpcb *tp, struct tseg_qent *ent)
{
	struct tseg_qent *q, *qtmp;
	int i;
	tcp_seq max;
	/*
	 * Given an entry merge forward anyplace
	 * that ent overlaps forward.
	 */

	max = ent->tqe_start + ent->tqe_len;
	q = TAILQ_NEXT(ent, tqe_q);
	if (q == NULL) {
		/* Nothing left */
		return;
	}
	TAILQ_FOREACH_FROM_SAFE(q, &tp->t_segq, tqe_q, qtmp) {
		if (SEQ_GT(q->tqe_start, max)) {
			/* Beyond ent, we are done */
			break;
		}
		/* We have some or all that are overlapping */
		if (SEQ_GEQ(max, (q->tqe_start + q->tqe_len))) {
			/* It consumes it all */
			tp->t_segqmbuflen -= q->tqe_mbuf_cnt;
			m_freem(q->tqe_m);
			TAILQ_REMOVE(&tp->t_segq, q, tqe_q);
			uma_zfree(tcp_reass_zone, q);
			tp->t_segqlen--;
			continue;
		}
		/*
		 * Trim the q entry to dovetail to this one
		 * and then merge q into ent updating max
		 * in the process.
		 */
		i = max - q->tqe_start;
#ifdef TCP_REASS_LOGGING
		tcp_log_reassm(tp, q, NULL, 0, i, TCP_R_LOG_TRIM, 2);
#endif
		m_adj(q->tqe_m, i);
		q->tqe_len -= i;
		q->tqe_start += i;
		tcp_reass_merge_into(tp, ent, q);
		max = ent->tqe_start + ent->tqe_len;
	}
#ifdef TCP_REASS_COUNTERS
	counter_u64_add(merge_fwd, 1);
#endif
}
static int
tcp_reass_overhead_of_chain(struct mbuf *m, struct mbuf **mlast)
{
	int len = MSIZE;

	if (m->m_flags & M_EXT)
		len += m->m_ext.ext_size;
	while (m->m_next != NULL) {
		m = m->m_next;
		len += MSIZE;
		if (m->m_flags & M_EXT)
			len += m->m_ext.ext_size;
	}
	*mlast = m;
	return (len);
}
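/*
 * Worked example (illustrative numbers, not from the original file): for a
 * chain of two mbufs where the second carries a 2k external cluster, the
 * overhead counted above is MSIZE + (MSIZE + 2048). In other words, the
 * accounting charges the full mbuf and cluster sizes, not just the bytes of
 * TCP payload; this is the value that t_segqmbuflen accumulates and that
 * tcp_reass() compares against sb_mbmax.
 */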
/*
 * NOTE!!! the new tcp-reassembly code *must not* use
 * m_adj() with a negative index. That alters the chain
 * of mbufs (by possibly chopping trailing mbufs). At
 * the front of tcp_reass we count the mbuf overhead
 * and setup the tail pointer. If we use m_adj(m, -5)
 * we could corrupt the tail pointer. Currently the
 * code only uses m_adj(m, positive-num). If this
 * changes, appropriate changes to update mlast would
 * be needed.
 */
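/*
 * Illustration (hypothetical values): after
 *
 *	lenofoh = tcp_reass_overhead_of_chain(m, &mlast);
 *
 * mlast points at the final mbuf of the chain. A front trim such as
 * m_adj(m, 5) only strips bytes from the head, so mlast stays valid;
 * m_adj(m, -5) may free trailing mbufs and leave mlast dangling.
 */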
int
tcp_reass(struct tcpcb *tp, struct tcphdr *th, tcp_seq *seq_start,
    int *tlenp, struct mbuf *m)
{
	struct tseg_qent *q, *last, *first;
	struct tseg_qent *p = NULL;
	struct tseg_qent *nq = NULL;
	struct tseg_qent *te = NULL;
	struct tseg_qent tqs;
	struct mbuf *mlast = NULL;
	struct sockbuf *sb;
	struct socket *so = tp->t_inpcb->inp_socket;
	char *s = NULL;
	int flags, i, lenofoh;

	INP_WLOCK_ASSERT(tp->t_inpcb);
	/*
	 * XXX: tcp_reass() is rather inefficient with its data structures
	 * and should be rewritten (see NetBSD for optimizations).
	 */

	/*
	 * Call with th==NULL after becoming established to
	 * force pre-ESTABLISHED data up to user socket.
	 */
	if (th == NULL)
		goto present;
	KASSERT(SEQ_GEQ(th->th_seq, tp->rcv_nxt),
	    ("Attempt to add old entry to reassembly queue (th=%p, tp=%p)",
	    th, tp));
#ifdef TCP_REASS_LOGGING
	tcp_reass_log_new_in(tp, th->th_seq, *tlenp, m, TCP_R_LOG_ADD, NULL);
#endif
#ifdef TCP_REASS_COUNTERS
	counter_u64_add(reass_entry, 1);
#endif
	/*
	 * Check for zero length data.
	 */
	if ((*tlenp == 0) && ((th->th_flags & TH_FIN) == 0)) {
		/*
		 * A zero length segment does no
		 * one any good. We could check
		 * the rcv_nxt <-> rcv_wnd but that's
		 * already done for us by the caller.
		 */
#ifdef TCP_REASS_COUNTERS
		counter_u64_add(tcp_zero_input, 1);
#endif
		m_freem(m);
#ifdef TCP_REASS_LOGGING
		tcp_reass_log_dump(tp);
#endif
		return (0);
	}
	/*
	 * Will it fit?
	 */
	lenofoh = tcp_reass_overhead_of_chain(m, &mlast);
	sb = &tp->t_inpcb->inp_socket->so_rcv;
	if ((sb->sb_mbcnt + tp->t_segqmbuflen + lenofoh) > sb->sb_mbmax) {
		/* No room */
		TCPSTAT_INC(tcps_rcvreassfull);
#ifdef TCP_REASS_COUNTERS
		counter_u64_add(reass_nospace, 1);
#endif
#ifdef TCP_REASS_LOGGING
		tcp_log_reassm(tp, NULL, NULL, th->th_seq, lenofoh, TCP_R_LOG_LIMIT_REACHED, 0);
#endif
		m_freem(m);
		*tlenp = 0;
#ifdef TCP_REASS_LOGGING
		tcp_reass_log_dump(tp);
#endif
		return (0);
	}
	/*
	 * First lets deal with two common cases, the
	 * segment appends to the back of our collected
	 * segments. Or the segment is the next in line.
	 */
	last = TAILQ_LAST_FAST(&tp->t_segq, tseg_qent, tqe_q);
	if (last != NULL) {
		if ((th->th_flags & TH_FIN) &&
		    SEQ_LT((th->th_seq + *tlenp), (last->tqe_start + last->tqe_len))) {
			/*
			 * Someone is trying to game us, dump
			 * the segment.
			 */
			*tlenp = 0;
			m_freem(m);
			return (0);
		}
		if ((SEQ_GEQ(th->th_seq, last->tqe_start)) &&
		    (SEQ_GEQ((last->tqe_start + last->tqe_len), th->th_seq))) {
			/* Common case, trailing segment is added */
			/**
			 *                                 +--last
			 *                                 v
			 *  reassembly buffer |---|  |---| |---|
			 *  new segment                       |---|
			 */
#ifdef TCP_REASS_COUNTERS
			counter_u64_add(reass_path1, 1);
#endif
			if (SEQ_GT((last->tqe_start + last->tqe_len), th->th_seq)) {
				i = (last->tqe_start + last->tqe_len) - th->th_seq;
				if (i < *tlenp) {
#ifdef TCP_REASS_LOGGING
					tcp_log_reassm(tp, last, NULL, 0, i, TCP_R_LOG_TRIM, 3);
#endif
					m_adj(m, i);
					*tlenp -= i;
					th->th_seq += i;
				} else {
					/* Complete overlap */
					TCPSTAT_INC(tcps_rcvduppack);
					TCPSTAT_ADD(tcps_rcvdupbyte, *tlenp);
					m_freem(m);
					*tlenp = last->tqe_len;
					*seq_start = last->tqe_start;
					return (0);
				}
			}
			if (last->tqe_flags & TH_FIN) {
				/*
				 * We have data after the FIN on the last?
				 */
				*tlenp = 0;
				m_freem(m);
				return (0);
			}
			tcp_reass_append(tp, last, m, th, *tlenp, mlast, lenofoh);
			tp->t_segqmbuflen += lenofoh;
			*seq_start = last->tqe_start;
			*tlenp = last->tqe_len;
			return (0);
		} else if (SEQ_GT(th->th_seq, (last->tqe_start + last->tqe_len))) {
			/*
			 * Second common case, we missed
			 * another one and have something more
			 * for the end.
			 */
			/**
			 *                                 +--last
			 *                                 v
			 *  reassembly buffer |---|  |---| |---|
			 *  new segment                           |---|
			 */
			if (last->tqe_flags & TH_FIN) {
				/*
				 * We have data after the FIN on the last?
				 */
				*tlenp = 0;
				m_freem(m);
				return (0);
			}
#ifdef TCP_REASS_COUNTERS
			counter_u64_add(reass_path2, 1);
#endif
			p = last;
			goto new_entry;
		}
	} else {
		/* First segment (it's NULL). */
		goto new_entry;
	}
	first = TAILQ_FIRST(&tp->t_segq);
	if (SEQ_LT(th->th_seq, first->tqe_start) &&
	    SEQ_GEQ((th->th_seq + *tlenp), first->tqe_start) &&
	    SEQ_LT((th->th_seq + *tlenp), (first->tqe_start + first->tqe_len))) {
		/*
		 * The head of the queue is prepended by this and
		 * it may be the one I want most.
		 */
		/**
		 *       first
		 *         v
		 * rea:    |---|  |---| |---|
		 * new: |---|
		 * Note the case we do not deal with here is:
		 * rea=    |---|  |---| |---|
		 * new= |------------------|
		 * Due to the fact that it could be
		 * new=    |--------------------|
		 * And we might need to merge forward.
		 */
#ifdef INVARIANTS
		struct mbuf *firstmbuf;
#endif

#ifdef TCP_REASS_COUNTERS
		counter_u64_add(reass_path3, 1);
#endif
		if (SEQ_LT(th->th_seq, tp->rcv_nxt)) {
			/*
			 * The resend was even before
			 * what we have. We need to trim it.
			 * Note TSNH (it should be trimmed
			 * before the call to tcp_reass()).
			 */
#ifdef INVARIANTS
			panic("th->th_seq:%u rcv_nxt:%u tp:%p not pre-trimmed",
			    th->th_seq, tp->rcv_nxt, tp);
#else
			i = tp->rcv_nxt - th->th_seq;
#ifdef TCP_REASS_LOGGING
			tcp_log_reassm(tp, first, NULL, 0, i, TCP_R_LOG_TRIM, 4);
#endif
			m_adj(m, i);
			th->th_seq += i;
			*tlenp -= i;
#endif
		}
#ifdef INVARIANTS
		firstmbuf = first->tqe_m;
#endif
		tcp_reass_prepend(tp, first, m, th, *tlenp, mlast, lenofoh);
#ifdef INVARIANTS
		if (firstmbuf == first->tqe_m) {
			panic("First stayed same m:%p foobar:%p first->tqe_m:%p tp:%p first:%p",
			    m, firstmbuf, first->tqe_m, tp, first);
		} else if (first->tqe_m != m) {
			panic("First did not change to m:%p foobar:%p first->tqe_m:%p tp:%p first:%p",
			    m, firstmbuf, first->tqe_m, tp, first);
		}
#endif
		tp->t_segqmbuflen += lenofoh;
		*seq_start = first->tqe_start;
		*tlenp = first->tqe_len;
		return (0);
	} else if (SEQ_LT((th->th_seq + *tlenp), first->tqe_start)) {
		/* New segment is before our earliest segment. */
		/**
		 *          first
		 *            v
		 * rea=       |---| ....
		 * new= |--|
		 */
		goto new_entry;
	}
	/*
	 * Find a segment which begins after this one does.
	 */
#ifdef TCP_REASS_COUNTERS
	counter_u64_add(reass_fullwalk, 1);
#endif
	TAILQ_FOREACH(q, &tp->t_segq, tqe_q) {
		if (SEQ_GT(q->tqe_start, th->th_seq))
			break;
	}
	p = TAILQ_PREV(q, tsegqe_head, tqe_q);
	/*
	 * Now does it fit just in-between only?
	 * i.e.:
	 *      p---+        +----q
	 *          v        v
	 *     res= |--|     |--|  |--|
	 *     new=      |--|
	 */
	if (SEQ_LT((th->th_seq + *tlenp), q->tqe_start) &&
	    ((p == NULL) || (SEQ_GT(th->th_seq, (p->tqe_start + p->tqe_len))))) {
		/* Yep, no overlap */
		goto new_entry;
	}
	/*
	 * If we reach here we have some (possibly all) overlap
	 * such as:
	 *     res=     |--|     |--|    |--|
	 *     new=  |----|
	 * or  new=  |-----------------|
	 * or  new=  |-----------|
	 */
	if ((p != NULL) &&
	    (SEQ_LEQ(th->th_seq, (p->tqe_start + p->tqe_len)))) {
		/* conversion to int (in i) handles seq wraparound */

#ifdef TCP_REASS_COUNTERS
		counter_u64_add(reass_path4, 1);
#endif
		i = p->tqe_start + p->tqe_len - th->th_seq;
		if (i >= 0) {
			if (i >= *tlenp) {
				/* Complete duplication */
				/**
				 *        p---+
				 *            v
				 *  reassembly buffer |---|
				 *  new segment        |-|
				 */
				TCPSTAT_INC(tcps_rcvduppack);
				TCPSTAT_ADD(tcps_rcvdupbyte, *tlenp);
				m_freem(m);
				*seq_start = p->tqe_start;
				*tlenp = p->tqe_len;
				/*
				 * Try to present any queued data
				 * at the left window edge to the user.
				 * This is needed after the 3-WHS
				 * completes. Note this probably
				 * will not work and we will return.
				 */
				goto present;
			}
			if (i > 0) {
				/* Partial overlap, trim the front */
				/**
				 *        p---+
				 *            v
				 *  reassembly buffer |---|
				 *  new segment         |-----|
				 */
#ifdef TCP_REASS_COUNTERS
				counter_u64_add(reass_path5, 1);
#endif
#ifdef TCP_REASS_LOGGING
				tcp_log_reassm(tp, p, NULL, 0, i, TCP_R_LOG_TRIM, 5);
#endif
				m_adj(m, i);
				*tlenp -= i;
				th->th_seq += i;
			}
		}
		if (th->th_seq == (p->tqe_start + p->tqe_len)) {
			/*
			 * If it dovetails in with this one
			 * append it.
			 */
			/**
			 *        p---+
			 *            v
			 *  reassembly buffer |--|     |---|
			 *  new segment           |----|
			 *  (note: it was trimmed above if it overlapped)
			 */
			tcp_reass_append(tp, p, m, th, *tlenp, mlast, lenofoh);
			tp->t_segqmbuflen += lenofoh;
		} else {
#ifdef INVARIANTS
			panic("Impossible cut th_seq:%u p->seq:%u(%d) p:%p tp:%p",
			    th->th_seq, p->tqe_start, p->tqe_len,
			    p, tp);
#endif
		}
		*seq_start = p->tqe_start;
		*tlenp = p->tqe_len;
		return (0);
	}
	/*
	 * The new data runs over the
	 * top of previously sack'd data (in q).
	 * It may be partially overlapping, or
	 * it may overlap the entire segment.
	 */
#ifdef TCP_REASS_COUNTERS
	counter_u64_add(reass_path6, 1);
#endif
	if (SEQ_GEQ((th->th_seq + *tlenp), (q->tqe_start + q->tqe_len))) {
		/* It consumes it all */
		/**
		 *             q---+
		 *                 v
		 *  reassembly buffer |--|     |---|
		 *  new segment                |----------|
		 */
#ifdef TCP_REASS_COUNTERS
		counter_u64_add(reass_path7, 1);
#endif
		tcp_reass_replace(tp, q, m, th->th_seq, *tlenp, mlast, lenofoh, th->th_flags);
	} else {
		/*
		 * We just need to prepend the data
		 * to this. It does not overrun
		 * the end.
		 */
		/**
		 *                q---+
		 *                    v
		 *  reassembly buffer |--|     |---|
		 *  new segment            |------|
		 */
		tcp_reass_prepend(tp, q, m, th, *tlenp, mlast, lenofoh);
		tp->t_segqmbuflen += lenofoh;
	}
	/* Now does it go further than that? */
	tcp_reass_merge_forward(tp, q);
	*seq_start = q->tqe_start;
	*tlenp = q->tqe_len;
	return (0);
	/*
	 * When we reach here we can't combine it
	 * with any existing segment.
	 *
	 * Limit the number of segments that can be queued to reduce the
	 * potential for mbuf exhaustion. For best performance, we want to be
	 * able to queue a full window's worth of segments. The size of the
	 * socket receive buffer determines our advertised window and grows
	 * automatically when socket buffer autotuning is enabled. Use it as the
	 * basis for our queue limit.
	 *
	 * However, allow the user to specify a ceiling for the number of
	 * segments in each queue.
	 *
	 * Always let through the missing segment that caused this queue.
	 * NB: Access to the socket buffer is left intentionally unlocked as we
	 * can tolerate stale information here.
	 *
	 * XXXLAS: Using sbspace(so->so_rcv) instead of so->so_rcv.sb_hiwat
	 * should work but causes packets to be dropped when they shouldn't.
	 * Investigate why and re-evaluate the below limit after the behaviour
	 * is understood.
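	 *
	 * Worked example (illustrative numbers, not from the original
	 * comment): with so_rcv.sb_hiwat = 65536 and t_maxseg = 1460, the
	 * default path below allows min(65536 / 1460 + 1, maxqueuelen) =
	 * min(45, 100) = 45 queued segments for the connection.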
	 */
new_entry:
	if (tcp_new_limits) {
		if ((tp->t_segqlen > tcp_reass_queue_guard) &&
		    (*tlenp < MSIZE)) {
			/*
			 * This is really a lie, we are not full but
			 * are getting a segment that is above
			 * the guard threshold. If it is, and it's below
			 * an mbuf size (256) we drop it if it
			 * can't fill in some place.
			 */
			TCPSTAT_INC(tcps_rcvreassfull);
			*tlenp = 0;
			if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL))) {
				log(LOG_DEBUG, "%s; %s: queue limit reached, "
				    "segment dropped\n", s, __func__);
				free(s, M_TCPLOG);
			}
			m_freem(m);
#ifdef TCP_REASS_LOGGING
			tcp_reass_log_dump(tp);
#endif
			return (0);
		}
	} else {
		if ((th->th_seq != tp->rcv_nxt || !TCPS_HAVEESTABLISHED(tp->t_state)) &&
		    tp->t_segqlen >= min((so->so_rcv.sb_hiwat / tp->t_maxseg) + 1,
		    tcp_reass_maxqueuelen)) {
			TCPSTAT_INC(tcps_rcvreassfull);
			*tlenp = 0;
			if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL))) {
				log(LOG_DEBUG, "%s; %s: queue limit reached, "
				    "segment dropped\n", s, __func__);
				free(s, M_TCPLOG);
			}
			m_freem(m);
#ifdef TCP_REASS_LOGGING
			tcp_reass_log_dump(tp);
#endif
			return (0);
		}
	}
	/*
	 * Allocate a new queue entry. If we can't, or hit the zone limit
	 * just drop the pkt.
	 */
	te = uma_zalloc(tcp_reass_zone, M_NOWAIT);
	if (te == NULL) {
		TCPSTAT_INC(tcps_rcvmemdrop);
		m_freem(m);
		*tlenp = 0;
		if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL,
		    NULL))) {
			log(LOG_DEBUG, "%s; %s: global zone limit "
			    "reached, segment dropped\n", s, __func__);
			free(s, M_TCPLOG);
		}
		return (0);
	}
	tp->t_segqlen++;
	tp->t_rcvoopack++;
	TCPSTAT_INC(tcps_rcvoopack);
	TCPSTAT_ADD(tcps_rcvoobyte, *tlenp);
	/* Insert the new segment queue entry into place. */
	te->tqe_m = m;
	te->tqe_flags = th->th_flags;
	te->tqe_len = *tlenp;
	te->tqe_start = th->th_seq;
	te->tqe_last = mlast;
	te->tqe_mbuf_cnt = lenofoh;
	tp->t_segqmbuflen += te->tqe_mbuf_cnt;
	if (p == NULL) {
		TAILQ_INSERT_HEAD(&tp->t_segq, te, tqe_q);
	} else {
		TAILQ_INSERT_AFTER(&tp->t_segq, p, te, tqe_q);
	}
#ifdef TCP_REASS_LOGGING
	tcp_reass_log_new_in(tp, th->th_seq, *tlenp, m, TCP_R_LOG_NEW_ENTRY, te);
#endif
present:
	/*
	 * Present data to user, advancing rcv_nxt through
	 * completed sequence space.
	 */
	if (!TCPS_HAVEESTABLISHED(tp->t_state))
		return (0);
	q = TAILQ_FIRST(&tp->t_segq);
	KASSERT(q == NULL || SEQ_GEQ(q->tqe_start, tp->rcv_nxt),
	    ("Reassembly queue for %p has stale entry at head", tp));
	if (!q || q->tqe_start != tp->rcv_nxt) {
#ifdef TCP_REASS_LOGGING
		tcp_reass_log_dump(tp);
#endif
		return (0);
	}
	SOCKBUF_LOCK(&so->so_rcv);
	do {
		tp->rcv_nxt += q->tqe_len;
		flags = q->tqe_flags & TH_FIN;
		nq = TAILQ_NEXT(q, tqe_q);
		TAILQ_REMOVE(&tp->t_segq, q, tqe_q);
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			m_freem(q->tqe_m);
		} else {
#ifdef TCP_REASS_LOGGING
			tcp_reass_log_new_in(tp, q->tqe_start, q->tqe_len, q->tqe_m, TCP_R_LOG_READ, q);
			/* th may be NULL when draining pre-ESTABLISHED data */
			if (th != NULL)
				tcp_log_reassm(tp, q, NULL, th->th_seq, *tlenp, TCP_R_LOG_READ, 1);
			else
				tcp_log_reassm(tp, q, NULL, 0, 0, TCP_R_LOG_READ, 1);
#endif
			sbappendstream_locked(&so->so_rcv, q->tqe_m, 0);
		}
#ifdef TCP_REASS_LOGGING
		if (th != NULL)
			tcp_log_reassm(tp, q, NULL, th->th_seq, *tlenp, TCP_R_LOG_READ, 2);
		else
			tcp_log_reassm(tp, q, NULL, 0, 0, TCP_R_LOG_READ, 2);
#endif
		KASSERT(tp->t_segqmbuflen >= q->tqe_mbuf_cnt,
		    ("tp:%p seg queue goes negative", tp));
		tp->t_segqmbuflen -= q->tqe_mbuf_cnt;
		if (q != &tqs)
			uma_zfree(tcp_reass_zone, q);
		tp->t_segqlen--;
		q = nq;
	} while (q && q->tqe_start == tp->rcv_nxt);
	if (TAILQ_EMPTY(&tp->t_segq) &&
	    (tp->t_segqmbuflen != 0)) {
#ifdef INVARIANTS
		panic("tp:%p segq:%p len:%d queue empty",
		    tp, &tp->t_segq, tp->t_segqmbuflen);
#else
#ifdef TCP_REASS_LOGGING
		/* th may be NULL when draining pre-ESTABLISHED data */
		if (th != NULL)
			tcp_log_reassm(tp, NULL, NULL, th->th_seq, *tlenp, TCP_R_LOG_ZERO, 0);
		else
			tcp_log_reassm(tp, NULL, NULL, 0, 0, TCP_R_LOG_ZERO, 0);
#endif
		tp->t_segqmbuflen = 0;
#endif
	}
#ifdef TCP_REASS_LOGGING
	tcp_reass_log_dump(tp);
#endif
	sorwakeup_locked(so);
	return (flags);
}
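#if 0
/*
 * Illustrative call-site sketch (hypothetical; the real caller lives in
 * tcp_input.c and its variable names may differ): out-of-order data is
 * handed to tcp_reass(), which takes ownership of the mbuf chain and
 * returns the TH_FIN flag of any data it delivered to the socket buffer.
 */
	thflags = tcp_reass(tp, th, &save_start, &tlen, m);
	tp->t_flags |= TF_ACKNOW;
#endif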