1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
5  *      The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31
32 #include <sys/cdefs.h>
33 #include "opt_inet.h"
34 #include "opt_inet6.h"
35
36 /* For debugging we want counters and BB logging */
37 /* #define TCP_REASS_COUNTERS 1 */
38 /* #define TCP_REASS_LOGGING 1 */
39
40 #include <sys/param.h>
41 #include <sys/kernel.h>
42 #include <sys/eventhandler.h>
43 #include <sys/malloc.h>
44 #include <sys/mbuf.h>
45 #include <sys/socket.h>
46 #include <sys/socketvar.h>
47 #include <sys/sysctl.h>
48 #include <sys/syslog.h>
49 #include <sys/systm.h>
50
51 #include <vm/uma.h>
52
53 #include <net/if.h>
54 #include <net/if_var.h>
55 #include <net/route.h>
56 #include <net/vnet.h>
57
58 #include <netinet/in.h>
59 #include <netinet/in_pcb.h>
60 #include <netinet/in_systm.h>
61 #include <netinet/in_var.h>
62 #include <netinet/ip.h>
63 #include <netinet/ip_var.h>
64 #include <netinet/ip_options.h>
65 #include <netinet/ip6.h>
66 #include <netinet6/in6_pcb.h>
67 #include <netinet6/ip6_var.h>
68 #include <netinet6/nd6.h>
69 #include <netinet/tcp.h>
70 #include <netinet/tcp_fsm.h>
71 #include <netinet/tcp_seq.h>
72 #include <netinet/tcp_timer.h>
73 #include <netinet/tcp_var.h>
74 #ifdef TCP_REASS_LOGGING
75 #include <netinet/tcp_log_buf.h>
76 #include <netinet/tcp_hpts.h>
77 #endif
78 #include <netinet/tcpip.h>
79
80 #define TCP_R_LOG_ADD           1
81 #define TCP_R_LOG_LIMIT_REACHED 2
82 #define TCP_R_LOG_APPEND        3
83 #define TCP_R_LOG_PREPEND       4
84 #define TCP_R_LOG_REPLACE       5
85 #define TCP_R_LOG_MERGE_INTO    6
86 #define TCP_R_LOG_NEW_ENTRY     7
87 #define TCP_R_LOG_READ          8
88 #define TCP_R_LOG_ZERO          9
89 #define TCP_R_LOG_DUMP          10
90 #define TCP_R_LOG_TRIM          11
91
92 static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass,
93     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
94     "TCP Segment Reassembly Queue");
95
96 static SYSCTL_NODE(_net_inet_tcp_reass, OID_AUTO, stats,
97     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
98     "TCP Segment Reassembly stats");
99
100 static int tcp_reass_maxseg = 0;
101 SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RDTUN,
102     &tcp_reass_maxseg, 0,
103     "Global maximum number of TCP Segments in Reassembly Queue");
104
105 static uma_zone_t tcp_reass_zone;
106 SYSCTL_UMA_CUR(_net_inet_tcp_reass, OID_AUTO, cursegments, 0,
107     &tcp_reass_zone,
108     "Global number of TCP Segments currently in Reassembly Queue");
109
110 static u_int tcp_reass_maxqueuelen = 100;
111 SYSCTL_UINT(_net_inet_tcp_reass, OID_AUTO, maxqueuelen, CTLFLAG_RWTUN,
112     &tcp_reass_maxqueuelen, 0,
113     "Maximum number of TCP Segments per Reassembly Queue");
114
115 static int tcp_new_limits = 0;
116 SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, new_limit, CTLFLAG_RWTUN,
117     &tcp_new_limits, 0,
118     "Do we use the new limit method we are discussing?");
119
120 static u_int tcp_reass_queue_guard = 16;
121 SYSCTL_UINT(_net_inet_tcp_reass, OID_AUTO, queueguard, CTLFLAG_RWTUN,
122     &tcp_reass_queue_guard, 16,
123     "Number of TCP Segments in Reassembly Queue where we flip over to guard mode");
124
125 #ifdef TCP_REASS_COUNTERS
126
127 counter_u64_t reass_entry;
128 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, entry, CTLFLAG_RD,
129     &reass_entry, "A segment entered reassembly");
130
131 counter_u64_t reass_path1;
132 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path1, CTLFLAG_RD,
133     &reass_path1, "Took path 1");
134
135 counter_u64_t reass_path2;
136 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path2, CTLFLAG_RD,
137     &reass_path2, "Took path 2");
138
139 counter_u64_t reass_path3;
140 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path3, CTLFLAG_RD,
141     &reass_path3, "Took path 3");
142
143 counter_u64_t reass_path4;
144 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path4, CTLFLAG_RD,
145     &reass_path4, "Took path 4");
146
147 counter_u64_t reass_path5;
148 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path5, CTLFLAG_RD,
149     &reass_path5, "Took path 5");
150
151 counter_u64_t reass_path6;
152 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path6, CTLFLAG_RD,
153     &reass_path6, "Took path 6");
154
155 counter_u64_t reass_path7;
156 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path7, CTLFLAG_RD,
157     &reass_path7, "Took path 7");
158
159 counter_u64_t reass_fullwalk;
160 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, fullwalk, CTLFLAG_RD,
161     &reass_fullwalk, "Took a full walk");
162
163 counter_u64_t reass_nospace;
164 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, nospace, CTLFLAG_RD,
165     &reass_nospace, "Had no mbuf capacity");
166
167 counter_u64_t merge_fwd;
168 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, merge_fwd, CTLFLAG_RD,
169     &merge_fwd, "Ran merge fwd");
170
171 counter_u64_t merge_into;
172 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, merge_into, CTLFLAG_RD,
173     &merge_into, "Ran merge into");
174
175 counter_u64_t tcp_zero_input;
176 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, zero_input, CTLFLAG_RD,
177     &tcp_zero_input, "The reassembly buffer saw a zero length segment");
178
179 #endif
180
181 /* Adjust the reassembly zone limit when nmbclusters changes. */
182 static void
183 tcp_reass_zone_change(void *tag)
184 {
185
186         /* Set the zone limit and read back the effective value. */
187         tcp_reass_maxseg = nmbclusters / 16;
188         tcp_reass_maxseg = uma_zone_set_max(tcp_reass_zone,
189             tcp_reass_maxseg);
190 }
191
192 #ifdef TCP_REASS_LOGGING
193
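/*
 * Record a single reassembly event, and the state of up to two queue
 * entries, in the black box log when BB logging is enabled for this
 * connection.
 */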
194 static void
195 tcp_log_reassm(struct tcpcb *tp, struct tseg_qent *q, struct tseg_qent *p,
196     tcp_seq seq, int len, uint8_t action, int instance)
197 {
198         struct socket *so = tptosocket(tp);
199         uint32_t cts;
200         struct timeval tv;
201
202         if (tp->t_logstate != TCP_LOG_STATE_OFF) {
203                 union tcp_log_stackspecific log;
204
205                 memset(&log, 0, sizeof(log));
206                 cts = tcp_get_usecs(&tv);
207                 log.u_bbr.flex1 = seq;
208                 log.u_bbr.cur_del_rate = (uint64_t)q;
209                 log.u_bbr.delRate = (uint64_t)p;
210                 if (q != NULL) {
211                         log.u_bbr.flex2 = q->tqe_start;
212                         log.u_bbr.flex3 = q->tqe_len;
213                         log.u_bbr.flex4 = q->tqe_mbuf_cnt;
214                         log.u_bbr.hptsi_gain = q->tqe_flags;
215                 }
216                 if (p != NULL)  {
217                         log.u_bbr.flex5 = p->tqe_start;
218                         log.u_bbr.pkts_out = p->tqe_len;
219                         log.u_bbr.epoch = p->tqe_mbuf_cnt;
220                         log.u_bbr.cwnd_gain = p->tqe_flags;
221                 }
222                 log.u_bbr.flex6 = tp->t_segqmbuflen;
223                 log.u_bbr.flex7 = instance;
224                 log.u_bbr.flex8 = action;
225                 log.u_bbr.timeStamp = cts;
226                 TCP_LOG_EVENTP(tp, NULL, &so->so_rcv, &so->so_snd,
227                     TCP_LOG_REASS, 0,
228                     len, &log, false, &tv);
229         }
230 }
231
232 static void
233 tcp_reass_log_dump(struct tcpcb *tp)
234 {
235         struct tseg_qent *q;
236
237         if (tp->t_logstate != TCP_LOG_STATE_OFF) {
238                 TAILQ_FOREACH(q, &tp->t_segq, tqe_q) {
239                         tcp_log_reassm(tp, q, NULL, q->tqe_start, q->tqe_len, TCP_R_LOG_DUMP, 0);
240                 }
241         }
242 }
243
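/*
 * Log an incoming chain; the summed length of its mbufs is passed as the
 * instance value of the log record.
 */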
244 static void
245 tcp_reass_log_new_in(struct tcpcb *tp, tcp_seq seq, int len, struct mbuf *m,
246     int logval, struct tseg_qent *q)
247 {
248         int cnt;
249         struct mbuf *t;
250
251         cnt = 0;
252         t = m;
253         while (t) {
254                 cnt += t->m_len;
255                 t = t->m_next;
256         }
257         tcp_log_reassm(tp, q, NULL, seq, len, logval, cnt);
258 }
259
260 #endif
261
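/*
 * System-wide initialization: size and create the reassembly UMA zone,
 * allocate the optional debug counters and register for nmbclusters
 * changes.
 */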
262 void
263 tcp_reass_global_init(void)
264 {
265
266         tcp_reass_maxseg = nmbclusters / 16;
267         TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments",
268             &tcp_reass_maxseg);
269         tcp_reass_zone = uma_zcreate("tcpreass", sizeof (struct tseg_qent),
270             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
271         /* Set the zone limit and read back the effective value. */
272         tcp_reass_maxseg = uma_zone_set_max(tcp_reass_zone,
273             tcp_reass_maxseg);
274 #ifdef TCP_REASS_COUNTERS
275         reass_path1 = counter_u64_alloc(M_WAITOK);
276         reass_path2 = counter_u64_alloc(M_WAITOK);
277         reass_path3 = counter_u64_alloc(M_WAITOK);
278         reass_path4 = counter_u64_alloc(M_WAITOK);
279         reass_path5 = counter_u64_alloc(M_WAITOK);
280         reass_path6 = counter_u64_alloc(M_WAITOK);
281         reass_path7 = counter_u64_alloc(M_WAITOK);
282         reass_fullwalk = counter_u64_alloc(M_WAITOK);
283         reass_nospace = counter_u64_alloc(M_WAITOK);
284         reass_entry = counter_u64_alloc(M_WAITOK);
285         merge_fwd = counter_u64_alloc(M_WAITOK);
286         merge_into = counter_u64_alloc(M_WAITOK);
287         tcp_zero_input = counter_u64_alloc(M_WAITOK);
288 #endif
289         EVENTHANDLER_REGISTER(nmbclusters_change,
290             tcp_reass_zone_change, NULL, EVENTHANDLER_PRI_ANY);
291
292 }
293
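/*
 * Drop every segment still queued for this connection and reset the
 * per-connection reassembly accounting.
 */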
294 void
295 tcp_reass_flush(struct tcpcb *tp)
296 {
297         struct tseg_qent *qe;
298
299         INP_WLOCK_ASSERT(tptoinpcb(tp));
300
301         while ((qe = TAILQ_FIRST(&tp->t_segq)) != NULL) {
302                 TAILQ_REMOVE(&tp->t_segq, qe, tqe_q);
303                 m_freem(qe->tqe_m);
304                 uma_zfree(tcp_reass_zone, qe);
305                 tp->t_segqlen--;
306         }
307         tp->t_segqmbuflen = 0;
308         KASSERT((tp->t_segqlen == 0),
309             ("TCP reass queue %p segment count is %d instead of 0 after flush.",
310             tp, tp->t_segqlen));
311 }
312
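/*
 * Append the chain m (tlen bytes, last mbuf mlast, lenofoh bytes of mbuf
 * overhead) to the tail entry "last".  The caller has already trimmed any
 * overlap, so the new data dovetails exactly onto "last".
 */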
313 static void
314 tcp_reass_append(struct tcpcb *tp, struct tseg_qent *last,
315     struct mbuf *m, struct tcphdr *th, int tlen,
316     struct mbuf *mlast, int lenofoh)
317 {
318
319 #ifdef TCP_REASS_LOGGING
320         tcp_log_reassm(tp, last, NULL, th->th_seq, tlen, TCP_R_LOG_APPEND, 0);
321 #endif
322         last->tqe_len += tlen;
323         last->tqe_m->m_pkthdr.len += tlen;
324         /* Preserve the FIN bit if it's there */
325         last->tqe_flags |= (tcp_get_flags(th) & TH_FIN);
326         last->tqe_last->m_next = m;
327         last->tqe_last = mlast;
328         last->tqe_mbuf_cnt += lenofoh;
329         tp->t_rcvoopack++;
330         TCPSTAT_INC(tcps_rcvoopack);
331         TCPSTAT_ADD(tcps_rcvoobyte, tlen);
332 #ifdef TCP_REASS_LOGGING
333         tcp_reass_log_new_in(tp, last->tqe_start, lenofoh, last->tqe_m,
334                              TCP_R_LOG_APPEND,
335                              last);
336 #endif
337 }
338
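/*
 * Prepend the chain m to the entry "first".  If the new data overlaps the
 * start of "first", the overlapping bytes are trimmed from the existing
 * entry, i.e. the new data wins.
 */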
339 static void
340 tcp_reass_prepend(struct tcpcb *tp, struct tseg_qent *first, struct mbuf *m, struct tcphdr *th,
341                   int tlen, struct mbuf *mlast, int lenofoh)
342 {
343         int i;
344
345 #ifdef TCP_REASS_LOGGING
346         tcp_log_reassm(tp, first, NULL, th->th_seq, tlen, TCP_R_LOG_PREPEND, 0);
347 #endif
348         if (SEQ_GT((th->th_seq + tlen), first->tqe_start)) {
349                 /* The new data overlaps into the old */
350                 i = (th->th_seq + tlen) - first->tqe_start;
351 #ifdef TCP_REASS_LOGGING
352                 tcp_log_reassm(tp, first, NULL, 0, i, TCP_R_LOG_TRIM, 1);
353 #endif
354                 m_adj(first->tqe_m, i);
355                 first->tqe_len -= i;
356                 first->tqe_start += i;
357         }
358         /* OK, now set up our chain to point to the old first */
359         mlast->m_next = first->tqe_m;
360         first->tqe_m = m;
361         first->tqe_len += tlen;
362         first->tqe_start = th->th_seq;
363         first->tqe_m->m_pkthdr.len = first->tqe_len;
364         first->tqe_mbuf_cnt += lenofoh;
365         tp->t_rcvoopack++;
366         TCPSTAT_INC(tcps_rcvoopack);
367         TCPSTAT_ADD(tcps_rcvoobyte, tlen);
368 #ifdef TCP_REASS_LOGGING
369         tcp_reass_log_new_in(tp, first->tqe_start, lenofoh, first->tqe_m,
370                              TCP_R_LOG_PREPEND,
371                              first);
372 #endif
373 }
374
375 static void
376 tcp_reass_replace(struct tcpcb *tp, struct tseg_qent *q, struct mbuf *m,
377     tcp_seq seq, int len, struct mbuf *mlast, int mbufoh, uint16_t flags)
378 {
379         /*
380          * Free the data in q, and replace
381          * it with the new segment.
382          */
383         int len_dif;
384
385 #ifdef TCP_REASS_LOGGING
386         tcp_log_reassm(tp, q, NULL, seq, len, TCP_R_LOG_REPLACE, 0);
387 #endif
388         m_freem(q->tqe_m);
389         KASSERT(tp->t_segqmbuflen >= q->tqe_mbuf_cnt,
390                 ("Tp:%p seg queue goes negative", tp));
391         tp->t_segqmbuflen -= q->tqe_mbuf_cnt;
392         q->tqe_mbuf_cnt = mbufoh;
393         q->tqe_m = m;
394         q->tqe_last = mlast;
395         q->tqe_start = seq;
396         if (len > q->tqe_len)
397                 len_dif = len - q->tqe_len;
398         else
399                 len_dif = 0;
400         tp->t_rcvoopack++;
401         TCPSTAT_INC(tcps_rcvoopack);
402         TCPSTAT_ADD(tcps_rcvoobyte, len_dif);
403         q->tqe_len = len;
404         q->tqe_flags = (flags & TH_FIN);
405         q->tqe_m->m_pkthdr.len = q->tqe_len;
406         tp->t_segqmbuflen += mbufoh;
407
408 }
409
410 static void
411 tcp_reass_merge_into(struct tcpcb *tp, struct tseg_qent *ent,
412     struct tseg_qent *q)
413 {
414         /*
415          * Merge q into ent and free q from the list.
416          */
417 #ifdef TCP_REASS_LOGGING
418         tcp_log_reassm(tp, q, ent, 0, 0, TCP_R_LOG_MERGE_INTO, 0);
419 #endif
420 #ifdef TCP_REASS_COUNTERS
421         counter_u64_add(merge_into, 1);
422 #endif
423         ent->tqe_last->m_next = q->tqe_m;
424         ent->tqe_last = q->tqe_last;
425         ent->tqe_len += q->tqe_len;
426         ent->tqe_mbuf_cnt += q->tqe_mbuf_cnt;
427         ent->tqe_m->m_pkthdr.len += q->tqe_len;
428         ent->tqe_flags |= (q->tqe_flags & TH_FIN);
429         TAILQ_REMOVE(&tp->t_segq, q, tqe_q);
430         uma_zfree(tcp_reass_zone, q);
431         tp->t_segqlen--;
432
433 }
434
435 static void
436 tcp_reass_merge_forward(struct tcpcb *tp, struct tseg_qent *ent)
437 {
438         struct tseg_qent *q, *qtmp;
439         int i;
440         tcp_seq max;
441         /*
442          * Given an entry, merge forward into any later
443          * entries that ent overlaps.
444          */
445
446         max = ent->tqe_start + ent->tqe_len;
447         q = TAILQ_NEXT(ent, tqe_q);
448         if (q == NULL) {
449                 /* Nothing left */
450                 return;
451         }
452         TAILQ_FOREACH_FROM_SAFE(q, &tp->t_segq, tqe_q, qtmp) {
453                 if (SEQ_GT(q->tqe_start, max)) {
454                         /* q starts past our coverage, no more overlap */
455                         break;
456                 }
457                 /* We have some or all that are overlapping */
458                 if (SEQ_GEQ(max, (q->tqe_start + q->tqe_len))) {
459                         /* It consumes it all */
460                         tp->t_segqmbuflen -= q->tqe_mbuf_cnt;
461                         m_freem(q->tqe_m);
462                         TAILQ_REMOVE(&tp->t_segq, q, tqe_q);
463                         uma_zfree(tcp_reass_zone, q);
464                         tp->t_segqlen--;
465                         continue;
466                 }
467                 /*
468                  * Trim the q entry to dovetail to this one
469                  * and then merge q into ent updating max
470                  * in the process.
471                  */
472                 i = max - q->tqe_start;
473 #ifdef TCP_REASS_LOGGING
474                 tcp_log_reassm(tp, q, NULL, 0, i, TCP_R_LOG_TRIM, 2);
475 #endif
476                 m_adj(q->tqe_m, i);
477                 q->tqe_len -= i;
478                 q->tqe_start += i;
479                 tcp_reass_merge_into(tp, ent, q);
480                 max = ent->tqe_start + ent->tqe_len;
481         }
482 #ifdef TCP_REASS_COUNTERS
483         counter_u64_add(merge_fwd, 1);
484 #endif
485 }
486
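/*
 * Compute the storage overhead of the chain m: MSIZE for every mbuf plus
 * the size of any attached external buffer.  The last mbuf of the chain is
 * returned in *mlast.
 */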
487 static int
488 tcp_reass_overhead_of_chain(struct mbuf *m, struct mbuf **mlast)
489 {
490         int len = MSIZE;
491
492         if (m->m_flags & M_EXT)
493                 len += m->m_ext.ext_size;
494         while (m->m_next != NULL) {
495                 m = m->m_next;
496                 len += MSIZE;
497                 if (m->m_flags & M_EXT)
498                         len += m->m_ext.ext_size;
499         }
500         *mlast = m;
501         return (len);
502 }
503
504 /*
505  * NOTE!!! the new tcp-reassembly code *must not* use
506  * m_adj() with a negative index. That alters the chain
507  * of mbufs (by possibly chopping trailing mbufs). At
508  * the front of tcp_reass we count the mbuf overhead
509  * and setup the tail pointer. If we use m_adj(m, -5)
510  * we could corrupt the tail pointer. Currently the
511  * code only uses m_adj(m, positive-num). If this
512  * changes appropriate changes to update mlast would
513  * be needed.
514  */
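/*
 * Insert the segment in m (*tlenp bytes starting at th->th_seq) into the
 * reassembly queue and deliver any data that is now in order to the
 * socket.  Most paths update *seq_start and *tlenp to describe the queue
 * entry the data was merged into.  The return value carries TH_FIN once a
 * FIN has been handed to the user, otherwise 0.
 */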
515 int
516 tcp_reass(struct tcpcb *tp, struct tcphdr *th, tcp_seq *seq_start,
517           int *tlenp, struct mbuf *m)
518 {
519         struct tseg_qent *q, *last, *first;
520         struct tseg_qent *p = NULL;
521         struct tseg_qent *nq = NULL;
522         struct tseg_qent *te = NULL;
523         struct mbuf *mlast = NULL;
524         struct inpcb *inp = tptoinpcb(tp);
525         struct socket *so = tptosocket(tp);
526         struct sockbuf *sb = &so->so_rcv;
527         char *s = NULL;
528         int flags, i, lenofoh;
529
530         INP_WLOCK_ASSERT(inp);
531         /*
532          * XXX: tcp_reass() is rather inefficient with its data structures
533          * and should be rewritten (see NetBSD for optimizations).
534          */
535
536         KASSERT(th == NULL || (seq_start != NULL && tlenp != NULL),
537                 ("tcp_reass called with illegal parameter combination "
538                  "(tp=%p, th=%p, seq_start=%p, tlenp=%p, m=%p)",
539                  tp, th, seq_start, tlenp, m));
540         /*
541          * Call with th==NULL after becoming established to
542          * force pre-ESTABLISHED data up to user socket.
543          */
544         if (th == NULL)
545                 goto present;
546         KASSERT(SEQ_GEQ(th->th_seq, tp->rcv_nxt),
547                 ("Attempt to add old entry to reassembly queue (th=%p, tp=%p)",
548                  th, tp));
549 #ifdef TCP_REASS_LOGGING
550         tcp_reass_log_new_in(tp, th->th_seq, *tlenp, m, TCP_R_LOG_ADD, NULL);
551 #endif
552 #ifdef TCP_REASS_COUNTERS
553         counter_u64_add(reass_entry, 1);
554 #endif
555         /*
556          * Check for zero length data.
557          */
558         if ((*tlenp == 0) && ((tcp_get_flags(th) & TH_FIN) == 0)) {
559                 /*
560                  * A zero length segment does no
561                  * one any good. We could check
562                  * the rcv_nxt <-> rcv_wnd but that's
563                  * already done for us by the caller.
564                  */
565 strip_fin:
566 #ifdef TCP_REASS_COUNTERS
567                 counter_u64_add(tcp_zero_input, 1);
568 #endif
569                 m_freem(m);
570 #ifdef TCP_REASS_LOGGING
571                 tcp_reass_log_dump(tp);
572 #endif
573                 return (0);
574         } else if ((*tlenp == 0) &&
575                    (tcp_get_flags(th) & TH_FIN) &&
576                    !TCPS_HAVEESTABLISHED(tp->t_state)) {
577                 /*
578                  * We are not yet established, and we
579                  * have a FIN and no data. Let's treat
580                  * this the same as if the FIN were
581                  * not present. We don't want to save
582                  * the FIN bit in a reassembly buffer;
583                  * we want to get established first before
584                  * we do that (the peer will retransmit).
585                  */
586                 goto strip_fin;
587         }
588         /*
589          * Will it fit?
590          */
591         lenofoh = tcp_reass_overhead_of_chain(m, &mlast);
592         if ((th->th_seq != tp->rcv_nxt || !TCPS_HAVEESTABLISHED(tp->t_state)) &&
593             (sb->sb_mbcnt + tp->t_segqmbuflen + lenofoh) > sb->sb_mbmax) {
594                 /* No room */
595                 TCPSTAT_INC(tcps_rcvreassfull);
596 #ifdef TCP_REASS_COUNTERS
597                 counter_u64_add(reass_nospace, 1);
598 #endif
599 #ifdef TCP_REASS_LOGGING
600                 tcp_log_reassm(tp, NULL, NULL, th->th_seq, lenofoh, TCP_R_LOG_LIMIT_REACHED, 0);
601 #endif
602                 if ((s = tcp_log_addrs(&inp->inp_inc, th, NULL, NULL))) {
603                         log(LOG_DEBUG, "%s; %s: mbuf count limit reached, "
604                             "segment dropped\n", s, __func__);
605                         free(s, M_TCPLOG);
606                 }
607                 m_freem(m);
608                 *tlenp = 0;
609 #ifdef TCP_REASS_LOGGING
610                 tcp_reass_log_dump(tp);
611 #endif
612                 return (0);
613         }
614         /*
615          * First let's deal with two common cases: the
616          * segment appends to the back of our collected
617          * segments, or the segment is the next in line.
618          */
619         last = TAILQ_LAST_FAST(&tp->t_segq, tseg_qent, tqe_q);
620         if (last != NULL) {
621                 if ((tcp_get_flags(th) & TH_FIN) &&
622                     SEQ_LT((th->th_seq + *tlenp), (last->tqe_start + last->tqe_len))) {
623                         /*
624                          * Someone is trying to game us, dump
625                          * the segment.
626                          */
627                         *tlenp = 0;
628                         m_freem(m);
629                         return (0);
630                 }
631                 if ((SEQ_GEQ(th->th_seq, last->tqe_start)) &&
632                     (SEQ_GEQ((last->tqe_start + last->tqe_len), th->th_seq))) {
633                         /* Common case, trailing segment is added */
634                         /**
635                          *                                 +--last
636                          *                                 v
637                          *  reassembly buffer |---|  |---| |---|
638                          *  new segment                       |---|
639                          */
640 #ifdef TCP_REASS_COUNTERS
641                         counter_u64_add(reass_path1, 1);
642 #endif
643                         if (SEQ_GT((last->tqe_start + last->tqe_len), th->th_seq)) {
644                                 i = (last->tqe_start + last->tqe_len) - th->th_seq;
645                                 if (i < *tlenp) {
646 #ifdef TCP_REASS_LOGGING
647                                         tcp_log_reassm(tp, last, NULL, 0, i, TCP_R_LOG_TRIM, 3);
648 #endif
649                                         th->th_seq += i;
650                                         m_adj(m, i);
651                                         *tlenp -= i;
652                                 } else {
653                                         /* Complete overlap */
654                                         TCPSTAT_INC(tcps_rcvduppack);
655                                         TCPSTAT_ADD(tcps_rcvdupbyte, *tlenp);
656                                         m_freem(m);
657                                         *tlenp = last->tqe_len;
658                                         *seq_start = last->tqe_start;
659                                         return (0);
660                                 }
661                         }
662                         if (last->tqe_flags & TH_FIN) {
663                                 /*
664                                  * We have data after the FIN on the last?
665                                  */
666                                 *tlenp = 0;
667                                 m_freem(m);
668                                 return(0);
669                         }
670                         tcp_reass_append(tp, last, m, th, *tlenp, mlast, lenofoh);
671                         tp->t_segqmbuflen += lenofoh;
672                         *seq_start = last->tqe_start;
673                         *tlenp = last->tqe_len;
674                         return (0);
675                 } else if (SEQ_GT(th->th_seq, (last->tqe_start + last->tqe_len))) {
676                         /*
677                          * Second common case, we missed
678                          * another one and have something more
679                          * for the end.
680                          */
681                         /**
682                          *                                 +--last
683                          *                                 v
684                          *  reassembly buffer |---|  |---| |---|
685                          *  new segment                           |---|
686                          */
687                         if (last->tqe_flags & TH_FIN) {
688                                 /*
689                                  * We have data after the FIN on the last?
690                                  */
691                                 *tlenp = 0;
692                                 m_freem(m);
693                                 return(0);
694                         }
695 #ifdef TCP_REASS_COUNTERS
696                         counter_u64_add(reass_path2, 1);
697 #endif
698                         p = last;
699                         goto new_entry;
700                 }
701         } else {
702                 /* Queue is empty (last is NULL), this is the first segment. */
703                 goto new_entry;
704         }
705         first = TAILQ_FIRST(&tp->t_segq);
706         if (SEQ_LT(th->th_seq, first->tqe_start) &&
707             SEQ_GEQ((th->th_seq + *tlenp),first->tqe_start) &&
708             SEQ_LT((th->th_seq + *tlenp), (first->tqe_start + first->tqe_len))) {
709                 /*
710                  * This segment prepends the head of the queue and
711                  * it may be the one we want most.
712                  */
713                 /**
714                  *       first-------+
715                  *                   v
716                  *  rea:             |---|  |---| |---|
717                  *  new:         |---|
718                  * Note the case we do not deal with here is:
719                  *   rea=     |---|   |---|   |---|
720                  *   new=  |----|
721                  * Due to the fact that it could be
722                  *   new   |--------------------|
723                  * And we might need to merge forward.
724                  */
725 #ifdef INVARIANTS
726                 struct mbuf *firstmbuf;
727 #endif
728
729 #ifdef TCP_REASS_COUNTERS
730                 counter_u64_add(reass_path3, 1);
731 #endif
732                 if (SEQ_LT(th->th_seq, tp->rcv_nxt)) {
733                         /*
734                          * The resend was even before
735                          * what we have. We need to trim it.
736                          * Note TSNH (it should be trimmed
737                          * before the call to tcp_reass()).
738                          */
739 #ifdef INVARIANTS
740                         panic("th->th_seq:%u rcv_nxt:%u tp:%p not pre-trimmed",
741                               th->th_seq, tp->rcv_nxt, tp);
742 #else
743                         i = tp->rcv_nxt - th->th_seq;
744 #ifdef TCP_REASS_LOGGING
745                         tcp_log_reassm(tp, first, NULL, 0, i, TCP_R_LOG_TRIM, 4);
746 #endif
747                         m_adj(m, i);
748                         th->th_seq += i;
749                         *tlenp -= i;
750 #endif
751                 }
752 #ifdef INVARIANTS
753                 firstmbuf = first->tqe_m;
754 #endif
755                 tcp_reass_prepend(tp, first, m, th, *tlenp, mlast, lenofoh);
756 #ifdef INVARIANTS
757                 if (firstmbuf == first->tqe_m) {
758                         panic("First stayed same m:%p firstmbuf:%p first->tqe_m:%p tp:%p first:%p",
759                               m, firstmbuf, first->tqe_m, tp, first);
760                 } else if (first->tqe_m != m) {
761                         panic("First did not change to m:%p firstmbuf:%p first->tqe_m:%p tp:%p first:%p",
762                               m, firstmbuf, first->tqe_m, tp, first);
763                 }
764 #endif
765                 tp->t_segqmbuflen += lenofoh;
766                 *seq_start = first->tqe_start;
767                 *tlenp = first->tqe_len;
768                 goto present;
769         } else if (SEQ_LT((th->th_seq + *tlenp), first->tqe_start)) {
770                 /* New segment is before our earliest segment. */
771                 /**
772                  *           first---->+
773                  *                      v
774                  *  rea=                |---| ....
775                  *  new=         |---|
776                  *
777                  */
778                 goto new_entry;
779         }
780         /*
781          * Find a segment which begins after this one does.
782          */
783 #ifdef TCP_REASS_COUNTERS
784         counter_u64_add(reass_fullwalk, 1);
785 #endif
786         TAILQ_FOREACH(q, &tp->t_segq, tqe_q) {
787                 if (SEQ_GT(q->tqe_start, th->th_seq))
788                         break;
789         }
790         p = TAILQ_PREV(q, tsegqe_head, tqe_q);
791         /**
792          * Does this fit entirely in between p and q?
793          * i.e.:
794          *      p---+        +----q
795          *          v        v
796          *     res= |--|     |--|    |--|
797          *     new=      |-|
798          */
799         if (SEQ_LT((th->th_seq + *tlenp), q->tqe_start) &&
800             ((p == NULL) || (SEQ_GT(th->th_seq, (p->tqe_start + p->tqe_len))))) {
801                 /* Yep no overlap */
802                 goto new_entry;
803         }
804         /**
805          * If we reach here we have some (possibly all) overlap
806          * such as:
807          *     res=     |--|     |--|    |--|
808          *     new=  |----|
809          * or  new=  |-----------------|
810          * or  new=      |--------|
811          * or  new=            |---|
812          * or  new=            |-----------|
813          */
814         if ((p != NULL) &&
815             (SEQ_LEQ(th->th_seq, (p->tqe_start + p->tqe_len)))) {
816                 /* conversion to int (in i) handles seq wraparound */
817
818 #ifdef TCP_REASS_COUNTERS
819                 counter_u64_add(reass_path4, 1);
820 #endif
821                 i = p->tqe_start + p->tqe_len - th->th_seq;
822                 if (i >= 0) {
823                         if (i >= *tlenp) {
824                                 /**
825                                  *       prev seg---->+
826                                  *                    v
827                                  *  reassembly buffer |---|
828                                  *  new segment        |-|
829                                  */
830                                 TCPSTAT_INC(tcps_rcvduppack);
831                                 TCPSTAT_ADD(tcps_rcvdupbyte, *tlenp);
832                                 *tlenp = p->tqe_len;
833                                 *seq_start = p->tqe_start;
834                                 m_freem(m);
835                                 /*
836                                  * Try to present any queued data
837                                  * at the left window edge to the user.
838                                  * This is needed after the 3-WHS
839                                  * completes. Note this probably
840                                  * will not work and we will return.
841                                  */
842                                 return (0);
843                         }
844                         if (i > 0) {
845                                 /**
846                                  *       prev seg---->+
847                                  *                    v
848                                  *  reassembly buffer |---|
849                                  *  new segment         |-----|
850                                  */
851 #ifdef TCP_REASS_COUNTERS
852                                 counter_u64_add(reass_path5, 1);
853 #endif
854 #ifdef TCP_REASS_LOGGING
855                                 tcp_log_reassm(tp, p, NULL, 0, i, TCP_R_LOG_TRIM, 5);
856 #endif
857                                 m_adj(m, i);
858                                 *tlenp -= i;
859                                 th->th_seq += i;
860                         }
861                 }
862                 if (th->th_seq == (p->tqe_start + p->tqe_len)) {
863                         /*
864                          * If it dovetails in with this one,
865                          * append it.
866                          */
867                         /**
868                          *       prev seg---->+
869                          *                    v
870                          *  reassembly buffer |--|     |---|
871                          *  new segment          |--|
872                          * (note: it was trimmed above if it overlapped)
873                          */
874                         tcp_reass_append(tp, p, m, th, *tlenp, mlast, lenofoh);
875                         tp->t_segqmbuflen += lenofoh;
876                 } else {
877 #ifdef INVARIANTS
878                         panic("Impossible cut th_seq:%u p->seq:%u(%d) p:%p tp:%p",
879                               th->th_seq, p->tqe_start, p->tqe_len,
880                               p, tp);
881 #endif
882                         *tlenp = 0;
883                         m_freem(m);
884                         return (0);
885                 }
886                 q = p;
887         } else {
888                 /*
889                  * The new data runs over the
890                  * top of previously sack'd data (in q).
891                  * It may be partially overlapping, or
892                  * it may overlap the entire segment.
893                  */
894 #ifdef TCP_REASS_COUNTERS
895                 counter_u64_add(reass_path6, 1);
896 #endif
897                 if (SEQ_GEQ((th->th_seq + *tlenp), (q->tqe_start + q->tqe_len))) {
898                         /* It consumes it all */
899                         /**
900                          *             next seg---->+
901                          *                          v
902                          *  reassembly buffer |--|     |---|
903                          *  new segment              |----------|
904                          */
905 #ifdef TCP_REASS_COUNTERS
906                         counter_u64_add(reass_path7, 1);
907 #endif
908                         tcp_reass_replace(tp, q, m, th->th_seq, *tlenp, mlast, lenofoh, tcp_get_flags(th));
909                 } else {
910                         /*
911                          * We just need to prepend the data
912                          * to this. It does not overrun
913                          * the end.
914                          */
915                         /**
916                          *                next seg---->+
917                          *                             v
918                          *  reassembly buffer |--|     |---|
919                          *  new segment                   |----------|
920                          */
921                         tcp_reass_prepend(tp, q, m, th, *tlenp, mlast, lenofoh);
922                         tp->t_segqmbuflen += lenofoh;
923                 }
924         }
925         /* Now does it go further than that? */
926         tcp_reass_merge_forward(tp, q);
927         *seq_start = q->tqe_start;
928         *tlenp = q->tqe_len;
929         goto present;
930
931         /*
932          * When we reach here we can't combine it
933          * with any existing segment.
934          *
935          * Limit the number of segments that can be queued to reduce the
936          * potential for mbuf exhaustion. For best performance, we want to be
937          * able to queue a full window's worth of segments. The size of the
938          * socket receive buffer determines our advertised window and grows
939          * automatically when socket buffer autotuning is enabled. Use it as the
940          * basis for our queue limit.
941          *
942          * However, allow the user to specify a ceiling for the number of
943          * segments in each queue.
944          *
945          * Always let through the missing segment which caused this queue to form.
946          * NB: Access to the socket buffer is left intentionally unlocked as we
947          * can tolerate stale information here.
948          *
949          * XXXLAS: Using sbspace(so->so_rcv) instead of so->so_rcv.sb_hiwat
950          * should work but causes packets to be dropped when they shouldn't.
951          * Investigate why and re-evaluate the below limit after the behaviour
952          * is understood.
953          */
954 new_entry:
955         if (th->th_seq == tp->rcv_nxt && TCPS_HAVEESTABLISHED(tp->t_state)) {
956                 tp->rcv_nxt += *tlenp;
957                 flags = tcp_get_flags(th) & TH_FIN;
958                 TCPSTAT_INC(tcps_rcvoopack);
959                 TCPSTAT_ADD(tcps_rcvoobyte, *tlenp);
960                 SOCKBUF_LOCK(&so->so_rcv);
961                 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
962                         m_freem(m);
963                 } else {
964                         sbappendstream_locked(&so->so_rcv, m, 0);
965                 }
966                 tp->t_flags |= TF_WAKESOR;
967                 return (flags);
968         }
969         if (tcp_new_limits) {
970                 if ((tp->t_segqlen > tcp_reass_queue_guard) &&
971                     (*tlenp < MSIZE)) {
972                         /*
973                          * This is really a lie; we are not full, but
974                          * the queue length is above the guard
975                          * threshold. If the segment is also smaller
976                          * than an mbuf (MSIZE, 256 bytes), we drop it
977                          * since it could not fill in some place.
978                          */
979                         TCPSTAT_INC(tcps_rcvreassfull);
980                         *tlenp = 0;
981                         if ((s = tcp_log_addrs(&inp->inp_inc, th, NULL, NULL))) {
982                                 log(LOG_DEBUG, "%s; %s: queue limit reached, "
983                                     "segment dropped\n", s, __func__);
984                                 free(s, M_TCPLOG);
985                         }
986                         m_freem(m);
987 #ifdef TCP_REASS_LOGGING
988                         tcp_reass_log_dump(tp);
989 #endif
990                         return (0);
991                 }
992         } else {
993                 if (tp->t_segqlen >= min((so->so_rcv.sb_hiwat / tp->t_maxseg) + 1,
994                                          tcp_reass_maxqueuelen)) {
995                         TCPSTAT_INC(tcps_rcvreassfull);
996                         *tlenp = 0;
997                         if ((s = tcp_log_addrs(&inp->inp_inc, th, NULL, NULL))) {
998                                 log(LOG_DEBUG, "%s; %s: queue limit reached, "
999                                     "segment dropped\n", s, __func__);
1000                                 free(s, M_TCPLOG);
1001                         }
1002                         m_freem(m);
1003 #ifdef TCP_REASS_LOGGING
1004                         tcp_reass_log_dump(tp);
1005 #endif
1006                         return (0);
1007                 }
1008         }
1009         /*
1010          * Allocate a new queue entry. If we can't, or we hit the zone limit,
1011          * just drop the packet.
1012          */
1013         te = uma_zalloc(tcp_reass_zone, M_NOWAIT);
1014         if (te == NULL) {
1015                 TCPSTAT_INC(tcps_rcvmemdrop);
1016                 m_freem(m);
1017                 *tlenp = 0;
1018                 if ((s = tcp_log_addrs(&inp->inp_inc, th, NULL, NULL))) {
1019                         log(LOG_DEBUG, "%s; %s: global zone limit "
1020                             "reached, segment dropped\n", s, __func__);
1021                         free(s, M_TCPLOG);
1022                 }
1023                 return (0);
1024         }
1025         tp->t_segqlen++;
1026         tp->t_rcvoopack++;
1027         TCPSTAT_INC(tcps_rcvoopack);
1028         TCPSTAT_ADD(tcps_rcvoobyte, *tlenp);
1029         /* Insert the new segment queue entry into place. */
1030         te->tqe_m = m;
1031         te->tqe_flags = tcp_get_flags(th);
1032         te->tqe_len = *tlenp;
1033         te->tqe_start = th->th_seq;
1034         te->tqe_last = mlast;
1035         te->tqe_mbuf_cnt = lenofoh;
1036         tp->t_segqmbuflen += te->tqe_mbuf_cnt;
1037         if (p == NULL) {
1038                 TAILQ_INSERT_HEAD(&tp->t_segq, te, tqe_q);
1039         } else {
1040                 TAILQ_INSERT_AFTER(&tp->t_segq, p, te, tqe_q);
1041         }
1042 #ifdef TCP_REASS_LOGGING
1043         tcp_reass_log_new_in(tp, th->th_seq, *tlenp, m, TCP_R_LOG_NEW_ENTRY, te);
1044 #endif
1045 present:
1046         /*
1047          * Present data to user, advancing rcv_nxt through
1048          * completed sequence space.
1049          */
1050         if (!TCPS_HAVEESTABLISHED(tp->t_state))
1051                 return (0);
1052         q = TAILQ_FIRST(&tp->t_segq);
1053         KASSERT(q == NULL || SEQ_GEQ(q->tqe_start, tp->rcv_nxt),
1054                 ("Reassembly queue for %p has stale entry at head", tp));
1055         if (!q || q->tqe_start != tp->rcv_nxt) {
1056 #ifdef TCP_REASS_LOGGING
1057                 tcp_reass_log_dump(tp);
1058 #endif
1059                 return (0);
1060         }
1061         SOCKBUF_LOCK(&so->so_rcv);
1062         do {
1063                 tp->rcv_nxt += q->tqe_len;
1064                 flags = q->tqe_flags & TH_FIN;
1065                 nq = TAILQ_NEXT(q, tqe_q);
1066                 TAILQ_REMOVE(&tp->t_segq, q, tqe_q);
1067                 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1068                         m_freem(q->tqe_m);
1069                 } else {
1070 #ifdef TCP_REASS_LOGGING
1071                         tcp_reass_log_new_in(tp, q->tqe_start, q->tqe_len, q->tqe_m, TCP_R_LOG_READ, q);
1072                         if (th != NULL) {
1073                                 tcp_log_reassm(tp, q, NULL, th->th_seq, *tlenp, TCP_R_LOG_READ, 1);
1074                         } else {
1075                                 tcp_log_reassm(tp, q, NULL, 0, 0, TCP_R_LOG_READ, 1);
1076                         }
1077 #endif
1078                         sbappendstream_locked(&so->so_rcv, q->tqe_m, 0);
1079                 }
1080 #ifdef TCP_REASS_LOGGING
1081                 if (th != NULL) {
1082                         tcp_log_reassm(tp, q, NULL, th->th_seq, *tlenp, TCP_R_LOG_READ, 2);
1083                 } else {
1084                         tcp_log_reassm(tp, q, NULL, 0, 0, TCP_R_LOG_READ, 2);
1085                 }
1086 #endif
1087                 KASSERT(tp->t_segqmbuflen >= q->tqe_mbuf_cnt,
1088                         ("tp:%p seg queue goes negative", tp));
1089                 tp->t_segqmbuflen -= q->tqe_mbuf_cnt;
1090                 uma_zfree(tcp_reass_zone, q);
1091                 tp->t_segqlen--;
1092                 q = nq;
1093         } while (q && q->tqe_start == tp->rcv_nxt);
1094         if (TAILQ_EMPTY(&tp->t_segq) &&
1095             (tp->t_segqmbuflen != 0)) {
1096 #ifdef INVARIANTS
1097                 panic("tp:%p segq:%p len:%d queue empty",
1098                       tp, &tp->t_segq, tp->t_segqmbuflen);
1099 #else
1100 #ifdef TCP_REASS_LOGGING
1101                 if (th != NULL) {
1102                         tcp_log_reassm(tp, NULL, NULL, th->th_seq, *tlenp, TCP_R_LOG_ZERO, 0);
1103                 } else {
1104                         tcp_log_reassm(tp, NULL, NULL, 0, 0, TCP_R_LOG_ZERO, 0);
1105                 }
1106 #endif
1107                 tp->t_segqmbuflen = 0;
1108 #endif
1109         }
1110 #ifdef TCP_REASS_LOGGING
1111         tcp_reass_log_dump(tp);
1112 #endif
1113         tp->t_flags |= TF_WAKESOR;
1114         return (flags);
1115 }