/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)tcp_subr.c  8.2 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#ifndef INVARIANTS
#include <sys/syslog.h>
#endif
#include <sys/protosw.h>
#include <sys/random.h>

#include <vm/uma.h>

#include <net/route.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet6/nd6.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#ifdef INET6
#include <netinet6/ip6protosw.h>
#endif

#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <netipsec/ipsec_support.h>

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

VNET_DEFINE_STATIC(uma_zone_t, tcptw_zone);
#define V_tcptw_zone            VNET(tcptw_zone)
static int      maxtcptw;
/*
 * The timed wait queue contains references to each of the TCP sessions
 * currently in the TIME_WAIT state.  The queue pointers, including the
 * queue pointers in each tcptw structure, are protected using the global
 * timewait lock, which must be held over queue iteration and modification.
 *
 * Rules on tcptw usage:
 *  - an inpcb is always freed _after_ its tcptw
 *  - a tcptw relies on its inpcb reference counting for memory stability
 *  - a tcptw is dereferenceable only while its inpcb is locked
 */
VNET_DEFINE_STATIC(TAILQ_HEAD(, tcptw), twq_2msl);
#define V_twq_2msl              VNET(twq_2msl)

/* Global timewait lock */
VNET_DEFINE_STATIC(struct rwlock, tw_lock);
#define V_tw_lock               VNET(tw_lock)

#define TW_LOCK_INIT(tw, d)     rw_init_flags(&(tw), (d), 0)
#define TW_LOCK_DESTROY(tw)     rw_destroy(&(tw))
#define TW_RLOCK(tw)            rw_rlock(&(tw))
#define TW_WLOCK(tw)            rw_wlock(&(tw))
#define TW_RUNLOCK(tw)          rw_runlock(&(tw))
#define TW_WUNLOCK(tw)          rw_wunlock(&(tw))
#define TW_LOCK_ASSERT(tw)      rw_assert(&(tw), RA_LOCKED)
#define TW_RLOCK_ASSERT(tw)     rw_assert(&(tw), RA_RLOCKED)
#define TW_WLOCK_ASSERT(tw)     rw_assert(&(tw), RA_WLOCKED)
#define TW_UNLOCK_ASSERT(tw)    rw_assert(&(tw), RA_UNLOCKED)
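
/*
 * Sketch of the locking pattern implied by the rules above (see
 * tcp_tw_2msl_reset() and tcp_tw_2msl_stop() below): the 2MSL queue is
 * only modified with the timewait lock write-locked, while the tcptw
 * itself may only be dereferenced while its inpcb lock is held, e.g.:
 *
 *      INP_WLOCK(inp);
 *      TW_WLOCK(V_tw_lock);
 *      TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
 *      TW_WUNLOCK(V_tw_lock);
 *      ...use tw while the inpcb remains locked...
 *      INP_WUNLOCK(inp);
 */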

static void     tcp_tw_2msl_reset(struct tcptw *, int);
static void     tcp_tw_2msl_stop(struct tcptw *, int);
static int      tcp_twrespond(struct tcptw *, int);

static int
tcptw_auto_size(void)
{
        int halfrange;

        /*
         * Max out at half the ephemeral port range so that TIME_WAIT
         * sockets don't tie up too many ephemeral ports.
         */
        if (V_ipport_lastauto > V_ipport_firstauto)
                halfrange = (V_ipport_lastauto - V_ipport_firstauto) / 2;
        else
                halfrange = (V_ipport_firstauto - V_ipport_lastauto) / 2;
        /* Protect against goofy port ranges smaller than 32. */
        return (imin(imax(halfrange, 32), maxsockets / 5));
}
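
/*
 * Worked example (hypothetical values): with an ephemeral port range of
 * 10000..65535, halfrange = (65535 - 10000) / 2 = 27767, so the zone
 * limit becomes imin(imax(27767, 32), maxsockets / 5); the maxsockets / 5
 * cap keeps compressed TIME_WAIT entries well below the socket limit.
 */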

static int
sysctl_maxtcptw(SYSCTL_HANDLER_ARGS)
{
        int error, new;

        if (maxtcptw == 0)
                new = tcptw_auto_size();
        else
                new = maxtcptw;
        error = sysctl_handle_int(oidp, &new, 0, req);
        if (error == 0 && req->newptr)
                if (new >= 32) {
                        maxtcptw = new;
                        uma_zone_set_max(V_tcptw_zone, maxtcptw);
                }
        return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, maxtcptw,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &maxtcptw, 0, sysctl_maxtcptw, "IU",
    "Maximum number of compressed TCP TIME_WAIT entries");

VNET_DEFINE_STATIC(int, nolocaltimewait) = 0;
#define V_nolocaltimewait       VNET(nolocaltimewait)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, nolocaltimewait, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(nolocaltimewait), 0,
    "Do not create compressed TCP TIME_WAIT entries for local connections");

void
tcp_tw_zone_change(void)
{

        if (maxtcptw == 0)
                uma_zone_set_max(V_tcptw_zone, tcptw_auto_size());
}

void
tcp_tw_init(void)
{

        V_tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
        TUNABLE_INT_FETCH("net.inet.tcp.maxtcptw", &maxtcptw);
        if (maxtcptw == 0)
                uma_zone_set_max(V_tcptw_zone, tcptw_auto_size());
        else
                uma_zone_set_max(V_tcptw_zone, maxtcptw);
        TAILQ_INIT(&V_twq_2msl);
        TW_LOCK_INIT(V_tw_lock, "tcptw");
}

#ifdef VIMAGE
void
tcp_tw_destroy(void)
{
        struct tcptw *tw;
        struct epoch_tracker et;

        NET_EPOCH_ENTER(et);
        while ((tw = TAILQ_FIRST(&V_twq_2msl)) != NULL)
                tcp_twclose(tw, 0);
        NET_EPOCH_EXIT(et);

        TW_LOCK_DESTROY(V_tw_lock);
        uma_zdestroy(V_tcptw_zone);
}
#endif

/*
 * Move a TCP connection into TIME_WAIT state.
 *    tcbinfo is locked.
 *    inp is locked, and is unlocked before returning.
 */
void
tcp_twstart(struct tcpcb *tp)
{
        struct tcptw twlocal, *tw;
        struct inpcb *inp = tp->t_inpcb;
        struct socket *so;
        uint32_t recwin;
        bool acknow, local;
#ifdef INET6
        bool isipv6 = inp->inp_inc.inc_flags & INC_ISIPV6;
#endif

        NET_EPOCH_ASSERT();
        INP_WLOCK_ASSERT(inp);

        /* A dropped inp should never transition to TIME_WAIT state. */
        KASSERT((inp->inp_flags & INP_DROPPED) == 0, ("tcp_twstart: "
            "(inp->inp_flags & INP_DROPPED) != 0"));

        if (V_nolocaltimewait) {
#ifdef INET6
                if (isipv6)
                        local = in6_localaddr(&inp->in6p_faddr);
                else
#endif
#ifdef INET
                        local = in_localip(inp->inp_faddr);
#else
                        local = false;
#endif
        } else
                local = false;

        /*
         * For use only by DTrace.  We do not reference the state
         * after this point so modifying it in place is not a problem.
         */
        tcp_state_change(tp, TCPS_TIME_WAIT);

        if (local)
                tw = &twlocal;
        else
                tw = uma_zalloc(V_tcptw_zone, M_NOWAIT);
        if (tw == NULL) {
                /*
                 * Reached limit on total number of TIMEWAIT connections
                 * allowed. Remove a connection from TIMEWAIT queue in LRU
                 * fashion to make room for this connection.
                 *
                 * XXX:  Check if it is possible to always have enough room
                 * in advance based on guarantees provided by uma_zalloc().
                 */
                tw = tcp_tw_2msl_scan(1);
                if (tw == NULL) {
                        tp = tcp_close(tp);
                        if (tp != NULL)
                                INP_WUNLOCK(inp);
                        return;
                }
        }
        /*
         * For the !local case the tcptw will hold a reference on its inpcb
         * until tcp_twclose is called.
         */
        tw->tw_inpcb = inp;

        /*
         * Recover last window size sent.
         */
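        /*
         * Illustrative example (hypothetical numbers): with 64 kB of
         * receive space and rcv_scale == 2, recwin == 65536 and last_win
         * is stored below as 65536 >> 2 == 16384, i.e. the already-scaled
         * value that tcp_twrespond() will put in th_win.
         */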
        so = inp->inp_socket;
        recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
            (long)TCP_MAXWIN << tp->rcv_scale);
        if (recwin < (so->so_rcv.sb_hiwat / 4) &&
            recwin < tp->t_maxseg)
                recwin = 0;
        if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
            recwin < (tp->rcv_adv - tp->rcv_nxt))
                recwin = (tp->rcv_adv - tp->rcv_nxt);
        tw->last_win = (u_short)(recwin >> tp->rcv_scale);

        /*
         * Set t_recent if timestamps are used on the connection.
         */
        if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
            (TF_REQ_TSTMP|TF_RCVD_TSTMP)) {
                tw->t_recent = tp->ts_recent;
                tw->ts_offset = tp->ts_offset;
        } else {
                tw->t_recent = 0;
                tw->ts_offset = 0;
        }

        tw->snd_nxt = tp->snd_nxt;
        tw->t_port = tp->t_port;
        tw->rcv_nxt = tp->rcv_nxt;
        tw->iss     = tp->iss;
        tw->irs     = tp->irs;
        tw->t_starttime = tp->t_starttime;
        tw->tw_time = 0;
        tw->tw_flags = tp->t_flags;

        /*
         * XXX: If this code is ever used for the FIN_WAIT_2 state as well,
         * we may need ts_recent from the last segment.
         */
        acknow = tp->t_flags & TF_ACKNOW;

        /*
         * First, discard tcpcb state, which includes stopping its timers and
         * freeing it.  tcp_discardcb() used to also release the inpcb, but
         * that work is now done in the caller.
         *
         * Note: the soisdisconnected() call used to be made in
         * tcp_discardcb(), and might not be needed here any longer.
         */
        tcp_discardcb(tp);
        soisdisconnected(so);
        tw->tw_so_options = so->so_options;
        inp->inp_flags |= INP_TIMEWAIT;
        if (acknow)
                tcp_twrespond(tw, TH_ACK);
        if (local)
                in_pcbdrop(inp);
        else {
                in_pcbref(inp); /* Reference from tw */
                tw->tw_cred = crhold(so->so_cred);
                inp->inp_ppcb = tw;
                TCPSTATES_INC(TCPS_TIME_WAIT);
                tcp_tw_2msl_reset(tw, 0);
        }

        /*
         * If the inpcb owns the sole reference to the socket, then we can
         * detach and free the socket as it is not needed in time wait.
         */
        if (inp->inp_flags & INP_SOCKREF) {
                KASSERT(so->so_state & SS_PROTOREF,
                    ("tcp_twstart: !SS_PROTOREF"));
                inp->inp_flags &= ~INP_SOCKREF;
                INP_WUNLOCK(inp);
                SOCK_LOCK(so);
                so->so_state &= ~SS_PROTOREF;
                sofree(so);
        } else
                INP_WUNLOCK(inp);
}

/*
 * Returns 1 if the TIME_WAIT state was killed and we should start over,
 * looking for a pcb in the listen state.  Returns 0 otherwise.
 * It may be called with to == NULL only for pure SYN segments.
 */
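/*
 * Summary of the handling below: RST segments are dropped (RFC 1337); a
 * SYN above rcv_nxt recycles the old connection; a mismatched UDP tunnel
 * port draws a RST; segments without an ACK, or missing a negotiated
 * timestamp when that is not tolerated, are dropped; a duplicate FIN
 * restarts the 2MSL timer; anything other than a pure duplicate ACK is
 * answered with an ACK.
 */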
int
tcp_twcheck(struct inpcb *inp, struct tcpopt *to, struct tcphdr *th,
    struct mbuf *m, int tlen)
{
        struct tcptw *tw;
        int thflags;
        tcp_seq seq;

        NET_EPOCH_ASSERT();
        INP_WLOCK_ASSERT(inp);

        /*
         * XXXRW: Time wait state for inpcb has been recycled, but inpcb is
         * still present.  This is undesirable, but temporarily necessary
         * until we work out how to handle inpcbs whose timewait state has
         * been removed.
         */
        tw = intotw(inp);
        if (tw == NULL)
                goto drop;

        thflags = th->th_flags;
        KASSERT(to != NULL || (thflags & (TH_SYN | TH_ACK)) == TH_SYN,
                ("tcp_twcheck: called without options on a non-SYN segment"));

        /*
         * NOTE: for FIN_WAIT_2 (to be added later),
         * must validate sequence number before accepting RST
         */

        /*
         * If the segment contains RST:
         *      Drop the segment - see Stevens, vol. 2, p. 964 and
         *      RFC 1337.
         */
        if (thflags & TH_RST)
                goto drop;

#if 0
/* PAWS not needed at the moment */
        /*
         * RFC 1323 PAWS: If we have a timestamp reply on this segment
         * and it's less than ts_recent, drop it.
         */
        if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
            TSTMP_LT(to.to_tsval, tp->ts_recent)) {
                if ((thflags & TH_ACK) == 0)
                        goto drop;
                goto ack;
        }
        /*
         * ts_recent is never updated because we never accept new segments.
         */
#endif

        /*
         * If a new connection request is received
         * while in TIME_WAIT, drop the old connection
         * and start over if the sequence numbers
         * are above the previous ones.
         * Allow UDP port number changes in this case.
         */
        if ((thflags & TH_SYN) && SEQ_GT(th->th_seq, tw->rcv_nxt)) {
                tcp_twclose(tw, 0);
                TCPSTAT_INC(tcps_tw_recycles);
                return (1);
        }

        /*
         * Send RST if UDP port numbers don't match
         */
        if (tw->t_port != m->m_pkthdr.tcp_tun_port) {
                if (th->th_flags & TH_ACK) {
                        tcp_respond(NULL, mtod(m, void *), th, m,
                            (tcp_seq)0, th->th_ack, TH_RST);
                } else {
                        if (th->th_flags & TH_SYN)
                                tlen++;
                        if (th->th_flags & TH_FIN)
                                tlen++;
                        tcp_respond(NULL, mtod(m, void *), th, m,
                            th->th_seq+tlen, (tcp_seq)0, TH_RST|TH_ACK);
                }
                INP_WUNLOCK(inp);
                TCPSTAT_INC(tcps_tw_resets);
                return (0);
        }

        /*
         * Drop the segment if it does not contain an ACK.
         */
        if ((thflags & TH_ACK) == 0)
                goto drop;

        /*
         * If timestamps were negotiated during SYN/ACK and a
         * segment without a timestamp is received, silently drop
         * the segment, unless the missing timestamps are tolerated.
         * See section 3.2 of RFC 7323.
         */
        if (((to->to_flags & TOF_TS) == 0) && (tw->t_recent != 0) &&
            (V_tcp_tolerate_missing_ts == 0)) {
                goto drop;
        }

        /*
         * Reset the 2MSL timer if this is a duplicate FIN.
         */
        if (thflags & TH_FIN) {
                seq = th->th_seq + tlen + (thflags & TH_SYN ? 1 : 0);
                if (seq + 1 == tw->rcv_nxt)
                        tcp_tw_2msl_reset(tw, 1);
        }

        /*
         * Acknowledge the segment if it has data or is not a duplicate ACK.
         */
        if (thflags != TH_ACK || tlen != 0 ||
            th->th_seq != tw->rcv_nxt || th->th_ack != tw->snd_nxt) {
                TCP_PROBE5(receive, NULL, NULL, m, NULL, th);
                tcp_twrespond(tw, TH_ACK);
                TCPSTAT_INC(tcps_tw_responds);
                goto dropnoprobe;
        }
drop:
        TCP_PROBE5(receive, NULL, NULL, m, NULL, th);
dropnoprobe:
        INP_WUNLOCK(inp);
        m_freem(m);
        return (0);
}

void
tcp_twclose(struct tcptw *tw, int reuse)
{
        struct socket *so;
        struct inpcb *inp;

        /*
         * At this point, we are in one of two situations:
         *
         * (1) We have no socket, just an inpcb<->tcptw pair.  We can free
         *     all state.
         *
         * (2) We have a socket -- if we own a reference, release it and
         *     notify the socket layer.
         */
        inp = tw->tw_inpcb;
        KASSERT((inp->inp_flags & INP_TIMEWAIT), ("tcp_twclose: !timewait"));
        KASSERT(intotw(inp) == tw, ("tcp_twclose: inp_ppcb != tw"));
        NET_EPOCH_ASSERT();
        INP_WLOCK_ASSERT(inp);

        tcp_tw_2msl_stop(tw, reuse);
        inp->inp_ppcb = NULL;
        in_pcbdrop(inp);

        so = inp->inp_socket;
        if (so != NULL) {
                /*
                 * If there's a socket, handle two cases: first, we own a
                 * strong reference, which we will now release, or we don't,
                 * in which case another reference exists (XXXRW: think
                 * about this more), and we don't need to take action.
                 */
                if (inp->inp_flags & INP_SOCKREF) {
                        inp->inp_flags &= ~INP_SOCKREF;
                        INP_WUNLOCK(inp);
                        SOCK_LOCK(so);
                        KASSERT(so->so_state & SS_PROTOREF,
                            ("tcp_twclose: INP_SOCKREF && !SS_PROTOREF"));
                        so->so_state &= ~SS_PROTOREF;
                        sofree(so);
                } else {
                        /*
                         * If we don't own the only reference, the socket and
                         * inpcb need to be left around to be handled by
                         * tcp_usr_detach() later.
                         */
                        INP_WUNLOCK(inp);
                }
        } else {
                /*
                 * The socket has already been cleaned up for us; only free
                 * the inpcb.
                 */
                in_pcbfree(inp);
        }
        TCPSTAT_INC(tcps_closed);
}

static int
tcp_twrespond(struct tcptw *tw, int flags)
{
        struct inpcb *inp = tw->tw_inpcb;
#if defined(INET6) || defined(INET)
        struct tcphdr *th = NULL;
#endif
        struct mbuf *m;
#ifdef INET
        struct ip *ip = NULL;
#endif
        u_int hdrlen, optlen, ulen;
        int error = 0;                  /* Keep compiler happy */
        struct tcpopt to;
#ifdef INET6
        struct ip6_hdr *ip6 = NULL;
        int isipv6 = inp->inp_inc.inc_flags & INC_ISIPV6;
#endif
        struct udphdr *udp = NULL;
        hdrlen = 0;                     /* Keep compiler happy */

        INP_WLOCK_ASSERT(inp);

        m = m_gethdr(M_NOWAIT, MT_DATA);
        if (m == NULL)
                return (ENOBUFS);
        m->m_data += max_linkhdr;

#ifdef MAC
        mac_inpcb_create_mbuf(inp, m);
#endif

#ifdef INET6
        if (isipv6) {
                hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
                ip6 = mtod(m, struct ip6_hdr *);
                if (tw->t_port) {
                        udp = (struct udphdr *)(ip6 + 1);
                        hdrlen += sizeof(struct udphdr);
                        udp->uh_sport = htons(V_tcp_udp_tunneling_port);
                        udp->uh_dport = tw->t_port;
                        ulen = (hdrlen - sizeof(struct ip6_hdr));
                        th = (struct tcphdr *)(udp + 1);
                } else
                        th = (struct tcphdr *)(ip6 + 1);
                tcpip_fillheaders(inp, tw->t_port, ip6, th);
        }
#endif
#if defined(INET6) && defined(INET)
        else
#endif
#ifdef INET
        {
                hdrlen = sizeof(struct tcpiphdr);
                ip = mtod(m, struct ip *);
                if (tw->t_port) {
                        udp = (struct udphdr *)(ip + 1);
                        hdrlen += sizeof(struct udphdr);
                        udp->uh_sport = htons(V_tcp_udp_tunneling_port);
                        udp->uh_dport = tw->t_port;
                        ulen = (hdrlen - sizeof(struct ip));
                        th = (struct tcphdr *)(udp + 1);
                } else
                        th = (struct tcphdr *)(ip + 1);
                tcpip_fillheaders(inp, tw->t_port, ip, th);
        }
#endif
        to.to_flags = 0;

        /*
         * Send a timestamp and echo-reply if both our side and our peer
         * have sent timestamps in our SYN's and this is not a RST.
         */
        if (tw->t_recent && flags == TH_ACK) {
                to.to_flags |= TOF_TS;
                to.to_tsval = tcp_ts_getticks() + tw->ts_offset;
                to.to_tsecr = tw->t_recent;
        }
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
        if (tw->tw_flags & TF_SIGNATURE)
                to.to_flags |= TOF_SIGNATURE;
#endif
        optlen = tcp_addoptions(&to, (u_char *)(th + 1));

        if (udp) {
                ulen += optlen;
                udp->uh_ulen = htons(ulen);
        }
        m->m_len = hdrlen + optlen;
        m->m_pkthdr.len = m->m_len;

        KASSERT(max_linkhdr + m->m_len <= MHLEN, ("tcptw: mbuf too small"));

        th->th_seq = htonl(tw->snd_nxt);
        th->th_ack = htonl(tw->rcv_nxt);
        th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
        th->th_flags = flags;
        th->th_win = htons(tw->last_win);

#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
        if (tw->tw_flags & TF_SIGNATURE) {
                if (!TCPMD5_ENABLED() ||
                    TCPMD5_OUTPUT(m, th, to.to_signature) != 0)
                        return (-1);
        }
#endif
#ifdef INET6
        if (isipv6) {
                if (tw->t_port) {
                        m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
                        m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
                        udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
                        th->th_sum = htons(0);
                } else {
                        m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
                        m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
                        th->th_sum = in6_cksum_pseudo(ip6,
                            sizeof(struct tcphdr) + optlen, IPPROTO_TCP, 0);
                }
                ip6->ip6_hlim = in6_selecthlim(inp, NULL);
                TCP_PROBE5(send, NULL, NULL, ip6, NULL, th);
                error = ip6_output(m, inp->in6p_outputopts, NULL,
                    (tw->tw_so_options & SO_DONTROUTE), NULL, NULL, inp);
        }
#endif
#if defined(INET6) && defined(INET)
        else
#endif
#ifdef INET
        {
                if (tw->t_port) {
                        m->m_pkthdr.csum_flags = CSUM_UDP;
                        m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
                        udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
                            ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
                        th->th_sum = htons(0);
                } else {
                        m->m_pkthdr.csum_flags = CSUM_TCP;
                        m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
                        th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
                            htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
                }
                ip->ip_len = htons(m->m_pkthdr.len);
                if (V_path_mtu_discovery)
                        ip->ip_off |= htons(IP_DF);
                TCP_PROBE5(send, NULL, NULL, ip, NULL, th);
                error = ip_output(m, inp->inp_options, NULL,
                    ((tw->tw_so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
                    NULL, inp);
        }
#endif
        if (flags & TH_ACK)
                TCPSTAT_INC(tcps_sndacks);
        else
                TCPSTAT_INC(tcps_sndctrl);
        TCPSTAT_INC(tcps_sndtotal);
        return (error);
}
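
/*
 * Note on the checksum handling in tcp_twrespond(): only the pseudo-header
 * sum is stored in the header; the CSUM_* flags and csum_data offset ask
 * the IP layer or the interface to complete the TCP (or, for tunneled
 * connections, UDP) checksum on output.
 */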

static void
tcp_tw_2msl_reset(struct tcptw *tw, int rearm)
{

        NET_EPOCH_ASSERT();
        INP_WLOCK_ASSERT(tw->tw_inpcb);

        TW_WLOCK(V_tw_lock);
        if (rearm)
                TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
        tw->tw_time = ticks + 2 * V_tcp_msl;
        TAILQ_INSERT_TAIL(&V_twq_2msl, tw, tw_2msl);
        TW_WUNLOCK(V_tw_lock);
}
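
/*
 * Illustrative timing: tw_time is an absolute tick count, so with the
 * stock 30 second MSL (net.inet.tcp.msl) an entry queued above expires
 * roughly 60 seconds later, once tcp_tw_2msl_scan() sees
 * tw_time - ticks <= 0.
 */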

static void
tcp_tw_2msl_stop(struct tcptw *tw, int reuse)
{
        struct ucred *cred;
        struct inpcb *inp;
        int released __unused;

        NET_EPOCH_ASSERT();

        TW_WLOCK(V_tw_lock);
        inp = tw->tw_inpcb;
        tw->tw_inpcb = NULL;

        TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
        cred = tw->tw_cred;
        tw->tw_cred = NULL;
        TW_WUNLOCK(V_tw_lock);

        if (cred != NULL)
                crfree(cred);

        released = in_pcbrele_wlocked(inp);
        KASSERT(!released, ("%s: inp should not be released here", __func__));

        if (!reuse)
                uma_zfree(V_tcptw_zone, tw);
        TCPSTATES_DEC(TCPS_TIME_WAIT);
}

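/*
 * Reclaim TIME_WAIT entries from the head of the 2MSL queue.  With
 * reuse == 0 each expired tcptw is closed and freed; with reuse != 0 the
 * oldest entry is closed unconditionally and returned to the caller for
 * reuse, as tcp_twstart() does when the tcptw zone limit has been hit.
 */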
struct tcptw *
tcp_tw_2msl_scan(int reuse)
{
        struct tcptw *tw;
        struct inpcb *inp;

        NET_EPOCH_ASSERT();

        for (;;) {
                TW_RLOCK(V_tw_lock);
                tw = TAILQ_FIRST(&V_twq_2msl);
                if (tw == NULL || (!reuse && (tw->tw_time - ticks) > 0)) {
                        TW_RUNLOCK(V_tw_lock);
                        break;
                }
                KASSERT(tw->tw_inpcb != NULL, ("%s: tw->tw_inpcb == NULL",
                    __func__));

                inp = tw->tw_inpcb;
                in_pcbref(inp);
                TW_RUNLOCK(V_tw_lock);

                INP_WLOCK(inp);
                tw = intotw(inp);
                if (in_pcbrele_wlocked(inp)) {
                        if (__predict_true(tw == NULL)) {
                                continue;
                        } else {
                                /*
                                 * This should not happen, as in TIMEWAIT
                                 * state the inp should not be destroyed
                                 * before its tcptw.  If INVARIANTS is
                                 * defined, panic.
                                 */
#ifdef INVARIANTS
                                panic("%s: Panic before an infinite "
                                          "loop: INP_TIMEWAIT && (INP_FREED "
                                          "|| inp last reference) && tw != "
                                          "NULL", __func__);
#else
                                log(LOG_ERR, "%s: Avoid an infinite "
                                        "loop: INP_TIMEWAIT && (INP_FREED "
                                        "|| inp last reference) && tw != "
                                        "NULL", __func__);
#endif
                                break;
                        }
                }

                if (tw == NULL) {
                        /* tcp_twclose() has already been called */
                        INP_WUNLOCK(inp);
                        continue;
                }

                tcp_twclose(tw, reuse);
                if (reuse)
                        return tw;
        }

        return NULL;
}