/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2016-2018 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/arb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/qmath.h>
#include <sys/queue.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/stats.h>
#include <sys/counter.h>

#include <dev/tcp_log/tcp_log_dev.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>

/* Default expiry time */
#define TCP_LOG_EXPIRE_TIME     ((sbintime_t)60 * SBT_1S)

/*
 * Max delay before the expiry timer runs for an "expire now" node; this
 * also serves to rate-limit how often the expiry timer runs.
 */
#define TCP_LOG_EXPIRE_INTVL    ((sbintime_t)5 * SBT_1S)

bool    tcp_log_verbose;
static uma_zone_t tcp_log_bucket_zone, tcp_log_node_zone, tcp_log_zone;
static int      tcp_log_session_limit = TCP_LOG_BUF_DEFAULT_SESSION_LIMIT;
static uint32_t tcp_log_version = TCP_LOG_BUF_VER;
RB_HEAD(tcp_log_id_tree, tcp_log_id_bucket);
static struct tcp_log_id_tree tcp_log_id_head;
static STAILQ_HEAD(, tcp_log_id_node) tcp_log_expireq_head =
    STAILQ_HEAD_INITIALIZER(tcp_log_expireq_head);
static struct mtx tcp_log_expireq_mtx;
static struct callout tcp_log_expireq_callout;
static u_long tcp_log_auto_ratio = 0;
static volatile u_long tcp_log_auto_ratio_cur = 0;
static uint32_t tcp_log_auto_mode = TCP_LOG_STATE_TAIL;
static bool tcp_log_auto_all = false;

RB_PROTOTYPE_STATIC(tcp_log_id_tree, tcp_log_id_bucket, tlb_rb, tcp_log_id_cmp)

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, bb, CTLFLAG_RW, 0, "TCP Black Box controls");

SYSCTL_BOOL(_net_inet_tcp_bb, OID_AUTO, log_verbose, CTLFLAG_RW, &tcp_log_verbose,
    0, "Force verbose logging for TCP traces");

SYSCTL_INT(_net_inet_tcp_bb, OID_AUTO, log_session_limit,
    CTLFLAG_RW, &tcp_log_session_limit, 0,
    "Maximum number of events maintained for each TCP session");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_global_limit, CTLFLAG_RW,
    &tcp_log_zone, "Maximum number of events maintained for all TCP sessions");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_global_entries, CTLFLAG_RD,
    &tcp_log_zone, "Current number of events maintained for all TCP sessions");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_id_limit, CTLFLAG_RW,
    &tcp_log_bucket_zone, "Maximum number of log IDs");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_id_entries, CTLFLAG_RD,
    &tcp_log_bucket_zone, "Current number of log IDs");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_id_tcpcb_limit, CTLFLAG_RW,
    &tcp_log_node_zone, "Maximum number of tcpcbs with log IDs");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_id_tcpcb_entries, CTLFLAG_RD,
    &tcp_log_node_zone, "Current number of tcpcbs with log IDs");

SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, log_version, CTLFLAG_RD, &tcp_log_version,
    0, "Version of log formats exported");

SYSCTL_ULONG(_net_inet_tcp_bb, OID_AUTO, log_auto_ratio, CTLFLAG_RW,
    &tcp_log_auto_ratio, 0, "Do auto capturing for 1 out of N sessions");

SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, log_auto_mode, CTLFLAG_RW,
    &tcp_log_auto_mode, 0,
    "Logging mode for auto-selected sessions (default is TCP_LOG_STATE_TAIL)");

SYSCTL_BOOL(_net_inet_tcp_bb, OID_AUTO, log_auto_all, CTLFLAG_RW,
    &tcp_log_auto_all, false,
    "Auto-select from all sessions (rather than just those with IDs)");
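
/*
 * Example (hypothetical values, not defaults): to auto-capture roughly
 * one out of every 1000 sessions, with or without a log ID, an
 * administrator might set:
 *
 *      sysctl net.inet.tcp.bb.log_auto_ratio=1000
 *      sysctl net.inet.tcp.bb.log_auto_all=1
 *
 * and set net.inet.tcp.bb.log_auto_mode to the numeric value of the
 * desired TCP_LOG_STATE_* constant.
 */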

#ifdef TCPLOG_DEBUG_COUNTERS
counter_u64_t tcp_log_queued;
counter_u64_t tcp_log_que_fail1;
counter_u64_t tcp_log_que_fail2;
counter_u64_t tcp_log_que_fail3;
counter_u64_t tcp_log_que_fail4;
counter_u64_t tcp_log_que_fail5;
counter_u64_t tcp_log_que_copyout;
counter_u64_t tcp_log_que_read;
counter_u64_t tcp_log_que_freed;

SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, queued, CTLFLAG_RD,
    &tcp_log_queued, "Number of entries queued");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail1, CTLFLAG_RD,
    &tcp_log_que_fail1, "Number of entries queued but fail 1");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail2, CTLFLAG_RD,
    &tcp_log_que_fail2, "Number of entries queued but fail 2");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail3, CTLFLAG_RD,
    &tcp_log_que_fail3, "Number of entries queued but fail 3");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail4, CTLFLAG_RD,
    &tcp_log_que_fail4, "Number of entries queued but fail 4");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail5, CTLFLAG_RD,
    &tcp_log_que_fail5, "Number of entries queued but fail 5");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, copyout, CTLFLAG_RD,
    &tcp_log_que_copyout, "Number of entries copied out");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, read, CTLFLAG_RD,
    &tcp_log_que_read, "Number of entries read from the queue");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, freed, CTLFLAG_RD,
    &tcp_log_que_freed, "Number of entries freed after reading");
#endif

#ifdef INVARIANTS
#define TCPLOG_DEBUG_RINGBUF
#endif

struct tcp_log_mem
{
        STAILQ_ENTRY(tcp_log_mem) tlm_queue;
        struct tcp_log_buffer   tlm_buf;
        struct tcp_log_verbose  tlm_v;
#ifdef TCPLOG_DEBUG_RINGBUF
        volatile int            tlm_refcnt;
#endif
};

/* 60 bytes for the header, + 16 bytes for padding */
static uint8_t  zerobuf[76];

/*
 * Lock order:
 * 1. TCPID_TREE
 * 2. TCPID_BUCKET
 * 3. INP
 *
 * Rules:
 * A. You need a lock on the Tree to add/remove buckets.
 * B. You need a lock on the bucket to add/remove nodes from the bucket.
 * C. To change information in a node, you need the INP lock if the tln_closed
 *    field is false. Otherwise, you need the bucket lock. (Note that the
 *    tln_closed field can change at any point, so you need to recheck the
 *    entry after acquiring the INP lock.)
 * D. To remove a node from the bucket, you must have that entry locked,
 *    according to the criteria of Rule C. Also, the node must not be on
 *    the expiry queue.
 * E. The exception to C is the expiry queue fields, which are locked by
 *    the TCPLOG_EXPIREQ lock.
 *
 * Buckets have a reference count. Each node is a reference. Further,
 * other callers may add reference counts to keep a bucket from disappearing.
 * You can add a reference as long as you own a lock sufficient to keep the
 * bucket from disappearing. For example, a common use is:
 *   a. Have a locked INP, but need to lock the TCPID_BUCKET.
 *   b. Add a refcount on the bucket. (Safe because the INP lock prevents
 *      the TCPID_BUCKET from going away.)
 *   c. Drop the INP lock.
 *   d. Acquire a lock on the TCPID_BUCKET.
 *   e. Acquire a lock on the INP.
 *   f. Drop the refcount on the bucket.
 *      (At this point, the bucket may disappear.)
 *
 * Expire queue lock:
 * You can acquire this with either the bucket or INP lock. Don't reverse it.
 * When the expire code has committed to freeing a node, it resets the expiry
 * time to SBT_MAX. That is the signal to everyone else that they should
 * leave that node alone.
 */
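
/*
 * A minimal sketch of steps a-f above (comment only, not compiled; the
 * rechecks needed after relocking are omitted). This is the pattern
 * tcp_log_set_id() uses below:
 *
 *      TCPID_BUCKET_REF(tlb);          // (b) INP lock keeps tlb alive
 *      INP_WUNLOCK(inp);               // (c)
 *      TCPID_BUCKET_LOCK(tlb);         // (d)
 *      INP_WLOCK(inp);                 // (e)
 *      if (tcp_log_unref_bucket(tlb, &tree_locked, inp))  // (f)
 *              tlb = NULL;             // the bucket has been freed
 */
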
static struct rwlock tcp_id_tree_lock;
#define TCPID_TREE_WLOCK()              rw_wlock(&tcp_id_tree_lock)
#define TCPID_TREE_RLOCK()              rw_rlock(&tcp_id_tree_lock)
#define TCPID_TREE_UPGRADE()            rw_try_upgrade(&tcp_id_tree_lock)
#define TCPID_TREE_WUNLOCK()            rw_wunlock(&tcp_id_tree_lock)
#define TCPID_TREE_RUNLOCK()            rw_runlock(&tcp_id_tree_lock)
#define TCPID_TREE_WLOCK_ASSERT()       rw_assert(&tcp_id_tree_lock, RA_WLOCKED)
#define TCPID_TREE_RLOCK_ASSERT()       rw_assert(&tcp_id_tree_lock, RA_RLOCKED)
#define TCPID_TREE_UNLOCK_ASSERT()      rw_assert(&tcp_id_tree_lock, RA_UNLOCKED)

#define TCPID_BUCKET_LOCK_INIT(tlb)     mtx_init(&((tlb)->tlb_mtx), "tcp log id bucket", NULL, MTX_DEF)
#define TCPID_BUCKET_LOCK_DESTROY(tlb)  mtx_destroy(&((tlb)->tlb_mtx))
#define TCPID_BUCKET_LOCK(tlb)          mtx_lock(&((tlb)->tlb_mtx))
#define TCPID_BUCKET_UNLOCK(tlb)        mtx_unlock(&((tlb)->tlb_mtx))
#define TCPID_BUCKET_LOCK_ASSERT(tlb)   mtx_assert(&((tlb)->tlb_mtx), MA_OWNED)
#define TCPID_BUCKET_UNLOCK_ASSERT(tlb) mtx_assert(&((tlb)->tlb_mtx), MA_NOTOWNED)

#define TCPID_BUCKET_REF(tlb)           refcount_acquire(&((tlb)->tlb_refcnt))
#define TCPID_BUCKET_UNREF(tlb)         refcount_release(&((tlb)->tlb_refcnt))

#define TCPLOG_EXPIREQ_LOCK()           mtx_lock(&tcp_log_expireq_mtx)
#define TCPLOG_EXPIREQ_UNLOCK()         mtx_unlock(&tcp_log_expireq_mtx)

SLIST_HEAD(tcp_log_id_head, tcp_log_id_node);

struct tcp_log_id_bucket
{
        /*
         * tlb_id must be first. This lets us use strcmp on
         * (struct tcp_log_id_bucket *) and (char *) interchangeably.
         */
        char                            tlb_id[TCP_LOG_ID_LEN];
        RB_ENTRY(tcp_log_id_bucket)     tlb_rb;
        struct tcp_log_id_head          tlb_head;
        struct mtx                      tlb_mtx;
        volatile u_int                  tlb_refcnt;
};
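
/*
 * Because tlb_id is the first member, a bare (char *) ID may be cast to
 * a bucket pointer for tree lookups, as tcp_log_set_id() does below:
 *
 *      tmp_tlb = RB_FIND(tcp_log_id_tree, &tcp_log_id_head,
 *          (struct tcp_log_id_bucket *)id);
 */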

struct tcp_log_id_node
{
        SLIST_ENTRY(tcp_log_id_node) tln_list;
        STAILQ_ENTRY(tcp_log_id_node) tln_expireq; /* Locked by the expireq lock */
        sbintime_t              tln_expiretime; /* Locked by the expireq lock */

        /*
         * If INP is NULL, that means the connection has closed. We've
         * saved the connection endpoint information and the log entries
         * in the tln_ie and tln_entries members. We've also saved a pointer
         * to the enclosing bucket here. If INP is not NULL, the information is
         * in the PCB and not here.
         */
        struct inpcb            *tln_inp;
        struct tcpcb            *tln_tp;
        struct tcp_log_id_bucket *tln_bucket;
        struct in_endpoints     tln_ie;
        struct tcp_log_stailq   tln_entries;
        int                     tln_count;
        volatile int            tln_closed;
        uint8_t                 tln_af;
};

enum tree_lock_state {
        TREE_UNLOCKED = 0,
        TREE_RLOCKED,
        TREE_WLOCKED,
};

/* Do we want to select this session for auto-logging? */
static __inline bool
tcp_log_selectauto(void)
{

        /*
         * If we are doing auto-capturing, figure out whether we will capture
         * this session.
         */
        if (tcp_log_auto_ratio &&
            (atomic_fetchadd_long(&tcp_log_auto_ratio_cur, 1) %
            tcp_log_auto_ratio) == 0)
                return (true);
        return (false);
}
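
/*
 * For example, with tcp_log_auto_ratio set to 100, the counter above
 * selects sessions 0, 100, 200, ... for capture: roughly one out of
 * every hundred new sessions. The unsigned counter wraps harmlessly.
 */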

static __inline int
tcp_log_id_cmp(struct tcp_log_id_bucket *a, struct tcp_log_id_bucket *b)
{
        KASSERT(a != NULL, ("tcp_log_id_cmp: argument a is unexpectedly NULL"));
        KASSERT(b != NULL, ("tcp_log_id_cmp: argument b is unexpectedly NULL"));
        return strncmp(a->tlb_id, b->tlb_id, TCP_LOG_ID_LEN);
}

RB_GENERATE_STATIC(tcp_log_id_tree, tcp_log_id_bucket, tlb_rb, tcp_log_id_cmp)

static __inline void
tcp_log_id_validate_tree_lock(int tree_locked)
{

#ifdef INVARIANTS
        switch (tree_locked) {
        case TREE_WLOCKED:
                TCPID_TREE_WLOCK_ASSERT();
                break;
        case TREE_RLOCKED:
                TCPID_TREE_RLOCK_ASSERT();
                break;
        case TREE_UNLOCKED:
                TCPID_TREE_UNLOCK_ASSERT();
                break;
        default:
                kassert_panic("%s:%d: unknown tree lock state", __func__,
                    __LINE__);
        }
#endif
}

static __inline void
tcp_log_remove_bucket(struct tcp_log_id_bucket *tlb)
{

        TCPID_TREE_WLOCK_ASSERT();
        KASSERT(SLIST_EMPTY(&tlb->tlb_head),
            ("%s: Attempt to remove non-empty bucket", __func__));
        if (RB_REMOVE(tcp_log_id_tree, &tcp_log_id_head, tlb) == NULL) {
#ifdef INVARIANTS
                kassert_panic("%s:%d: error removing element from tree",
                            __func__, __LINE__);
#endif
        }
        TCPID_BUCKET_LOCK_DESTROY(tlb);
        uma_zfree(tcp_log_bucket_zone, tlb);
}

/*
 * Call with a referenced and locked bucket.
 * Will return true if the bucket was freed; otherwise, false.
 * tlb: The bucket to unreference.
 * tree_locked: A pointer to the state of the tree lock. If the tree lock
 *    state changes, the function will update it.
 * inp: If not NULL and the function needs to drop the inp lock to relock the
 *    tree, it will do so. (The caller must ensure inp will not become invalid,
 *    probably by holding a reference to it.)
 */
static bool
tcp_log_unref_bucket(struct tcp_log_id_bucket *tlb, int *tree_locked,
    struct inpcb *inp)
{

        KASSERT(tlb != NULL, ("%s: called with NULL tlb", __func__));
        KASSERT(tree_locked != NULL, ("%s: called with NULL tree_locked",
            __func__));

        tcp_log_id_validate_tree_lock(*tree_locked);

        /*
         * Did we hold the last reference on the tlb? If so, we may need
         * to free it. (Note that we can realistically only execute the
         * loop twice: once without a write lock and once with a write
         * lock.)
         */
        while (TCPID_BUCKET_UNREF(tlb)) {
                /*
                 * We need a write lock on the tree to free this.
                 * If we can upgrade the tree lock, this is "easy". If we
                 * can't upgrade the tree lock, we need to do this the
                 * "hard" way: unwind all our locks and relock everything.
                 * In the meantime, anything could have changed. We even
                 * need to validate that we still need to free the bucket.
                 */
                if (*tree_locked == TREE_RLOCKED && TCPID_TREE_UPGRADE())
                        *tree_locked = TREE_WLOCKED;
                else if (*tree_locked != TREE_WLOCKED) {
                        TCPID_BUCKET_REF(tlb);
                        if (inp != NULL)
                                INP_WUNLOCK(inp);
                        TCPID_BUCKET_UNLOCK(tlb);
                        if (*tree_locked == TREE_RLOCKED)
                                TCPID_TREE_RUNLOCK();
                        TCPID_TREE_WLOCK();
                        *tree_locked = TREE_WLOCKED;
                        TCPID_BUCKET_LOCK(tlb);
                        if (inp != NULL)
                                INP_WLOCK(inp);
                        continue;
                }

                /*
                 * We have an empty bucket and a write lock on the tree.
                 * Remove the empty bucket.
                 */
                tcp_log_remove_bucket(tlb);
                return (true);
        }
        return (false);
}

/*
 * Call with a locked bucket. This function will release the lock on the
 * bucket before returning.
 *
 * The caller is responsible for freeing the tp->t_lin/tln node!
 *
 * Note: one of tp or both tlb and tln must be supplied.
 *
 * inp: A pointer to the inp. If the function needs to drop the inp lock to
 *    acquire the tree write lock, it will do so. (The caller must ensure inp
 *    will not become invalid, probably by holding a reference to it.)
 * tp: A pointer to the tcpcb. (optional; if specified, tlb and tln are ignored)
 * tlb: A pointer to the bucket. (optional; ignored if tp is specified)
 * tln: A pointer to the node. (optional; ignored if tp is specified)
 * tree_locked: A pointer to the state of the tree lock. If the tree lock
 *    state changes, the function will update it.
 *
 * Will return true if the tree lock state changed (in which case the INP
 * lock may have been dropped and reacquired); otherwise, false.
 */
static bool
tcp_log_remove_id_node(struct inpcb *inp, struct tcpcb *tp,
    struct tcp_log_id_bucket *tlb, struct tcp_log_id_node *tln,
    int *tree_locked)
{
        int orig_tree_locked;

        KASSERT(tp != NULL || (tlb != NULL && tln != NULL),
            ("%s: called with tp=%p, tlb=%p, tln=%p", __func__,
            tp, tlb, tln));
        KASSERT(tree_locked != NULL, ("%s: called with NULL tree_locked",
            __func__));

        if (tp != NULL) {
                tlb = tp->t_lib;
                tln = tp->t_lin;
                KASSERT(tlb != NULL, ("%s: unexpectedly NULL tlb", __func__));
                KASSERT(tln != NULL, ("%s: unexpectedly NULL tln", __func__));
        }

        tcp_log_id_validate_tree_lock(*tree_locked);
        TCPID_BUCKET_LOCK_ASSERT(tlb);

        /*
         * Remove the node, clear the log bucket and node from the TCPCB, and
         * decrement the bucket refcount. In the process, if this is the
         * last reference, the bucket will be freed.
         */
        SLIST_REMOVE(&tlb->tlb_head, tln, tcp_log_id_node, tln_list);
        if (tp != NULL) {
                tp->t_lib = NULL;
                tp->t_lin = NULL;
        }
        orig_tree_locked = *tree_locked;
        if (!tcp_log_unref_bucket(tlb, tree_locked, inp))
                TCPID_BUCKET_UNLOCK(tlb);
        return (*tree_locked != orig_tree_locked);
}

#define RECHECK_INP_CLEAN(cleanup)      do {                    \
        if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {    \
                rv = ECONNRESET;                                \
                cleanup;                                        \
                goto done;                                      \
        }                                                       \
        tp = intotcpcb(inp);                                    \
} while (0)

#define RECHECK_INP()   RECHECK_INP_CLEAN(/* noop */)
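
/*
 * Note: these macros assume the calling function holds a write-locked
 * inp and has locals named rv (int) and tp (struct tcpcb *), plus a
 * "done" label; they bail out with ECONNRESET if the connection entered
 * TIME_WAIT or was dropped while the INP lock was released.
 */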

static void
tcp_log_grow_tlb(char *tlb_id, struct tcpcb *tp)
{

        INP_WLOCK_ASSERT(tp->t_inpcb);

#ifdef STATS
        if (V_tcp_perconn_stats_enable == 2 && tp->t_stats == NULL)
                (void)tcp_stats_sample_rollthedice(tp, tlb_id, strlen(tlb_id));
#endif
}

/*
 * Set the TCP log ID for a TCPCB.
 * Called with INPCB locked. Returns with it unlocked.
 */
int
tcp_log_set_id(struct tcpcb *tp, char *id)
{
        struct tcp_log_id_bucket *tlb, *tmp_tlb;
        struct tcp_log_id_node *tln;
        struct inpcb *inp;
        int tree_locked, rv;
        bool bucket_locked;

        tlb = NULL;
        tln = NULL;
        inp = tp->t_inpcb;
        tree_locked = TREE_UNLOCKED;
        bucket_locked = false;

restart:
        INP_WLOCK_ASSERT(inp);

        /* See if the ID is unchanged. */
        if ((tp->t_lib != NULL && !strcmp(tp->t_lib->tlb_id, id)) ||
            (tp->t_lib == NULL && *id == 0)) {
                rv = 0;
                goto done;
        }

        /*
         * If the TCPCB had a previous ID, we need to extricate it from
         * the previous list.
         *
         * Drop the TCPCB lock and lock the tree and the bucket.
         * Because this is called in the socket context, we (theoretically)
         * don't need to worry about the INPCB completely going away
         * while we are gone.
         */
        if (tp->t_lib != NULL) {
                tlb = tp->t_lib;
                TCPID_BUCKET_REF(tlb);
                INP_WUNLOCK(inp);

                if (tree_locked == TREE_UNLOCKED) {
                        TCPID_TREE_RLOCK();
                        tree_locked = TREE_RLOCKED;
                }
                TCPID_BUCKET_LOCK(tlb);
                bucket_locked = true;
                INP_WLOCK(inp);

                /*
                 * Unreference the bucket. If our bucket went away, it is no
                 * longer locked or valid.
                 */
                if (tcp_log_unref_bucket(tlb, &tree_locked, inp)) {
                        bucket_locked = false;
                        tlb = NULL;
                }

                /* Validate the INP. */
                RECHECK_INP();

                /*
                 * Evaluate whether the bucket changed while we were unlocked.
                 *
                 * Possible scenarios here:
                 * 1. Bucket is unchanged and the same one we started with.
                 * 2. The TCPCB no longer has a bucket and our bucket was
                 *    freed.
                 * 3. The TCPCB has a new bucket, whether or not ours was
                 *    freed.
                 * 4. The TCPCB no longer has a bucket and our bucket was
                 *    not freed.
                 *
                 * In cases 2-4, we will start over. In case 1, we will
                 * proceed here to remove the node from the bucket.
                 */
                if (tlb == NULL || tp->t_lib != tlb) {
                        KASSERT(bucket_locked || tlb == NULL,
                            ("%s: bucket_locked (%d) and tlb (%p) are "
                            "inconsistent", __func__, bucket_locked, tlb));

                        if (bucket_locked) {
                                TCPID_BUCKET_UNLOCK(tlb);
                                bucket_locked = false;
                                tlb = NULL;
                        }
                        goto restart;
                }

                /*
                 * Store the (struct tcp_log_id_node) for reuse. Then, remove
                 * it from the bucket. In the process, we may end up relocking.
                 * If so, we need to validate that the INP is still valid, and
                 * the TCPCB entries match what we expect.
                 *
                 * We will clear tlb and change the bucket_locked state just
                 * before calling tcp_log_remove_id_node(), since that function
                 * will unlock the bucket.
                 */
                if (tln != NULL)
                        uma_zfree(tcp_log_node_zone, tln);
                tln = tp->t_lin;
                tlb = NULL;
                bucket_locked = false;
                if (tcp_log_remove_id_node(inp, tp, NULL, NULL, &tree_locked)) {
                        RECHECK_INP();

                        /*
                         * If the TCPCB moved to a new bucket while we had
                         * dropped the lock, restart.
                         */
                        if (tp->t_lib != NULL || tp->t_lin != NULL)
                                goto restart;
                }

                /*
                 * Yay! We successfully removed the TCPCB from its old
                 * bucket. Phew!
                 *
                 * On to bigger and better things...
                 */
        }

        /* At this point, the TCPCB should not be in any bucket. */
        KASSERT(tp->t_lib == NULL, ("%s: tp->t_lib is not NULL", __func__));

        /*
         * If the new ID is not empty, we need to now assign this TCPCB to a
         * new bucket.
         */
        if (*id) {
                /* Get a new tln, if we don't already have one to reuse. */
                if (tln == NULL) {
                        tln = uma_zalloc(tcp_log_node_zone, M_NOWAIT | M_ZERO);
                        if (tln == NULL) {
                                rv = ENOBUFS;
                                goto done;
                        }
                        tln->tln_inp = inp;
                        tln->tln_tp = tp;
                }

                /*
                 * Drop the INP lock for a bit. We don't need it, and dropping
                 * it prevents lock order reversals.
                 */
                INP_WUNLOCK(inp);

                /* Make sure we have at least a read lock on the tree. */
                tcp_log_id_validate_tree_lock(tree_locked);
                if (tree_locked == TREE_UNLOCKED) {
                        TCPID_TREE_RLOCK();
                        tree_locked = TREE_RLOCKED;
                }

refind:
                /*
                 * Remember that tlb_id is the first member of (struct
                 * tcp_log_id_bucket), so we can safely cast the id to a
                 * bucket pointer for the purposes of finding.
                 */
                KASSERT(tlb == NULL, ("%s:%d tlb unexpectedly non-NULL",
                    __func__, __LINE__));
                tmp_tlb = RB_FIND(tcp_log_id_tree, &tcp_log_id_head,
                    (struct tcp_log_id_bucket *) id);

                /*
                 * If we didn't find a matching bucket, we need to add a new
                 * one. This requires a write lock. But, of course, we will
                 * need to recheck some things when we re-acquire the lock.
                 */
                if (tmp_tlb == NULL && tree_locked != TREE_WLOCKED) {
                        tree_locked = TREE_WLOCKED;
                        if (!TCPID_TREE_UPGRADE()) {
                                TCPID_TREE_RUNLOCK();
                                TCPID_TREE_WLOCK();

                                /*
                                 * The tree may have changed while we were
                                 * unlocked.
                                 */
                                goto refind;
                        }
                }

                /* If we need to add a new bucket, do it now. */
                if (tmp_tlb == NULL) {
                        /* Allocate new bucket. */
                        tlb = uma_zalloc(tcp_log_bucket_zone, M_NOWAIT);
                        if (tlb == NULL) {
                                rv = ENOBUFS;
                                goto done_noinp;
                        }

                        /*
                         * Copy the ID to the bucket.
                         * NB: Don't use strlcpy() unless you are sure
                         * we've always validated NULL termination.
                         *
                         * TODO: When I'm done writing this, see if we
                         * have correctly validated NULL termination and
                         * can use strlcpy(). :-)
                         */
                        strncpy(tlb->tlb_id, id, TCP_LOG_ID_LEN - 1);
                        tlb->tlb_id[TCP_LOG_ID_LEN - 1] = '\0';

                        /*
                         * Take the refcount for the first node and go ahead
                         * and lock this. Note that we zero the tlb_mtx
                         * structure, since 0xdeadc0de flips the right bits
                         * for the code to think that this mutex has already
                         * been initialized. :-(
                         */
                        SLIST_INIT(&tlb->tlb_head);
                        refcount_init(&tlb->tlb_refcnt, 1);
                        memset(&tlb->tlb_mtx, 0, sizeof(struct mtx));
                        TCPID_BUCKET_LOCK_INIT(tlb);
                        TCPID_BUCKET_LOCK(tlb);
                        bucket_locked = true;

#define FREE_NEW_TLB()  do {                            \
        TCPID_BUCKET_LOCK_DESTROY(tlb);                 \
        uma_zfree(tcp_log_bucket_zone, tlb);            \
        bucket_locked = false;                          \
        tlb = NULL;                                     \
} while (0)
                        /*
                         * Relock the INP and make sure we are still
                         * unassigned.
                         */
                        INP_WLOCK(inp);
                        RECHECK_INP_CLEAN(FREE_NEW_TLB());
                        if (tp->t_lib != NULL) {
                                FREE_NEW_TLB();
                                goto restart;
                        }

                        /* Add the new bucket to the tree. */
                        tmp_tlb = RB_INSERT(tcp_log_id_tree, &tcp_log_id_head,
                            tlb);
                        KASSERT(tmp_tlb == NULL,
                            ("%s: Unexpected conflicting bucket (%p) while "
                            "adding new bucket (%p)", __func__, tmp_tlb, tlb));

                        /*
                         * If we found a conflicting bucket, free the new
                         * one we made and fall through to use the existing
                         * bucket.
                         */
                        if (tmp_tlb != NULL) {
                                FREE_NEW_TLB();
                                INP_WUNLOCK(inp);
                        }
#undef  FREE_NEW_TLB
                }

                /* If we found an existing bucket, use it. */
                if (tmp_tlb != NULL) {
                        tlb = tmp_tlb;
                        TCPID_BUCKET_LOCK(tlb);
                        bucket_locked = true;

                        /*
                         * Relock the INP and make sure we are still
                         * unassigned.
                         */
                        INP_UNLOCK_ASSERT(inp);
                        INP_WLOCK(inp);
                        RECHECK_INP();
                        if (tp->t_lib != NULL) {
                                TCPID_BUCKET_UNLOCK(tlb);
                                bucket_locked = false;
                                tlb = NULL;
                                goto restart;
                        }

                        /* Take a reference on the bucket. */
                        TCPID_BUCKET_REF(tlb);
                }

                tcp_log_grow_tlb(tlb->tlb_id, tp);

                /* Add the new node to the list. */
                SLIST_INSERT_HEAD(&tlb->tlb_head, tln, tln_list);
                tp->t_lib = tlb;
                tp->t_lin = tln;
                tln = NULL;
        }

        rv = 0;

done:
        /* Unlock things, as needed, and return. */
        INP_WUNLOCK(inp);
done_noinp:
        INP_UNLOCK_ASSERT(inp);
        if (bucket_locked) {
                TCPID_BUCKET_LOCK_ASSERT(tlb);
                TCPID_BUCKET_UNLOCK(tlb);
        } else if (tlb != NULL)
                TCPID_BUCKET_UNLOCK_ASSERT(tlb);
        if (tree_locked == TREE_WLOCKED) {
                TCPID_TREE_WLOCK_ASSERT();
                TCPID_TREE_WUNLOCK();
        } else if (tree_locked == TREE_RLOCKED) {
                TCPID_TREE_RLOCK_ASSERT();
                TCPID_TREE_RUNLOCK();
        } else
                TCPID_TREE_UNLOCK_ASSERT();
        if (tln != NULL)
                uma_zfree(tcp_log_node_zone, tln);
        return (rv);
}

/*
 * Get the TCP log ID for a TCPCB.
 * Called with INPCB locked.
 * 'buf' must point to a buffer that is at least TCP_LOG_ID_LEN bytes long.
 * Returns number of bytes copied.
 */
size_t
tcp_log_get_id(struct tcpcb *tp, char *buf)
{
        size_t len;

        INP_LOCK_ASSERT(tp->t_inpcb);
        if (tp->t_lib != NULL) {
                len = strlcpy(buf, tp->t_lib->tlb_id, TCP_LOG_ID_LEN);
                KASSERT(len < TCP_LOG_ID_LEN,
                    ("%s:%d: tp->t_lib->tlb_id too long (%zu)",
                    __func__, __LINE__, len));
        } else {
                *buf = '\0';
                len = 0;
        }
        return (len);
}
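
/*
 * A hypothetical caller sketch (illustrative only, not an existing API):
 *
 *      char id[TCP_LOG_ID_LEN];
 *      size_t len;
 *
 *      INP_WLOCK(inp);
 *      len = tcp_log_get_id(tp, id);
 *      INP_WUNLOCK(inp);
 *
 * On return, id[] holds a NUL-terminated copy of len bytes.
 */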

/*
 * Get number of connections with the same log ID.
 * Log ID is taken from given TCPCB.
 * Called with INPCB locked.
 */
u_int
tcp_log_get_id_cnt(struct tcpcb *tp)
{

        INP_WLOCK_ASSERT(tp->t_inpcb);
        return ((tp->t_lib == NULL) ? 0 : tp->t_lib->tlb_refcnt);
}

#ifdef TCPLOG_DEBUG_RINGBUF
/*
 * Functions/macros to increment/decrement reference count for a log
 * entry. This should catch when we do a double-free/double-remove or
 * a double-add.
 */
static inline void
_tcp_log_entry_refcnt_add(struct tcp_log_mem *log_entry, const char *func,
    int line)
{
        int refcnt;

        refcnt = atomic_fetchadd_int(&log_entry->tlm_refcnt, 1);
        if (refcnt != 0)
                panic("%s:%d: log_entry(%p)->tlm_refcnt is %d (expected 0)",
                    func, line, log_entry, refcnt);
}
#define tcp_log_entry_refcnt_add(l)     \
    _tcp_log_entry_refcnt_add((l), __func__, __LINE__)

static inline void
_tcp_log_entry_refcnt_rem(struct tcp_log_mem *log_entry, const char *func,
    int line)
{
        int refcnt;

        refcnt = atomic_fetchadd_int(&log_entry->tlm_refcnt, -1);
        if (refcnt != 1)
                panic("%s:%d: log_entry(%p)->tlm_refcnt is %d (expected 1)",
                    func, line, log_entry, refcnt);
}
#define tcp_log_entry_refcnt_rem(l)     \
    _tcp_log_entry_refcnt_rem((l), __func__, __LINE__)

#else /* !TCPLOG_DEBUG_RINGBUF */

#define tcp_log_entry_refcnt_add(l)
#define tcp_log_entry_refcnt_rem(l)

#endif

/*
 * Cleanup after removing a log entry, but only decrement the count if we
 * are running INVARIANTS.
 */
static inline void
tcp_log_free_log_common(struct tcp_log_mem *log_entry, int *count __unused)
{

        uma_zfree(tcp_log_zone, log_entry);
#ifdef INVARIANTS
        (*count)--;
        KASSERT(*count >= 0,
            ("%s: count unexpectedly negative", __func__));
#endif
}

static void
tcp_log_free_entries(struct tcp_log_stailq *head, int *count)
{
        struct tcp_log_mem *log_entry;

        /* Free the entries. */
        while ((log_entry = STAILQ_FIRST(head)) != NULL) {
                STAILQ_REMOVE_HEAD(head, tlm_queue);
                tcp_log_entry_refcnt_rem(log_entry);
                tcp_log_free_log_common(log_entry, count);
        }
}

/* Cleanup after removing a log entry. */
static inline void
tcp_log_remove_log_cleanup(struct tcpcb *tp, struct tcp_log_mem *log_entry)
{
        uma_zfree(tcp_log_zone, log_entry);
        tp->t_lognum--;
        KASSERT(tp->t_lognum >= 0,
            ("%s: tp->t_lognum unexpectedly negative", __func__));
}

/* Remove a log entry from the head of a list. */
static inline void
tcp_log_remove_log_head(struct tcpcb *tp, struct tcp_log_mem *log_entry)
{

        KASSERT(log_entry == STAILQ_FIRST(&tp->t_logs),
            ("%s: attempt to remove non-HEAD log entry", __func__));
        STAILQ_REMOVE_HEAD(&tp->t_logs, tlm_queue);
        tcp_log_entry_refcnt_rem(log_entry);
        tcp_log_remove_log_cleanup(tp, log_entry);
}

#ifdef TCPLOG_DEBUG_RINGBUF
/*
 * Initialize the log entry's reference count, which we want to
 * survive allocations.
 */
static int
tcp_log_zone_init(void *mem, int size, int flags __unused)
{
        struct tcp_log_mem *tlm;

        KASSERT(size >= sizeof(struct tcp_log_mem),
            ("%s: unexpectedly short (%d) allocation", __func__, size));
        tlm = (struct tcp_log_mem *)mem;
        tlm->tlm_refcnt = 0;
        return (0);
}

/*
 * Double check that the refcnt is zero on allocation and return.
 */
static int
tcp_log_zone_ctor(void *mem, int size, void *args __unused, int flags __unused)
{
        struct tcp_log_mem *tlm;

        KASSERT(size >= sizeof(struct tcp_log_mem),
            ("%s: unexpectedly short (%d) allocation", __func__, size));
        tlm = (struct tcp_log_mem *)mem;
        if (tlm->tlm_refcnt != 0)
                panic("%s:%d: tlm(%p)->tlm_refcnt is %d (expected 0)",
                    __func__, __LINE__, tlm, tlm->tlm_refcnt);
        return (0);
}

static void
tcp_log_zone_dtor(void *mem, int size, void *args __unused)
{
        struct tcp_log_mem *tlm;

        KASSERT(size >= sizeof(struct tcp_log_mem),
            ("%s: unexpectedly short (%d) allocation", __func__, size));
        tlm = (struct tcp_log_mem *)mem;
        if (tlm->tlm_refcnt != 0)
                panic("%s:%d: tlm(%p)->tlm_refcnt is %d (expected 0)",
                    __func__, __LINE__, tlm, tlm->tlm_refcnt);
}
#endif /* TCPLOG_DEBUG_RINGBUF */

/* Do global initialization. */
void
tcp_log_init(void)
{

        tcp_log_zone = uma_zcreate("tcp_log", sizeof(struct tcp_log_mem),
#ifdef TCPLOG_DEBUG_RINGBUF
            tcp_log_zone_ctor, tcp_log_zone_dtor, tcp_log_zone_init,
#else
            NULL, NULL, NULL,
#endif
            NULL, UMA_ALIGN_PTR, 0);
        (void)uma_zone_set_max(tcp_log_zone, TCP_LOG_BUF_DEFAULT_GLOBAL_LIMIT);
        tcp_log_bucket_zone = uma_zcreate("tcp_log_bucket",
            sizeof(struct tcp_log_id_bucket), NULL, NULL, NULL, NULL,
            UMA_ALIGN_PTR, 0);
        tcp_log_node_zone = uma_zcreate("tcp_log_node",
            sizeof(struct tcp_log_id_node), NULL, NULL, NULL, NULL,
            UMA_ALIGN_PTR, 0);
#ifdef TCPLOG_DEBUG_COUNTERS
        tcp_log_queued = counter_u64_alloc(M_WAITOK);
        tcp_log_que_fail1 = counter_u64_alloc(M_WAITOK);
        tcp_log_que_fail2 = counter_u64_alloc(M_WAITOK);
        tcp_log_que_fail3 = counter_u64_alloc(M_WAITOK);
        tcp_log_que_fail4 = counter_u64_alloc(M_WAITOK);
        tcp_log_que_fail5 = counter_u64_alloc(M_WAITOK);
        tcp_log_que_copyout = counter_u64_alloc(M_WAITOK);
        tcp_log_que_read = counter_u64_alloc(M_WAITOK);
        tcp_log_que_freed = counter_u64_alloc(M_WAITOK);
#endif

        rw_init_flags(&tcp_id_tree_lock, "TCP ID tree", RW_NEW);
        mtx_init(&tcp_log_expireq_mtx, "TCP log expireq", NULL, MTX_DEF);
        callout_init(&tcp_log_expireq_callout, 1);
}

/* Do per-TCPCB initialization. */
void
tcp_log_tcpcbinit(struct tcpcb *tp)
{

        /* A new TCPCB should start out zero-initialized. */
        STAILQ_INIT(&tp->t_logs);

        /*
         * If we are doing auto-capturing, figure out whether we will capture
         * this session.
         */
        if (tcp_log_selectauto()) {
                tp->t_logstate = tcp_log_auto_mode;
                tp->t_flags2 |= TF2_LOG_AUTO;
        }
}

/* Remove entries */
static void
tcp_log_expire(void *unused __unused)
{
        struct tcp_log_id_bucket *tlb;
        struct tcp_log_id_node *tln;
        sbintime_t expiry_limit;
        int tree_locked;

        TCPLOG_EXPIREQ_LOCK();
        if (callout_pending(&tcp_log_expireq_callout)) {
                /* Callout was reset. */
                TCPLOG_EXPIREQ_UNLOCK();
                return;
        }

        /*
         * Process entries until we reach one that expires too far in the
         * future. Look one second in the future.
         */
        expiry_limit = getsbinuptime() + SBT_1S;
        tree_locked = TREE_UNLOCKED;

        while ((tln = STAILQ_FIRST(&tcp_log_expireq_head)) != NULL &&
            tln->tln_expiretime <= expiry_limit) {
                if (!callout_active(&tcp_log_expireq_callout)) {
                        /*
                         * Callout was stopped. I guess we should
                         * just quit at this point.
                         */
                        TCPLOG_EXPIREQ_UNLOCK();
                        return;
                }

                /*
                 * Remove the node from the head of the list and unlock
                 * the list. Change the expiry time to SBT_MAX as a signal
                 * to other threads that we now own this.
                 */
                STAILQ_REMOVE_HEAD(&tcp_log_expireq_head, tln_expireq);
                tln->tln_expiretime = SBT_MAX;
                TCPLOG_EXPIREQ_UNLOCK();

                /*
                 * Remove the node from the bucket.
                 */
                tlb = tln->tln_bucket;
                TCPID_BUCKET_LOCK(tlb);
                if (tcp_log_remove_id_node(NULL, NULL, tlb, tln, &tree_locked)) {
                        tcp_log_id_validate_tree_lock(tree_locked);
                        if (tree_locked == TREE_WLOCKED)
                                TCPID_TREE_WUNLOCK();
                        else
                                TCPID_TREE_RUNLOCK();
                        tree_locked = TREE_UNLOCKED;
                }

                /* Drop the INP reference. */
                INP_WLOCK(tln->tln_inp);
                if (!in_pcbrele_wlocked(tln->tln_inp))
                        INP_WUNLOCK(tln->tln_inp);

                /* Free the log records. */
                tcp_log_free_entries(&tln->tln_entries, &tln->tln_count);

                /* Free the node. */
                uma_zfree(tcp_log_node_zone, tln);

                /* Relock the expiry queue. */
                TCPLOG_EXPIREQ_LOCK();
        }

        /*
         * We've expired all the entries we can. Do we need to reschedule
         * ourselves?
         */
        callout_deactivate(&tcp_log_expireq_callout);
        if (tln != NULL) {
                /*
                 * Get max(now + TCP_LOG_EXPIRE_INTVL, tln->tln_expiretime) and
                 * set the next callout to that. (This helps ensure we generally
                 * run the callout no more often than desired.)
                 */
                expiry_limit = getsbinuptime() + TCP_LOG_EXPIRE_INTVL;
                if (expiry_limit < tln->tln_expiretime)
                        expiry_limit = tln->tln_expiretime;
                callout_reset_sbt(&tcp_log_expireq_callout, expiry_limit,
                    SBT_1S, tcp_log_expire, NULL, C_ABSOLUTE);
        }

        /* We're done. */
        TCPLOG_EXPIREQ_UNLOCK();
        return;
}
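
/*
 * Scheduling sketch: if the queue holds nodes expiring at times
 * t1 <= t2 <= ..., the next callout fires at roughly
 * max(now + TCP_LOG_EXPIRE_INTVL, t1), so the timer runs at most about
 * once per TCP_LOG_EXPIRE_INTVL even when many nodes expire "now".
 */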

/*
 * Move log data from the TCPCB to a new node. This will reset the TCPCB log
 * entries and log count; however, it will not touch other things from the
 * TCPCB (e.g. t_lin, t_lib).
 *
 * NOTE: Must hold a lock on the INP.
 */
static void
tcp_log_move_tp_to_node(struct tcpcb *tp, struct tcp_log_id_node *tln)
{

        INP_WLOCK_ASSERT(tp->t_inpcb);

        tln->tln_ie = tp->t_inpcb->inp_inc.inc_ie;
        if (tp->t_inpcb->inp_inc.inc_flags & INC_ISIPV6)
                tln->tln_af = AF_INET6;
        else
                tln->tln_af = AF_INET;
        tln->tln_entries = tp->t_logs;
        tln->tln_count = tp->t_lognum;
        tln->tln_bucket = tp->t_lib;

        /* Clear information from the PCB. */
        STAILQ_INIT(&tp->t_logs);
        tp->t_lognum = 0;
}

/* Do per-TCPCB cleanup */
void
tcp_log_tcpcbfini(struct tcpcb *tp)
{
        struct tcp_log_id_node *tln, *tln_first;
        struct tcp_log_mem *log_entry;
        sbintime_t callouttime;

        INP_WLOCK_ASSERT(tp->t_inpcb);

        /*
         * If we were gathering packets to be automatically dumped, try to do
         * it now. If this succeeds, the log information in the TCPCB will be
         * cleared. Otherwise, we'll handle the log information as we do
         * for other states.
         */
        switch (tp->t_logstate) {
        case TCP_LOG_STATE_HEAD_AUTO:
                (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from head",
                    M_NOWAIT, false);
                break;
        case TCP_LOG_STATE_TAIL_AUTO:
                (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from tail",
                    M_NOWAIT, false);
                break;
        case TCP_LOG_STATE_CONTINUAL:
                (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
                    M_NOWAIT, false);
                break;
        }

        /*
         * There are two ways we could keep logs: per-socket or per-ID. If
         * we are tracking logs with an ID, then the logs survive the
         * destruction of the TCPCB.
         *
         * If the TCPCB is associated with an ID node, move the logs from the
         * TCPCB to the ID node. In theory, this is safe, for reasons which I
         * will now explain for my own benefit when I next need to figure out
         * this code. :-)
         *
         * We own the INP lock. Therefore, no one else can change the contents
         * of this node (Rule C). Further, no one can remove this node from
         * the bucket while we hold the lock (Rule D). Basically, no one can
         * mess with this node. That leaves two states in which we could be:
         *
         * 1. Another thread is currently waiting to acquire the INP lock, with
         *    plans to do something with this node. When we drop the INP lock,
         *    they will have a chance to do that. They will recheck the
         *    tln_closed field (see note to Rule C) and then acquire the
         *    bucket lock before proceeding further.
         *
         * 2. Another thread will try to acquire a lock at some point in the
         *    future. If they try to acquire a lock before we set the
         *    tln_closed field, they will follow state #1. If they try to
         *    acquire a lock after we set the tln_closed field, they will be
         *    able to make changes to the node, at will, following Rule C.
         *
         * Therefore, we currently own this node and can make any changes
         * we want. But, as soon as we set the tln_closed field to true, we
         * have effectively dropped our lock on the node. (For this reason, we
         * also need to make sure our writes are ordered correctly. An atomic
         * operation with "release" semantics should be sufficient.)
         */

        if (tp->t_lin != NULL) {
                /* Copy the relevant information to the log entry. */
                tln = tp->t_lin;
                KASSERT(tln->tln_inp == tp->t_inpcb,
                    ("%s: Mismatched inp (tln->tln_inp=%p, tp->t_inpcb=%p)",
                    __func__, tln->tln_inp, tp->t_inpcb));
                tcp_log_move_tp_to_node(tp, tln);

                /* Clear information from the PCB. */
                tp->t_lin = NULL;
                tp->t_lib = NULL;

                /*
                 * Take a reference on the INP. This ensures that the INP
                 * remains valid while the node is on the expiry queue. This
                 * ensures the INP is valid for other threads that may be
                 * racing to lock this node when we move it to the expire
                 * queue.
                 */
                in_pcbref(tp->t_inpcb);

                /*
                 * Store the entry on the expiry list. The exact behavior
                 * depends on whether we have entries to keep. If so, we
                 * put the entry at the tail of the list and expire in
                 * TCP_LOG_EXPIRE_TIME. Otherwise, we expire "now" and put
                 * the entry at the head of the list. (Handling the cleanup
                 * via the expiry timer lets us avoid locking messy-ness here.)
                 */
                tln->tln_expiretime = getsbinuptime();
                TCPLOG_EXPIREQ_LOCK();
                if (tln->tln_count) {
                        tln->tln_expiretime += TCP_LOG_EXPIRE_TIME;
                        if (STAILQ_EMPTY(&tcp_log_expireq_head) &&
                            !callout_active(&tcp_log_expireq_callout)) {
                                /*
                                 * We are adding the first entry and a callout
                                 * is not currently scheduled; therefore, we
                                 * need to schedule one.
                                 */
                                callout_reset_sbt(&tcp_log_expireq_callout,
                                    tln->tln_expiretime, SBT_1S, tcp_log_expire,
                                    NULL, C_ABSOLUTE);
                        }
                        STAILQ_INSERT_TAIL(&tcp_log_expireq_head, tln,
                            tln_expireq);
                } else {
                        callouttime = tln->tln_expiretime +
                            TCP_LOG_EXPIRE_INTVL;
                        tln_first = STAILQ_FIRST(&tcp_log_expireq_head);

                        if ((tln_first == NULL ||
                            callouttime < tln_first->tln_expiretime) &&
                            (callout_pending(&tcp_log_expireq_callout) ||
                            !callout_active(&tcp_log_expireq_callout))) {
                                /*
                                 * The list is empty, or we want to run the
                                 * expire code before the first entry's timer
                                 * fires. Also, we are in a case where a callout
                                 * is not actively running. We want to reset
                                 * the callout to occur sooner.
                                 */
                                callout_reset_sbt(&tcp_log_expireq_callout,
                                    callouttime, SBT_1S, tcp_log_expire, NULL,
                                    C_ABSOLUTE);
                        }

                        /*
                         * Insert to the head, or just after the head, as
                         * appropriate. (This might result in small
                         * mis-orderings as a bunch of "expire now" entries
                         * gather at the start of the list, but that should
                         * not produce big problems, since the expire timer
                         * will walk through all of them.)
                         */
                        if (tln_first == NULL ||
                            tln->tln_expiretime < tln_first->tln_expiretime)
                                STAILQ_INSERT_HEAD(&tcp_log_expireq_head, tln,
                                    tln_expireq);
                        else
                                STAILQ_INSERT_AFTER(&tcp_log_expireq_head,
                                    tln_first, tln, tln_expireq);
                }
                TCPLOG_EXPIREQ_UNLOCK();

                /*
                 * We are done messing with the tln. After this point, we
                 * can't touch it. (Note that the "release" semantics should
                 * be included with the TCPLOG_EXPIREQ_UNLOCK() call above.
                 * Therefore, they should be unnecessary here. However, it
                 * seems like a good idea to include them anyway, since we
                 * really are releasing a lock here.)
                 */
                atomic_store_rel_int(&tln->tln_closed, 1);
        } else {
                /* Remove log entries. */
                while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
                        tcp_log_remove_log_head(tp, log_entry);
                KASSERT(tp->t_lognum == 0,
                    ("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
                        __func__, tp->t_lognum));
        }

        /*
         * Change the log state to off (just in case anything tries to sneak
         * in a last-minute log).
         */
        tp->t_logstate = TCP_LOG_STATE_OFF;
}
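
/*
 * Lifecycle summary (derived from the code above): when a connection
 * with a log ID closes, its log entries move from the TCPCB to the ID
 * node, the node joins the expiry queue, and tcp_log_expire() frees it
 * after TCP_LOG_EXPIRE_TIME (or almost immediately if it has no entries
 * to keep).
 */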

/*
 * This logs an event for a TCP socket. Normally, this is called via
 * TCP_LOG_EVENT or TCP_LOG_EVENT_VERBOSE. See the documentation for
 * TCP_LOG_EVENT().
 */

struct tcp_log_buffer *
tcp_log_event_(struct tcpcb *tp, struct tcphdr *th, struct sockbuf *rxbuf,
    struct sockbuf *txbuf, uint8_t eventid, int errornum, uint32_t len,
    union tcp_log_stackspecific *stackinfo, int th_hostorder,
    const char *output_caller, const char *func, int line, const struct timeval *itv)
{
        struct tcp_log_mem *log_entry;
        struct tcp_log_buffer *log_buf;
        int attempt_count = 0;
        struct tcp_log_verbose *log_verbose;
        uint32_t logsn;

        KASSERT((func == NULL && line == 0) || (func != NULL && line > 0),
            ("%s called with inconsistent func (%p) and line (%d) arguments",
                __func__, func, line));

        INP_WLOCK_ASSERT(tp->t_inpcb);

        KASSERT(tp->t_logstate == TCP_LOG_STATE_HEAD ||
            tp->t_logstate == TCP_LOG_STATE_TAIL ||
            tp->t_logstate == TCP_LOG_STATE_CONTINUAL ||
            tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO ||
            tp->t_logstate == TCP_LOG_STATE_TAIL_AUTO,
            ("%s called with unexpected tp->t_logstate (%d)", __func__,
                tp->t_logstate));

        /*
         * Get the serial number. We do this early so it will
         * increment even if we end up skipping the log entry for some
         * reason.
         */
        logsn = tp->t_logsn++;

        /*
         * Can we get a new log entry? If so, increment the lognum counter
         * here.
         */
retry:
        if (tp->t_lognum < tcp_log_session_limit) {
                if ((log_entry = uma_zalloc(tcp_log_zone, M_NOWAIT)) != NULL)
                        tp->t_lognum++;
        } else
                log_entry = NULL;

        /* Do we need to try to reuse? */
        if (log_entry == NULL) {
                /*
                 * Sacrifice auto-logged sessions without a log ID if
                 * tcp_log_auto_all is false. (If they don't have a log
                 * ID by now, it is probable that either they won't get one
                 * or we are resource-constrained.)
                 */
                if (tp->t_lib == NULL && (tp->t_flags2 & TF2_LOG_AUTO) &&
                    !tcp_log_auto_all) {
                        if (tcp_log_state_change(tp, TCP_LOG_STATE_CLEAR)) {
#ifdef INVARIANTS
                                panic("%s:%d: tcp_log_state_change() failed "
                                    "to set tp %p to TCP_LOG_STATE_CLEAR",
                                    __func__, __LINE__, tp);
#endif
                                tp->t_logstate = TCP_LOG_STATE_OFF;
                        }
                        return (NULL);
                }
                /*
                 * If we are in TCP_LOG_STATE_HEAD_AUTO state, try to dump
                 * the buffers. If successful, deactivate tracing. Otherwise,
                 * leave it active so we will retry.
                 */
                if (tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO &&
                    !tcp_log_dump_tp_logbuf(tp, "auto-dumped from head",
                    M_NOWAIT, false)) {
                        tp->t_logstate = TCP_LOG_STATE_OFF;
1409                         return (NULL);
1410                 } else if ((tp->t_logstate == TCP_LOG_STATE_CONTINUAL) &&
1411                     !tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
1412                     M_NOWAIT, false)) {
1413                         if (attempt_count == 0) {
1414                                 attempt_count++;
1415                                 goto retry;
1416                         }
1417 #ifdef TCPLOG_DEBUG_COUNTERS
1418                         counter_u64_add(tcp_log_que_fail4, 1);
1419 #endif
1420                         return (NULL);
1421                 } else if (tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO)
1422                         return (NULL);
1423
1424                 /* If in HEAD state, just deactivate the tracing and return. */
1425                 if (tp->t_logstate == TCP_LOG_STATE_HEAD) {
1426                         tp->t_logstate = TCP_LOG_STATE_OFF;
1427                         return (NULL);
1428                 }
1429
1430                 /*
1431                  * Get a buffer to reuse. If that fails, just give up.
1432                  * (We can't log anything without a buffer in which to
1433                  * put it.)
1434                  *
1435                  * Note that we don't change the t_lognum counter
1436                  * here. Because we are re-using the buffer, the total
1437                  * number won't change.
1438                  */
1439                 if ((log_entry = STAILQ_FIRST(&tp->t_logs)) == NULL)
1440                         return (NULL);
1441                 STAILQ_REMOVE_HEAD(&tp->t_logs, tlm_queue);
1442                 tcp_log_entry_refcnt_rem(log_entry);
1443         }
1444
1445         KASSERT(log_entry != NULL,
1446             ("%s: log_entry unexpectedly NULL", __func__));
1447
1448         /* Extract the log buffer and verbose buffer pointers. */
1449         log_buf = &log_entry->tlm_buf;
1450         log_verbose = &log_entry->tlm_v;
1451
1452         /* Basic entries. */
1453         if (itv == NULL)
1454                 getmicrouptime(&log_buf->tlb_tv);
1455         else
1456                 memcpy(&log_buf->tlb_tv, itv, sizeof(struct timeval));
1457         log_buf->tlb_ticks = ticks;
1458         log_buf->tlb_sn = logsn;
1459         log_buf->tlb_stackid = tp->t_fb->tfb_id;
1460         log_buf->tlb_eventid = eventid;
1461         log_buf->tlb_eventflags = 0;
1462         log_buf->tlb_errno = errornum;
1463
1464         /* Socket buffers */
1465         if (rxbuf != NULL) {
1466                 log_buf->tlb_eventflags |= TLB_FLAG_RXBUF;
1467                 log_buf->tlb_rxbuf.tls_sb_acc = rxbuf->sb_acc;
1468                 log_buf->tlb_rxbuf.tls_sb_ccc = rxbuf->sb_ccc;
1469                 log_buf->tlb_rxbuf.tls_sb_spare = 0;
1470         }
1471         if (txbuf != NULL) {
1472                 log_buf->tlb_eventflags |= TLB_FLAG_TXBUF;
1473                 log_buf->tlb_txbuf.tls_sb_acc = txbuf->sb_acc;
1474                 log_buf->tlb_txbuf.tls_sb_ccc = txbuf->sb_ccc;
1475                 log_buf->tlb_txbuf.tls_sb_spare = 0;
1476         }
1477         /* Copy values from tp to the log entry. */
1478 #define COPY_STAT(f)    log_buf->tlb_ ## f = tp->f
1479 #define COPY_STAT_T(f)  log_buf->tlb_ ## f = tp->t_ ## f
1480         COPY_STAT_T(state);
1481         COPY_STAT_T(starttime);
1482         COPY_STAT(iss);
1483         COPY_STAT_T(flags);
1484         COPY_STAT(snd_una);
1485         COPY_STAT(snd_max);
1486         COPY_STAT(snd_cwnd);
1487         COPY_STAT(snd_nxt);
1488         COPY_STAT(snd_recover);
1489         COPY_STAT(snd_wnd);
1490         COPY_STAT(snd_ssthresh);
1491         COPY_STAT_T(srtt);
1492         COPY_STAT_T(rttvar);
1493         COPY_STAT(rcv_up);
1494         COPY_STAT(rcv_adv);
1495         COPY_STAT(rcv_nxt);
1496         COPY_STAT(sack_newdata);
1497         COPY_STAT(rcv_wnd);
1498         COPY_STAT_T(dupacks);
1499         COPY_STAT_T(segqlen);
1500         COPY_STAT(snd_numholes);
1501         COPY_STAT(snd_scale);
1502         COPY_STAT(rcv_scale);
1503 #undef COPY_STAT
1504 #undef COPY_STAT_T
1505         log_buf->tlb_flex1 = 0;
1506         log_buf->tlb_flex2 = 0;
1507         /* Copy stack-specific info. */
1508         if (stackinfo != NULL) {
1509                 memcpy(&log_buf->tlb_stackinfo, stackinfo,
1510                     sizeof(log_buf->tlb_stackinfo));
1511                 log_buf->tlb_eventflags |= TLB_FLAG_STACKINFO;
1512         }
1513
1514         /* The packet */
1515         log_buf->tlb_len = len;
1516         if (th) {
1517                 int optlen;
1518
1519                 log_buf->tlb_eventflags |= TLB_FLAG_HDR;
1520                 log_buf->tlb_th = *th;
1521                 if (th_hostorder)
1522                         tcp_fields_to_net(&log_buf->tlb_th);
1523                 optlen = (th->th_off << 2) - sizeof(struct tcphdr);
1524                 if (optlen > 0)
1525                         memcpy(log_buf->tlb_opts, th + 1, optlen);
1526         }
1527
1528         /* Verbose information */
1529         if (func != NULL) {
1530                 log_buf->tlb_eventflags |= TLB_FLAG_VERBOSE;
1531                 if (output_caller != NULL)
1532                         strlcpy(log_verbose->tlv_snd_frm, output_caller,
1533                             TCP_FUNC_LEN);
1534                 else
1535                         *log_verbose->tlv_snd_frm = 0;
1536                 strlcpy(log_verbose->tlv_trace_func, func, TCP_FUNC_LEN);
1537                 log_verbose->tlv_trace_line = line;
1538         }
1539
1540         /* Insert the new log at the tail. */
1541         STAILQ_INSERT_TAIL(&tp->t_logs, log_entry, tlm_queue);
1542         tcp_log_entry_refcnt_add(log_entry);
1543         return (log_buf);
1544 }
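
/*
 * Illustrative sketch (not part of the build): callers normally reach
 * tcp_log_event_() through the TCP_LOG_EVENT() macro with the INP write
 * lock held, e.g. to trace an input segment. The final argument says
 * whether *th has already been converted to host byte order; TCP_LOG_IN
 * and tlen stand in for the caller's event id and segment length.
 *
 *	struct socket *so = tp->t_inpcb->inp_socket;
 *
 *	if (tp->t_logstate != TCP_LOG_STATE_OFF)
 *		TCP_LOG_EVENT(tp, th, &so->so_rcv, &so->so_snd,
 *		    TCP_LOG_IN, 0, tlen, NULL, true);
 */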
1545
1546 /*
1547  * Change the logging state for a TCPCB. Returns 0 on success or an
1548  * error code on failure.
1549  */
1550 int
1551 tcp_log_state_change(struct tcpcb *tp, int state)
1552 {
1553         struct tcp_log_mem *log_entry;
1554
1555         INP_WLOCK_ASSERT(tp->t_inpcb);
1556         switch (state) {
1557         case TCP_LOG_STATE_CLEAR:
1558                 while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
1559                         tcp_log_remove_log_head(tp, log_entry);
1560                 /* Fall through */
1561
1562         case TCP_LOG_STATE_OFF:
1563                 tp->t_logstate = TCP_LOG_STATE_OFF;
1564                 break;
1565
1566         case TCP_LOG_STATE_TAIL:
1567         case TCP_LOG_STATE_HEAD:
1568         case TCP_LOG_STATE_CONTINUAL:
1569         case TCP_LOG_STATE_HEAD_AUTO:
1570         case TCP_LOG_STATE_TAIL_AUTO:
1571                 tp->t_logstate = state;
1572                 break;
1573
1574         default:
1575                 return (EINVAL);
1576         }
1577
1578         tp->t_flags2 &= ~(TF2_LOG_AUTO);
1579
1580         return (0);
1581 }
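
/*
 * Illustrative sketch (not part of the build): enabling tail logging on
 * a connection, e.g. from a setsockopt() handler. The INP write lock
 * must be held; EINVAL is the only possible failure, and a successful
 * call also clears TF2_LOG_AUTO, so the session is no longer treated as
 * auto-logged.
 *
 *	int error;
 *
 *	INP_WLOCK_ASSERT(tp->t_inpcb);
 *	error = tcp_log_state_change(tp, TCP_LOG_STATE_TAIL);
 *	if (error != 0)
 *		return (error);
 */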
1582
1583 /* If tcp_drain() is called, flush half the log entries. */
1584 void
1585 tcp_log_drain(struct tcpcb *tp)
1586 {
1587         struct tcp_log_mem *log_entry, *next;
1588         int target, skip;
1589
1590         INP_WLOCK_ASSERT(tp->t_inpcb);
1591         if ((target = tp->t_lognum / 2) == 0)
1592                 return;
1593
1594         /*
1595          * If we are logging the "head" packets, we want to discard
1596          * from the tail of the queue. Otherwise, we want to discard
1597          * from the head.
1598          */
1599         if (tp->t_logstate == TCP_LOG_STATE_HEAD ||
1600             tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO) {
1601                 skip = tp->t_lognum - target;
1602                 STAILQ_FOREACH(log_entry, &tp->t_logs, tlm_queue)
1603                         if (!--skip)
1604                                 break;
1605                 KASSERT(log_entry != NULL,
1606                     ("%s: skipped through all entries!", __func__));
1607                 if (log_entry == NULL)
1608                         return;
1609                 while ((next = STAILQ_NEXT(log_entry, tlm_queue)) != NULL) {
1610                         STAILQ_REMOVE_AFTER(&tp->t_logs, log_entry, tlm_queue);
1611                         tcp_log_entry_refcnt_rem(next);
1612                         tcp_log_remove_log_cleanup(tp, next);
1613 #ifdef INVARIANTS
1614                         target--;
1615 #endif
1616                 }
1617                 KASSERT(target == 0,
1618                     ("%s: After removing from tail, target was %d", __func__,
1619                         target));
1620         } else if (tp->t_logstate == TCP_LOG_STATE_CONTINUAL) {
1621                 (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
1622                     M_NOWAIT, false);
1623         } else {
1624                 while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL &&
1625                     target--)
1626                         tcp_log_remove_log_head(tp, log_entry);
1627                 KASSERT(target <= 0,
1628                     ("%s: After removing from head, target was %d", __func__,
1629                         target));
1630                 KASSERT(tp->t_lognum > 0,
1631                     ("%s: After removing from head, tp->t_lognum was %d",
1632                         __func__, tp->t_lognum));
1633                 KASSERT(log_entry != NULL,
1634                     ("%s: After removing from head, the tailq was empty",
1635                         __func__));
1636         }
1637 }
1638
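/*
 * Copy data to the destination described by a sockopt. If the sockopt
 * carries a thread pointer, the destination is a userspace buffer and
 * copyout() is required; otherwise (e.g. the kernel-internal sockopt
 * built by tcp_log_expandlogbuf(), which sets sopt_td to NULL), the
 * destination is kernel memory and a plain bcopy() suffices.
 */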
1639 static inline int
1640 tcp_log_copyout(struct sockopt *sopt, void *src, void *dst, size_t len)
1641 {
1642
1643         if (sopt->sopt_td != NULL)
1644                 return (copyout(src, dst, len));
1645         bcopy(src, dst, len);
1646         return (0);
1647 }
1648
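/*
 * Copy the log entries on the given list into the buffer described by
 * the sockopt. Each record is a struct tcp_log_buffer; when an entry
 * carries verbose information (TLB_FLAG_VERBOSE), a struct
 * tcp_log_verbose immediately follows it. count must match the number
 * of entries on the list; on return, *end points just past the last
 * byte written.
 */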
1649 static int
1650 tcp_log_logs_to_buf(struct sockopt *sopt, struct tcp_log_stailq *log_tailqp,
1651     struct tcp_log_buffer **end, int count)
1652 {
1653         struct tcp_log_buffer *out_entry;
1654         struct tcp_log_mem *log_entry;
1655         size_t entrysize;
1656         int error;
1657 #ifdef INVARIANTS
1658         int orig_count = count;
1659 #endif
1660
1661         /* Copy the data out. */
1662         error = 0;
1663         out_entry = (struct tcp_log_buffer *) sopt->sopt_val;
1664         STAILQ_FOREACH(log_entry, log_tailqp, tlm_queue) {
1665                 count--;
1666                 KASSERT(count >= 0,
1667                     ("%s:%d: Exceeded expected count (%d) processing list %p",
1668                     __func__, __LINE__, orig_count, log_tailqp));
1669
1670 #ifdef TCPLOG_DEBUG_COUNTERS
1671                 counter_u64_add(tcp_log_que_copyout, 1);
1672 #endif
1673
1674                 /*
1675                  * Skip copying out the header if it isn't present.
1676                  * Instead, copy out zeros (to ensure we don't leak info).
1677                  * TODO: Make sure we truly do zero everything we don't
1678                  * explicitly set.
1679                  */
1680                 if (log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_HDR)
1681                         entrysize = sizeof(struct tcp_log_buffer);
1682                 else
1683                         entrysize = offsetof(struct tcp_log_buffer, tlb_th);
1684                 error = tcp_log_copyout(sopt, &log_entry->tlm_buf, out_entry,
1685                     entrysize);
1686                 if (error)
1687                         break;
1688                 if (!(log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_HDR)) {
1689                         error = tcp_log_copyout(sopt, zerobuf,
1690                             ((uint8_t *)out_entry) + entrysize,
1691                             sizeof(struct tcp_log_buffer) - entrysize);
1692                 }
1693
1694                 /*
1695                  * Copy out the verbose data, if needed. Either way,
1696                  * increment the output pointer the correct amount.
1697                  */
1698                 if (log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_VERBOSE) {
1699                         error = tcp_log_copyout(sopt, &log_entry->tlm_v,
1700                             out_entry->tlb_verbose,
1701                             sizeof(struct tcp_log_verbose));
1702                         if (error)
1703                                 break;
1704                         out_entry = (struct tcp_log_buffer *)
1705                             (((uint8_t *) (out_entry + 1)) +
1706                             sizeof(struct tcp_log_verbose));
1707                 } else
1708                         out_entry++;
1709         }
1710         *end = out_entry;
1711         KASSERT(error || count == 0,
1712             ("%s:%d: Less than expected count (%d) processing list %p"
1713             " (%d remain)", __func__, __LINE__, orig_count,
1714             log_tailqp, count));
1715
1716         return (error);
1717 }
1718
1719 /*
1720  * Copy out the buffer. Note that we do incremental copying, so
1721  * sooptcopyout() won't work. However, the goal is to produce the same
1722  * end result as if we copied in the entire user buffer, updated it,
1723  * and then used sooptcopyout() to copy it out.
1724  *
1725  * NOTE: This should be called with a write lock on the PCB; however,
1726  * the function will drop it after it extracts the data from the TCPCB.
1727  */
1728 int
1729 tcp_log_getlogbuf(struct sockopt *sopt, struct tcpcb *tp)
1730 {
1731         struct tcp_log_stailq log_tailq;
1732         struct tcp_log_mem *log_entry, *log_next;
1733         struct tcp_log_buffer *out_entry;
1734         struct inpcb *inp;
1735         size_t outsize, entrysize;
1736         int error, outnum;
1737
1738         INP_WLOCK_ASSERT(tp->t_inpcb);
1739         inp = tp->t_inpcb;
1740
1741         /*
1742          * Determine which log entries will fit in the buffer. As an
1743          * optimization, skip this if all the entries will clearly fit
1744          * in the buffer. (However, get an exact size if we are using
1745          * INVARIANTS.)
1746          */
1747 #ifndef INVARIANTS
1748         if (sopt->sopt_valsize / (sizeof(struct tcp_log_buffer) +
1749             sizeof(struct tcp_log_verbose)) >= tp->t_lognum) {
1750                 log_entry = STAILQ_LAST(&tp->t_logs, tcp_log_mem, tlm_queue);
1751                 log_next = NULL;
1752                 outsize = 0;
1753                 outnum = tp->t_lognum;
1754         } else {
1755 #endif
1756                 outsize = outnum = 0;
1757                 log_entry = NULL;
1758                 STAILQ_FOREACH(log_next, &tp->t_logs, tlm_queue) {
1759                         entrysize = sizeof(struct tcp_log_buffer);
1760                         if (log_next->tlm_buf.tlb_eventflags &
1761                             TLB_FLAG_VERBOSE)
1762                                 entrysize += sizeof(struct tcp_log_verbose);
1763                         if ((sopt->sopt_valsize - outsize) < entrysize)
1764                                 break;
1765                         outsize += entrysize;
1766                         outnum++;
1767                         log_entry = log_next;
1768                 }
1769                 KASSERT(outsize <= sopt->sopt_valsize,
1770                     ("%s: calculated output size (%zu) greater than available "
1771                         "space (%zu)", __func__, outsize, sopt->sopt_valsize));
1772 #ifndef INVARIANTS
1773         }
1774 #endif
1775
1776         /*
1777          * Copy traditional sooptcopyout() behavior: if sopt->sopt_val
1778          * is NULL, silently skip the copy. However, in this case, we
1779          * will leave the list alone and return. Functionally, this
1780          * gives userspace a way to poll for an approximate buffer
1781          * size they will need to get the log entries.
1782          */
1783         if (sopt->sopt_val == NULL) {
1784                 INP_WUNLOCK(inp);
1785                 if (outsize == 0) {
1786                         outsize = outnum * (sizeof(struct tcp_log_buffer) +
1787                             sizeof(struct tcp_log_verbose));
1788                 }
1789                 if (sopt->sopt_valsize > outsize)
1790                         sopt->sopt_valsize = outsize;
1791                 return (0);
1792         }
1793
1794         /*
1795          * Break apart the list. We'll save the ones we want to copy
1796          * out locally and remove them from the TCPCB list. We can
1797          * then drop the INPCB lock while we do the copyout.
1798          *
1799          * There are roughly three cases:
1800          * 1. There was nothing to copy out. That's easy: drop the
1801          * lock and return.
1802          * 2. We are copying out the entire list. Again, that's easy:
1803          * move the whole list.
1804          * 3. We are copying out a partial list. That's harder. We
1805          * need to update the list book-keeping entries.
1806          */
1807         if (log_entry != NULL && log_next == NULL) {
1808                 /* Move entire list. */
1809                 KASSERT(outnum == tp->t_lognum,
1810                     ("%s:%d: outnum (%d) should match tp->t_lognum (%d)",
1811                         __func__, __LINE__, outnum, tp->t_lognum));
1812                 log_tailq = tp->t_logs;
1813                 tp->t_lognum = 0;
1814                 STAILQ_INIT(&tp->t_logs);
1815         } else if (log_entry != NULL) {
1816                 /* Move partial list. */
1817                 KASSERT(outnum < tp->t_lognum,
1818                     ("%s:%d: outnum (%d) not less than tp->t_lognum (%d)",
1819                         __func__, __LINE__, outnum, tp->t_lognum));
1820                 STAILQ_FIRST(&log_tailq) = STAILQ_FIRST(&tp->t_logs);
1821                 STAILQ_FIRST(&tp->t_logs) = STAILQ_NEXT(log_entry, tlm_queue);
1822                 KASSERT(STAILQ_NEXT(log_entry, tlm_queue) != NULL,
1823                     ("%s:%d: tp->t_logs is unexpectedly short "
1824                     "(tp: %p, log_tailq: %p, outnum: %d, tp->t_lognum: %d)",
1825                     __func__, __LINE__, tp, &log_tailq, outnum, tp->t_lognum));
1826                 STAILQ_NEXT(log_entry, tlm_queue) = NULL;
1827                 log_tailq.stqh_last = &STAILQ_NEXT(log_entry, tlm_queue);
1828                 tp->t_lognum -= outnum;
1829         } else
1830                 STAILQ_INIT(&log_tailq);
1831
1832         /* Drop the PCB lock. */
1833         INP_WUNLOCK(inp);
1834
1835         /* Copy the data out. */
1836         error = tcp_log_logs_to_buf(sopt, &log_tailq, &out_entry, outnum);
1837
1838         if (error) {
1839                 /* Restore list */
1840                 INP_WLOCK(inp);
1841                 if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0) {
1842                         tp = intotcpcb(inp);
1843
1844                         /* Merge the two lists. */
1845                         STAILQ_CONCAT(&log_tailq, &tp->t_logs);
1846                         tp->t_logs = log_tailq;
1847                         tp->t_lognum += outnum;
1848                 }
1849                 INP_WUNLOCK(inp);
1850         } else {
1851                 /* Sanity check entries */
1852                 KASSERT(((caddr_t)out_entry - (caddr_t)sopt->sopt_val) ==
1853                     outsize, ("%s: Actual output size (%zu) != "
1854                         "calculated output size (%zu)", __func__,
1855                         (size_t)((caddr_t)out_entry - (caddr_t)sopt->sopt_val),
1856                         outsize));
1857
1858                 /* Free the entries we just copied out. */
1859                 STAILQ_FOREACH_SAFE(log_entry, &log_tailq, tlm_queue, log_next) {
1860                         tcp_log_entry_refcnt_rem(log_entry);
1861                         uma_zfree(tcp_log_zone, log_entry);
1862                 }
1863         }
1864
1865         sopt->sopt_valsize = (size_t)((caddr_t)out_entry -
1866             (caddr_t)sopt->sopt_val);
1867         return (error);
1868 }
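
/*
 * Illustrative sketch (not part of the build): the usual userspace
 * pattern for the socket option that lands here. Pass a NULL buffer
 * with a generous length first to learn the space needed, then fetch
 * the entries; on return, len holds the bytes actually copied out.
 * TCP_LOGBUF is assumed to be the option name from <netinet/tcp.h>.
 *
 *	socklen_t len = 1 << 24;
 *	void *buf;
 *
 *	if (getsockopt(s, IPPROTO_TCP, TCP_LOGBUF, NULL, &len) == 0 &&
 *	    (buf = malloc(len)) != NULL)
 *		(void)getsockopt(s, IPPROTO_TCP, TCP_LOGBUF, buf, &len);
 */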
1869
1870 static void
1871 tcp_log_free_queue(struct tcp_log_dev_queue *param)
1872 {
1873         struct tcp_log_dev_log_queue *entry;
1874
1875         KASSERT(param != NULL, ("%s: called with NULL param", __func__));
1876         if (param == NULL)
1877                 return;
1878
1879         entry = (struct tcp_log_dev_log_queue *)param;
1880
1881         /* Free the entries. */
1882         tcp_log_free_entries(&entry->tldl_entries, &entry->tldl_count);
1883
1884         /* Free the buffer, if it is allocated. */
1885         if (entry->tldl_common.tldq_buf != NULL)
1886                 free(entry->tldl_common.tldq_buf, M_TCPLOGDEV);
1887
1888         /* Free the queue entry. */
1889         free(entry, M_TCPLOGDEV);
1890 }
1891
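/*
 * Transform a queued set of log entries into the contiguous format the
 * log device expects: a struct tcp_log_header immediately followed by
 * the packed entries. The tcp_log_dev code invokes this through the
 * tldq_xform callback. Returns NULL on allocation or copy failure, in
 * which case the entries remain attached to the queue entry and will be
 * freed by its destructor.
 */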
1892 static struct tcp_log_common_header *
1893 tcp_log_expandlogbuf(struct tcp_log_dev_queue *param)
1894 {
1895         struct tcp_log_dev_log_queue *entry;
1896         struct tcp_log_header *hdr;
1897         uint8_t *end;
1898         struct sockopt sopt;
1899         int error;
1900
1901         entry = (struct tcp_log_dev_log_queue *)param;
1902
1903         /* Take a worst-case guess at space needs. */
1904         sopt.sopt_valsize = sizeof(struct tcp_log_header) +
1905             entry->tldl_count * (sizeof(struct tcp_log_buffer) +
1906             sizeof(struct tcp_log_verbose));
1907         hdr = malloc(sopt.sopt_valsize, M_TCPLOGDEV, M_NOWAIT);
1908         if (hdr == NULL) {
1909 #ifdef TCPLOG_DEBUG_COUNTERS
1910                 counter_u64_add(tcp_log_que_fail5, entry->tldl_count);
1911 #endif
1912                 return (NULL);
1913         }
1914         sopt.sopt_val = hdr + 1;
1915         sopt.sopt_valsize -= sizeof(struct tcp_log_header);
1916         sopt.sopt_td = NULL;
1917
1918         error = tcp_log_logs_to_buf(&sopt, &entry->tldl_entries,
1919             (struct tcp_log_buffer **)&end, entry->tldl_count);
1920         if (error) {
1921                 free(hdr, M_TCPLOGDEV);
1922                 return (NULL);
1923         }
1924
1925         /* Free the entries. */
1926         tcp_log_free_entries(&entry->tldl_entries, &entry->tldl_count);
1927         entry->tldl_count = 0;
1928
1929         memset(hdr, 0, sizeof(struct tcp_log_header));
1930         hdr->tlh_version = TCP_LOG_BUF_VER;
1931         hdr->tlh_type = TCP_LOG_DEV_TYPE_BBR;
1932         hdr->tlh_length = end - (uint8_t *)hdr;
1933         hdr->tlh_ie = entry->tldl_ie;
1934         hdr->tlh_af = entry->tldl_af;
1935         getboottime(&hdr->tlh_offset);
1936         strlcpy(hdr->tlh_id, entry->tldl_id, TCP_LOG_ID_LEN);
1937         strlcpy(hdr->tlh_reason, entry->tldl_reason, TCP_LOG_REASON_LEN);
1938         return ((struct tcp_log_common_header *)hdr);
1939 }
1940
1941 /*
1942  * Queue the tcpcb's log buffer for transmission via the log buffer facility.
1943  *
1944  * NOTE: This should be called with a write lock on the PCB.
1945  *
1946  * how should be M_WAITOK or M_NOWAIT. If M_WAITOK, the function will drop
1947  * and reacquire the INP lock if it needs to do so.
1948  *
1949  * If force is false, this will only dump auto-logged sessions if
1950  * tcp_log_auto_all is true or if there is a log ID defined for the session.
1951  */
1952 int
1953 tcp_log_dump_tp_logbuf(struct tcpcb *tp, char *reason, int how, bool force)
1954 {
1955         struct tcp_log_dev_log_queue *entry;
1956         struct inpcb *inp;
1957 #ifdef TCPLOG_DEBUG_COUNTERS
1958         int num_entries;
1959 #endif
1960
1961         inp = tp->t_inpcb;
1962         INP_WLOCK_ASSERT(inp);
1963
1964         /* If there are no log entries, there is nothing to do. */
1965         if (tp->t_lognum == 0)
1966                 return (0);
1967
1968         /* Check for a log ID. */
1969         if (tp->t_lib == NULL && (tp->t_flags2 & TF2_LOG_AUTO) &&
1970             !tcp_log_auto_all && !force) {
1971                 struct tcp_log_mem *log_entry;
1972
1973                 /*
1974                  * We needed a log ID and none was found. Free the log entries
1975                  * and return success. Also, cancel further logging. If the
1976                  * session doesn't have a log ID by now, we'll assume it isn't
1977                  * going to get one.
1978                  */
1979                 while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
1980                         tcp_log_remove_log_head(tp, log_entry);
1981                 KASSERT(tp->t_lognum == 0,
1982                     ("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
1983                         __func__, tp->t_lognum));
1984                 tp->t_logstate = TCP_LOG_STATE_OFF;
1985                 return (0);
1986         }
1987
1988         /*
1989          * Allocate memory. If we must wait, we'll need to drop the locks
1990          * and reacquire them (and do all the related business that goes
1991          * along with that).
1992          */
1993         entry = malloc(sizeof(struct tcp_log_dev_log_queue), M_TCPLOGDEV,
1994             M_NOWAIT);
1995         if (entry == NULL && (how & M_NOWAIT)) {
1996 #ifdef TCPLOG_DEBUG_COUNTERS
1997                 counter_u64_add(tcp_log_que_fail3, 1);
1998 #endif
1999                 return (ENOBUFS);
2000         }
2001         if (entry == NULL) {
2002                 INP_WUNLOCK(inp);
2003                 entry = malloc(sizeof(struct tcp_log_dev_log_queue),
2004                     M_TCPLOGDEV, M_WAITOK);
2005                 INP_WLOCK(inp);
2006                 /*
2007                  * Note that this check is slightly overly-restrictive in
2008                  * that the TCB can survive either of these events.
2009                  * However, there is currently not a good way to ensure
2010                  * that is the case. So, if we hit this M_WAITOK path, we
2011                  * may end up dropping some entries. That seems like a
2012                  * small price to pay for safety.
2013                  */
2014                 if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
2015                         free(entry, M_TCPLOGDEV);
2016 #ifdef TCPLOG_DEBUG_COUNTERS
2017                         counter_u64_add(tcp_log_que_fail2, 1);
2018 #endif
2019                         return (ECONNRESET);
2020                 }
2021                 tp = intotcpcb(inp);
2022                 if (tp->t_lognum == 0) {
2023                         free(entry, M_TCPLOGDEV);
2024                         return (0);
2025                 }
2026         }
2027
2028         /* Fill in the unique parts of the queue entry. */
2029         if (tp->t_lib != NULL)
2030                 strlcpy(entry->tldl_id, tp->t_lib->tlb_id, TCP_LOG_ID_LEN);
2031         else
2032                 strlcpy(entry->tldl_id, "UNKNOWN", TCP_LOG_ID_LEN);
2033         if (reason != NULL)
2034                 strlcpy(entry->tldl_reason, reason, TCP_LOG_REASON_LEN);
2035         else
2036                 strlcpy(entry->tldl_reason, "UNKNOWN", TCP_LOG_REASON_LEN);
2037         entry->tldl_ie = inp->inp_inc.inc_ie;
2038         if (inp->inp_inc.inc_flags & INC_ISIPV6)
2039                 entry->tldl_af = AF_INET6;
2040         else
2041                 entry->tldl_af = AF_INET;
2042         entry->tldl_entries = tp->t_logs;
2043         entry->tldl_count = tp->t_lognum;
2044
2045         /* Fill in the common parts of the queue entry. */
2046         entry->tldl_common.tldq_buf = NULL;
2047         entry->tldl_common.tldq_xform = tcp_log_expandlogbuf;
2048         entry->tldl_common.tldq_dtor = tcp_log_free_queue;
2049
2050         /* Clear the log data from the TCPCB. */
2051 #ifdef TCPLOG_DEBUG_COUNTERS
2052         num_entries = tp->t_lognum;
2053 #endif
2054         tp->t_lognum = 0;
2055         STAILQ_INIT(&tp->t_logs);
2056
2057         /* Add the entry. If no one is listening, free the entry. */
2058         if (tcp_log_dev_add_log((struct tcp_log_dev_queue *)entry)) {
2059                 tcp_log_free_queue((struct tcp_log_dev_queue *)entry);
2060 #ifdef TCPLOG_DEBUG_COUNTERS
2061                 counter_u64_add(tcp_log_que_fail1, num_entries);
2062         } else {
2063                 counter_u64_add(tcp_log_queued, num_entries);
2064 #endif
2065         }
2066         return (0);
2067 }
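
/*
 * Illustrative sketch (not part of the build): dumping a session's
 * traces while tearing down a connection. The reason string is
 * arbitrary; M_NOWAIT keeps the INP lock held throughout, at the cost
 * of possibly failing to queue the entries under memory pressure.
 *
 *	INP_WLOCK_ASSERT(tp->t_inpcb);
 *	(void)tcp_log_dump_tp_logbuf(tp, "connection closed", M_NOWAIT,
 *	    false);
 */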
2068
2069 /*
2070  * Queue the log_id_node's log buffers for transmission via the log buffer
2071  * facility.
2072  *
2073  * NOTE: This should be called with the bucket locked and referenced.
2074  *
2075  * how should be M_WAITOK or M_NOWAIT. If M_WAITOK, the function will drop
2076  * and reacquire the bucket lock if it needs to do so. (The caller must
2077  * ensure that the tln is no longer on any lists so no one else will mess
2078  * with this while the lock is dropped!)
2079  */
2080 static int
2081 tcp_log_dump_node_logbuf(struct tcp_log_id_node *tln, char *reason, int how)
2082 {
2083         struct tcp_log_dev_log_queue *entry;
2084         struct tcp_log_id_bucket *tlb;
2085
2086         tlb = tln->tln_bucket;
2087         TCPID_BUCKET_LOCK_ASSERT(tlb);
2088         KASSERT(tlb->tlb_refcnt > 0,
2089             ("%s:%d: Called with unreferenced bucket (tln=%p, tlb=%p)",
2090             __func__, __LINE__, tln, tlb));
2091         KASSERT(tln->tln_closed,
2092             ("%s:%d: Called for node with tln_closed==false (tln=%p)",
2093             __func__, __LINE__, tln));
2094
2095         /* If there are no log entries, there is nothing to do. */
2096         if (tln->tln_count == 0)
2097                 return (0);
2098
2099         /*
2100          * Allocate memory. If we must wait, we'll need to drop the locks
2101          * and reacquire them (and do all the related business that goes
2102          * along with that).
2103          */
2104         entry = malloc(sizeof(struct tcp_log_dev_log_queue), M_TCPLOGDEV,
2105             M_NOWAIT);
2106         if (entry == NULL && (how & M_NOWAIT))
2107                 return (ENOBUFS);
2108         if (entry == NULL) {
2109                 TCPID_BUCKET_UNLOCK(tlb);
2110                 entry = malloc(sizeof(struct tcp_log_dev_log_queue),
2111                     M_TCPLOGDEV, M_WAITOK);
2112                 TCPID_BUCKET_LOCK(tlb);
2113         }
2114
2115         /* Fill in the common parts of the queue entry. */
2116         entry->tldl_common.tldq_buf = NULL;
2117         entry->tldl_common.tldq_xform = tcp_log_expandlogbuf;
2118         entry->tldl_common.tldq_dtor = tcp_log_free_queue;
2119
2120         /* Fill in the unique parts of the queue entry. */
2121         strlcpy(entry->tldl_id, tlb->tlb_id, TCP_LOG_ID_LEN);
2122         if (reason != NULL)
2123                 strlcpy(entry->tldl_reason, reason, TCP_LOG_REASON_LEN);
2124         else
2125                 strlcpy(entry->tldl_reason, "UNKNOWN", TCP_LOG_REASON_LEN);
2126         entry->tldl_ie = tln->tln_ie;
2127         entry->tldl_entries = tln->tln_entries;
2128         entry->tldl_count = tln->tln_count;
2129         entry->tldl_af = tln->tln_af;
2130
2131         /* Add the entry. If no one is listening, free the entry. */
2132         if (tcp_log_dev_add_log((struct tcp_log_dev_queue *)entry))
2133                 tcp_log_free_queue((struct tcp_log_dev_queue *)entry);
2134
2135         return (0);
2136 }
2137
2139 /*
2140  * Queue the log buffers for all sessions in a bucket for transmission via
2141  * the log buffer facility.
2142  *
2143  * NOTE: This should be called with a locked bucket; however, the function
2144  * will drop the lock.
2145  */
2146 #define LOCAL_SAVE      10
2147 static void
2148 tcp_log_dumpbucketlogs(struct tcp_log_id_bucket *tlb, char *reason)
2149 {
2150         struct tcp_log_id_node local_entries[LOCAL_SAVE];
2151         struct inpcb *inp;
2152         struct tcpcb *tp;
2153         struct tcp_log_id_node *cur_tln, *prev_tln, *tmp_tln;
2154         int i, num_local_entries, tree_locked;
2155         bool expireq_locked;
2156
2157         TCPID_BUCKET_LOCK_ASSERT(tlb);
2158
2159         /*
2160          * Take a reference on the bucket to keep it from disappearing until
2161          * we are done.
2162          */
2163         TCPID_BUCKET_REF(tlb);
2164
2165         /*
2166          * We'll try to create these without dropping locks. However, we
2167          * might very well need to drop locks to get memory. If that's the
2168          * case, we'll save up to 10 on the stack, and sacrifice the rest.
2169          * (Otherwise, we need to worry about finding our place again in a
2170          * potentially changed list. It just doesn't seem worth the trouble
2171          * to do that.)
2172          */
2173         expireq_locked = false;
2174         num_local_entries = 0;
2175         prev_tln = NULL;
2176         tree_locked = TREE_UNLOCKED;
2177         SLIST_FOREACH_SAFE(cur_tln, &tlb->tlb_head, tln_list, tmp_tln) {
2178                 /*
2179                  * If this isn't associated with a TCPCB, we can pull it off
2180                  * the list now. We need to be careful that the expire timer
2181                  * hasn't already taken ownership (tln_expiretime == SBT_MAX).
2182          * If so, we let the expire timer code free the data.
2183                  */
2184                 if (cur_tln->tln_closed) {
2185 no_inp:
2186                         /*
2187                          * Get the expireq lock so we can get a consistent
2188                          * read of tln_expiretime and so we can remove this
2189                          * from the expireq.
2190                          */
2191                         if (!expireq_locked) {
2192                                 TCPLOG_EXPIREQ_LOCK();
2193                                 expireq_locked = true;
2194                         }
2195
2196                         /*
2197                          * We ignore entries with tln_expiretime == SBT_MAX.
2198                          * The expire timer code already owns those.
2199                          */
2200                         KASSERT(cur_tln->tln_expiretime > (sbintime_t) 0,
2201                             ("%s:%d: node on the expire queue without positive "
2202                             "expire time", __func__, __LINE__));
2203                         if (cur_tln->tln_expiretime == SBT_MAX) {
2204                                 prev_tln = cur_tln;
2205                                 continue;
2206                         }
2207
2208                         /* Remove the entry from the expireq. */
2209                         STAILQ_REMOVE(&tcp_log_expireq_head, cur_tln,
2210                             tcp_log_id_node, tln_expireq);
2211
2212                         /* Remove the entry from the bucket. */
2213                         if (prev_tln != NULL)
2214                                 SLIST_REMOVE_AFTER(prev_tln, tln_list);
2215                         else
2216                                 SLIST_REMOVE_HEAD(&tlb->tlb_head, tln_list);
2217
2218                         /*
2219                          * Drop the INP and bucket reference counts. Due to
2220                          * lock-ordering rules, we need to drop the expire
2221                          * queue lock.
2222                          */
2223                         TCPLOG_EXPIREQ_UNLOCK();
2224                         expireq_locked = false;
2225
2226                         /* Drop the INP reference. */
2227                         INP_WLOCK(cur_tln->tln_inp);
2228                         if (!in_pcbrele_wlocked(cur_tln->tln_inp))
2229                                 INP_WUNLOCK(cur_tln->tln_inp);
2230
2231                         if (tcp_log_unref_bucket(tlb, &tree_locked, NULL)) {
2232 #ifdef INVARIANTS
2233                                 panic("%s: Bucket refcount unexpectedly 0.",
2234                                     __func__);
2235 #endif
2236                                 /*
2237                                  * Recover as best we can: free the entry we
2238                                  * own.
2239                                  */
2240                                 tcp_log_free_entries(&cur_tln->tln_entries,
2241                                     &cur_tln->tln_count);
2242                                 uma_zfree(tcp_log_node_zone, cur_tln);
2243                                 goto done;
2244                         }
2245
2246                         if (tcp_log_dump_node_logbuf(cur_tln, reason,
2247                             M_NOWAIT)) {
2248                                 /*
2249                                  * If we have space, save the entries locally.
2250                                  * Otherwise, free them.
2251                                  */
2252                                 if (num_local_entries < LOCAL_SAVE) {
2253                                         local_entries[num_local_entries] =
2254                                             *cur_tln;
2255                                         num_local_entries++;
2256                                 } else {
2257                                         tcp_log_free_entries(
2258                                             &cur_tln->tln_entries,
2259                                             &cur_tln->tln_count);
2260                                 }
2261                         }
2262
2263                         /* No matter what, we are done with the node now. */
2264                         uma_zfree(tcp_log_node_zone, cur_tln);
2265
2266                         /*
2267                          * Because we removed this entry from the list, prev_tln
2268                          * (which tracks the previous entry still on the tlb
2269                          * list) remains unchanged.
2270                          */
2271                         continue;
2272                 }
2273
2274                 /*
2275                  * If we get to this point, the session data is still held in
2276                  * the TCPCB. So, we need to pull the data out of that.
2277                  *
2278                  * We will need to drop the expireq lock so we can lock the INP.
2279                  * We can then try to extract the data the "easy" way. If that
2280                  * fails, we'll save the log entries for later.
2281                  */
2282                 if (expireq_locked) {
2283                         TCPLOG_EXPIREQ_UNLOCK();
2284                         expireq_locked = false;
2285                 }
2286
2287                 /* Lock the INP and then re-check the state. */
2288                 inp = cur_tln->tln_inp;
2289                 INP_WLOCK(inp);
2290                 /*
2291                  * If we caught this while it was transitioning, the data
2292                  * might have moved from the TCPCB to the tln (signified by
2293          * setting tln_closed to true). If so, treat this like an
2294                  * inactive connection.
2295                  */
2296                 if (cur_tln->tln_closed) {
2297                         /*
2298                          * It looks like we may have caught this connection
2299                          * while it was transitioning from active to inactive.
2300                          * Treat this like an inactive connection.
2301                          */
2302                         INP_WUNLOCK(inp);
2303                         goto no_inp;
2304                 }
2305
2306                 /*
2307                  * Try to dump the data from the tp without dropping the lock.
2308                  * If this fails, try to save off the data locally.
2309                  */
2310                 tp = cur_tln->tln_tp;
2311                 if (tcp_log_dump_tp_logbuf(tp, reason, M_NOWAIT, true) &&
2312                     num_local_entries < LOCAL_SAVE) {
2313                         tcp_log_move_tp_to_node(tp,
2314                             &local_entries[num_local_entries]);
2315                         local_entries[num_local_entries].tln_closed = 1;
2316                         KASSERT(local_entries[num_local_entries].tln_bucket ==
2317                             tlb, ("%s: %d: bucket mismatch for node %p",
2318                             __func__, __LINE__, cur_tln));
2319                         num_local_entries++;
2320                 }
2321
2322                 INP_WUNLOCK(inp);
2323
2324                 /*
2325                  * We are going to leave the current tln on the list. It will
2326                  * become the previous tln.
2327                  */
2328                 prev_tln = cur_tln;
2329         }
2330
2331         /* Drop our locks, if any. */
2332         KASSERT(tree_locked == TREE_UNLOCKED,
2333             ("%s: %d: tree unexpectedly locked", __func__, __LINE__));
2334         switch (tree_locked) {
2335         case TREE_WLOCKED:
2336                 TCPID_TREE_WUNLOCK();
2337                 tree_locked = TREE_UNLOCKED;
2338                 break;
2339         case TREE_RLOCKED:
2340                 TCPID_TREE_RUNLOCK();
2341                 tree_locked = TREE_UNLOCKED;
2342                 break;
2343         }
2344         if (expireq_locked) {
2345                 TCPLOG_EXPIREQ_UNLOCK();
2346                 expireq_locked = false;
2347         }
2348
2349         /*
2350          * Try again for any saved entries. tcp_log_dump_node_logbuf() is
2351          * guaranteed to free the log entries within the node. And, since
2352          * the node itself is on our stack, we don't need to free it.
2353          */
2354         for (i = 0; i < num_local_entries; i++)
2355                 tcp_log_dump_node_logbuf(&local_entries[i], reason, M_WAITOK);
2356
2357         /* Drop our reference. */
2358         if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
2359                 TCPID_BUCKET_UNLOCK(tlb);
2360
2361 done:
2362         /* Drop our locks, if any. */
2363         switch (tree_locked) {
2364         case TREE_WLOCKED:
2365                 TCPID_TREE_WUNLOCK();
2366                 break;
2367         case TREE_RLOCKED:
2368                 TCPID_TREE_RUNLOCK();
2369                 break;
2370         }
2371         if (expireq_locked)
2372                 TCPLOG_EXPIREQ_UNLOCK();
2373 }
2374 #undef  LOCAL_SAVE
2375
2377 /*
2378  * Queue the log buffers for all sessions in a bucket for transmission via
2379  * the log buffer facility.
2380  *
2381  * NOTE: This should be called with a locked INP; however, the function
2382  * will drop the lock.
2383  */
2384 void
2385 tcp_log_dump_tp_bucket_logbufs(struct tcpcb *tp, char *reason)
2386 {
2387         struct tcp_log_id_bucket *tlb;
2388         int tree_locked;
2389
2390         /* Figure out our bucket and lock it. */
2391         INP_WLOCK_ASSERT(tp->t_inpcb);
2392         tlb = tp->t_lib;
2393         if (tlb == NULL) {
2394                 /*
2395                  * No bucket; treat this like a request to dump a single
2396                  * session's traces.
2397                  */
2398                 (void)tcp_log_dump_tp_logbuf(tp, reason, M_WAITOK, true);
2399                 INP_WUNLOCK(tp->t_inpcb);
2400                 return;
2401         }
2402         TCPID_BUCKET_REF(tlb);
2403         INP_WUNLOCK(tp->t_inpcb);
2404         TCPID_BUCKET_LOCK(tlb);
2405
2406         /* If we are the last reference, we have nothing more to do here. */
2407         tree_locked = TREE_UNLOCKED;
2408         if (tcp_log_unref_bucket(tlb, &tree_locked, NULL)) {
2409                 switch (tree_locked) {
2410                 case TREE_WLOCKED:
2411                         TCPID_TREE_WUNLOCK();
2412                         break;
2413                 case TREE_RLOCKED:
2414                         TCPID_TREE_RUNLOCK();
2415                         break;
2416                 }
2417                 return;
2418         }
2419
2420         /* Turn this over to tcp_log_dumpbucketlogs() to finish the work. */
2421         tcp_log_dumpbucketlogs(tlb, reason);
2422 }
2423
2424 /*
2425  * Mark the end of a flow with the current stack. A stack can add
2426  * stack-specific info to this trace event by overriding this
2427  * function (see bbr_log_flowend() for example).
2428  */
2429 void
2430 tcp_log_flowend(struct tcpcb *tp)
2431 {
2432         if (tp->t_logstate != TCP_LOG_STATE_OFF) {
2433                 struct socket *so = tp->t_inpcb->inp_socket;
2434                 TCP_LOG_EVENT(tp, NULL, &so->so_rcv, &so->so_snd,
2435                                 TCP_LOG_FLOWEND, 0, 0, NULL, false);
2436         }
2437 }
2438
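/*
 * Illustrative sketch (not part of the build): a stack-specific flowend
 * logger, in the style of bbr_log_flowend(), would record its private
 * state through the stackinfo argument before the flow ends. How the
 * union's fields are filled in is stack-specific and hypothetical here.
 *
 *	static void
 *	example_log_flowend(struct tcpcb *tp)
 *	{
 *		union tcp_log_stackspecific log;
 *		struct socket *so = tp->t_inpcb->inp_socket;
 *
 *		if (tp->t_logstate == TCP_LOG_STATE_OFF)
 *			return;
 *		memset(&log, 0, sizeof(log));
 *		... fill in stack-specific fields of log ...
 *		TCP_LOG_EVENT(tp, NULL, &so->so_rcv, &so->so_snd,
 *		    TCP_LOG_FLOWEND, 0, 0, &log, false);
 *	}
 */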