1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2016-2018 Netflix, Inc.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include <sys/param.h>
33 #include <sys/arb.h>
34 #include <sys/kernel.h>
35 #include <sys/lock.h>
36 #include <sys/malloc.h>
37 #include <sys/mutex.h>
38 #include <sys/qmath.h>
39 #include <sys/queue.h>
40 #include <sys/refcount.h>
41 #include <sys/rwlock.h>
42 #include <sys/socket.h>
43 #include <sys/socketvar.h>
44 #include <sys/sysctl.h>
45 #include <sys/tree.h>
46 #include <sys/stats.h> /* Must come after qmath.h and tree.h */
47 #include <sys/counter.h>
48
49 #include <dev/tcp_log/tcp_log_dev.h>
50
51 #include <net/if.h>
52 #include <net/if_var.h>
53 #include <net/vnet.h>
54
55 #include <netinet/in.h>
56 #include <netinet/in_pcb.h>
57 #include <netinet/in_var.h>
58 #include <netinet/tcp_var.h>
59 #include <netinet/tcp_log_buf.h>
60
61 /* Default expiry time */
62 #define TCP_LOG_EXPIRE_TIME     ((sbintime_t)60 * SBT_1S)
63
64 /* Max interval at which to run the expiry timer */
65 #define TCP_LOG_EXPIRE_INTVL    ((sbintime_t)5 * SBT_1S)
66
67 bool    tcp_log_verbose;
68 static uma_zone_t tcp_log_bucket_zone, tcp_log_node_zone, tcp_log_zone;
69 static int      tcp_log_session_limit = TCP_LOG_BUF_DEFAULT_SESSION_LIMIT;
70 static uint32_t tcp_log_version = TCP_LOG_BUF_VER;
71 RB_HEAD(tcp_log_id_tree, tcp_log_id_bucket);
72 static struct tcp_log_id_tree tcp_log_id_head;
73 static STAILQ_HEAD(, tcp_log_id_node) tcp_log_expireq_head =
74     STAILQ_HEAD_INITIALIZER(tcp_log_expireq_head);
75 static struct mtx tcp_log_expireq_mtx;
76 static struct callout tcp_log_expireq_callout;
77 static u_long tcp_log_auto_ratio = 0;
78 static volatile u_long tcp_log_auto_ratio_cur = 0;
79 static uint32_t tcp_log_auto_mode = TCP_LOG_STATE_TAIL;
80 static bool tcp_log_auto_all = false;
81 static uint32_t tcp_disable_all_bb_logs = 0;
82
83 RB_PROTOTYPE_STATIC(tcp_log_id_tree, tcp_log_id_bucket, tlb_rb, tcp_log_id_cmp)
84
85 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, bb, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
86     "TCP Black Box controls");
87
88 SYSCTL_BOOL(_net_inet_tcp_bb, OID_AUTO, log_verbose, CTLFLAG_RW, &tcp_log_verbose,
89     0, "Force verbose logging for TCP traces");
90
91 SYSCTL_INT(_net_inet_tcp_bb, OID_AUTO, log_session_limit,
92     CTLFLAG_RW, &tcp_log_session_limit, 0,
93     "Maximum number of events maintained for each TCP session");
94
95 SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_global_limit, CTLFLAG_RW,
96     &tcp_log_zone, "Maximum number of events maintained for all TCP sessions");
97
98 SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_global_entries, CTLFLAG_RD,
99     &tcp_log_zone, "Current number of events maintained for all TCP sessions");
100
101 SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_id_limit, CTLFLAG_RW,
102     &tcp_log_bucket_zone, "Maximum number of log IDs");
103
104 SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_id_entries, CTLFLAG_RD,
105     &tcp_log_bucket_zone, "Current number of log IDs");
106
107 SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_id_tcpcb_limit, CTLFLAG_RW,
108     &tcp_log_node_zone, "Maximum number of tcpcbs with log IDs");
109
110 SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_id_tcpcb_entries, CTLFLAG_RD,
111     &tcp_log_node_zone, "Current number of tcpcbs with log IDs");
112
113 SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, log_version, CTLFLAG_RD, &tcp_log_version,
114     0, "Version of log formats exported");
115
116 SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, disable_all, CTLFLAG_RW,
117     &tcp_disable_all_bb_logs, TCP_LOG_STATE_HEAD_AUTO,
118     "Disable all BB logging for all connections");
119
120 SYSCTL_ULONG(_net_inet_tcp_bb, OID_AUTO, log_auto_ratio, CTLFLAG_RW,
121     &tcp_log_auto_ratio, 0, "Do auto capturing for 1 out of N sessions");
122
123 SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, log_auto_mode, CTLFLAG_RW,
124     &tcp_log_auto_mode, TCP_LOG_STATE_HEAD_AUTO,
125     "Logging mode for auto-selected sessions (default is TCP_LOG_STATE_HEAD_AUTO)");
126
127 SYSCTL_BOOL(_net_inet_tcp_bb, OID_AUTO, log_auto_all, CTLFLAG_RW,
128     &tcp_log_auto_all, false,
129     "Auto-select from all sessions (rather than just those with IDs)");
130
131 #ifdef TCPLOG_DEBUG_COUNTERS
132 counter_u64_t tcp_log_queued;
133 counter_u64_t tcp_log_que_fail1;
134 counter_u64_t tcp_log_que_fail2;
135 counter_u64_t tcp_log_que_fail3;
136 counter_u64_t tcp_log_que_fail4;
137 counter_u64_t tcp_log_que_fail5;
138 counter_u64_t tcp_log_que_copyout;
139 counter_u64_t tcp_log_que_read;
140 counter_u64_t tcp_log_que_freed;
141
142 SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, queued, CTLFLAG_RD,
143     &tcp_log_queued, "Number of entries queued");
144 SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail1, CTLFLAG_RD,
145     &tcp_log_que_fail1, "Number of entries queued but fail 1");
146 SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail2, CTLFLAG_RD,
147     &tcp_log_que_fail2, "Number of entries queued but fail 2");
148 SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail3, CTLFLAG_RD,
149     &tcp_log_que_fail3, "Number of entries queued but fail 3");
150 SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail4, CTLFLAG_RD,
151     &tcp_log_que_fail4, "Number of entries queued but fail 4");
152 SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail5, CTLFLAG_RD,
153     &tcp_log_que_fail5, "Number of entries queued but fail 5");
154 SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, copyout, CTLFLAG_RD,
155     &tcp_log_que_copyout, "Number of entries copied out");
156 SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, read, CTLFLAG_RD,
157     &tcp_log_que_read, "Number of entries read from the queue");
158 SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, freed, CTLFLAG_RD,
159     &tcp_log_que_freed, "Number of entries freed after reading");
160 #endif
161
162 #ifdef INVARIANTS
163 #define TCPLOG_DEBUG_RINGBUF
164 #endif
165 /* Number of requests needed to consider a PCB ID "active". */
166 #define ACTIVE_REQUEST_COUNT    10
167
168 /* Statistics tracking for "active" PCB IDs. */
169 static counter_u64_t tcp_log_pcb_ids_cur;
170 static counter_u64_t tcp_log_pcb_ids_tot;
171
172 SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, pcb_ids_cur, CTLFLAG_RD,
173     &tcp_log_pcb_ids_cur, "Number of pcb IDs allocated in the system");
174 SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, pcb_ids_tot, CTLFLAG_RD,
175     &tcp_log_pcb_ids_tot, "Total number of pcb IDs that have been allocated");
176
177 struct tcp_log_mem
178 {
179         STAILQ_ENTRY(tcp_log_mem) tlm_queue;
180         struct tcp_log_buffer   tlm_buf;
181         struct tcp_log_verbose  tlm_v;
182 #ifdef TCPLOG_DEBUG_RINGBUF
183         volatile int            tlm_refcnt;
184 #endif
185 };
186
187 /* 60 bytes for the header plus 16 bytes of padding. */
188 static uint8_t  zerobuf[76];
189
190 /*
191  * Lock order:
192  * 1. TCPID_TREE
193  * 2. TCPID_BUCKET
194  * 3. INP
195  *
196  * Rules:
197  * A. You need a lock on the Tree to add/remove buckets.
198  * B. You need a lock on the bucket to add/remove nodes from the bucket.
199  * C. To change information in a node, you need the INP lock if the tln_closed
200  *    field is false. Otherwise, you need the bucket lock. (Note that the
201  *    tln_closed field can change at any point, so you need to recheck the
202  *    entry after acquiring the INP lock.)
203  * D. To remove a node from the bucket, you must have that entry locked,
204  *    according to the criteria of Rule C. Also, the node must not be on
205  *    the expiry queue.
206  * E. The exception to C is the expiry queue fields, which are locked by
207  *    the TCPLOG_EXPIREQ lock.
208  *
209  * Buckets have a reference count. Each node is a reference. Further,
210  * other callers may add reference counts to keep a bucket from disappearing.
211  * You can add a reference as long as you own a lock sufficient to keep the
212  * bucket from disappearing. For example, a common use is:
213  *   a. Have a locked INP, but need to lock the TCPID_BUCKET.
214  *   b. Add a refcount on the bucket. (Safe because the INP lock prevents
215  *      the TCPID_BUCKET from going away.)
216  *   c. Drop the INP lock.
217  *   d. Acquire a lock on the TCPID_BUCKET.
218  *   e. Acquire a lock on the INP.
219  *   f. Drop the refcount on the bucket.
220  *      (At this point, the bucket may disappear.)
221  *
222  * Expire queue lock:
223  * You can acquire this with either the bucket or INP lock. Don't reverse it.
224  * When the expire code has committed to freeing a node, it resets the expiry
225  * time to SBT_MAX. That is the signal to everyone else that they should
226  * leave that node alone.
227  */
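/*
 * A sketch of steps a-f above, using this file's own macros (a hedged
 * illustration, assuming a write-locked INP whose tp->t_lib, "tlb"
 * here, is non-NULL; tcp_log_set_id() below contains a real instance
 * of this pattern):
 *
 *	TCPID_BUCKET_REF(tlb);		 (b: INP lock keeps tlb alive)
 *	INP_WUNLOCK(inp);		 (c: avoid a lock order reversal)
 *	TCPID_BUCKET_LOCK(tlb);		 (d)
 *	INP_WLOCK(inp);			 (e: revalidate connection state)
 *	if (!tcp_log_unref_bucket(tlb, &tree_locked, inp))	 (f)
 *		TCPID_BUCKET_UNLOCK(tlb);
 */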
228 static struct rwlock tcp_id_tree_lock;
229 #define TCPID_TREE_WLOCK()              rw_wlock(&tcp_id_tree_lock)
230 #define TCPID_TREE_RLOCK()              rw_rlock(&tcp_id_tree_lock)
231 #define TCPID_TREE_UPGRADE()            rw_try_upgrade(&tcp_id_tree_lock)
232 #define TCPID_TREE_WUNLOCK()            rw_wunlock(&tcp_id_tree_lock)
233 #define TCPID_TREE_RUNLOCK()            rw_runlock(&tcp_id_tree_lock)
234 #define TCPID_TREE_WLOCK_ASSERT()       rw_assert(&tcp_id_tree_lock, RA_WLOCKED)
235 #define TCPID_TREE_RLOCK_ASSERT()       rw_assert(&tcp_id_tree_lock, RA_RLOCKED)
236 #define TCPID_TREE_UNLOCK_ASSERT()      rw_assert(&tcp_id_tree_lock, RA_UNLOCKED)
237
238 #define TCPID_BUCKET_LOCK_INIT(tlb)     mtx_init(&((tlb)->tlb_mtx), "tcp log id bucket", NULL, MTX_DEF)
239 #define TCPID_BUCKET_LOCK_DESTROY(tlb)  mtx_destroy(&((tlb)->tlb_mtx))
240 #define TCPID_BUCKET_LOCK(tlb)          mtx_lock(&((tlb)->tlb_mtx))
241 #define TCPID_BUCKET_UNLOCK(tlb)        mtx_unlock(&((tlb)->tlb_mtx))
242 #define TCPID_BUCKET_LOCK_ASSERT(tlb)   mtx_assert(&((tlb)->tlb_mtx), MA_OWNED)
243 #define TCPID_BUCKET_UNLOCK_ASSERT(tlb) mtx_assert(&((tlb)->tlb_mtx), MA_NOTOWNED)
244
245 #define TCPID_BUCKET_REF(tlb)           refcount_acquire(&((tlb)->tlb_refcnt))
246 #define TCPID_BUCKET_UNREF(tlb)         refcount_release(&((tlb)->tlb_refcnt))
247
248 #define TCPLOG_EXPIREQ_LOCK()           mtx_lock(&tcp_log_expireq_mtx)
249 #define TCPLOG_EXPIREQ_UNLOCK()         mtx_unlock(&tcp_log_expireq_mtx)
250
251 SLIST_HEAD(tcp_log_id_head, tcp_log_id_node);
252
253 struct tcp_log_id_bucket
254 {
255         /*
256          * tlb_id must be first. This lets us use strcmp on
257          * (struct tcp_log_id_bucket *) and (char *) interchangeably.
258          */
259         char                            tlb_id[TCP_LOG_ID_LEN];
260         char                            tlb_tag[TCP_LOG_TAG_LEN];
261         RB_ENTRY(tcp_log_id_bucket)     tlb_rb;
262         struct tcp_log_id_head          tlb_head;
263         struct mtx                      tlb_mtx;
264         volatile u_int                  tlb_refcnt;
265         volatile u_int                  tlb_reqcnt;
266         uint32_t                        tlb_loglimit;
267         uint8_t                         tlb_logstate;
268 };
269
270 struct tcp_log_id_node
271 {
272         SLIST_ENTRY(tcp_log_id_node) tln_list;
273         STAILQ_ENTRY(tcp_log_id_node) tln_expireq; /* Locked by the expireq lock */
274         sbintime_t              tln_expiretime; /* Locked by the expireq lock */
275
276         /*
277          * If INP is NULL, that means the connection has closed. We've
278          * saved the connection endpoint information and the log entries
279          * in the tln_ie and tln_entries members. We've also saved a pointer
280          * to the enclosing bucket here. If INP is not NULL, the information is
281          * in the PCB and not here.
282          */
283         struct inpcb            *tln_inp;
284         struct tcpcb            *tln_tp;
285         struct tcp_log_id_bucket *tln_bucket;
286         struct in_endpoints     tln_ie;
287         struct tcp_log_stailq   tln_entries;
288         int                     tln_count;
289         volatile int            tln_closed;
290         uint8_t                 tln_af;
291 };
292
293 enum tree_lock_state {
294         TREE_UNLOCKED = 0,
295         TREE_RLOCKED,
296         TREE_WLOCKED,
297 };
298
299 /* Do we want to select this session for auto-logging? */
300 static __inline bool
301 tcp_log_selectauto(void)
302 {
303
304         /*
305          * If we are doing auto-capturing, figure out whether we will capture
306          * this session.
307          */
308         if (tcp_log_auto_ratio &&
309             (tcp_disable_all_bb_logs == 0) &&
310             (atomic_fetchadd_long(&tcp_log_auto_ratio_cur, 1) %
311             tcp_log_auto_ratio) == 0)
312                 return (true);
313         return (false);
314 }
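/*
 * Illustrative configuration (sysctl names are the ones defined
 * above): setting net.inet.tcp.bb.log_auto_ratio=100 captures roughly
 * 1 in 100 sessions, net.inet.tcp.bb.log_auto_mode chooses the
 * TCP_LOG_STATE_* value those sessions start in, and
 * net.inet.tcp.bb.log_auto_all=1 widens selection from ID-bearing
 * sessions to all sessions.
 */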
315
316 static __inline int
317 tcp_log_id_cmp(struct tcp_log_id_bucket *a, struct tcp_log_id_bucket *b)
318 {
319         KASSERT(a != NULL, ("tcp_log_id_cmp: argument a is unexpectedly NULL"));
320         KASSERT(b != NULL, ("tcp_log_id_cmp: argument b is unexpectedly NULL"));
321         return (strncmp(a->tlb_id, b->tlb_id, TCP_LOG_ID_LEN));
322 }
323
324 RB_GENERATE_STATIC(tcp_log_id_tree, tcp_log_id_bucket, tlb_rb, tcp_log_id_cmp)
325
326 static __inline void
327 tcp_log_id_validate_tree_lock(int tree_locked)
328 {
329
330 #ifdef INVARIANTS
331         switch (tree_locked) {
332         case TREE_WLOCKED:
333                 TCPID_TREE_WLOCK_ASSERT();
334                 break;
335         case TREE_RLOCKED:
336                 TCPID_TREE_RLOCK_ASSERT();
337                 break;
338         case TREE_UNLOCKED:
339                 TCPID_TREE_UNLOCK_ASSERT();
340                 break;
341         default:
342                 kassert_panic("%s:%d: unknown tree lock state", __func__,
343                     __LINE__);
344         }
345 #endif
346 }
347
348 static __inline void
349 tcp_log_remove_bucket(struct tcp_log_id_bucket *tlb)
350 {
351
352         TCPID_TREE_WLOCK_ASSERT();
353         KASSERT(SLIST_EMPTY(&tlb->tlb_head),
354             ("%s: Attempt to remove non-empty bucket", __func__));
355         if (RB_REMOVE(tcp_log_id_tree, &tcp_log_id_head, tlb) == NULL) {
356 #ifdef INVARIANTS
357                 kassert_panic("%s:%d: error removing element from tree",
358                             __func__, __LINE__);
359 #endif
360         }
361         TCPID_BUCKET_LOCK_DESTROY(tlb);
362         counter_u64_add(tcp_log_pcb_ids_cur, (int64_t)-1);
363         uma_zfree(tcp_log_bucket_zone, tlb);
364 }
365
366 /*
367  * Call with a referenced and locked bucket.
368  * Will return true if the bucket was freed; otherwise, false.
369  * tlb: The bucket to unreference.
370  * tree_locked: A pointer to the state of the tree lock. If the tree lock
371  *    state changes, the function will update it.
372  * inp: If not NULL and the function needs to drop the inp lock to relock the
373  *    tree, it will do so. (The caller must ensure inp will not become invalid,
374  *    probably by holding a reference to it.)
375  */
376 static bool
377 tcp_log_unref_bucket(struct tcp_log_id_bucket *tlb, int *tree_locked,
378     struct inpcb *inp)
379 {
380
381         KASSERT(tlb != NULL, ("%s: called with NULL tlb", __func__));
382         KASSERT(tree_locked != NULL, ("%s: called with NULL tree_locked",
383             __func__));
384
385         tcp_log_id_validate_tree_lock(*tree_locked);
386
387         /*
388          * Did we hold the last reference on the tlb? If so, we may need
389          * to free it. (Note that we can realistically only execute the
390          * loop twice: once without a write lock and once with a write
391          * lock.)
392          */
393         while (TCPID_BUCKET_UNREF(tlb)) {
394                 /*
395                  * We need a write lock on the tree to free this.
396                  * If we can upgrade the tree lock, this is "easy". If we
397                  * can't upgrade the tree lock, we need to do this the
398                  * "hard" way: unwind all our locks and relock everything.
399                  * In the meantime, anything could have changed. We even
400                  * need to validate that we still need to free the bucket.
401                  */
402                 if (*tree_locked == TREE_RLOCKED && TCPID_TREE_UPGRADE())
403                         *tree_locked = TREE_WLOCKED;
404                 else if (*tree_locked != TREE_WLOCKED) {
405                         TCPID_BUCKET_REF(tlb);
406                         if (inp != NULL)
407                                 INP_WUNLOCK(inp);
408                         TCPID_BUCKET_UNLOCK(tlb);
409                         if (*tree_locked == TREE_RLOCKED)
410                                 TCPID_TREE_RUNLOCK();
411                         TCPID_TREE_WLOCK();
412                         *tree_locked = TREE_WLOCKED;
413                         TCPID_BUCKET_LOCK(tlb);
414                         if (inp != NULL)
415                                 INP_WLOCK(inp);
416                         continue;
417                 }
418
419                 /*
420                  * We have an empty bucket and a write lock on the tree.
421                  * Remove the empty bucket.
422                  */
423                 tcp_log_remove_bucket(tlb);
424                 return (true);
425         }
426         return (false);
427 }
428
429 /*
430  * Call with a locked bucket. This function will release the lock on the
431  * bucket before returning.
432  *
433  * The caller is responsible for freeing the tp->t_lin/tln node!
434  *
435  * Note: one of tp or both tlb and tln must be supplied.
436  *
437  * inp: A pointer to the inp. If the function needs to drop the inp lock to
438  *    acquire the tree write lock, it will do so. (The caller must ensure inp
439  *    will not become invalid, probably by holding a reference to it.)
440  * tp: A pointer to the tcpcb. (optional; if specified, tlb and tln are ignored)
441  * tlb: A pointer to the bucket. (optional; ignored if tp is specified)
442  * tln: A pointer to the node. (optional; ignored if tp is specified)
443  * tree_locked: A pointer to the state of the tree lock. If the tree lock
444  *    state changes, the function will update it.
445  *
446  * Will return true if the INP lock was reacquired; otherwise, false.
447  */
448 static bool
449 tcp_log_remove_id_node(struct inpcb *inp, struct tcpcb *tp,
450     struct tcp_log_id_bucket *tlb, struct tcp_log_id_node *tln,
451     int *tree_locked)
452 {
453         int orig_tree_locked;
454
455         KASSERT(tp != NULL || (tlb != NULL && tln != NULL),
456             ("%s: called with tp=%p, tlb=%p, tln=%p", __func__,
457             tp, tlb, tln));
458         KASSERT(tree_locked != NULL, ("%s: called with NULL tree_locked",
459             __func__));
460
461         if (tp != NULL) {
462                 tlb = tp->t_lib;
463                 tln = tp->t_lin;
464                 KASSERT(tlb != NULL, ("%s: unexpectedly NULL tlb", __func__));
465                 KASSERT(tln != NULL, ("%s: unexpectedly NULL tln", __func__));
466         }
467
468         tcp_log_id_validate_tree_lock(*tree_locked);
469         TCPID_BUCKET_LOCK_ASSERT(tlb);
470
471         /*
472          * Remove the node, clear the log bucket and node from the TCPCB, and
473          * decrement the bucket refcount. In the process, if this is the
474          * last reference, the bucket will be freed.
475          */
476         SLIST_REMOVE(&tlb->tlb_head, tln, tcp_log_id_node, tln_list);
477         if (tp != NULL) {
478                 tp->t_lib = NULL;
479                 tp->t_lin = NULL;
480         }
481         orig_tree_locked = *tree_locked;
482         if (!tcp_log_unref_bucket(tlb, tree_locked, inp))
483                 TCPID_BUCKET_UNLOCK(tlb);
484         return (*tree_locked != orig_tree_locked);
485 }
486
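/*
 * Revalidate the connection after any window in which the INP lock may
 * have been dropped: if the connection has entered TIMEWAIT or been
 * dropped, run the caller-supplied cleanup and fail with ECONNRESET;
 * otherwise, refresh the possibly-stale tcpcb pointer.
 */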
487 #define RECHECK_INP_CLEAN(cleanup)      do {                    \
488         if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {    \
489                 rv = ECONNRESET;                                \
490                 cleanup;                                        \
491                 goto done;                                      \
492         }                                                       \
493         tp = intotcpcb(inp);                                    \
494 } while (0)
495
496 #define RECHECK_INP()   RECHECK_INP_CLEAN(/* noop */)
497
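/*
 * When per-connection stats are enabled in sampling mode, give a
 * connection that is gaining a log ID a chance to be selected for
 * stats collection.
 */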
498 static void
499 tcp_log_grow_tlb(char *tlb_id, struct tcpcb *tp)
500 {
501
502         INP_WLOCK_ASSERT(tp->t_inpcb);
503
504 #ifdef STATS
505         if (V_tcp_perconn_stats_enable == 2 && tp->t_stats == NULL)
506                 (void)tcp_stats_sample_rollthedice(tp, tlb_id, strlen(tlb_id));
507 #endif
508 }
509
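/*
 * Count a request against this ID; the request count feeds the
 * "active" heuristic (see ACTIVE_REQUEST_COUNT above).
 */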
510 static void
511 tcp_log_increment_reqcnt(struct tcp_log_id_bucket *tlb)
512 {
513
514         atomic_fetchadd_int(&tlb->tlb_reqcnt, 1);
515 }
516
517 /*
518  * Associate the specified tag with a particular TCP log ID.
519  * Called with INPCB locked. Returns with it unlocked.
520  * Returns 0 on success or EOPNOTSUPP if the connection has no TCP log ID.
521  */
522 int
523 tcp_log_set_tag(struct tcpcb *tp, char *tag)
524 {
525         struct tcp_log_id_bucket *tlb;
526         int tree_locked;
527
528         INP_WLOCK_ASSERT(tp->t_inpcb);
529
530         tree_locked = TREE_UNLOCKED;
531         tlb = tp->t_lib;
532         if (tlb == NULL) {
533                 INP_WUNLOCK(tp->t_inpcb);
534                 return (EOPNOTSUPP);
535         }
536
537         TCPID_BUCKET_REF(tlb);
538         INP_WUNLOCK(tp->t_inpcb);
539         TCPID_BUCKET_LOCK(tlb);
540         strlcpy(tlb->tlb_tag, tag, TCP_LOG_TAG_LEN);
541         if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
542                 TCPID_BUCKET_UNLOCK(tlb);
543
544         if (tree_locked == TREE_WLOCKED) {
545                 TCPID_TREE_WLOCK_ASSERT();
546                 TCPID_TREE_WUNLOCK();
547         } else if (tree_locked == TREE_RLOCKED) {
548                 TCPID_TREE_RLOCK_ASSERT();
549                 TCPID_TREE_RUNLOCK();
550         } else
551                 TCPID_TREE_UNLOCK_ASSERT();
552
553         return (0);
554 }
555
556 /*
557  * Set the TCP log ID for a TCPCB.
558  * Called with INPCB locked. Returns with it unlocked.
559  */
560 int
561 tcp_log_set_id(struct tcpcb *tp, char *id)
562 {
563         struct tcp_log_id_bucket *tlb, *tmp_tlb;
564         struct tcp_log_id_node *tln;
565         struct inpcb *inp;
566         int tree_locked, rv;
567         bool bucket_locked;
568
569         tlb = NULL;
570         tln = NULL;
571         inp = tp->t_inpcb;
572         tree_locked = TREE_UNLOCKED;
573         bucket_locked = false;
574
575 restart:
576         INP_WLOCK_ASSERT(inp);
577
578         /* See if the ID is unchanged. */
579         if ((tp->t_lib != NULL && !strcmp(tp->t_lib->tlb_id, id)) ||
580             (tp->t_lib == NULL && *id == 0)) {
581                 if (tp->t_lib != NULL) {
582                         tcp_log_increment_reqcnt(tp->t_lib);
583                         if ((tp->t_lib->tlb_logstate) &&
584                             (tp->t_log_state_set == 0)) {
585                                 /* Clone in any logging */
586
587                                 tp->t_logstate = tp->t_lib->tlb_logstate;
588                         }
589                         if ((tp->t_lib->tlb_loglimit) &&
590                             (tp->t_log_state_set == 0)) {
591                                 /* We also have a limit set */
592
593                                 tp->t_loglimit = tp->t_lib->tlb_loglimit;
594                         }
595                 }
596                 rv = 0;
597                 goto done;
598         }
599
600         /*
601          * If the TCPCB had a previous ID, we need to extricate it from
602          * the previous list.
603          *
604          * Drop the TCPCB lock and lock the tree and the bucket.
605          * Because this is called in the socket context, we (theoretically)
606          * don't need to worry about the INPCB completely going away
607          * while we are gone.
608          */
609         if (tp->t_lib != NULL) {
610                 tlb = tp->t_lib;
611                 TCPID_BUCKET_REF(tlb);
612                 INP_WUNLOCK(inp);
613
614                 if (tree_locked == TREE_UNLOCKED) {
615                         TCPID_TREE_RLOCK();
616                         tree_locked = TREE_RLOCKED;
617                 }
618                 TCPID_BUCKET_LOCK(tlb);
619                 bucket_locked = true;
620                 INP_WLOCK(inp);
621
622                 /*
623                  * Unreference the bucket. If our bucket went away, it is no
624                  * longer locked or valid.
625                  */
626                 if (tcp_log_unref_bucket(tlb, &tree_locked, inp)) {
627                         bucket_locked = false;
628                         tlb = NULL;
629                 }
630
631                 /* Validate the INP. */
632                 RECHECK_INP();
633
634                 /*
635                  * Evaluate whether the bucket changed while we were unlocked.
636                  *
637                  * Possible scenarios here:
638                  * 1. Bucket is unchanged and the same one we started with.
639                  * 2. The TCPCB no longer has a bucket and our bucket was
640                  *    freed.
641                  * 3. The TCPCB has a new bucket, whether or not ours was freed.
642                  * 4. The TCPCB no longer has a bucket and our bucket was
643                  *    not freed.
644                  *
645                  * In cases 2-4, we will start over. In case 1, we will
646                  * proceed here to remove the bucket.
647                  */
648                 if (tlb == NULL || tp->t_lib != tlb) {
649                         KASSERT(bucket_locked || tlb == NULL,
650                             ("%s: bucket_locked (%d) and tlb (%p) are "
651                             "inconsistent", __func__, bucket_locked, tlb));
652
653                         if (bucket_locked) {
654                                 TCPID_BUCKET_UNLOCK(tlb);
655                                 bucket_locked = false;
656                                 tlb = NULL;
657                         }
658                         goto restart;
659                 }
660
661                 /*
662                  * Store the (struct tcp_log_id_node) for reuse. Then, remove
663                  * it from the bucket. In the process, we may end up relocking.
664                  * If so, we need to validate that the INP is still valid, and
665                  * the TCPCB entries match what we expect.
666                  *
667                  * We will clear tlb and change the bucket_locked state just
668                  * before calling tcp_log_remove_id_node(), since that function
669                  * will unlock the bucket.
670                  */
671                 if (tln != NULL)
672                         uma_zfree(tcp_log_node_zone, tln);
673                 tln = tp->t_lin;
674                 tlb = NULL;
675                 bucket_locked = false;
676                 if (tcp_log_remove_id_node(inp, tp, NULL, NULL, &tree_locked)) {
677                         RECHECK_INP();
678
679                         /*
680                          * If the TCPCB moved to a new bucket while we had
681                          * dropped the lock, restart.
682                          */
683                         if (tp->t_lib != NULL || tp->t_lin != NULL)
684                                 goto restart;
685                 }
686
687                 /*
688                  * Yay! We successfully removed the TCPCB from its old
689                  * bucket. Phew!
690                  *
691                  * On to bigger and better things...
692                  */
693         }
694
695         /* At this point, the TCPCB should not be in any bucket. */
696         KASSERT(tp->t_lib == NULL, ("%s: tp->t_lib is not NULL", __func__));
697
698         /*
699          * If the new ID is not empty, we need to now assign this TCPCB to a
700          * new bucket.
701          */
702         if (*id) {
703                 /* Get a new tln, if we don't already have one to reuse. */
704                 if (tln == NULL) {
705                         tln = uma_zalloc(tcp_log_node_zone, M_NOWAIT | M_ZERO);
706                         if (tln == NULL) {
707                                 rv = ENOBUFS;
708                                 goto done;
709                         }
710                         tln->tln_inp = inp;
711                         tln->tln_tp = tp;
712                 }
713
714                 /*
715                  * Drop the INP lock for a bit. We don't need it, and dropping
716                  * it prevents lock order reversals.
717                  */
718                 INP_WUNLOCK(inp);
719
720                 /* Make sure we have at least a read lock on the tree. */
721                 tcp_log_id_validate_tree_lock(tree_locked);
722                 if (tree_locked == TREE_UNLOCKED) {
723                         TCPID_TREE_RLOCK();
724                         tree_locked = TREE_RLOCKED;
725                 }
726
727 refind:
728                 /*
729                  * Remember the layout of (struct tcp_log_id_bucket): tlb_id
730                  * is its first member, so we can safely cast the bare id to
731                  * a bucket pointer for the purposes of the lookup.
732                  */
732                 KASSERT(tlb == NULL, ("%s:%d tlb unexpectedly non-NULL",
733                     __func__, __LINE__));
734                 tmp_tlb = RB_FIND(tcp_log_id_tree, &tcp_log_id_head,
735                     (struct tcp_log_id_bucket *) id);
736
737                 /*
738                  * If we didn't find a matching bucket, we need to add a new
739                  * one. This requires a write lock. But, of course, we will
740                  * need to recheck some things when we re-acquire the lock.
741                  */
742                 if (tmp_tlb == NULL && tree_locked != TREE_WLOCKED) {
743                         tree_locked = TREE_WLOCKED;
744                         if (!TCPID_TREE_UPGRADE()) {
745                                 TCPID_TREE_RUNLOCK();
746                                 TCPID_TREE_WLOCK();
747
748                                 /*
749                                  * The tree may have changed while we were
750                                  * unlocked.
751                                  */
752                                 goto refind;
753                         }
754                 }
755
756                 /* If we need to add a new bucket, do it now. */
757                 if (tmp_tlb == NULL) {
758                         /* Allocate new bucket. */
759                         tlb = uma_zalloc(tcp_log_bucket_zone, M_NOWAIT);
760                         if (tlb == NULL) {
761                                 rv = ENOBUFS;
762                                 goto done_noinp;
763                         }
764                         counter_u64_add(tcp_log_pcb_ids_cur, 1);
765                         counter_u64_add(tcp_log_pcb_ids_tot, 1);
766
767                         if ((tcp_log_auto_all == false) &&
768                             tcp_log_auto_mode &&
769                             tcp_log_selectauto()) {
770                                 /* Save off the log state */
771                                 tlb->tlb_logstate = tcp_log_auto_mode;
772                         } else
773                                 tlb->tlb_logstate = TCP_LOG_STATE_OFF;
774                         tlb->tlb_loglimit = 0;
775                         tlb->tlb_tag[0] = '\0'; /* Default to an empty tag. */
776
777                         /*
778                          * Copy the ID to the bucket.
779                          * NB: Don't use strlcpy() unless you are sure
780                          * we've always validated NULL termination.
781                          *
782                          * TODO: When I'm done writing this, see if we
783                          * have correctly validated NULL termination and
784                          * can use strlcpy(). :-)
785                          */
786                         strncpy(tlb->tlb_id, id, TCP_LOG_ID_LEN - 1);
787                         tlb->tlb_id[TCP_LOG_ID_LEN - 1] = '\0';
788
789                         /*
790                          * Take the refcount for the first node and go ahead
791                          * and lock this. Note that we zero the tlb_mtx
792                          * structure, since 0xdeadc0de flips the right bits
793                          * for the code to think that this mutex has already
794                          * been initialized. :-(
795                          */
796                         SLIST_INIT(&tlb->tlb_head);
797                         refcount_init(&tlb->tlb_refcnt, 1);
798                         tlb->tlb_reqcnt = 1;
799                         memset(&tlb->tlb_mtx, 0, sizeof(struct mtx));
800                         TCPID_BUCKET_LOCK_INIT(tlb);
801                         TCPID_BUCKET_LOCK(tlb);
802                         bucket_locked = true;
803
804 #define FREE_NEW_TLB()  do {                            \
805         TCPID_BUCKET_LOCK_DESTROY(tlb);                 \
806         uma_zfree(tcp_log_bucket_zone, tlb);            \
807         counter_u64_add(tcp_log_pcb_ids_cur, (int64_t)-1);      \
808         counter_u64_add(tcp_log_pcb_ids_tot, (int64_t)-1);      \
809         bucket_locked = false;                          \
810         tlb = NULL;                                     \
811 } while (0)
812                         /*
813                          * Relock the INP and make sure we are still
814                          * unassigned.
815                          */
816                         INP_WLOCK(inp);
817                         RECHECK_INP_CLEAN(FREE_NEW_TLB());
818                         if (tp->t_lib != NULL) {
819                                 FREE_NEW_TLB();
820                                 goto restart;
821                         }
822
823                         /* Add the new bucket to the tree. */
824                         tmp_tlb = RB_INSERT(tcp_log_id_tree, &tcp_log_id_head,
825                             tlb);
826                         KASSERT(tmp_tlb == NULL,
827                             ("%s: Unexpected conflicting bucket (%p) while "
828                             "adding new bucket (%p)", __func__, tmp_tlb, tlb));
829
830                         /*
831                          * If we found a conflicting bucket, free the new
832                          * one we made and fall through to use the existing
833                          * bucket.
834                          */
835                         if (tmp_tlb != NULL) {
836                                 FREE_NEW_TLB();
837                                 INP_WUNLOCK(inp);
838                         }
839 #undef  FREE_NEW_TLB
840                 }
841
842                 /* If we found an existing bucket, use it. */
843                 if (tmp_tlb != NULL) {
844                         tlb = tmp_tlb;
845                         TCPID_BUCKET_LOCK(tlb);
846                         bucket_locked = true;
847
848                         /*
849                          * Relock the INP and make sure we are still
850                          * unassigned.
851                          */
852                         INP_UNLOCK_ASSERT(inp);
853                         INP_WLOCK(inp);
854                         RECHECK_INP();
855                         if (tp->t_lib != NULL) {
856                                 TCPID_BUCKET_UNLOCK(tlb);
857                                 bucket_locked = false;
858                                 tlb = NULL;
859                                 goto restart;
860                         }
861
862                         /* Take a reference on the bucket. */
863                         TCPID_BUCKET_REF(tlb);
864
865                         /* Record the request. */
866                         tcp_log_increment_reqcnt(tlb);
867                 }
868
869                 tcp_log_grow_tlb(tlb->tlb_id, tp);
870
871                 /* Add the new node to the list. */
872                 SLIST_INSERT_HEAD(&tlb->tlb_head, tln, tln_list);
873                 tp->t_lib = tlb;
874                 tp->t_lin = tln;
875                 if (tp->t_lib->tlb_logstate) {
876                         /* Clone in any logging */
877
878                         tp->t_logstate = tp->t_lib->tlb_logstate;
879                 }
880                 if (tp->t_lib->tlb_loglimit) {
881                         /* The loglimit too */
882
883                         tp->t_loglimit = tp->t_lib->tlb_loglimit;
884                 }
885                 tln = NULL;
886         }
887
888         rv = 0;
889
890 done:
891         /* Unlock things, as needed, and return. */
892         INP_WUNLOCK(inp);
893 done_noinp:
894         INP_UNLOCK_ASSERT(inp);
895         if (bucket_locked) {
896                 TCPID_BUCKET_LOCK_ASSERT(tlb);
897                 TCPID_BUCKET_UNLOCK(tlb);
898         } else if (tlb != NULL)
899                 TCPID_BUCKET_UNLOCK_ASSERT(tlb);
900         if (tree_locked == TREE_WLOCKED) {
901                 TCPID_TREE_WLOCK_ASSERT();
902                 TCPID_TREE_WUNLOCK();
903         } else if (tree_locked == TREE_RLOCKED) {
904                 TCPID_TREE_RLOCK_ASSERT();
905                 TCPID_TREE_RUNLOCK();
906         } else
907                 TCPID_TREE_UNLOCK_ASSERT();
908         if (tln != NULL)
909                 uma_zfree(tcp_log_node_zone, tln);
910         return (rv);
911 }
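/*
 * Illustrative userspace entry point (a hedged sketch; the option name
 * comes from netinet/tcp.h, the id value is hypothetical):
 *
 *	char id[TCP_LOG_ID_LEN] = "web-frontend";
 *	setsockopt(s, IPPROTO_TCP, TCP_LOGID, id, strlen(id) + 1);
 *
 * An empty string releases the connection from its current bucket.
 */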
912
913 /*
914  * Get the TCP log ID for a TCPCB.
915  * Called with INPCB locked.
916  * 'buf' must point to a buffer that is at least TCP_LOG_ID_LEN bytes long.
917  * Returns number of bytes copied.
918  */
919 size_t
920 tcp_log_get_id(struct tcpcb *tp, char *buf)
921 {
922         size_t len;
923
924         INP_LOCK_ASSERT(tp->t_inpcb);
925         if (tp->t_lib != NULL) {
926                 len = strlcpy(buf, tp->t_lib->tlb_id, TCP_LOG_ID_LEN);
927                 KASSERT(len < TCP_LOG_ID_LEN,
928                     ("%s:%d: tp->t_lib->tlb_id too long (%zu)",
929                     __func__, __LINE__, len));
930         } else {
931                 *buf = '\0';
932                 len = 0;
933         }
934         return (len);
935 }
936
937 /*
938  * Get the tag associated with the TCPCB's log ID.
939  * Called with INPCB locked. Returns with it unlocked.
940  * 'buf' must point to a buffer that is at least TCP_LOG_TAG_LEN bytes long.
941  * Returns number of bytes copied.
942  */
943 size_t
944 tcp_log_get_tag(struct tcpcb *tp, char *buf)
945 {
946         struct tcp_log_id_bucket *tlb;
947         size_t len;
948         int tree_locked;
949
950         INP_WLOCK_ASSERT(tp->t_inpcb);
951
952         tree_locked = TREE_UNLOCKED;
953         tlb = tp->t_lib;
954
955         if (tlb != NULL) {
956                 TCPID_BUCKET_REF(tlb);
957                 INP_WUNLOCK(tp->t_inpcb);
958                 TCPID_BUCKET_LOCK(tlb);
959                 len = strlcpy(buf, tlb->tlb_tag, TCP_LOG_TAG_LEN);
960                 KASSERT(len < TCP_LOG_TAG_LEN,
961                     ("%s:%d: tp->t_lib->tlb_tag too long (%zu)",
962                     __func__, __LINE__, len));
963                 if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
964                         TCPID_BUCKET_UNLOCK(tlb);
965
966                 if (tree_locked == TREE_WLOCKED) {
967                         TCPID_TREE_WLOCK_ASSERT();
968                         TCPID_TREE_WUNLOCK();
969                 } else if (tree_locked == TREE_RLOCKED) {
970                         TCPID_TREE_RLOCK_ASSERT();
971                         TCPID_TREE_RUNLOCK();
972                 } else
973                         TCPID_TREE_UNLOCK_ASSERT();
974         } else {
975                 INP_WUNLOCK(tp->t_inpcb);
976                 *buf = '\0';
977                 len = 0;
978         }
979
980         return (len);
981 }
982
983 /*
984  * Get number of connections with the same log ID.
985  * Log ID is taken from given TCPCB.
986  * Called with INPCB locked.
987  */
988 u_int
989 tcp_log_get_id_cnt(struct tcpcb *tp)
990 {
991
992         INP_WLOCK_ASSERT(tp->t_inpcb);
993         return ((tp->t_lib == NULL) ? 0 : tp->t_lib->tlb_refcnt);
994 }
995
996 #ifdef TCPLOG_DEBUG_RINGBUF
997 /*
998  * Functions/macros to increment/decrement reference count for a log
999  * entry. This should catch when we do a double-free/double-remove or
1000  * a double-add.
1001  */
1002 static inline void
1003 _tcp_log_entry_refcnt_add(struct tcp_log_mem *log_entry, const char *func,
1004     int line)
1005 {
1006         int refcnt;
1007
1008         refcnt = atomic_fetchadd_int(&log_entry->tlm_refcnt, 1);
1009         if (refcnt != 0)
1010                 panic("%s:%d: log_entry(%p)->tlm_refcnt is %d (expected 0)",
1011                     func, line, log_entry, refcnt);
1012 }
1013 #define tcp_log_entry_refcnt_add(l)     \
1014     _tcp_log_entry_refcnt_add((l), __func__, __LINE__)
1015
1016 static inline void
1017 _tcp_log_entry_refcnt_rem(struct tcp_log_mem *log_entry, const char *func,
1018     int line)
1019 {
1020         int refcnt;
1021
1022         refcnt = atomic_fetchadd_int(&log_entry->tlm_refcnt, -1);
1023         if (refcnt != 1)
1024                 panic("%s:%d: log_entry(%p)->tlm_refcnt is %d (expected 1)",
1025                     func, line, log_entry, refcnt);
1026 }
1027 #define tcp_log_entry_refcnt_rem(l)     \
1028     _tcp_log_entry_refcnt_rem((l), __func__, __LINE__)
1029
1030 #else /* !TCPLOG_DEBUG_RINGBUF */
1031
1032 #define tcp_log_entry_refcnt_add(l)
1033 #define tcp_log_entry_refcnt_rem(l)
1034
1035 #endif
1036
1037 /*
1038  * Cleanup after removing a log entry, but only decrement the count if we
1039  * are running INVARIANTS.
1040  */
1041 static inline void
1042 tcp_log_free_log_common(struct tcp_log_mem *log_entry, int *count __unused)
1043 {
1044
1045         uma_zfree(tcp_log_zone, log_entry);
1046 #ifdef INVARIANTS
1047         (*count)--;
1048         KASSERT(*count >= 0,
1049             ("%s: count unexpectedly negative", __func__));
1050 #endif
1051 }
1052
1053 static void
1054 tcp_log_free_entries(struct tcp_log_stailq *head, int *count)
1055 {
1056         struct tcp_log_mem *log_entry;
1057
1058         /* Free the entries. */
1059         while ((log_entry = STAILQ_FIRST(head)) != NULL) {
1060                 STAILQ_REMOVE_HEAD(head, tlm_queue);
1061                 tcp_log_entry_refcnt_rem(log_entry);
1062                 tcp_log_free_log_common(log_entry, count);
1063         }
1064 }
1065
1066 /* Cleanup after removing a log entry. */
1067 static inline void
1068 tcp_log_remove_log_cleanup(struct tcpcb *tp, struct tcp_log_mem *log_entry)
1069 {
1070         uma_zfree(tcp_log_zone, log_entry);
1071         tp->t_lognum--;
1072         KASSERT(tp->t_lognum >= 0,
1073             ("%s: tp->t_lognum unexpectedly negative", __func__));
1074 }
1075
1076 /* Remove a log entry from the head of a list. */
1077 static inline void
1078 tcp_log_remove_log_head(struct tcpcb *tp, struct tcp_log_mem *log_entry)
1079 {
1080
1081         KASSERT(log_entry == STAILQ_FIRST(&tp->t_logs),
1082             ("%s: attempt to remove non-HEAD log entry", __func__));
1083         STAILQ_REMOVE_HEAD(&tp->t_logs, tlm_queue);
1084         tcp_log_entry_refcnt_rem(log_entry);
1085         tcp_log_remove_log_cleanup(tp, log_entry);
1086 }
1087
1088 #ifdef TCPLOG_DEBUG_RINGBUF
1089 /*
1090  * Initialize the log entry's reference count, which we want to
1091  * survive allocations.
1092  */
1093 static int
1094 tcp_log_zone_init(void *mem, int size, int flags __unused)
1095 {
1096         struct tcp_log_mem *tlm;
1097
1098         KASSERT(size >= sizeof(struct tcp_log_mem),
1099             ("%s: unexpectedly short (%d) allocation", __func__, size));
1100         tlm = (struct tcp_log_mem *)mem;
1101         tlm->tlm_refcnt = 0;
1102         return (0);
1103 }
1104
1105 /*
1106  * Double check that the refcnt is zero on allocation and return.
1107  */
1108 static int
1109 tcp_log_zone_ctor(void *mem, int size, void *args __unused, int flags __unused)
1110 {
1111         struct tcp_log_mem *tlm;
1112
1113         KASSERT(size >= sizeof(struct tcp_log_mem),
1114             ("%s: unexpectedly short (%d) allocation", __func__, size));
1115         tlm = (struct tcp_log_mem *)mem;
1116         if (tlm->tlm_refcnt != 0)
1117                 panic("%s:%d: tlm(%p)->tlm_refcnt is %d (expected 0)",
1118                     __func__, __LINE__, tlm, tlm->tlm_refcnt);
1119         return (0);
1120 }
1121
1122 static void
1123 tcp_log_zone_dtor(void *mem, int size, void *args __unused)
1124 {
1125         struct tcp_log_mem *tlm;
1126
1127         KASSERT(size >= sizeof(struct tcp_log_mem),
1128             ("%s: unexpectedly short (%d) allocation", __func__, size));
1129         tlm = (struct tcp_log_mem *)mem;
1130         if (tlm->tlm_refcnt != 0)
1131                 panic("%s:%d: tlm(%p)->tlm_refcnt is %d (expected 0)",
1132                     __func__, __LINE__, tlm, tlm->tlm_refcnt);
1133 }
1134 #endif /* TCPLOG_DEBUG_RINGBUF */
1135
1136 /* Do global initialization. */
1137 void
1138 tcp_log_init(void)
1139 {
1140
1141         tcp_log_zone = uma_zcreate("tcp_log", sizeof(struct tcp_log_mem),
1142 #ifdef TCPLOG_DEBUG_RINGBUF
1143             tcp_log_zone_ctor, tcp_log_zone_dtor, tcp_log_zone_init,
1144 #else
1145             NULL, NULL, NULL,
1146 #endif
1147             NULL, UMA_ALIGN_PTR, 0);
1148         (void)uma_zone_set_max(tcp_log_zone, TCP_LOG_BUF_DEFAULT_GLOBAL_LIMIT);
1149         tcp_log_bucket_zone = uma_zcreate("tcp_log_bucket",
1150             sizeof(struct tcp_log_id_bucket), NULL, NULL, NULL, NULL,
1151             UMA_ALIGN_PTR, 0);
1152         tcp_log_node_zone = uma_zcreate("tcp_log_node",
1153             sizeof(struct tcp_log_id_node), NULL, NULL, NULL, NULL,
1154             UMA_ALIGN_PTR, 0);
1155 #ifdef TCPLOG_DEBUG_COUNTERS
1156         tcp_log_queued = counter_u64_alloc(M_WAITOK);
1157         tcp_log_que_fail1 = counter_u64_alloc(M_WAITOK);
1158         tcp_log_que_fail2 = counter_u64_alloc(M_WAITOK);
1159         tcp_log_que_fail3 = counter_u64_alloc(M_WAITOK);
1160         tcp_log_que_fail4 = counter_u64_alloc(M_WAITOK);
1161         tcp_log_que_fail5 = counter_u64_alloc(M_WAITOK);
1162         tcp_log_que_copyout = counter_u64_alloc(M_WAITOK);
1163         tcp_log_que_read = counter_u64_alloc(M_WAITOK);
1164         tcp_log_que_freed = counter_u64_alloc(M_WAITOK);
1165 #endif
1166         tcp_log_pcb_ids_cur = counter_u64_alloc(M_WAITOK);
1167         tcp_log_pcb_ids_tot = counter_u64_alloc(M_WAITOK);
1168
1169         rw_init_flags(&tcp_id_tree_lock, "TCP ID tree", RW_NEW);
1170         mtx_init(&tcp_log_expireq_mtx, "TCP log expireq", NULL, MTX_DEF);
1171         callout_init(&tcp_log_expireq_callout, 1);
1172 }
1173
1174 /* Do per-TCPCB initialization. */
1175 void
1176 tcp_log_tcpcbinit(struct tcpcb *tp)
1177 {
1178
1179         /* A new TCPCB should start out zero-initialized. */
1180         STAILQ_INIT(&tp->t_logs);
1181
1182         /*
1183          * If we are doing auto-capturing, figure out whether we will capture
1184          * this session.
1185          */
1186         tp->t_loglimit = tcp_log_session_limit;
1187         if ((tcp_log_auto_all == true) &&
1188             tcp_log_auto_mode &&
1189             tcp_log_selectauto()) {
1190                 tp->t_logstate = tcp_log_auto_mode;
1191                 tp->t_flags2 |= TF2_LOG_AUTO;
1192         }
1193 }
1194
1195
1196 /* Expire log data from closed connections on the expiry queue. */
1197 static void
1198 tcp_log_expire(void *unused __unused)
1199 {
1200         struct tcp_log_id_bucket *tlb;
1201         struct tcp_log_id_node *tln;
1202         sbintime_t expiry_limit;
1203         int tree_locked;
1204
1205         TCPLOG_EXPIREQ_LOCK();
1206         if (callout_pending(&tcp_log_expireq_callout)) {
1207                 /* Callout was reset. */
1208                 TCPLOG_EXPIREQ_UNLOCK();
1209                 return;
1210         }
1211
1212         /*
1213          * Process entries until we reach one that expires too far in the
1214          * future. Look one second in the future.
1215          */
1216         expiry_limit = getsbinuptime() + SBT_1S;
1217         tree_locked = TREE_UNLOCKED;
1218
1219         while ((tln = STAILQ_FIRST(&tcp_log_expireq_head)) != NULL &&
1220             tln->tln_expiretime <= expiry_limit) {
1221                 if (!callout_active(&tcp_log_expireq_callout)) {
1222                         /*
1223                          * Callout was stopped. I guess we should
1224                          * just quit at this point.
1225                          */
1226                         TCPLOG_EXPIREQ_UNLOCK();
1227                         return;
1228                 }
1229
1230                 /*
1231                  * Remove the node from the head of the list and unlock
1232                  * the list. Change the expiry time to SBT_MAX as a signal
1233                  * to other threads that we now own this.
1234                  */
1235                 STAILQ_REMOVE_HEAD(&tcp_log_expireq_head, tln_expireq);
1236                 tln->tln_expiretime = SBT_MAX;
1237                 TCPLOG_EXPIREQ_UNLOCK();
1238
1239                 /*
1240                  * Remove the node from the bucket.
1241                  */
1242                 tlb = tln->tln_bucket;
1243                 TCPID_BUCKET_LOCK(tlb);
1244                 if (tcp_log_remove_id_node(NULL, NULL, tlb, tln, &tree_locked)) {
1245                         tcp_log_id_validate_tree_lock(tree_locked);
1246                         if (tree_locked == TREE_WLOCKED)
1247                                 TCPID_TREE_WUNLOCK();
1248                         else
1249                                 TCPID_TREE_RUNLOCK();
1250                         tree_locked = TREE_UNLOCKED;
1251                 }
1252
1253                 /* Drop the INP reference. */
1254                 INP_WLOCK(tln->tln_inp);
1255                 if (!in_pcbrele_wlocked(tln->tln_inp))
1256                         INP_WUNLOCK(tln->tln_inp);
1257
1258                 /* Free the log records. */
1259                 tcp_log_free_entries(&tln->tln_entries, &tln->tln_count);
1260
1261                 /* Free the node. */
1262                 uma_zfree(tcp_log_node_zone, tln);
1263
1264                 /* Relock the expiry queue. */
1265                 TCPLOG_EXPIREQ_LOCK();
1266         }
1267
1268         /*
1269          * We've expired all the entries we can. Do we need to reschedule
1270          * ourselves?
1271          */
1272         callout_deactivate(&tcp_log_expireq_callout);
1273         if (tln != NULL) {
1274                 /*
1275                  * Get max(now + TCP_LOG_EXPIRE_INTVL, tln->tln_expiretime) and
1276                  * set the next callout to that. (This helps ensure we generally
1277                  * run the callout no more often than desired.)
1278                  */
1279                 expiry_limit = getsbinuptime() + TCP_LOG_EXPIRE_INTVL;
1280                 if (expiry_limit < tln->tln_expiretime)
1281                         expiry_limit = tln->tln_expiretime;
1282                 callout_reset_sbt(&tcp_log_expireq_callout, expiry_limit,
1283                     SBT_1S, tcp_log_expire, NULL, C_ABSOLUTE);
1284         }
1285
1286         /* We're done. */
1287         TCPLOG_EXPIREQ_UNLOCK();
1288         return;
1289 }
1290
1291 /*
1292  * Move log data from the TCPCB to a new node. This will reset the TCPCB log
1293  * entries and log count; however, it will not touch other things from the
1294  * TCPCB (e.g. t_lin, t_lib).
1295  *
1296  * NOTE: Must hold a lock on the INP.
1297  */
1298 static void
1299 tcp_log_move_tp_to_node(struct tcpcb *tp, struct tcp_log_id_node *tln)
1300 {
1301
1302         INP_WLOCK_ASSERT(tp->t_inpcb);
1303
1304         tln->tln_ie = tp->t_inpcb->inp_inc.inc_ie;
1305         if (tp->t_inpcb->inp_inc.inc_flags & INC_ISIPV6)
1306                 tln->tln_af = AF_INET6;
1307         else
1308                 tln->tln_af = AF_INET;
1309         tln->tln_entries = tp->t_logs;
1310         tln->tln_count = tp->t_lognum;
1311         tln->tln_bucket = tp->t_lib;
1312
1313         /* Clear information from the PCB. */
1314         STAILQ_INIT(&tp->t_logs);
1315         tp->t_lognum = 0;
1316 }
1317
1318 /* Do per-TCPCB cleanup */
1319 void
1320 tcp_log_tcpcbfini(struct tcpcb *tp)
1321 {
1322         struct tcp_log_id_node *tln, *tln_first;
1323         struct tcp_log_mem *log_entry;
1324         sbintime_t callouttime;
1325
1326         INP_WLOCK_ASSERT(tp->t_inpcb);
1327
1328         TCP_LOG_EVENT(tp, NULL, NULL, NULL, TCP_LOG_CONNEND, 0, 0, NULL, false);
1329
1330         /*
1331          * If we were gathering packets to be automatically dumped, try to do
1332          * it now. If this succeeds, the log information in the TCPCB will be
1333          * cleared. Otherwise, we'll handle the log information as we do
1334          * for other states.
1335          */
1336         switch (tp->t_logstate) {
1337         case TCP_LOG_STATE_HEAD_AUTO:
1338                 (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from head",
1339                     M_NOWAIT, false);
1340                 break;
1341         case TCP_LOG_STATE_TAIL_AUTO:
1342                 (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from tail",
1343                     M_NOWAIT, false);
1344                 break;
1345         case TCP_LOG_STATE_CONTINUAL:
1346                 (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
1347                     M_NOWAIT, false);
1348                 break;
1349         }
1350
1351         /*
1352          * There are two ways we could keep logs: per-socket or per-ID. If
1353          * we are tracking logs with an ID, then the logs survive the
1354          * destruction of the TCPCB.
1355          *
1356          * If the TCPCB is associated with an ID node, move the logs from the
1357          * TCPCB to the ID node. In theory, this is safe, for reasons which I
1358          * will now explain for my own benefit when I next need to figure out
1359          * this code. :-)
1360          *
1361          * We own the INP lock. Therefore, no one else can change the contents
1362          * of this node (Rule C). Further, no one can remove this node from
1363          * the bucket while we hold the lock (Rule D). Basically, no one can
1364          * mess with this node. That leaves two states in which we could be:
1365          *
1366          * 1. Another thread is currently waiting to acquire the INP lock, with
1367          *    plans to do something with this node. When we drop the INP lock,
1368          *    they will have a chance to do that. They will recheck the
1369          *    tln_closed field (see note to Rule C) and then acquire the
1370          *    bucket lock before proceeding further.
1371          *
1372          * 2. Another thread will try to acquire a lock at some point in the
1373          *    future. If they try to acquire a lock before we set the
1374          *    tln_closed field, they will follow state #1. If they try to
1375          *    acquire a lock after we set the tln_closed field, they will be
1376          *    able to make changes to the node, at will, following Rule C.
1377          *
1378          * Therefore, we currently own this node and can make any changes
1379          * we want. But, as soon as we set the tln_closed field to true, we
1380          * have effectively dropped our lock on the node. (For this reason, we
1381          * also need to make sure our writes are ordered correctly. An atomic
1382          * operation with "release" semantics should be sufficient.)
1383          */
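
        /*
         * Illustrative sketch (a comment only, not compiled): a hypothetical
         * reader following the rules above would pair an acquire load with
         * the release store performed below, roughly:
         *
         *      if (atomic_load_acq_int(&tln->tln_closed)) {
         *              TCPID_BUCKET_LOCK(tln->tln_bucket);
         *              ... safe to modify the node, per Rule C ...
         *      }
         */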
1384
1385         if (tp->t_lin != NULL) {
1386                 /* Copy the relevant information to the log entry. */
1387                 tln = tp->t_lin;
1388                 KASSERT(tln->tln_inp == tp->t_inpcb,
1389                     ("%s: Mismatched inp (tln->tln_inp=%p, tp->t_inpcb=%p)",
1390                     __func__, tln->tln_inp, tp->t_inpcb));
1391                 tcp_log_move_tp_to_node(tp, tln);
1392
1393                 /* Clear information from the PCB. */
1394                 tp->t_lin = NULL;
1395                 tp->t_lib = NULL;
1396
1397                 /*
1398                  * Take a reference on the INP. This ensures that the INP
1399                  * remains valid while the node is on the expiry queue, and
1400                  * in particular that it stays valid for other threads that
1401                  * may be racing to lock this node when we move it to the
1402                  * expire queue.
1403                  */
1404                 in_pcbref(tp->t_inpcb);
1405
1406                 /*
1407                  * Store the entry on the expiry list. The exact behavior
1408                  * depends on whether we have entries to keep. If so, we
1409                  * put the entry at the tail of the list and expire in
1410                  * TCP_LOG_EXPIRE_TIME. Otherwise, we expire "now" and put
1411                  * the entry at the head of the list. (Handling the cleanup
1412          * via the expiry timer lets us avoid locking messiness here.)
1413                  */
1414                 tln->tln_expiretime = getsbinuptime();
1415                 TCPLOG_EXPIREQ_LOCK();
1416                 if (tln->tln_count) {
1417                         tln->tln_expiretime += TCP_LOG_EXPIRE_TIME;
1418                         if (STAILQ_EMPTY(&tcp_log_expireq_head) &&
1419                             !callout_active(&tcp_log_expireq_callout)) {
1420                                 /*
1421                                  * We are adding the first entry and a callout
1422                                  * is not currently scheduled; therefore, we
1423                                  * need to schedule one.
1424                                  */
1425                                 callout_reset_sbt(&tcp_log_expireq_callout,
1426                                     tln->tln_expiretime, SBT_1S, tcp_log_expire,
1427                                     NULL, C_ABSOLUTE);
1428                         }
1429                         STAILQ_INSERT_TAIL(&tcp_log_expireq_head, tln,
1430                             tln_expireq);
1431                 } else {
1432                         callouttime = tln->tln_expiretime +
1433                             TCP_LOG_EXPIRE_INTVL;
1434                         tln_first = STAILQ_FIRST(&tcp_log_expireq_head);
1435
1436                         if ((tln_first == NULL ||
1437                             callouttime < tln_first->tln_expiretime) &&
1438                             (callout_pending(&tcp_log_expireq_callout) ||
1439                             !callout_active(&tcp_log_expireq_callout))) {
1440                                 /*
1441                                  * The list is empty, or we want to run the
1442                                  * expire code before the first entry's timer
1443                                  * fires. Also, we are in a case where a callout
1444                                  * is not actively running. We want to reset
1445                                  * the callout to occur sooner.
1446                                  */
1447                                 callout_reset_sbt(&tcp_log_expireq_callout,
1448                                     callouttime, SBT_1S, tcp_log_expire, NULL,
1449                                     C_ABSOLUTE);
1450                         }
1451
1452                         /*
1453                          * Insert to the head, or just after the head, as
1454                          * appropriate. (This might result in small
1455                          * mis-orderings as a bunch of "expire now" entries
1456                          * gather at the start of the list, but that should
1457                          * not produce big problems, since the expire timer
1458                          * will walk through all of them.)
1459                          */
1460                         if (tln_first == NULL ||
1461                             tln->tln_expiretime < tln_first->tln_expiretime)
1462                                 STAILQ_INSERT_HEAD(&tcp_log_expireq_head, tln,
1463                                     tln_expireq);
1464                         else
1465                                 STAILQ_INSERT_AFTER(&tcp_log_expireq_head,
1466                                     tln_first, tln, tln_expireq);
1467                 }
1468                 TCPLOG_EXPIREQ_UNLOCK();
1469
1470                 /*
1471                  * We are done messing with the tln. After this point, we
1472                  * can't touch it. (Note that the "release" semantics should
1473                  * be included with the TCPLOG_EXPIREQ_UNLOCK() call above.
1474                  * Therefore, they should be unnecessary here. However, it
1475                  * seems like a good idea to include them anyway, since we
1476                  * really are releasing a lock here.)
1477                  */
1478                 atomic_store_rel_int(&tln->tln_closed, 1);
1479         } else {
1480                 /* Remove log entries. */
1481                 while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
1482                         tcp_log_remove_log_head(tp, log_entry);
1483                 KASSERT(tp->t_lognum == 0,
1484                     ("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
1485                         __func__, tp->t_lognum));
1486         }
1487
1488         /*
1489          * Change the log state to off (just in case anything tries to sneak
1490          * in a last-minute log).
1491          */
1492         tp->t_logstate = TCP_LOG_STATE_OFF;
1493 }
1494
1495 static void
1496 tcp_log_purge_tp_logbuf(struct tcpcb *tp)
1497 {
1498         struct tcp_log_mem *log_entry;
1499         struct inpcb *inp;
1500
1501         inp = tp->t_inpcb;
1502         INP_WLOCK_ASSERT(inp);
1503         if (tp->t_lognum == 0)
1504                 return;
1505
1506         while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
1507                 tcp_log_remove_log_head(tp, log_entry);
1508         KASSERT(tp->t_lognum == 0,
1509                 ("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
1510                  __func__, tp->t_lognum));
1511         tp->t_logstate = TCP_LOG_STATE_OFF;
1512 }
1513
1514 /*
1515  * This logs an event for a TCP socket. Normally, this is called via
1516  * TCP_LOG_EVENT or TCP_LOG_EVENT_VERBOSE. See the documentation for
1517  * TCP_LOG_EVENT().
1518  */
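
/*
 * For example (mirroring calls made elsewhere in this file), logging a
 * connection-end event with no packet header and no socket buffers:
 *
 *      TCP_LOG_EVENT(tp, NULL, NULL, NULL, TCP_LOG_CONNEND, 0, 0, NULL,
 *          false);
 *
 * The INP must be write-locked by the caller.
 */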
1519
1520 struct tcp_log_buffer *
1521 tcp_log_event_(struct tcpcb *tp, struct tcphdr *th, struct sockbuf *rxbuf,
1522     struct sockbuf *txbuf, uint8_t eventid, int errornum, uint32_t len,
1523     union tcp_log_stackspecific *stackinfo, int th_hostorder,
1524     const char *output_caller, const char *func, int line, const struct timeval *itv)
1525 {
1526         struct tcp_log_mem *log_entry;
1527         struct tcp_log_buffer *log_buf;
1528         int attempt_count = 0;
1529         struct tcp_log_verbose *log_verbose;
1530         uint32_t logsn;
1531
1532         KASSERT((func == NULL && line == 0) || (func != NULL && line > 0),
1533             ("%s called with inconsistent func (%p) and line (%d) arguments",
1534                 __func__, func, line));
1535
1536         INP_WLOCK_ASSERT(tp->t_inpcb);
1537         if (tcp_disable_all_bb_logs) {
1538                 /*
1539                  * The global shutdown logging
1540                  * switch has been thrown. Call
1541                  * the purge function, which
1542                  * frees the logs and turns
1543                  * off logging.
1544                  */
1545                 tcp_log_purge_tp_logbuf(tp);
1546                 return (NULL);
1547         }
1548         KASSERT(tp->t_logstate == TCP_LOG_STATE_HEAD ||
1549             tp->t_logstate == TCP_LOG_STATE_TAIL ||
1550             tp->t_logstate == TCP_LOG_STATE_CONTINUAL ||
1551             tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO ||
1552             tp->t_logstate == TCP_LOG_STATE_TAIL_AUTO,
1553             ("%s called with unexpected tp->t_logstate (%d)", __func__,
1554                 tp->t_logstate));
1555
1556         /*
1557          * Get the serial number. We do this early so it will
1558          * increment even if we end up skipping the log entry for some
1559          * reason.
1560          */
1561         logsn = tp->t_logsn++;
1562
1563         /*
1564          * Can we get a new log entry? If so, increment the lognum counter
1565          * here.
1566          */
1567 retry:
1568         if (tp->t_lognum < tp->t_loglimit) {
1569                 if ((log_entry = uma_zalloc(tcp_log_zone, M_NOWAIT)) != NULL)
1570                         tp->t_lognum++;
1571         } else
1572                 log_entry = NULL;
1573
1574         /* Do we need to try to reuse? */
1575         if (log_entry == NULL) {
1576                 /*
1577                  * Sacrifice auto-logged sessions without a log ID if
1578                  * tcp_log_auto_all is false. (If they don't have a log
1579                  * ID by now, it is probable that either they won't get one
1580                  * or we are resource-constrained.)
1581                  */
1582                 if (tp->t_lib == NULL && (tp->t_flags2 & TF2_LOG_AUTO) &&
1583                     !tcp_log_auto_all) {
1584                         if (tcp_log_state_change(tp, TCP_LOG_STATE_CLEAR)) {
1585 #ifdef INVARIANTS
1586                                 panic("%s:%d: tcp_log_state_change() failed "
1587                                     "to set tp %p to TCP_LOG_STATE_CLEAR",
1588                                     __func__, __LINE__, tp);
1589 #endif
1590                                 tp->t_logstate = TCP_LOG_STATE_OFF;
1591                         }
1592                         return (NULL);
1593                 }
1594                 /*
1595                  * If we are in TCP_LOG_STATE_HEAD_AUTO state, try to dump
1596                  * the buffers. If successful, deactivate tracing. Otherwise,
1597                  * leave it active so we will retry.
1598                  */
1599                 if (tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO &&
1600                     !tcp_log_dump_tp_logbuf(tp, "auto-dumped from head",
1601                     M_NOWAIT, false)) {
1602                         tp->t_logstate = TCP_LOG_STATE_OFF;
1603                         return (NULL);
1604                 } else if ((tp->t_logstate == TCP_LOG_STATE_CONTINUAL) &&
1605                     !tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
1606                     M_NOWAIT, false)) {
1607                         if (attempt_count == 0) {
1608                                 attempt_count++;
1609                                 goto retry;
1610                         }
1611 #ifdef TCPLOG_DEBUG_COUNTERS
1612                         counter_u64_add(tcp_log_que_fail4, 1);
1613 #endif
1614                         return (NULL);
1615                 } else if (tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO)
1616                         return (NULL);
1617
1618                 /* If in HEAD state, just deactivate the tracing and return. */
1619                 if (tp->t_logstate == TCP_LOG_STATE_HEAD) {
1620                         tp->t_logstate = TCP_LOG_STATE_OFF;
1621                         return (NULL);
1622                 }
1623
1624                 /*
1625                  * Get a buffer to reuse. If that fails, just give up.
1626                  * (We can't log anything without a buffer in which to
1627                  * put it.)
1628                  *
1629                  * Note that we don't change the t_lognum counter
1630                  * here. Because we are re-using the buffer, the total
1631                  * number won't change.
1632                  */
1633                 if ((log_entry = STAILQ_FIRST(&tp->t_logs)) == NULL)
1634                         return (NULL);
1635                 STAILQ_REMOVE_HEAD(&tp->t_logs, tlm_queue);
1636                 tcp_log_entry_refcnt_rem(log_entry);
1637         }
1638
1639         KASSERT(log_entry != NULL,
1640             ("%s: log_entry unexpectedly NULL", __func__));
1641
1642         /* Extract the log buffer and verbose buffer pointers. */
1643         log_buf = &log_entry->tlm_buf;
1644         log_verbose = &log_entry->tlm_v;
1645
1646         /* Basic entries. */
1647         if (itv == NULL)
1648                 getmicrouptime(&log_buf->tlb_tv);
1649         else
1650                 memcpy(&log_buf->tlb_tv, itv, sizeof(struct timeval));
1651         log_buf->tlb_ticks = ticks;
1652         log_buf->tlb_sn = logsn;
1653         log_buf->tlb_stackid = tp->t_fb->tfb_id;
1654         log_buf->tlb_eventid = eventid;
1655         log_buf->tlb_eventflags = 0;
1656         log_buf->tlb_errno = errornum;
1657
1658         /* Socket buffers */
1659         if (rxbuf != NULL) {
1660                 log_buf->tlb_eventflags |= TLB_FLAG_RXBUF;
1661                 log_buf->tlb_rxbuf.tls_sb_acc = rxbuf->sb_acc;
1662                 log_buf->tlb_rxbuf.tls_sb_ccc = rxbuf->sb_ccc;
1663                 log_buf->tlb_rxbuf.tls_sb_spare = 0;
1664         }
1665         if (txbuf != NULL) {
1666                 log_buf->tlb_eventflags |= TLB_FLAG_TXBUF;
1667                 log_buf->tlb_txbuf.tls_sb_acc = txbuf->sb_acc;
1668                 log_buf->tlb_txbuf.tls_sb_ccc = txbuf->sb_ccc;
1669                 log_buf->tlb_txbuf.tls_sb_spare = 0;
1670         }
1671         /* Copy values from tp to the log entry. */
1672 #define COPY_STAT(f)    log_buf->tlb_ ## f = tp->f
1673 #define COPY_STAT_T(f)  log_buf->tlb_ ## f = tp->t_ ## f
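        /*
         * E.g., COPY_STAT(snd_una) expands to
         * "log_buf->tlb_snd_una = tp->snd_una", and COPY_STAT_T(state)
         * to "log_buf->tlb_state = tp->t_state".
         */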
1674         COPY_STAT_T(state);
1675         COPY_STAT_T(starttime);
1676         COPY_STAT(iss);
1677         COPY_STAT_T(flags);
1678         COPY_STAT(snd_una);
1679         COPY_STAT(snd_max);
1680         COPY_STAT(snd_cwnd);
1681         COPY_STAT(snd_nxt);
1682         COPY_STAT(snd_recover);
1683         COPY_STAT(snd_wnd);
1684         COPY_STAT(snd_ssthresh);
1685         COPY_STAT_T(srtt);
1686         COPY_STAT_T(rttvar);
1687         COPY_STAT(rcv_up);
1688         COPY_STAT(rcv_adv);
1689         COPY_STAT(rcv_nxt);
1690         COPY_STAT(rcv_wnd);
1691         COPY_STAT_T(dupacks);
1692         COPY_STAT_T(segqlen);
1693         COPY_STAT(snd_numholes);
1694         COPY_STAT(snd_scale);
1695         COPY_STAT(rcv_scale);
1696 #undef COPY_STAT
1697 #undef COPY_STAT_T
1698         log_buf->tlb_flex1 = 0;
1699         log_buf->tlb_flex2 = 0;
1700         /* Copy stack-specific info. */
1701         if (stackinfo != NULL) {
1702                 memcpy(&log_buf->tlb_stackinfo, stackinfo,
1703                     sizeof(log_buf->tlb_stackinfo));
1704                 log_buf->tlb_eventflags |= TLB_FLAG_STACKINFO;
1705         }
1706
1707         /* The packet */
1708         log_buf->tlb_len = len;
1709         if (th) {
1710                 int optlen;
1711
1712                 log_buf->tlb_eventflags |= TLB_FLAG_HDR;
1713                 log_buf->tlb_th = *th;
1714                 if (th_hostorder)
1715                         tcp_fields_to_net(&log_buf->tlb_th);
1716                 optlen = (th->th_off << 2) - sizeof (struct tcphdr);
1717                 if (optlen > 0)
1718                         memcpy(log_buf->tlb_opts, th + 1, optlen);
1719         }
1720
1721         /* Verbose information */
1722         if (func != NULL) {
1723                 log_buf->tlb_eventflags |= TLB_FLAG_VERBOSE;
1724                 if (output_caller != NULL)
1725                         strlcpy(log_verbose->tlv_snd_frm, output_caller,
1726                             TCP_FUNC_LEN);
1727                 else
1728                         *log_verbose->tlv_snd_frm = 0;
1729                 strlcpy(log_verbose->tlv_trace_func, func, TCP_FUNC_LEN);
1730                 log_verbose->tlv_trace_line = line;
1731         }
1732
1733         /* Insert the new log at the tail. */
1734         STAILQ_INSERT_TAIL(&tp->t_logs, log_entry, tlm_queue);
1735         tcp_log_entry_refcnt_add(log_entry);
1736         return (log_buf);
1737 }
1738
1739 /*
1740  * Change the logging state for a TCPCB. Returns 0 on success or an
1741  * error code on failure.
1742  */
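
/*
 * A minimal userspace sketch, assuming the TCP_LOG socket option from
 * netinet/tcp.h is the path by which this function is normally reached:
 *
 *      int state = TCP_LOG_STATE_TAIL;
 *
 *      if (setsockopt(fd, IPPROTO_TCP, TCP_LOG, &state,
 *          sizeof(state)) == -1)
 *              err(1, "setsockopt(TCP_LOG)");
 */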
1743 int
1744 tcp_log_state_change(struct tcpcb *tp, int state)
1745 {
1746         struct tcp_log_mem *log_entry;
1747
1748         INP_WLOCK_ASSERT(tp->t_inpcb);
1749         switch (state) {
1750         case TCP_LOG_STATE_CLEAR:
1751                 while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
1752                         tcp_log_remove_log_head(tp, log_entry);
1753                 /* Fall through */
1754
1755         case TCP_LOG_STATE_OFF:
1756                 tp->t_logstate = TCP_LOG_STATE_OFF;
1757                 break;
1758
1759         case TCP_LOG_STATE_TAIL:
1760         case TCP_LOG_STATE_HEAD:
1761         case TCP_LOG_STATE_CONTINUAL:
1762         case TCP_LOG_STATE_HEAD_AUTO:
1763         case TCP_LOG_STATE_TAIL_AUTO:
1764                 tp->t_logstate = state;
1765                 break;
1766
1767         default:
1768                 return (EINVAL);
1769         }
1770         if (tcp_disable_all_bb_logs) {
1771                 /* We are prohibited from doing any logs */
1772                 tp->t_logstate = TCP_LOG_STATE_OFF;
1773         }
1774         tp->t_flags2 &= ~(TF2_LOG_AUTO);
1775
1776         return (0);
1777 }
1778
1779 /* If tcp_drain() is called, flush half the log entries. */
1780 void
1781 tcp_log_drain(struct tcpcb *tp)
1782 {
1783         struct tcp_log_mem *log_entry, *next;
1784         int target, skip;
1785
1786         INP_WLOCK_ASSERT(tp->t_inpcb);
1787         if ((target = tp->t_lognum / 2) == 0)
1788                 return;
1789
1790         /*
1791          * If we are logging the "head" packets, we want to discard
1792          * from the tail of the queue. Otherwise, we want to discard
1793          * from the head.
1794          */
1795         if (tp->t_logstate == TCP_LOG_STATE_HEAD ||
1796             tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO) {
1797                 skip = tp->t_lognum - target;
1798                 STAILQ_FOREACH(log_entry, &tp->t_logs, tlm_queue)
1799                         if (!--skip)
1800                                 break;
1801                 KASSERT(log_entry != NULL,
1802                     ("%s: skipped through all entries!", __func__));
1803                 if (log_entry == NULL)
1804                         return;
1805                 while ((next = STAILQ_NEXT(log_entry, tlm_queue)) != NULL) {
1806                         STAILQ_REMOVE_AFTER(&tp->t_logs, log_entry, tlm_queue);
1807                         tcp_log_entry_refcnt_rem(next);
1808                         tcp_log_remove_log_cleanup(tp, next);
1809 #ifdef INVARIANTS
1810                         target--;
1811 #endif
1812                 }
1813                 KASSERT(target == 0,
1814                     ("%s: After removing from tail, target was %d", __func__,
1815                         target));
1816         } else if (tp->t_logstate == TCP_LOG_STATE_CONTINUAL) {
1817                 (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
1818                     M_NOWAIT, false);
1819         } else {
1820                 while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL &&
1821                     target--)
1822                         tcp_log_remove_log_head(tp, log_entry);
1823                 KASSERT(target <= 0,
1824                     ("%s: After removing from head, target was %d", __func__,
1825                         target));
1826                 KASSERT(tp->t_lognum > 0,
1827                     ("%s: After removing from head, tp->t_lognum was %d",
1828                         __func__, tp->t_lognum));
1829                 KASSERT(log_entry != NULL,
1830                     ("%s: After removing from head, the tailq was empty",
1831                         __func__));
1832         }
1833 }
1834
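/*
 * Copy data to the destination described by the sockopt. When the sockopt
 * carries a thread pointer, the destination is a userspace address and we
 * must use copyout(). When it does not (as when tcp_log_expandlogbuf()
 * builds a buffer entirely within the kernel and sets sopt_td to NULL),
 * the destination is kernel memory and a plain bcopy() suffices.
 */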
1835 static inline int
1836 tcp_log_copyout(struct sockopt *sopt, void *src, void *dst, size_t len)
1837 {
1838
1839         if (sopt->sopt_td != NULL)
1840                 return (copyout(src, dst, len));
1841         bcopy(src, dst, len);
1842         return (0);
1843 }
1844
1845 static int
1846 tcp_log_logs_to_buf(struct sockopt *sopt, struct tcp_log_stailq *log_tailqp,
1847     struct tcp_log_buffer **end, int count)
1848 {
1849         struct tcp_log_buffer *out_entry;
1850         struct tcp_log_mem *log_entry;
1851         size_t entrysize;
1852         int error;
1853 #ifdef INVARIANTS
1854         int orig_count = count;
1855 #endif
1856
1857         /* Copy the data out. */
1858         error = 0;
1859         out_entry = (struct tcp_log_buffer *) sopt->sopt_val;
1860         STAILQ_FOREACH(log_entry, log_tailqp, tlm_queue) {
1861                 count--;
1862                 KASSERT(count >= 0,
1863                     ("%s:%d: Exceeded expected count (%d) processing list %p",
1864                     __func__, __LINE__, orig_count, log_tailqp));
1865
1866 #ifdef TCPLOG_DEBUG_COUNTERS
1867                 counter_u64_add(tcp_log_que_copyout, 1);
1868 #endif
1869
1870                 /*
1871                  * Skip copying out the header if it isn't present.
1872                  * Instead, copy out zeros (to ensure we don't leak info).
1873                  * TODO: Make sure we truly do zero everything we don't
1874                  * explicitly set.
1875                  */
1876                 if (log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_HDR)
1877                         entrysize = sizeof(struct tcp_log_buffer);
1878                 else
1879                         entrysize = offsetof(struct tcp_log_buffer, tlb_th);
1880                 error = tcp_log_copyout(sopt, &log_entry->tlm_buf, out_entry,
1881                     entrysize);
1882                 if (error)
1883                         break;
1884                 if (!(log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_HDR)) {
1885                         error = tcp_log_copyout(sopt, zerobuf,
1886                             ((uint8_t *)out_entry) + entrysize,
1887                             sizeof(struct tcp_log_buffer) - entrysize);
1888                 }
1889
1890                 /*
1891                  * Copy out the verbose bit, if needed. Either way,
1892                  * increment the output pointer the correct amount.
1893                  */
1894                 if (log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_VERBOSE) {
1895                         error = tcp_log_copyout(sopt, &log_entry->tlm_v,
1896                             out_entry->tlb_verbose,
1897                             sizeof(struct tcp_log_verbose));
1898                         if (error)
1899                                 break;
1900                         out_entry = (struct tcp_log_buffer *)
1901                             (((uint8_t *) (out_entry + 1)) +
1902                             sizeof(struct tcp_log_verbose));
1903                 } else
1904                         out_entry++;
1905         }
1906         *end = out_entry;
1907         KASSERT(error || count == 0,
1908             ("%s:%d: Less than expected count (%d) processing list %p"
1909             " (%d remain)", __func__, __LINE__, orig_count,
1910             log_tailqp, count));
1911
1912         return (error);
1913 }
1914
1915 /*
1916  * Copy out the buffer. Note that we do incremental copying, so
1917  * sooptcopyout() won't work. However, the goal is to produce the same
1918  * end result as if we copied in the entire user buffer, updated it,
1919  * and then used sooptcopyout() to copy it out.
1920  *
1921  * NOTE: This should be called with a write lock on the PCB; however,
1922  * the function will drop it after it extracts the data from the TCPCB.
1923  */
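
/*
 * A rough userspace sketch of the poll-then-fetch pattern this enables,
 * assuming the TCP_LOGBUF socket option from netinet/tcp.h is the entry
 * point (the first call passes a NULL buffer, so only an approximate
 * size is returned):
 *
 *      socklen_t optlen = 1 << 24;
 *      void *buf;
 *
 *      getsockopt(fd, IPPROTO_TCP, TCP_LOGBUF, NULL, &optlen);
 *      buf = malloc(optlen);
 *      getsockopt(fd, IPPROTO_TCP, TCP_LOGBUF, buf, &optlen);
 */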
1924 int
1925 tcp_log_getlogbuf(struct sockopt *sopt, struct tcpcb *tp)
1926 {
1927         struct tcp_log_stailq log_tailq;
1928         struct tcp_log_mem *log_entry, *log_next;
1929         struct tcp_log_buffer *out_entry;
1930         struct inpcb *inp;
1931         size_t outsize, entrysize;
1932         int error, outnum;
1933
1934         INP_WLOCK_ASSERT(tp->t_inpcb);
1935         inp = tp->t_inpcb;
1936
1937         /*
1938          * Determine which log entries will fit in the buffer. As an
1939          * optimization, skip this if all the entries will clearly fit
1940          * in the buffer. (However, get an exact size if we are using
1941          * INVARIANTS.)
1942          */
1943 #ifndef INVARIANTS
1944         if (sopt->sopt_valsize / (sizeof(struct tcp_log_buffer) +
1945             sizeof(struct tcp_log_verbose)) >= tp->t_lognum) {
1946                 log_entry = STAILQ_LAST(&tp->t_logs, tcp_log_mem, tlm_queue);
1947                 log_next = NULL;
1948                 outsize = 0;
1949                 outnum = tp->t_lognum;
1950         } else {
1951 #endif
1952                 outsize = outnum = 0;
1953                 log_entry = NULL;
1954                 STAILQ_FOREACH(log_next, &tp->t_logs, tlm_queue) {
1955                         entrysize = sizeof(struct tcp_log_buffer);
1956                         if (log_next->tlm_buf.tlb_eventflags &
1957                             TLB_FLAG_VERBOSE)
1958                                 entrysize += sizeof(struct tcp_log_verbose);
1959                         if ((sopt->sopt_valsize - outsize) < entrysize)
1960                                 break;
1961                         outsize += entrysize;
1962                         outnum++;
1963                         log_entry = log_next;
1964                 }
1965                 KASSERT(outsize <= sopt->sopt_valsize,
1966                     ("%s: calculated output size (%zu) greater than available"
1967                         "space (%zu)", __func__, outsize, sopt->sopt_valsize));
1968 #ifndef INVARIANTS
1969         }
1970 #endif
1971
1972         /*
1973          * Copy traditional sooptcopyout() behavior: if sopt->sopt_val
1974          * is NULL, silently skip the copy. However, in this case, we
1975          * will leave the list alone and return. Functionally, this
1976          * gives userspace a way to poll for an approximate buffer
1977          * size they will need to get the log entries.
1978          */
1979         if (sopt->sopt_val == NULL) {
1980                 INP_WUNLOCK(inp);
1981                 if (outsize == 0) {
1982                         outsize = outnum * (sizeof(struct tcp_log_buffer) +
1983                             sizeof(struct tcp_log_verbose));
1984                 }
1985                 if (sopt->sopt_valsize > outsize)
1986                         sopt->sopt_valsize = outsize;
1987                 return (0);
1988         }
1989
1990         /*
1991          * Break apart the list. We'll save the ones we want to copy
1992          * out locally and remove them from the TCPCB list. We can
1993          * then drop the INPCB lock while we do the copyout.
1994          *
1995          * There are roughly three cases:
1996          * 1. There was nothing to copy out. That's easy: drop the
1997          * lock and return.
1998          * 2. We are copying out the entire list. Again, that's easy:
1999          * move the whole list.
2000          * 3. We are copying out a partial list. That's harder. We
2001          * need to update the list book-keeping entries.
2002          */
2003         if (log_entry != NULL && log_next == NULL) {
2004                 /* Move entire list. */
2005                 KASSERT(outnum == tp->t_lognum,
2006                     ("%s:%d: outnum (%d) should match tp->t_lognum (%d)",
2007                         __func__, __LINE__, outnum, tp->t_lognum));
2008                 log_tailq = tp->t_logs;
2009                 tp->t_lognum = 0;
2010                 STAILQ_INIT(&tp->t_logs);
2011         } else if (log_entry != NULL) {
2012                 /* Move partial list. */
2013                 KASSERT(outnum < tp->t_lognum,
2014                     ("%s:%d: outnum (%d) not less than tp->t_lognum (%d)",
2015                         __func__, __LINE__, outnum, tp->t_lognum));
2016                 STAILQ_FIRST(&log_tailq) = STAILQ_FIRST(&tp->t_logs);
2017                 STAILQ_FIRST(&tp->t_logs) = STAILQ_NEXT(log_entry, tlm_queue);
2018                 KASSERT(STAILQ_NEXT(log_entry, tlm_queue) != NULL,
2019                     ("%s:%d: tp->t_logs is unexpectedly shorter than expected"
2020                     "(tp: %p, log_tailq: %p, outnum: %d, tp->t_lognum: %d)",
2021                     __func__, __LINE__, tp, &log_tailq, outnum, tp->t_lognum));
2022                 STAILQ_NEXT(log_entry, tlm_queue) = NULL;
2023                 log_tailq.stqh_last = &STAILQ_NEXT(log_entry, tlm_queue);
2024                 tp->t_lognum -= outnum;
2025         } else
2026                 STAILQ_INIT(&log_tailq);
2027
2028         /* Drop the PCB lock. */
2029         INP_WUNLOCK(inp);
2030
2031         /* Copy the data out. */
2032         error = tcp_log_logs_to_buf(sopt, &log_tailq, &out_entry, outnum);
2033
2034         if (error) {
2035                 /* Restore list */
2036                 INP_WLOCK(inp);
2037                 if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0) {
2038                         tp = intotcpcb(inp);
2039
2040                         /* Merge the two lists. */
2041                         STAILQ_CONCAT(&log_tailq, &tp->t_logs);
2042                         tp->t_logs = log_tailq;
2043                         tp->t_lognum += outnum;
2044                 }
2045                 INP_WUNLOCK(inp);
2046         } else {
2047                 /* Sanity check entries */
2048                 KASSERT(((caddr_t)out_entry - (caddr_t)sopt->sopt_val) ==
2049                     outsize, ("%s: Actual output size (%zu) != "
2050                         "calculated output size (%zu)", __func__,
2051                         (size_t)((caddr_t)out_entry - (caddr_t)sopt->sopt_val),
2052                         outsize));
2053
2054                 /* Free the entries we just copied out. */
2055                 STAILQ_FOREACH_SAFE(log_entry, &log_tailq, tlm_queue, log_next) {
2056                         tcp_log_entry_refcnt_rem(log_entry);
2057                         uma_zfree(tcp_log_zone, log_entry);
2058                 }
2059         }
2060
2061         sopt->sopt_valsize = (size_t)((caddr_t)out_entry -
2062             (caddr_t)sopt->sopt_val);
2063         return (error);
2064 }
2065
2066 static void
2067 tcp_log_free_queue(struct tcp_log_dev_queue *param)
2068 {
2069         struct tcp_log_dev_log_queue *entry;
2070
2071         KASSERT(param != NULL, ("%s: called with NULL param", __func__));
2072         if (param == NULL)
2073                 return;
2074
2075         entry = (struct tcp_log_dev_log_queue *)param;
2076
2077         /* Free the entries. */
2078         tcp_log_free_entries(&entry->tldl_entries, &entry->tldl_count);
2079
2080         /* Free the buffer, if it is allocated. */
2081         if (entry->tldl_common.tldq_buf != NULL)
2082                 free(entry->tldl_common.tldq_buf, M_TCPLOGDEV);
2083
2084         /* Free the queue entry. */
2085         free(entry, M_TCPLOGDEV);
2086 }
2087
2088 static struct tcp_log_common_header *
2089 tcp_log_expandlogbuf(struct tcp_log_dev_queue *param)
2090 {
2091         struct tcp_log_dev_log_queue *entry;
2092         struct tcp_log_header *hdr;
2093         uint8_t *end;
2094         struct sockopt sopt;
2095         int error;
2096
2097         entry = (struct tcp_log_dev_log_queue *)param;
2098
2099         /* Take a worst-case guess at space needs. */
2100         sopt.sopt_valsize = sizeof(struct tcp_log_header) +
2101             entry->tldl_count * (sizeof(struct tcp_log_buffer) +
2102             sizeof(struct tcp_log_verbose));
2103         hdr = malloc(sopt.sopt_valsize, M_TCPLOGDEV, M_NOWAIT);
2104         if (hdr == NULL) {
2105 #ifdef TCPLOG_DEBUG_COUNTERS
2106                 counter_u64_add(tcp_log_que_fail5, entry->tldl_count);
2107 #endif
2108                 return (NULL);
2109         }
2110         sopt.sopt_val = hdr + 1;
2111         sopt.sopt_valsize -= sizeof(struct tcp_log_header);
2112         sopt.sopt_td = NULL;
2113
2114         error = tcp_log_logs_to_buf(&sopt, &entry->tldl_entries,
2115             (struct tcp_log_buffer **)&end, entry->tldl_count);
2116         if (error) {
2117                 free(hdr, M_TCPLOGDEV);
2118                 return (NULL);
2119         }
2120
2121         /* Free the entries. */
2122         tcp_log_free_entries(&entry->tldl_entries, &entry->tldl_count);
2123         entry->tldl_count = 0;
2124
2125         memset(hdr, 0, sizeof(struct tcp_log_header));
2126         hdr->tlh_version = TCP_LOG_BUF_VER;
2127         hdr->tlh_type = TCP_LOG_DEV_TYPE_BBR;
2128         hdr->tlh_length = end - (uint8_t *)hdr;
2129         hdr->tlh_ie = entry->tldl_ie;
2130         hdr->tlh_af = entry->tldl_af;
2131         getboottime(&hdr->tlh_offset);
2132         strlcpy(hdr->tlh_id, entry->tldl_id, TCP_LOG_ID_LEN);
2133         strlcpy(hdr->tlh_tag, entry->tldl_tag, TCP_LOG_TAG_LEN);
2134         strlcpy(hdr->tlh_reason, entry->tldl_reason, TCP_LOG_REASON_LEN);
2135         return ((struct tcp_log_common_header *)hdr);
2136 }
2137
2138 /*
2139  * Queue the tcpcb's log buffer for transmission via the log buffer facility.
2140  *
2141  * NOTE: This should be called with a write lock on the PCB.
2142  *
2143  * how should be M_WAITOK or M_NOWAIT. If M_WAITOK, the function will drop
2144  * and reacquire the INP lock if it needs to do so.
2145  *
2146  * If force is false, this will only dump auto-logged sessions if
2147  * tcp_log_auto_all is true or if there is a log ID defined for the session.
2148  */
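
/*
 * For example, the auto-dump paths earlier in this file invoke it as
 *
 *      (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from head",
 *          M_NOWAIT, false);
 *
 * tolerating failure, while the forced single-session dump in
 * tcp_log_dump_tp_bucket_logbufs() below uses M_WAITOK and force=true.
 */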
2149 int
2150 tcp_log_dump_tp_logbuf(struct tcpcb *tp, char *reason, int how, bool force)
2151 {
2152         struct tcp_log_dev_log_queue *entry;
2153         struct inpcb *inp;
2154 #ifdef TCPLOG_DEBUG_COUNTERS
2155         int num_entries;
2156 #endif
2157
2158         inp = tp->t_inpcb;
2159         INP_WLOCK_ASSERT(inp);
2160
2161         /* If there are no log entries, there is nothing to do. */
2162         if (tp->t_lognum == 0)
2163                 return (0);
2164
2165         /* Check for a log ID. */
2166         if (tp->t_lib == NULL && (tp->t_flags2 & TF2_LOG_AUTO) &&
2167             !tcp_log_auto_all && !force) {
2168                 struct tcp_log_mem *log_entry;
2169
2170                 /*
2171                  * We needed a log ID and none was found. Free the log entries
2172                  * and return success. Also, cancel further logging. If the
2173                  * session doesn't have a log ID by now, we'll assume it isn't
2174                  * going to get one.
2175                  */
2176                 while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
2177                         tcp_log_remove_log_head(tp, log_entry);
2178                 KASSERT(tp->t_lognum == 0,
2179                     ("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
2180                         __func__, tp->t_lognum));
2181                 tp->t_logstate = TCP_LOG_STATE_OFF;
2182                 return (0);
2183         }
2184
2185         /*
2186          * Allocate memory. If we must wait, we'll need to drop the locks
2187          * and reacquire them (and do all the related business that goes
2188          * along with that).
2189          */
2190         entry = malloc(sizeof(struct tcp_log_dev_log_queue), M_TCPLOGDEV,
2191             M_NOWAIT);
2192         if (entry == NULL && (how & M_NOWAIT)) {
2193 #ifdef TCPLOG_DEBUG_COUNTERS
2194                 counter_u64_add(tcp_log_que_fail3, 1);
2195 #endif
2196                 return (ENOBUFS);
2197         }
2198         if (entry == NULL) {
2199                 INP_WUNLOCK(inp);
2200                 entry = malloc(sizeof(struct tcp_log_dev_log_queue),
2201                     M_TCPLOGDEV, M_WAITOK);
2202                 INP_WLOCK(inp);
2203                 /*
2204                  * Note that this check is slightly overly-restrictive in
2205                  * that the TCB can survive either of these events.
2206                  * However, there is currently not a good way to ensure
2207                  * that is the case. So, if we hit this M_WAIT path, we
2208                  * may end up dropping some entries. That seems like a
2209                  * small price to pay for safety.
2210                  */
2211                 if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
2212                         free(entry, M_TCPLOGDEV);
2213 #ifdef TCPLOG_DEBUG_COUNTERS
2214                         counter_u64_add(tcp_log_que_fail2, 1);
2215 #endif
2216                         return (ECONNRESET);
2217                 }
2218                 tp = intotcpcb(inp);
2219                 if (tp->t_lognum == 0) {
2220                         free(entry, M_TCPLOGDEV);
2221                         return (0);
2222                 }
2223         }
2224
2225         /* Fill in the unique parts of the queue entry. */
2226         if (tp->t_lib != NULL) {
2227                 strlcpy(entry->tldl_id, tp->t_lib->tlb_id, TCP_LOG_ID_LEN);
2228                 strlcpy(entry->tldl_tag, tp->t_lib->tlb_tag, TCP_LOG_TAG_LEN);
2229         } else {
2230                 strlcpy(entry->tldl_id, "UNKNOWN", TCP_LOG_ID_LEN);
2231                 strlcpy(entry->tldl_tag, "UNKNOWN", TCP_LOG_TAG_LEN);
2232         }
2233         if (reason != NULL)
2234                 strlcpy(entry->tldl_reason, reason, TCP_LOG_REASON_LEN);
2235         else
2236                 strlcpy(entry->tldl_reason, "UNKNOWN", TCP_LOG_REASON_LEN);
2237         entry->tldl_ie = inp->inp_inc.inc_ie;
2238         if (inp->inp_inc.inc_flags & INC_ISIPV6)
2239                 entry->tldl_af = AF_INET6;
2240         else
2241                 entry->tldl_af = AF_INET;
2242         entry->tldl_entries = tp->t_logs;
2243         entry->tldl_count = tp->t_lognum;
2244
2245         /* Fill in the common parts of the queue entry. */
2246         entry->tldl_common.tldq_buf = NULL;
2247         entry->tldl_common.tldq_xform = tcp_log_expandlogbuf;
2248         entry->tldl_common.tldq_dtor = tcp_log_free_queue;
2249
2250         /* Clear the log data from the TCPCB. */
2251 #ifdef TCPLOG_DEBUG_COUNTERS
2252         num_entries = tp->t_lognum;
2253 #endif
2254         tp->t_lognum = 0;
2255         STAILQ_INIT(&tp->t_logs);
2256
2257         /* Add the entry. If no one is listening, free the entry. */
2258         if (tcp_log_dev_add_log((struct tcp_log_dev_queue *)entry)) {
2259                 tcp_log_free_queue((struct tcp_log_dev_queue *)entry);
2260 #ifdef TCPLOG_DEBUG_COUNTERS
2261                 counter_u64_add(tcp_log_que_fail1, num_entries);
2262         } else {
2263                 counter_u64_add(tcp_log_queued, num_entries);
2264 #endif
2265         }
2266         return (0);
2267 }
2268
2269 /*
2270  * Queue the log_id_node's log buffers for transmission via the log buffer
2271  * facility.
2272  *
2273  * NOTE: This should be called with the bucket locked and referenced.
2274  *
2275  * how should be M_WAITOK or M_NOWAIT. If M_WAITOK, the function will drop
2276  * and reacquire the bucket lock if it needs to do so. (The caller must
2277  * ensure that the tln is no longer on any lists so no one else will mess
2278  * with this while the lock is dropped!)
2279  */
2280 static int
2281 tcp_log_dump_node_logbuf(struct tcp_log_id_node *tln, char *reason, int how)
2282 {
2283         struct tcp_log_dev_log_queue *entry;
2284         struct tcp_log_id_bucket *tlb;
2285
2286         tlb = tln->tln_bucket;
2287         TCPID_BUCKET_LOCK_ASSERT(tlb);
2288         KASSERT(tlb->tlb_refcnt > 0,
2289             ("%s:%d: Called with unreferenced bucket (tln=%p, tlb=%p)",
2290             __func__, __LINE__, tln, tlb));
2291         KASSERT(tln->tln_closed,
2292             ("%s:%d: Called for node with tln_closed==false (tln=%p)",
2293             __func__, __LINE__, tln));
2294
2295         /* If there are no log entries, there is nothing to do. */
2296         if (tln->tln_count == 0)
2297                 return (0);
2298
2299         /*
2300          * Allocate memory. If we must wait, we'll need to drop the locks
2301          * and reacquire them (and do all the related business that goes
2302          * along with that).
2303          */
2304         entry = malloc(sizeof(struct tcp_log_dev_log_queue), M_TCPLOGDEV,
2305             M_NOWAIT);
2306         if (entry == NULL && (how & M_NOWAIT))
2307                 return (ENOBUFS);
2308         if (entry == NULL) {
2309                 TCPID_BUCKET_UNLOCK(tlb);
2310                 entry = malloc(sizeof(struct tcp_log_dev_log_queue),
2311                     M_TCPLOGDEV, M_WAITOK);
2312                 TCPID_BUCKET_LOCK(tlb);
2313         }
2314
2315         /* Fill in the common parts of the queue entry. */
2316         entry->tldl_common.tldq_buf = NULL;
2317         entry->tldl_common.tldq_xform = tcp_log_expandlogbuf;
2318         entry->tldl_common.tldq_dtor = tcp_log_free_queue;
2319
2320         /* Fill in the unique parts of the queue entry. */
2321         strlcpy(entry->tldl_id, tlb->tlb_id, TCP_LOG_ID_LEN);
2322         strlcpy(entry->tldl_tag, tlb->tlb_tag, TCP_LOG_TAG_LEN);
2323         if (reason != NULL)
2324                 strlcpy(entry->tldl_reason, reason, TCP_LOG_REASON_LEN);
2325         else
2326                 strlcpy(entry->tldl_reason, "UNKNOWN", TCP_LOG_REASON_LEN);
2327         entry->tldl_ie = tln->tln_ie;
2328         entry->tldl_entries = tln->tln_entries;
2329         entry->tldl_count = tln->tln_count;
2330         entry->tldl_af = tln->tln_af;
2331
2332         /* Add the entry. If no one is listening, free the entry. */
2333         if (tcp_log_dev_add_log((struct tcp_log_dev_queue *)entry))
2334                 tcp_log_free_queue((struct tcp_log_dev_queue *)entry);
2335
2336         return (0);
2337 }
2338
2339
2340 /*
2341  * Queue the log buffers for all sessions in a bucket for transmission via
2342  * the log buffer facility.
2343  *
2344  * NOTE: This should be called with a locked bucket; however, the function
2345  * will drop the lock.
2346  */
2347 #define LOCAL_SAVE      10
2348 static void
2349 tcp_log_dumpbucketlogs(struct tcp_log_id_bucket *tlb, char *reason)
2350 {
2351         struct tcp_log_id_node local_entries[LOCAL_SAVE];
2352         struct inpcb *inp;
2353         struct tcpcb *tp;
2354         struct tcp_log_id_node *cur_tln, *prev_tln, *tmp_tln;
2355         int i, num_local_entries, tree_locked;
2356         bool expireq_locked;
2357
2358         TCPID_BUCKET_LOCK_ASSERT(tlb);
2359
2360         /*
2361          * Take a reference on the bucket to keep it from disappearing until
2362          * we are done.
2363          */
2364         TCPID_BUCKET_REF(tlb);
2365
2366         /*
2367          * We'll try to create these without dropping locks. However, we
2368          * might very well need to drop locks to get memory. If that's the
2369          * case, we'll save up to 10 on the stack, and sacrifice the rest.
2370          * (Otherwise, we need to worry about finding our place again in a
2371          * potentially changed list. It just doesn't seem worth the trouble
2372          * to do that.)
2373          */
2374         expireq_locked = false;
2375         num_local_entries = 0;
2376         prev_tln = NULL;
2377         tree_locked = TREE_UNLOCKED;
2378         SLIST_FOREACH_SAFE(cur_tln, &tlb->tlb_head, tln_list, tmp_tln) {
2379                 /*
2380                  * If this isn't associated with a TCPCB, we can pull it off
2381                  * the list now. We need to be careful that the expire timer
2382                  * hasn't already taken ownership (tln_expiretime == SBT_MAX).
2383                  * If so, we let the expire timer code free the data.
2384                  */
2385                 if (cur_tln->tln_closed) {
2386 no_inp:
2387                         /*
2388                          * Get the expireq lock so we can get a consistent
2389                          * read of tln_expiretime and so we can remove this
2390                          * from the expireq.
2391                          */
2392                         if (!expireq_locked) {
2393                                 TCPLOG_EXPIREQ_LOCK();
2394                                 expireq_locked = true;
2395                         }
2396
2397                         /*
2398                          * We ignore entries with tln_expiretime == SBT_MAX.
2399                          * The expire timer code already owns those.
2400                          */
2401                         KASSERT(cur_tln->tln_expiretime > (sbintime_t) 0,
2402                             ("%s:%d: node on the expire queue without positive "
2403                             "expire time", __func__, __LINE__));
2404                         if (cur_tln->tln_expiretime == SBT_MAX) {
2405                                 prev_tln = cur_tln;
2406                                 continue;
2407                         }
2408
2409                         /* Remove the entry from the expireq. */
2410                         STAILQ_REMOVE(&tcp_log_expireq_head, cur_tln,
2411                             tcp_log_id_node, tln_expireq);
2412
2413                         /* Remove the entry from the bucket. */
2414                         if (prev_tln != NULL)
2415                                 SLIST_REMOVE_AFTER(prev_tln, tln_list);
2416                         else
2417                                 SLIST_REMOVE_HEAD(&tlb->tlb_head, tln_list);
2418
2419                         /*
2420                          * Drop the INP and bucket reference counts. Due to
2421                          * lock-ordering rules, we need to drop the expire
2422                          * queue lock.
2423                          */
2424                         TCPLOG_EXPIREQ_UNLOCK();
2425                         expireq_locked = false;
2426
2427                         /* Drop the INP reference. */
2428                         INP_WLOCK(cur_tln->tln_inp);
2429                         if (!in_pcbrele_wlocked(cur_tln->tln_inp))
2430                                 INP_WUNLOCK(cur_tln->tln_inp);
2431
2432                         if (tcp_log_unref_bucket(tlb, &tree_locked, NULL)) {
2433 #ifdef INVARIANTS
2434                                 panic("%s: Bucket refcount unexpectedly 0.",
2435                                     __func__);
2436 #endif
2437                                 /*
2438                                  * Recover as best we can: free the entry we
2439                                  * own.
2440                                  */
2441                                 tcp_log_free_entries(&cur_tln->tln_entries,
2442                                     &cur_tln->tln_count);
2443                                 uma_zfree(tcp_log_node_zone, cur_tln);
2444                                 goto done;
2445                         }
2446
2447                         if (tcp_log_dump_node_logbuf(cur_tln, reason,
2448                             M_NOWAIT)) {
2449                                 /*
2450                          * If we have space, save the entries locally.
2451                                  * Otherwise, free them.
2452                                  */
2453                                 if (num_local_entries < LOCAL_SAVE) {
2454                                         local_entries[num_local_entries] =
2455                                             *cur_tln;
2456                                         num_local_entries++;
2457                                 } else {
2458                                         tcp_log_free_entries(
2459                                             &cur_tln->tln_entries,
2460                                             &cur_tln->tln_count);
2461                                 }
2462                         }
2463
2464                         /* No matter what, we are done with the node now. */
2465                         uma_zfree(tcp_log_node_zone, cur_tln);
2466
2467                         /*
2468                          * Because we removed this entry from the list, prev_tln
2469                          * (which tracks the previous entry still on the tlb
2470                          * list) remains unchanged.
2471                          */
2472                         continue;
2473                 }
2474
2475                 /*
2476                  * If we get to this point, the session data is still held in
2477                  * the TCPCB. So, we need to pull the data out of that.
2478                  *
2479                  * We will need to drop the expireq lock so we can lock the INP.
2480                  * We can then try to extract the data the "easy" way. If that
2481                  * fails, we'll save the log entries for later.
2482                  */
2483                 if (expireq_locked) {
2484                         TCPLOG_EXPIREQ_UNLOCK();
2485                         expireq_locked = false;
2486                 }
2487
2488                 /* Lock the INP and then re-check the state. */
2489                 inp = cur_tln->tln_inp;
2490                 INP_WLOCK(inp);
2491                 /*
2492                  * If we caught this while it was transitioning, the data
2493                  * might have moved from the TCPCB to the tln (signified by
2494                  * setting tln_closed to true). If so, treat this like an
2495                  * inactive connection.
2496                  */
2497                 if (cur_tln->tln_closed) {
2498                         /*
2499                          * It looks like we may have caught this connection
2500                          * while it was transitioning from active to inactive.
2501                          * Treat this like an inactive connection.
2502                          */
2503                         INP_WUNLOCK(inp);
2504                         goto no_inp;
2505                 }
2506
2507                 /*
2508                  * Try to dump the data from the tp without dropping the lock.
2509                  * If this fails, try to save off the data locally.
2510                  */
2511                 tp = cur_tln->tln_tp;
2512                 if (tcp_log_dump_tp_logbuf(tp, reason, M_NOWAIT, true) &&
2513                     num_local_entries < LOCAL_SAVE) {
2514                         tcp_log_move_tp_to_node(tp,
2515                             &local_entries[num_local_entries]);
2516                         local_entries[num_local_entries].tln_closed = 1;
2517                         KASSERT(local_entries[num_local_entries].tln_bucket ==
2518                             tlb, ("%s: %d: bucket mismatch for node %p",
2519                             __func__, __LINE__, cur_tln));
2520                         num_local_entries++;
2521                 }
2522
2523                 INP_WUNLOCK(inp);
2524
2525                 /*
2526                  * We are going to leave the current tln on the list. It will
2527                  * become the previous tln.
2528                  */
2529                 prev_tln = cur_tln;
2530         }
2531
2532         /* Drop our locks, if any. */
2533         KASSERT(tree_locked == TREE_UNLOCKED,
2534             ("%s: %d: tree unexpectedly locked", __func__, __LINE__));
2535         switch (tree_locked) {
2536         case TREE_WLOCKED:
2537                 TCPID_TREE_WUNLOCK();
2538                 tree_locked = TREE_UNLOCKED;
2539                 break;
2540         case TREE_RLOCKED:
2541                 TCPID_TREE_RUNLOCK();
2542                 tree_locked = TREE_UNLOCKED;
2543                 break;
2544         }
2545         if (expireq_locked) {
2546                 TCPLOG_EXPIREQ_UNLOCK();
2547                 expireq_locked = false;
2548         }
2549
2550         /*
2551          * Try again for any saved entries. tcp_log_dump_node_logbuf() is
2552          * guaranteed to free the log entries within the node. And, since
2553          * the node itself is on our stack, we don't need to free it.
2554          */
2555         for (i = 0; i < num_local_entries; i++)
2556                 tcp_log_dump_node_logbuf(&local_entries[i], reason, M_WAITOK);
2557
2558         /* Drop our reference. */
2559         if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
2560                 TCPID_BUCKET_UNLOCK(tlb);
2561
2562 done:
2563         /* Drop our locks, if any. */
2564         switch (tree_locked) {
2565         case TREE_WLOCKED:
2566                 TCPID_TREE_WUNLOCK();
2567                 break;
2568         case TREE_RLOCKED:
2569                 TCPID_TREE_RUNLOCK();
2570                 break;
2571         }
2572         if (expireq_locked)
2573                 TCPLOG_EXPIREQ_UNLOCK();
2574 }
2575 #undef  LOCAL_SAVE
2576
2577
2578 /*
2579  * Queue the log buffers for all sessions in a bucket for transmission via
2580  * the log buffer facility.
2581  *
2582  * NOTE: This should be called with a locked INP; however, the function
2583  * will drop the lock.
2584  */
2585 void
2586 tcp_log_dump_tp_bucket_logbufs(struct tcpcb *tp, char *reason)
2587 {
2588         struct tcp_log_id_bucket *tlb;
2589         int tree_locked;
2590
2591         /* Figure out our bucket and lock it. */
2592         INP_WLOCK_ASSERT(tp->t_inpcb);
2593         tlb = tp->t_lib;
2594         if (tlb == NULL) {
2595                 /*
2596                  * No bucket; treat this like a request to dump a single
2597                  * session's traces.
2598                  */
2599                 (void)tcp_log_dump_tp_logbuf(tp, reason, M_WAITOK, true);
2600                 INP_WUNLOCK(tp->t_inpcb);
2601                 return;
2602         }
2603         TCPID_BUCKET_REF(tlb);
2604         INP_WUNLOCK(tp->t_inpcb);
2605         TCPID_BUCKET_LOCK(tlb);
2606
2607         /* If we are the last reference, we have nothing more to do here. */
2608         tree_locked = TREE_UNLOCKED;
2609         if (tcp_log_unref_bucket(tlb, &tree_locked, NULL)) {
2610                 switch (tree_locked) {
2611                 case TREE_WLOCKED:
2612                         TCPID_TREE_WUNLOCK();
2613                         break;
2614                 case TREE_RLOCKED:
2615                         TCPID_TREE_RUNLOCK();
2616                         break;
2617                 }
2618                 return;
2619         }
2620
2621         /* Turn this over to tcp_log_dumpbucketlogs() to finish the work. */
2622         tcp_log_dumpbucketlogs(tlb, reason);
2623 }
2624
2625 /*
2626  * Mark the end of a flow with the current stack. A stack can add
2627  * stack-specific info to this trace event by overriding this
2628  * function (see bbr_log_flowend() for example).
2629  */
2630 void
2631 tcp_log_flowend(struct tcpcb *tp)
2632 {
2633         if (tp->t_logstate != TCP_LOG_STATE_OFF) {
2634                 struct socket *so = tp->t_inpcb->inp_socket;
2635                 TCP_LOG_EVENT(tp, NULL, &so->so_rcv, &so->so_snd,
2636                                 TCP_LOG_FLOWEND, 0, 0, NULL, false);
2637         }
2638 }
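
/*
 * A hypothetical stack-specific override, sketched only for illustration
 * (the fields to fill in depend on the stack; compare bbr_log_flowend()):
 *
 *      static void
 *      example_log_flowend(struct tcpcb *tp)
 *      {
 *              union tcp_log_stackspecific log;
 *
 *              memset(&log, 0, sizeof(log));
 *              ... fill in stack-specific fields of log ...
 *              TCP_LOG_EVENT(tp, NULL, NULL, NULL, TCP_LOG_FLOWEND,
 *                  0, 0, &log, false);
 *      }
 */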
2639