/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2016-2018 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/qmath.h>
#include <sys/queue.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/stats.h> /* Must come after qmath.h and tree.h */
#include <sys/counter.h>

#include <dev/tcp_log/tcp_log_dev.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>

/* Default expiry time */
#define	TCP_LOG_EXPIRE_TIME	((sbintime_t)60 * SBT_1S)

/* Max interval at which to run the expiry timer */
#define	TCP_LOG_EXPIRE_INTVL	((sbintime_t)5 * SBT_1S)

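/*
 * Nodes for closed connections linger on the expiry queue and are reaped by
 * tcp_log_expire(): entries that still hold log data are kept for
 * TCP_LOG_EXPIRE_TIME after close, and the expiry callout is generally
 * rescheduled no more often than once per TCP_LOG_EXPIRE_INTVL.
 */
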
static uma_zone_t tcp_log_bucket_zone, tcp_log_node_zone, tcp_log_zone;
static int tcp_log_session_limit = TCP_LOG_BUF_DEFAULT_SESSION_LIMIT;
static uint32_t tcp_log_version = TCP_LOG_BUF_VER;
RB_HEAD(tcp_log_id_tree, tcp_log_id_bucket);
static struct tcp_log_id_tree tcp_log_id_head;
static STAILQ_HEAD(, tcp_log_id_node) tcp_log_expireq_head =
    STAILQ_HEAD_INITIALIZER(tcp_log_expireq_head);
static struct mtx tcp_log_expireq_mtx;
static struct callout tcp_log_expireq_callout;
static u_long tcp_log_auto_ratio = 0;
static volatile u_long tcp_log_auto_ratio_cur = 0;
static uint32_t tcp_log_auto_mode = TCP_LOG_STATE_TAIL;
static bool tcp_log_auto_all = false;
static uint32_t tcp_disable_all_bb_logs = 0;

RB_PROTOTYPE_STATIC(tcp_log_id_tree, tcp_log_id_bucket, tlb_rb, tcp_log_id_cmp)

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, bb, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP Black Box controls");

SYSCTL_BOOL(_net_inet_tcp_bb, OID_AUTO, log_verbose, CTLFLAG_RW, &tcp_log_verbose,
    0, "Force verbose logging for TCP traces");

SYSCTL_INT(_net_inet_tcp_bb, OID_AUTO, log_session_limit,
    CTLFLAG_RW, &tcp_log_session_limit, 0,
    "Maximum number of events maintained for each TCP session");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_global_limit, CTLFLAG_RW,
    &tcp_log_zone, "Maximum number of events maintained for all TCP sessions");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_global_entries, CTLFLAG_RD,
    &tcp_log_zone, "Current number of events maintained for all TCP sessions");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_id_limit, CTLFLAG_RW,
    &tcp_log_bucket_zone, "Maximum number of log IDs");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_id_entries, CTLFLAG_RD,
    &tcp_log_bucket_zone, "Current number of log IDs");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_id_tcpcb_limit, CTLFLAG_RW,
    &tcp_log_node_zone, "Maximum number of tcpcbs with log IDs");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_id_tcpcb_entries, CTLFLAG_RD,
    &tcp_log_node_zone, "Current number of tcpcbs with log IDs");

SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, log_version, CTLFLAG_RD, &tcp_log_version,
    0, "Version of log formats exported");

SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, disable_all, CTLFLAG_RW,
    &tcp_disable_all_bb_logs, 0,
    "Disable all BB logging for all connections");

SYSCTL_ULONG(_net_inet_tcp_bb, OID_AUTO, log_auto_ratio, CTLFLAG_RW,
    &tcp_log_auto_ratio, 0, "Do auto capturing for 1 out of N sessions");

SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, log_auto_mode, CTLFLAG_RW,
    &tcp_log_auto_mode, TCP_LOG_STATE_TAIL,
    "Logging mode for auto-selected sessions (default is TCP_LOG_STATE_TAIL)");

SYSCTL_BOOL(_net_inet_tcp_bb, OID_AUTO, log_auto_all, CTLFLAG_RW,
    &tcp_log_auto_all, false,
    "Auto-select from all sessions (rather than just those with IDs)");

#ifdef TCPLOG_DEBUG_COUNTERS
counter_u64_t tcp_log_queued;
counter_u64_t tcp_log_que_fail1;
counter_u64_t tcp_log_que_fail2;
counter_u64_t tcp_log_que_fail3;
counter_u64_t tcp_log_que_fail4;
counter_u64_t tcp_log_que_fail5;
counter_u64_t tcp_log_que_copyout;
counter_u64_t tcp_log_que_read;
counter_u64_t tcp_log_que_freed;

SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, queued, CTLFLAG_RD,
    &tcp_log_queued, "Number of entries queued");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail1, CTLFLAG_RD,
    &tcp_log_que_fail1, "Number of entries queued but fail 1");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail2, CTLFLAG_RD,
    &tcp_log_que_fail2, "Number of entries queued but fail 2");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail3, CTLFLAG_RD,
    &tcp_log_que_fail3, "Number of entries queued but fail 3");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail4, CTLFLAG_RD,
    &tcp_log_que_fail4, "Number of entries queued but fail 4");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail5, CTLFLAG_RD,
    &tcp_log_que_fail5, "Number of entries queued but fail 5");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, copyout, CTLFLAG_RD,
    &tcp_log_que_copyout, "Number of entries copied out");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, read, CTLFLAG_RD,
    &tcp_log_que_read, "Number of entries read from the queue");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, freed, CTLFLAG_RD,
    &tcp_log_que_freed, "Number of entries freed after reading");
#endif /* TCPLOG_DEBUG_COUNTERS */

#define	TCPLOG_DEBUG_RINGBUF

/* Number of requests to consider a PCB ID "active". */
#define	ACTIVE_REQUEST_COUNT	10

/* Statistic tracking for "active" PCB IDs. */
static counter_u64_t tcp_log_pcb_ids_cur;
static counter_u64_t tcp_log_pcb_ids_tot;

SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, pcb_ids_cur, CTLFLAG_RD,
    &tcp_log_pcb_ids_cur, "Number of pcb IDs allocated in the system");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, pcb_ids_tot, CTLFLAG_RD,
    &tcp_log_pcb_ids_tot, "Total number of pcb IDs that have been allocated");

struct tcp_log_mem
{
	STAILQ_ENTRY(tcp_log_mem) tlm_queue;
	struct tcp_log_buffer	tlm_buf;
	struct tcp_log_verbose	tlm_v;
#ifdef TCPLOG_DEBUG_RINGBUF
	volatile int		tlm_refcnt;
#endif
};

/* 60 bytes for the header + 16 bytes of padding */
static uint8_t zerobuf[76];

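/*
 * When a log entry is copied out without a TCP header (TLB_FLAG_HDR clear),
 * tcp_log_logs_to_buf() copies zerobuf over the header portion of the output
 * record so that no uninitialized kernel memory leaks to userspace.
 */
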
/*
 * Lock order:
 * 1. TCPID_TREE
 * 2. TCPID_BUCKET
 * 3. INP
 *
 * Rules:
 * A. You need a lock on the tree to add/remove buckets.
 * B. You need a lock on the bucket to add/remove nodes from the bucket.
 * C. To change information in a node, you need the INP lock if the tln_closed
 *    field is false. Otherwise, you need the bucket lock. (Note that the
 *    tln_closed field can change at any point, so you need to recheck the
 *    entry after acquiring the INP lock.)
 * D. To remove a node from the bucket, you must have that entry locked,
 *    according to the criteria of Rule C. Also, the node must not be on
 *    the expiry queue.
 * E. The exception to C is the expiry queue fields, which are locked by
 *    the TCPLOG_EXPIREQ lock.
 *
 * Buckets have a reference count. Each node is a reference. Further,
 * other callers may add reference counts to keep a bucket from disappearing.
 * You can add a reference as long as you own a lock sufficient to keep the
 * bucket from disappearing. For example, a common use is:
 * a. Have a locked INP, but need to lock the TCPID_BUCKET.
 * b. Add a refcount on the bucket. (Safe because the INP lock prevents
 *    the TCPID_BUCKET from going away.)
 * c. Drop the INP lock.
 * d. Acquire a lock on the TCPID_BUCKET.
 * e. Acquire a lock on the INP.
 * f. Drop the refcount on the bucket.
 *    (At this point, the bucket may disappear.)
 *
 * Expire queue lock:
 * You can acquire this with either the bucket or INP lock. Don't reverse it.
 * When the expire code has committed to freeing a node, it resets the expiry
 * time to SBT_MAX. That is the signal to everyone else that they should
 * leave that node alone.
 */

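/*
 * Illustrative sketch (not compiled) of the reference dance in steps a-f
 * above, as it appears in functions such as tcp_log_set_tag():
 *
 *	TCPID_BUCKET_REF(tlb);		// INP lock held; bucket can't vanish
 *	INP_WUNLOCK(tp->t_inpcb);	// drop INP to respect the lock order
 *	TCPID_BUCKET_LOCK(tlb);		// now safe to take the bucket lock
 *	... work on the bucket ...
 *	if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
 *		TCPID_BUCKET_UNLOCK(tlb);	// bucket survived; unlock it
 */
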
static struct rwlock tcp_id_tree_lock;
#define	TCPID_TREE_WLOCK()		rw_wlock(&tcp_id_tree_lock)
#define	TCPID_TREE_RLOCK()		rw_rlock(&tcp_id_tree_lock)
#define	TCPID_TREE_UPGRADE()		rw_try_upgrade(&tcp_id_tree_lock)
#define	TCPID_TREE_WUNLOCK()		rw_wunlock(&tcp_id_tree_lock)
#define	TCPID_TREE_RUNLOCK()		rw_runlock(&tcp_id_tree_lock)
#define	TCPID_TREE_WLOCK_ASSERT()	rw_assert(&tcp_id_tree_lock, RA_WLOCKED)
#define	TCPID_TREE_RLOCK_ASSERT()	rw_assert(&tcp_id_tree_lock, RA_RLOCKED)
#define	TCPID_TREE_UNLOCK_ASSERT()	rw_assert(&tcp_id_tree_lock, RA_UNLOCKED)

#define	TCPID_BUCKET_LOCK_INIT(tlb)	mtx_init(&((tlb)->tlb_mtx), "tcp log id bucket", NULL, MTX_DEF)
#define	TCPID_BUCKET_LOCK_DESTROY(tlb)	mtx_destroy(&((tlb)->tlb_mtx))
#define	TCPID_BUCKET_LOCK(tlb)		mtx_lock(&((tlb)->tlb_mtx))
#define	TCPID_BUCKET_UNLOCK(tlb)	mtx_unlock(&((tlb)->tlb_mtx))
#define	TCPID_BUCKET_LOCK_ASSERT(tlb)	mtx_assert(&((tlb)->tlb_mtx), MA_OWNED)
#define	TCPID_BUCKET_UNLOCK_ASSERT(tlb)	mtx_assert(&((tlb)->tlb_mtx), MA_NOTOWNED)

#define	TCPID_BUCKET_REF(tlb)		refcount_acquire(&((tlb)->tlb_refcnt))
#define	TCPID_BUCKET_UNREF(tlb)		refcount_release(&((tlb)->tlb_refcnt))

#define	TCPLOG_EXPIREQ_LOCK()		mtx_lock(&tcp_log_expireq_mtx)
#define	TCPLOG_EXPIREQ_UNLOCK()		mtx_unlock(&tcp_log_expireq_mtx)

SLIST_HEAD(tcp_log_id_head, tcp_log_id_node);

struct tcp_log_id_bucket
{
	/*
	 * tlb_id must be first. This lets us use strcmp on
	 * (struct tcp_log_id_bucket *) and (char *) interchangeably.
	 */
	char	tlb_id[TCP_LOG_ID_LEN];
	char	tlb_tag[TCP_LOG_TAG_LEN];
	RB_ENTRY(tcp_log_id_bucket) tlb_rb;
	struct tcp_log_id_head tlb_head;
	struct mtx tlb_mtx;
	volatile u_int tlb_refcnt;
	volatile u_int tlb_reqcnt;
	uint32_t tlb_loglimit;
	uint8_t tlb_logstate;
};

struct tcp_log_id_node
{
	SLIST_ENTRY(tcp_log_id_node) tln_list;
	STAILQ_ENTRY(tcp_log_id_node) tln_expireq; /* Locked by the expireq lock */
	sbintime_t tln_expiretime;	/* Locked by the expireq lock */

	/*
	 * If INP is NULL, that means the connection has closed. We've
	 * saved the connection endpoint information and the log entries
	 * in the tln_ie and tln_entries members. We've also saved a pointer
	 * to the enclosing bucket here. If INP is not NULL, the information is
	 * in the PCB and not here.
	 */
	struct inpcb *tln_inp;
	struct tcpcb *tln_tp;
	struct tcp_log_id_bucket *tln_bucket;
	struct in_endpoints tln_ie;
	struct tcp_log_stailq tln_entries;
	int	tln_count;
	volatile int tln_closed;
	uint8_t	tln_af;
};

enum tree_lock_state {
	TREE_UNLOCKED = 0,
	TREE_RLOCKED,
	TREE_WLOCKED,
};

/* Do we want to select this session for auto-logging? */
static __inline bool
tcp_log_selectauto(void)
{

	/*
	 * If we are doing auto-capturing, figure out whether we will capture
	 * this session.
	 */
	if (tcp_log_auto_ratio &&
	    (tcp_disable_all_bb_logs == 0) &&
	    (atomic_fetchadd_long(&tcp_log_auto_ratio_cur, 1) %
	    tcp_log_auto_ratio) == 0)
		return (true);

	return (false);
}

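/*
 * For example, with the sysctls declared above, setting
 * net.inet.tcp.bb.log_auto_ratio=100 samples roughly one session out of
 * every hundred (subject to log_auto_all, log_auto_mode, and disable_all).
 */
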
static int
tcp_log_id_cmp(struct tcp_log_id_bucket *a, struct tcp_log_id_bucket *b)
{
	KASSERT(a != NULL, ("tcp_log_id_cmp: argument a is unexpectedly NULL"));
	KASSERT(b != NULL, ("tcp_log_id_cmp: argument b is unexpectedly NULL"));
	return strncmp(a->tlb_id, b->tlb_id, TCP_LOG_ID_LEN);
}

RB_GENERATE_STATIC(tcp_log_id_tree, tcp_log_id_bucket, tlb_rb, tcp_log_id_cmp)

static __inline void
tcp_log_id_validate_tree_lock(int tree_locked)
{

#ifdef INVARIANTS
	switch (tree_locked) {
	case TREE_WLOCKED:
		TCPID_TREE_WLOCK_ASSERT();
		break;
	case TREE_RLOCKED:
		TCPID_TREE_RLOCK_ASSERT();
		break;
	case TREE_UNLOCKED:
		TCPID_TREE_UNLOCK_ASSERT();
		break;
	default:
		kassert_panic("%s:%d: unknown tree lock state", __func__,
		    __LINE__);
	}
#endif
}

static void
tcp_log_remove_bucket(struct tcp_log_id_bucket *tlb)
{

	TCPID_TREE_WLOCK_ASSERT();
	KASSERT(SLIST_EMPTY(&tlb->tlb_head),
	    ("%s: Attempt to remove non-empty bucket", __func__));
	if (RB_REMOVE(tcp_log_id_tree, &tcp_log_id_head, tlb) == NULL) {
#ifdef INVARIANTS
		kassert_panic("%s:%d: error removing element from tree",
		    __func__, __LINE__);
#endif
	}
	TCPID_BUCKET_LOCK_DESTROY(tlb);
	counter_u64_add(tcp_log_pcb_ids_cur, (int64_t)-1);
	uma_zfree(tcp_log_bucket_zone, tlb);
}

/*
 * Call with a referenced and locked bucket.
 * Will return true if the bucket was freed; otherwise, false.
 * tlb: The bucket to unreference.
 * tree_locked: A pointer to the state of the tree lock. If the tree lock
 *    state changes, the function will update it.
 * inp: If not NULL and the function needs to drop the inp lock to relock the
 *    tree, it will do so. (The caller must ensure inp will not become invalid,
 *    probably by holding a reference to it.)
 */
static bool
tcp_log_unref_bucket(struct tcp_log_id_bucket *tlb, int *tree_locked,
    struct inpcb *inp)
{

	KASSERT(tlb != NULL, ("%s: called with NULL tlb", __func__));
	KASSERT(tree_locked != NULL, ("%s: called with NULL tree_locked",
	    __func__));

	tcp_log_id_validate_tree_lock(*tree_locked);

	/*
	 * Did we hold the last reference on the tlb? If so, we may need
	 * to free it. (Note that we can realistically only execute the
	 * loop twice: once without a write lock and once with a write
	 * lock.)
	 */
	while (TCPID_BUCKET_UNREF(tlb)) {
		/*
		 * We need a write lock on the tree to free this.
		 * If we can upgrade the tree lock, this is "easy". If we
		 * can't upgrade the tree lock, we need to do this the
		 * "hard" way: unwind all our locks and relock everything.
		 * In the meantime, anything could have changed. We even
		 * need to validate that we still need to free the bucket.
		 */
		if (*tree_locked == TREE_RLOCKED && TCPID_TREE_UPGRADE())
			*tree_locked = TREE_WLOCKED;
		else if (*tree_locked != TREE_WLOCKED) {
			TCPID_BUCKET_REF(tlb);
			if (inp != NULL)
				INP_WUNLOCK(inp);
			TCPID_BUCKET_UNLOCK(tlb);
			if (*tree_locked == TREE_RLOCKED)
				TCPID_TREE_RUNLOCK();
			TCPID_TREE_WLOCK();
			*tree_locked = TREE_WLOCKED;
			TCPID_BUCKET_LOCK(tlb);
			if (inp != NULL)
				INP_WLOCK(inp);
			continue;
		}

		/*
		 * We have an empty bucket and a write lock on the tree.
		 * Remove the empty bucket.
		 */
		tcp_log_remove_bucket(tlb);
		return (true);
	}

	return (false);
}

/*
 * Call with a locked bucket. This function will release the lock on the
 * bucket before returning.
 *
 * The caller is responsible for freeing the tp->t_lin/tln node!
 *
 * Note: one of tp or both tlb and tln must be supplied.
 *
 * inp: A pointer to the inp. If the function needs to drop the inp lock to
 *    acquire the tree write lock, it will do so. (The caller must ensure inp
 *    will not become invalid, probably by holding a reference to it.)
 * tp: A pointer to the tcpcb. (optional; if specified, tlb and tln are ignored)
 * tlb: A pointer to the bucket. (optional; ignored if tp is specified)
 * tln: A pointer to the node. (optional; ignored if tp is specified)
 * tree_locked: A pointer to the state of the tree lock. If the tree lock
 *    state changes, the function will update it.
 *
 * Will return true if the INP lock was reacquired; otherwise, false.
 */
static bool
tcp_log_remove_id_node(struct inpcb *inp, struct tcpcb *tp,
    struct tcp_log_id_bucket *tlb, struct tcp_log_id_node *tln,
    int *tree_locked)
{
	int orig_tree_locked;

	KASSERT(tp != NULL || (tlb != NULL && tln != NULL),
	    ("%s: called with tp=%p, tlb=%p, tln=%p", __func__,
	    tp, tlb, tln));
	KASSERT(tree_locked != NULL, ("%s: called with NULL tree_locked",
	    __func__));

	if (tp != NULL) {
		tlb = tp->t_lib;
		tln = tp->t_lin;
		KASSERT(tlb != NULL, ("%s: unexpectedly NULL tlb", __func__));
		KASSERT(tln != NULL, ("%s: unexpectedly NULL tln", __func__));
	}

	tcp_log_id_validate_tree_lock(*tree_locked);
	TCPID_BUCKET_LOCK_ASSERT(tlb);

	/*
	 * Remove the node, clear the log bucket and node from the TCPCB, and
	 * decrement the bucket refcount. In the process, if this is the
	 * last reference, the bucket will be freed.
	 */
	SLIST_REMOVE(&tlb->tlb_head, tln, tcp_log_id_node, tln_list);
	if (tp != NULL) {
		tp->t_lib = NULL;
		tp->t_lin = NULL;
	}
	orig_tree_locked = *tree_locked;
	if (!tcp_log_unref_bucket(tlb, tree_locked, inp))
		TCPID_BUCKET_UNLOCK(tlb);
	return (*tree_locked != orig_tree_locked);
}

#define	RECHECK_INP_CLEAN(cleanup)	do {			\
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {	\
		rv = ECONNRESET;				\
		cleanup;					\
		goto done;					\
	}							\
	tp = intotcpcb(inp);					\
} while (0)

#define	RECHECK_INP()	RECHECK_INP_CLEAN(/* noop */)

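/*
 * These macros re-validate the INP after any window where its lock was
 * dropped: if the connection went into TIMEWAIT or was dropped in the
 * interim, they set rv to ECONNRESET, run the supplied cleanup, and jump to
 * the enclosing function's "done" label; otherwise they refresh tp from inp.
 */
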
static void
tcp_log_grow_tlb(char *tlb_id, struct tcpcb *tp)
{

	INP_WLOCK_ASSERT(tp->t_inpcb);

#ifdef STATS
	if (V_tcp_perconn_stats_enable == 2 && tp->t_stats == NULL)
		(void)tcp_stats_sample_rollthedice(tp, tlb_id, strlen(tlb_id));
#endif
}

static void
tcp_log_increment_reqcnt(struct tcp_log_id_bucket *tlb)
{

	atomic_fetchadd_int(&tlb->tlb_reqcnt, 1);
}

/*
 * Associate the specified tag with a particular TCP log ID.
 * Called with INPCB locked. Returns with it unlocked.
 * Returns 0 on success or EOPNOTSUPP if the connection has no TCP log ID.
 */
int
tcp_log_set_tag(struct tcpcb *tp, char *tag)
{
	struct tcp_log_id_bucket *tlb;
	int tree_locked;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tree_locked = TREE_UNLOCKED;
	tlb = tp->t_lib;
	if (tlb == NULL) {
		INP_WUNLOCK(tp->t_inpcb);
		return (EOPNOTSUPP);
	}

	TCPID_BUCKET_REF(tlb);
	INP_WUNLOCK(tp->t_inpcb);
	TCPID_BUCKET_LOCK(tlb);
	strlcpy(tlb->tlb_tag, tag, TCP_LOG_TAG_LEN);
	if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
		TCPID_BUCKET_UNLOCK(tlb);

	if (tree_locked == TREE_WLOCKED) {
		TCPID_TREE_WLOCK_ASSERT();
		TCPID_TREE_WUNLOCK();
	} else if (tree_locked == TREE_RLOCKED) {
		TCPID_TREE_RLOCK_ASSERT();
		TCPID_TREE_RUNLOCK();
	} else
		TCPID_TREE_UNLOCK_ASSERT();

	return (0);
}

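/*
 * Note for callers: a tag can only be attached to a connection that already
 * has a log ID (see tcp_log_set_id() below); without one, this function
 * returns EOPNOTSUPP, as documented above.
 */
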
/*
 * Set the TCP log ID for a TCPCB.
 * Called with INPCB locked. Returns with it unlocked.
 */
int
tcp_log_set_id(struct tcpcb *tp, char *id)
{
	struct tcp_log_id_bucket *tlb, *tmp_tlb;
	struct tcp_log_id_node *tln;
	struct inpcb *inp;
	int tree_locked, rv;
	bool bucket_locked;

	tlb = NULL;
	tln = NULL;
	inp = tp->t_inpcb;
	tree_locked = TREE_UNLOCKED;
	bucket_locked = false;

restart:
	INP_WLOCK_ASSERT(inp);
	/* See if the ID is unchanged. */
	if ((tp->t_lib != NULL && !strcmp(tp->t_lib->tlb_id, id)) ||
	    (tp->t_lib == NULL && *id == 0)) {
		if (tp->t_lib != NULL) {
			tcp_log_increment_reqcnt(tp->t_lib);
			if ((tp->t_lib->tlb_logstate) &&
			    (tp->t_log_state_set == 0)) {
				/* Clone in any logging */

				tp->t_logstate = tp->t_lib->tlb_logstate;
			}
			if ((tp->t_lib->tlb_loglimit) &&
			    (tp->t_log_state_set == 0)) {
				/* We also have a limit set */

				tp->t_loglimit = tp->t_lib->tlb_loglimit;
			}
		}
		rv = 0;
		goto done;
	}

	/*
	 * If the TCPCB had a previous ID, we need to extricate it from
	 * the previous list.
	 *
	 * Drop the TCPCB lock and lock the tree and the bucket.
	 * Because this is called in the socket context, we (theoretically)
	 * don't need to worry about the INPCB completely going away
	 * while we are gone.
	 */
	if (tp->t_lib != NULL) {
		tlb = tp->t_lib;
		TCPID_BUCKET_REF(tlb);
		INP_WUNLOCK(inp);

		if (tree_locked == TREE_UNLOCKED) {
			TCPID_TREE_RLOCK();
			tree_locked = TREE_RLOCKED;
		}
		TCPID_BUCKET_LOCK(tlb);
		bucket_locked = true;
		INP_WLOCK(inp);

		/*
		 * Unreference the bucket. If our bucket went away, it is no
		 * longer locked or valid.
		 */
		if (tcp_log_unref_bucket(tlb, &tree_locked, inp)) {
			bucket_locked = false;
			tlb = NULL;
		}

		/* Validate the INP. */
		RECHECK_INP();

		/*
		 * Evaluate whether the bucket changed while we were unlocked.
		 *
		 * Possible scenarios here:
		 * 1. Bucket is unchanged and the same one we started with.
		 * 2. The TCPCB no longer has a bucket and our bucket was
		 *    freed.
		 * 3. The TCPCB has a new bucket, whether or not ours was freed.
		 * 4. The TCPCB no longer has a bucket and our bucket was
		 *    not freed.
		 *
		 * In cases 2-4, we will start over. In case 1, we will
		 * proceed here to remove the bucket.
		 */
		if (tlb == NULL || tp->t_lib != tlb) {
			KASSERT(bucket_locked || tlb == NULL,
			    ("%s: bucket_locked (%d) and tlb (%p) are "
			    "inconsistent", __func__, bucket_locked, tlb));

			if (bucket_locked) {
				TCPID_BUCKET_UNLOCK(tlb);
				bucket_locked = false;
				tlb = NULL;
			}
			goto restart;
		}

		/*
		 * Store the (struct tcp_log_id_node) for reuse. Then, remove
		 * it from the bucket. In the process, we may end up relocking.
		 * If so, we need to validate that the INP is still valid, and
		 * the TCPCB entries match what we expect.
		 *
		 * We will clear tlb and change the bucket_locked state just
		 * before calling tcp_log_remove_id_node(), since that function
		 * will unlock the bucket.
		 */
		if (tln != NULL)
			uma_zfree(tcp_log_node_zone, tln);
		tln = tp->t_lin;
		tlb = NULL;
		bucket_locked = false;
		if (tcp_log_remove_id_node(inp, tp, NULL, NULL, &tree_locked)) {
			RECHECK_INP();

			/*
			 * If the TCPCB moved to a new bucket while we had
			 * dropped the lock, restart.
			 */
			if (tp->t_lib != NULL || tp->t_lin != NULL)
				goto restart;
		}

		/*
		 * Yay! We successfully removed the TCPCB from its old
		 * bucket.
		 *
		 * On to bigger and better things...
		 */
	}

	/* At this point, the TCPCB should not be in any bucket. */
	KASSERT(tp->t_lib == NULL, ("%s: tp->t_lib is not NULL", __func__));

	/*
	 * If the new ID is not empty, we need to now assign this TCPCB to a
	 * new bucket.
	 */
	if (*id) {
		/* Get a new tln, if we don't already have one to reuse. */
		if (tln == NULL) {
			tln = uma_zalloc(tcp_log_node_zone, M_NOWAIT | M_ZERO);
			if (tln == NULL) {
				rv = ENOBUFS;
				goto done;
			}
			tln->tln_inp = inp;
			tln->tln_tp = tp;
		}

		/*
		 * Drop the INP lock for a bit. We don't need it, and dropping
		 * it prevents lock order reversals.
		 */
		INP_WUNLOCK(inp);

		/* Make sure we have at least a read lock on the tree. */
		tcp_log_id_validate_tree_lock(tree_locked);
		if (tree_locked == TREE_UNLOCKED) {
			TCPID_TREE_RLOCK();
			tree_locked = TREE_RLOCKED;
		}

refind:
		/*
		 * Remember how we constructed (struct tcp_log_id_bucket) so
		 * we can safely cast the id to it for the purposes of finding.
		 */
		KASSERT(tlb == NULL, ("%s:%d tlb unexpectedly non-NULL",
		    __func__, __LINE__));
		tmp_tlb = RB_FIND(tcp_log_id_tree, &tcp_log_id_head,
		    (struct tcp_log_id_bucket *) id);

		/*
		 * If we didn't find a matching bucket, we need to add a new
		 * one. This requires a write lock. But, of course, we will
		 * need to recheck some things when we re-acquire the lock.
		 */
		if (tmp_tlb == NULL && tree_locked != TREE_WLOCKED) {
			tree_locked = TREE_WLOCKED;
			if (!TCPID_TREE_UPGRADE()) {
				TCPID_TREE_RUNLOCK();
				TCPID_TREE_WLOCK();

				/*
				 * The tree may have changed while we were
				 * unlocked.
				 */
				goto refind;
			}
		}

		/* If we need to add a new bucket, do it now. */
		if (tmp_tlb == NULL) {
			/* Allocate new bucket. */
			tlb = uma_zalloc(tcp_log_bucket_zone, M_NOWAIT);
			if (tlb == NULL) {
				rv = ENOBUFS;
				goto done_noinp;
			}
			counter_u64_add(tcp_log_pcb_ids_cur, 1);
			counter_u64_add(tcp_log_pcb_ids_tot, 1);

			if ((tcp_log_auto_all == false) &&
			    tcp_log_auto_mode &&
			    tcp_log_selectauto()) {
				/* Save off the log state */
				tlb->tlb_logstate = tcp_log_auto_mode;
			} else
				tlb->tlb_logstate = TCP_LOG_STATE_OFF;
			tlb->tlb_loglimit = 0;
			tlb->tlb_tag[0] = '\0'; /* Default to an empty tag. */

			/*
			 * Copy the ID to the bucket.
			 * NB: Don't use strlcpy() unless you are sure
			 * we've always validated NULL termination.
			 *
			 * TODO: When I'm done writing this, see if we
			 * have correctly validated NULL termination and
			 * can use strlcpy(). :-)
			 */
			strncpy(tlb->tlb_id, id, TCP_LOG_ID_LEN - 1);
			tlb->tlb_id[TCP_LOG_ID_LEN - 1] = '\0';

			/*
			 * Take the refcount for the first node and go ahead
			 * and lock this. Note that we zero the tlb_mtx
			 * structure, since 0xdeadc0de flips the right bits
			 * for the code to think that this mutex has already
			 * been initialized. :-(
			 */
			SLIST_INIT(&tlb->tlb_head);
			refcount_init(&tlb->tlb_refcnt, 1);
			memset(&tlb->tlb_mtx, 0, sizeof(struct mtx));
			TCPID_BUCKET_LOCK_INIT(tlb);
			TCPID_BUCKET_LOCK(tlb);
			bucket_locked = true;

#define	FREE_NEW_TLB()	do {					\
	TCPID_BUCKET_LOCK_DESTROY(tlb);				\
	uma_zfree(tcp_log_bucket_zone, tlb);			\
	counter_u64_add(tcp_log_pcb_ids_cur, (int64_t)-1);	\
	counter_u64_add(tcp_log_pcb_ids_tot, (int64_t)-1);	\
	bucket_locked = false;					\
	tlb = NULL;						\
} while (0)
			/*
			 * Relock the INP and make sure we are still
			 * unassigned.
			 */
			INP_WLOCK(inp);
			RECHECK_INP_CLEAN(FREE_NEW_TLB());
			if (tp->t_lib != NULL) {
				FREE_NEW_TLB();
				goto restart;
			}

			/* Add the new bucket to the tree. */
			tmp_tlb = RB_INSERT(tcp_log_id_tree, &tcp_log_id_head,
			    tlb);
			KASSERT(tmp_tlb == NULL,
			    ("%s: Unexpected conflicting bucket (%p) while "
			    "adding new bucket (%p)", __func__, tmp_tlb, tlb));

			/*
			 * If we found a conflicting bucket, free the new
			 * one we made and fall through to use the existing
			 * bucket.
			 */
			if (tmp_tlb != NULL) {
				FREE_NEW_TLB();
			}
#undef	FREE_NEW_TLB
		}

		/* If we found an existing bucket, use it. */
		if (tmp_tlb != NULL) {
			tlb = tmp_tlb;
			TCPID_BUCKET_LOCK(tlb);
			bucket_locked = true;

			/*
			 * Relock the INP and make sure we are still
			 * unassigned.
			 */
			INP_UNLOCK_ASSERT(inp);
			INP_WLOCK(inp);
			RECHECK_INP();
			if (tp->t_lib != NULL) {
				TCPID_BUCKET_UNLOCK(tlb);
				bucket_locked = false;
				tlb = NULL;
				goto restart;
			}

			/* Take a reference on the bucket. */
			TCPID_BUCKET_REF(tlb);

			/* Record the request. */
			tcp_log_increment_reqcnt(tlb);
		}

		tcp_log_grow_tlb(tlb->tlb_id, tp);

		/* Add the new node to the list. */
		SLIST_INSERT_HEAD(&tlb->tlb_head, tln, tln_list);
		tp->t_lib = tlb;
		tp->t_lin = tln;
		if (tp->t_lib->tlb_logstate) {
			/* Clone in any logging */

			tp->t_logstate = tp->t_lib->tlb_logstate;
		}
		if (tp->t_lib->tlb_loglimit) {
			/* The loglimit too */

			tp->t_loglimit = tp->t_lib->tlb_loglimit;
		}
		bucket_locked = false;
		tln = NULL;
	}

	rv = 0;

done:
	/* Unlock things, as needed, and return. */
	INP_WUNLOCK(inp);
done_noinp:
	INP_UNLOCK_ASSERT(inp);
	if (bucket_locked) {
		TCPID_BUCKET_LOCK_ASSERT(tlb);
		TCPID_BUCKET_UNLOCK(tlb);
	} else if (tlb != NULL)
		TCPID_BUCKET_UNLOCK_ASSERT(tlb);
	if (tree_locked == TREE_WLOCKED) {
		TCPID_TREE_WLOCK_ASSERT();
		TCPID_TREE_WUNLOCK();
	} else if (tree_locked == TREE_RLOCKED) {
		TCPID_TREE_RLOCK_ASSERT();
		TCPID_TREE_RUNLOCK();
	} else
		TCPID_TREE_UNLOCK_ASSERT();
	if (tln != NULL)
		uma_zfree(tcp_log_node_zone, tln);
	return (rv);
}

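/*
 * Userspace reaches tcp_log_set_id() through the TCP_LOGID socket option;
 * a minimal sketch (error handling omitted):
 *
 *	char id[TCP_LOG_ID_LEN] = "my-test-run";
 *	setsockopt(s, IPPROTO_TCP, TCP_LOGID, id, strlen(id) + 1);
 *
 * Connections that share an ID land in the same bucket, so their logs can
 * later be retrieved and correlated as a group.
 */
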
/*
 * Get the TCP log ID for a TCPCB.
 * Called with INPCB locked.
 * 'buf' must point to a buffer that is at least TCP_LOG_ID_LEN bytes long.
 * Returns number of bytes copied.
 */
size_t
tcp_log_get_id(struct tcpcb *tp, char *buf)
{
	size_t len;

	INP_LOCK_ASSERT(tp->t_inpcb);
	if (tp->t_lib != NULL) {
		len = strlcpy(buf, tp->t_lib->tlb_id, TCP_LOG_ID_LEN);
		KASSERT(len < TCP_LOG_ID_LEN,
		    ("%s:%d: tp->t_lib->tlb_id too long (%zu)",
		    __func__, __LINE__, len));
	} else {
		*buf = '\0';
		len = 0;
	}
	return (len);
}

/*
 * Get the tag associated with the TCPCB's log ID.
 * Called with INPCB locked. Returns with it unlocked.
 * 'buf' must point to a buffer that is at least TCP_LOG_TAG_LEN bytes long.
 * Returns number of bytes copied.
 */
size_t
tcp_log_get_tag(struct tcpcb *tp, char *buf)
{
	struct tcp_log_id_bucket *tlb;
	size_t len;
	int tree_locked;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tree_locked = TREE_UNLOCKED;
	tlb = tp->t_lib;

	if (tlb != NULL) {
		TCPID_BUCKET_REF(tlb);
		INP_WUNLOCK(tp->t_inpcb);
		TCPID_BUCKET_LOCK(tlb);
		len = strlcpy(buf, tlb->tlb_tag, TCP_LOG_TAG_LEN);
		KASSERT(len < TCP_LOG_TAG_LEN,
		    ("%s:%d: tp->t_lib->tlb_tag too long (%zu)",
		    __func__, __LINE__, len));
		if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
			TCPID_BUCKET_UNLOCK(tlb);

		if (tree_locked == TREE_WLOCKED) {
			TCPID_TREE_WLOCK_ASSERT();
			TCPID_TREE_WUNLOCK();
		} else if (tree_locked == TREE_RLOCKED) {
			TCPID_TREE_RLOCK_ASSERT();
			TCPID_TREE_RUNLOCK();
		} else
			TCPID_TREE_UNLOCK_ASSERT();
	} else {
		INP_WUNLOCK(tp->t_inpcb);
		*buf = '\0';
		len = 0;
	}
	return (len);
}

/*
 * Get number of connections with the same log ID.
 * Log ID is taken from given TCPCB.
 * Called with INPCB locked.
 */
u_int
tcp_log_get_id_cnt(struct tcpcb *tp)
{

	INP_WLOCK_ASSERT(tp->t_inpcb);
	return ((tp->t_lib == NULL) ? 0 : tp->t_lib->tlb_refcnt);
}

#ifdef TCPLOG_DEBUG_RINGBUF
/*
 * Functions/macros to increment/decrement reference count for a log
 * entry. This should catch when we do a double-free/double-remove or
 * a double-add.
 */
static __inline void
_tcp_log_entry_refcnt_add(struct tcp_log_mem *log_entry, const char *func,
    int line)
{
	int refcnt;

	refcnt = atomic_fetchadd_int(&log_entry->tlm_refcnt, 1);
	if (refcnt != 0)
		panic("%s:%d: log_entry(%p)->tlm_refcnt is %d (expected 0)",
		    func, line, log_entry, refcnt);
}
#define	tcp_log_entry_refcnt_add(l)	\
    _tcp_log_entry_refcnt_add((l), __func__, __LINE__)

static __inline void
_tcp_log_entry_refcnt_rem(struct tcp_log_mem *log_entry, const char *func,
    int line)
{
	int refcnt;

	refcnt = atomic_fetchadd_int(&log_entry->tlm_refcnt, -1);
	if (refcnt != 1)
		panic("%s:%d: log_entry(%p)->tlm_refcnt is %d (expected 1)",
		    func, line, log_entry, refcnt);
}
#define	tcp_log_entry_refcnt_rem(l)	\
    _tcp_log_entry_refcnt_rem((l), __func__, __LINE__)

#else /* !TCPLOG_DEBUG_RINGBUF */

#define	tcp_log_entry_refcnt_add(l)
#define	tcp_log_entry_refcnt_rem(l)

#endif /* TCPLOG_DEBUG_RINGBUF */

/*
 * Cleanup after removing a log entry, but only decrement the count if we
 * are running INVARIANTS.
 */
static void
tcp_log_free_log_common(struct tcp_log_mem *log_entry, int *count __unused)
{

	uma_zfree(tcp_log_zone, log_entry);
#ifdef INVARIANTS
	(*count)--;
	KASSERT(*count >= 0,
	    ("%s: count unexpectedly negative", __func__));
#endif
}

static void
tcp_log_free_entries(struct tcp_log_stailq *head, int *count)
{
	struct tcp_log_mem *log_entry;

	/* Free the entries. */
	while ((log_entry = STAILQ_FIRST(head)) != NULL) {
		STAILQ_REMOVE_HEAD(head, tlm_queue);
		tcp_log_entry_refcnt_rem(log_entry);
		tcp_log_free_log_common(log_entry, count);
	}
}

/* Cleanup after removing a log entry. */
static void
tcp_log_remove_log_cleanup(struct tcpcb *tp, struct tcp_log_mem *log_entry)
{

	uma_zfree(tcp_log_zone, log_entry);
	tp->t_lognum--;
	KASSERT(tp->t_lognum >= 0,
	    ("%s: tp->t_lognum unexpectedly negative", __func__));
}

/* Remove a log entry from the head of a list. */
static void
tcp_log_remove_log_head(struct tcpcb *tp, struct tcp_log_mem *log_entry)
{

	KASSERT(log_entry == STAILQ_FIRST(&tp->t_logs),
	    ("%s: attempt to remove non-HEAD log entry", __func__));
	STAILQ_REMOVE_HEAD(&tp->t_logs, tlm_queue);
	tcp_log_entry_refcnt_rem(log_entry);
	tcp_log_remove_log_cleanup(tp, log_entry);
}

#ifdef TCPLOG_DEBUG_RINGBUF
/*
 * Initialize the log entry's reference count, which we want to
 * survive allocations.
 */
static int
tcp_log_zone_init(void *mem, int size, int flags __unused)
{
	struct tcp_log_mem *tlm;

	KASSERT(size >= sizeof(struct tcp_log_mem),
	    ("%s: unexpectedly short (%d) allocation", __func__, size));
	tlm = (struct tcp_log_mem *)mem;
	tlm->tlm_refcnt = 0;
	return (0);
}

/*
 * Double check that the refcnt is zero on allocation and return.
 */
static int
tcp_log_zone_ctor(void *mem, int size, void *args __unused, int flags __unused)
{
	struct tcp_log_mem *tlm;

	KASSERT(size >= sizeof(struct tcp_log_mem),
	    ("%s: unexpectedly short (%d) allocation", __func__, size));
	tlm = (struct tcp_log_mem *)mem;
	if (tlm->tlm_refcnt != 0)
		panic("%s:%d: tlm(%p)->tlm_refcnt is %d (expected 0)",
		    __func__, __LINE__, tlm, tlm->tlm_refcnt);
	return (0);
}

static void
tcp_log_zone_dtor(void *mem, int size, void *args __unused)
{
	struct tcp_log_mem *tlm;

	KASSERT(size >= sizeof(struct tcp_log_mem),
	    ("%s: unexpectedly short (%d) allocation", __func__, size));
	tlm = (struct tcp_log_mem *)mem;
	if (tlm->tlm_refcnt != 0)
		panic("%s:%d: tlm(%p)->tlm_refcnt is %d (expected 0)",
		    __func__, __LINE__, tlm, tlm->tlm_refcnt);
}
#endif /* TCPLOG_DEBUG_RINGBUF */

/* Do global initialization. */
void
tcp_log_init(void)
{

	tcp_log_zone = uma_zcreate("tcp_log", sizeof(struct tcp_log_mem),
#ifdef TCPLOG_DEBUG_RINGBUF
	    tcp_log_zone_ctor, tcp_log_zone_dtor, tcp_log_zone_init,
#else
	    NULL, NULL, NULL,
#endif
	    NULL, UMA_ALIGN_PTR, 0);
	(void)uma_zone_set_max(tcp_log_zone, TCP_LOG_BUF_DEFAULT_GLOBAL_LIMIT);
	tcp_log_bucket_zone = uma_zcreate("tcp_log_bucket",
	    sizeof(struct tcp_log_id_bucket), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	tcp_log_node_zone = uma_zcreate("tcp_log_node",
	    sizeof(struct tcp_log_id_node), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
#ifdef TCPLOG_DEBUG_COUNTERS
	tcp_log_queued = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail1 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail2 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail3 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail4 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail5 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_copyout = counter_u64_alloc(M_WAITOK);
	tcp_log_que_read = counter_u64_alloc(M_WAITOK);
	tcp_log_que_freed = counter_u64_alloc(M_WAITOK);
#endif
	tcp_log_pcb_ids_cur = counter_u64_alloc(M_WAITOK);
	tcp_log_pcb_ids_tot = counter_u64_alloc(M_WAITOK);

	rw_init_flags(&tcp_id_tree_lock, "TCP ID tree", RW_NEW);
	mtx_init(&tcp_log_expireq_mtx, "TCP log expireq", NULL, MTX_DEF);
	callout_init(&tcp_log_expireq_callout, 1);
}

/* Do per-TCPCB initialization. */
void
tcp_log_tcpcbinit(struct tcpcb *tp)
{

	/* A new TCPCB should start out zero-initialized. */
	STAILQ_INIT(&tp->t_logs);

	/*
	 * If we are doing auto-capturing, figure out whether we will capture
	 * this session.
	 */
	tp->t_loglimit = tcp_log_session_limit;
	if ((tcp_log_auto_all == true) &&
	    tcp_log_auto_mode &&
	    tcp_log_selectauto()) {
		tp->t_logstate = tcp_log_auto_mode;
		tp->t_flags2 |= TF2_LOG_AUTO;
	}
}

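/*
 * In other words (using the sysctls declared earlier in this file): with
 * net.inet.tcp.bb.log_auto_all=1, every new session is eligible for sampling
 * here; with it set to 0, auto-selection instead happens when a session is
 * assigned a log ID (see tcp_log_set_id() above).
 */
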
/* Remove entries */
static void
tcp_log_expire(void *unused __unused)
{
	struct tcp_log_id_bucket *tlb;
	struct tcp_log_id_node *tln;
	sbintime_t expiry_limit;
	int tree_locked;

	TCPLOG_EXPIREQ_LOCK();
	if (callout_pending(&tcp_log_expireq_callout)) {
		/* Callout was reset. */
		TCPLOG_EXPIREQ_UNLOCK();
		return;
	}

	/*
	 * Process entries until we reach one that expires too far in the
	 * future. Look one second in the future.
	 */
	expiry_limit = getsbinuptime() + SBT_1S;
	tree_locked = TREE_UNLOCKED;

	while ((tln = STAILQ_FIRST(&tcp_log_expireq_head)) != NULL &&
	    tln->tln_expiretime <= expiry_limit) {
		if (!callout_active(&tcp_log_expireq_callout)) {
			/*
			 * Callout was stopped. I guess we should
			 * just quit at this point.
			 */
			TCPLOG_EXPIREQ_UNLOCK();
			return;
		}

		/*
		 * Remove the node from the head of the list and unlock
		 * the list. Change the expiry time to SBT_MAX as a signal
		 * to other threads that we now own this.
		 */
		STAILQ_REMOVE_HEAD(&tcp_log_expireq_head, tln_expireq);
		tln->tln_expiretime = SBT_MAX;
		TCPLOG_EXPIREQ_UNLOCK();

		/*
		 * Remove the node from the bucket.
		 */
		tlb = tln->tln_bucket;
		TCPID_BUCKET_LOCK(tlb);
		if (tcp_log_remove_id_node(NULL, NULL, tlb, tln, &tree_locked)) {
			tcp_log_id_validate_tree_lock(tree_locked);
			if (tree_locked == TREE_WLOCKED)
				TCPID_TREE_WUNLOCK();
			else
				TCPID_TREE_RUNLOCK();
			tree_locked = TREE_UNLOCKED;
		}

		/* Drop the INP reference. */
		INP_WLOCK(tln->tln_inp);
		if (!in_pcbrele_wlocked(tln->tln_inp))
			INP_WUNLOCK(tln->tln_inp);

		/* Free the log records. */
		tcp_log_free_entries(&tln->tln_entries, &tln->tln_count);

		/* Free the node. */
		uma_zfree(tcp_log_node_zone, tln);

		/* Relock the expiry queue. */
		TCPLOG_EXPIREQ_LOCK();
	}

	/*
	 * We've expired all the entries we can. Do we need to reschedule
	 * the callout?
	 */
	callout_deactivate(&tcp_log_expireq_callout);
	if (tln != NULL) {
		/*
		 * Get max(now + TCP_LOG_EXPIRE_INTVL, tln->tln_expiretime) and
		 * set the next callout to that. (This helps ensure we generally
		 * run the callout no more often than desired.)
		 */
		expiry_limit = getsbinuptime() + TCP_LOG_EXPIRE_INTVL;
		if (expiry_limit < tln->tln_expiretime)
			expiry_limit = tln->tln_expiretime;
		callout_reset_sbt(&tcp_log_expireq_callout, expiry_limit,
		    SBT_1S, tcp_log_expire, NULL, C_ABSOLUTE);
	}

	TCPLOG_EXPIREQ_UNLOCK();
}

/*
 * Move log data from the TCPCB to a new node. This will reset the TCPCB log
 * entries and log count; however, it will not touch other things from the
 * TCPCB (e.g. t_lin, t_lib).
 *
 * NOTE: Must hold a lock on the INP.
 */
static void
tcp_log_move_tp_to_node(struct tcpcb *tp, struct tcp_log_id_node *tln)
{

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tln->tln_ie = tp->t_inpcb->inp_inc.inc_ie;
	if (tp->t_inpcb->inp_inc.inc_flags & INC_ISIPV6)
		tln->tln_af = AF_INET6;
	else
		tln->tln_af = AF_INET;
	tln->tln_entries = tp->t_logs;
	tln->tln_count = tp->t_lognum;
	tln->tln_bucket = tp->t_lib;

	/* Clear information from the PCB. */
	STAILQ_INIT(&tp->t_logs);
	tp->t_lognum = 0;
}

/* Do per-TCPCB cleanup */
void
tcp_log_tcpcbfini(struct tcpcb *tp)
{
	struct tcp_log_id_node *tln, *tln_first;
	struct tcp_log_mem *log_entry;
	sbintime_t callouttime;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	TCP_LOG_EVENT(tp, NULL, NULL, NULL, TCP_LOG_CONNEND, 0, 0, NULL, false);

	/*
	 * If we were gathering packets to be automatically dumped, try to do
	 * it now. If this succeeds, the log information in the TCPCB will be
	 * cleared. Otherwise, we'll handle the log information as we do
	 * for other states.
	 */
	switch(tp->t_logstate) {
	case TCP_LOG_STATE_HEAD_AUTO:
		(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from head",
		    M_NOWAIT, false);
		break;
	case TCP_LOG_STATE_TAIL_AUTO:
		(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from tail",
		    M_NOWAIT, false);
		break;
	case TCP_LOG_STATE_CONTINUAL:
		(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
		    M_NOWAIT, false);
		break;
	}

	/*
	 * There are two ways we could keep logs: per-socket or per-ID. If
	 * we are tracking logs with an ID, then the logs survive the
	 * destruction of the TCPCB.
	 *
	 * If the TCPCB is associated with an ID node, move the logs from the
	 * TCPCB to the ID node. In theory, this is safe, for reasons which I
	 * will now explain for my own benefit when I next need to figure out
	 * this code. :-)
	 *
	 * We own the INP lock. Therefore, no one else can change the contents
	 * of this node (Rule C). Further, no one can remove this node from
	 * the bucket while we hold the lock (Rule D). Basically, no one can
	 * mess with this node. That leaves two states in which we could be:
	 *
	 * 1. Another thread is currently waiting to acquire the INP lock, with
	 *    plans to do something with this node. When we drop the INP lock,
	 *    they will have a chance to do that. They will recheck the
	 *    tln_closed field (see note to Rule C) and then acquire the
	 *    bucket lock before proceeding further.
	 *
	 * 2. Another thread will try to acquire a lock at some point in the
	 *    future. If they try to acquire a lock before we set the
	 *    tln_closed field, they will follow state #1. If they try to
	 *    acquire a lock after we set the tln_closed field, they will be
	 *    able to make changes to the node, at will, following Rule C.
	 *
	 * Therefore, we currently own this node and can make any changes
	 * we want. But, as soon as we set the tln_closed field to true, we
	 * have effectively dropped our lock on the node. (For this reason, we
	 * also need to make sure our writes are ordered correctly. An atomic
	 * operation with "release" semantics should be sufficient.)
	 */

	if (tp->t_lin != NULL) {
		/* Copy the relevant information to the log entry. */
		tln = tp->t_lin;
		KASSERT(tln->tln_inp == tp->t_inpcb,
		    ("%s: Mismatched inp (tln->tln_inp=%p, tp->t_inpcb=%p)",
		    __func__, tln->tln_inp, tp->t_inpcb));
		tcp_log_move_tp_to_node(tp, tln);

		/* Clear information from the PCB. */
		tp->t_lin = NULL;
		tp->t_lib = NULL;

		/*
		 * Take a reference on the INP. This ensures that the INP
		 * remains valid while the node is on the expiry queue. This
		 * ensures the INP is valid for other threads that may be
		 * racing to lock this node when we move it to the expire
		 * queue.
		 */
		in_pcbref(tp->t_inpcb);

		/*
		 * Store the entry on the expiry list. The exact behavior
		 * depends on whether we have entries to keep. If so, we
		 * put the entry at the tail of the list and expire in
		 * TCP_LOG_EXPIRE_TIME. Otherwise, we expire "now" and put
		 * the entry at the head of the list. (Handling the cleanup
		 * via the expiry timer lets us avoid locking messy-ness here.)
		 */
		tln->tln_expiretime = getsbinuptime();
		TCPLOG_EXPIREQ_LOCK();
		if (tln->tln_count) {
			tln->tln_expiretime += TCP_LOG_EXPIRE_TIME;
			if (STAILQ_EMPTY(&tcp_log_expireq_head) &&
			    !callout_active(&tcp_log_expireq_callout)) {
				/*
				 * We are adding the first entry and a callout
				 * is not currently scheduled; therefore, we
				 * need to schedule one.
				 */
				callout_reset_sbt(&tcp_log_expireq_callout,
				    tln->tln_expiretime, SBT_1S, tcp_log_expire,
				    NULL, C_ABSOLUTE);
			}
			STAILQ_INSERT_TAIL(&tcp_log_expireq_head, tln,
			    tln_expireq);
		} else {
			callouttime = tln->tln_expiretime +
			    TCP_LOG_EXPIRE_INTVL;
			tln_first = STAILQ_FIRST(&tcp_log_expireq_head);

			if ((tln_first == NULL ||
			    callouttime < tln_first->tln_expiretime) &&
			    (callout_pending(&tcp_log_expireq_callout) ||
			    !callout_active(&tcp_log_expireq_callout))) {
				/*
				 * The list is empty, or we want to run the
				 * expire code before the first entry's timer
				 * fires. Also, we are in a case where a callout
				 * is not actively running. We want to reset
				 * the callout to occur sooner.
				 */
				callout_reset_sbt(&tcp_log_expireq_callout,
				    callouttime, SBT_1S, tcp_log_expire, NULL,
				    C_ABSOLUTE);
			}

			/*
			 * Insert to the head, or just after the head, as
			 * appropriate. (This might result in small
			 * mis-orderings as a bunch of "expire now" entries
			 * gather at the start of the list, but that should
			 * not produce big problems, since the expire timer
			 * will walk through all of them.)
			 */
			if (tln_first == NULL ||
			    tln->tln_expiretime < tln_first->tln_expiretime)
				STAILQ_INSERT_HEAD(&tcp_log_expireq_head, tln,
				    tln_expireq);
			else
				STAILQ_INSERT_AFTER(&tcp_log_expireq_head,
				    tln_first, tln, tln_expireq);
		}
		TCPLOG_EXPIREQ_UNLOCK();

		/*
		 * We are done messing with the tln. After this point, we
		 * can't touch it. (Note that the "release" semantics should
		 * be included with the TCPLOG_EXPIREQ_UNLOCK() call above.
		 * Therefore, they should be unnecessary here. However, it
		 * seems like a good idea to include them anyway, since we
		 * really are releasing a lock here.)
		 */
		atomic_store_rel_int(&tln->tln_closed, 1);
	} else {
		/* Remove log entries. */
		while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
			tcp_log_remove_log_head(tp, log_entry);
		KASSERT(tp->t_lognum == 0,
		    ("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
		    __func__, tp->t_lognum));
	}

	/*
	 * Change the log state to off (just in case anything tries to sneak
	 * in a last-minute log).
	 */
	tp->t_logstate = TCP_LOG_STATE_OFF;
}

void
tcp_log_purge_tp_logbuf(struct tcpcb *tp)
{
	struct tcp_log_mem *log_entry;
	struct inpcb *inp;

	inp = tp->t_inpcb;
	INP_WLOCK_ASSERT(inp);
	if (tp->t_lognum == 0)
		return;

	while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
		tcp_log_remove_log_head(tp, log_entry);
	KASSERT(tp->t_lognum == 0,
	    ("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
	    __func__, tp->t_lognum));
	tp->t_logstate = TCP_LOG_STATE_OFF;
}

/*
 * This logs an event for a TCP socket. Normally, this is called via
 * TCP_LOG_EVENT or TCP_LOG_EVENT_VERBOSE. See the documentation for
 * TCP_LOG_EVENT().
 */
struct tcp_log_buffer *
tcp_log_event_(struct tcpcb *tp, struct tcphdr *th, struct sockbuf *rxbuf,
    struct sockbuf *txbuf, uint8_t eventid, int errornum, uint32_t len,
    union tcp_log_stackspecific *stackinfo, int th_hostorder,
    const char *output_caller, const char *func, int line, const struct timeval *itv)
{
	struct tcp_log_mem *log_entry;
	struct tcp_log_buffer *log_buf;
	int attempt_count = 0;
	struct tcp_log_verbose *log_verbose;
	uint32_t logsn;

	KASSERT((func == NULL && line == 0) || (func != NULL && line > 0),
	    ("%s called with inconsistent func (%p) and line (%d) arguments",
	    __func__, func, line));

	INP_WLOCK_ASSERT(tp->t_inpcb);
	if (tcp_disable_all_bb_logs) {
		/*
		 * The global shutdown logging switch has been thrown.
		 * Call the purge function, which frees the logs and
		 * turns off logging.
		 */
		tcp_log_purge_tp_logbuf(tp);
		return (NULL);
	}
	KASSERT(tp->t_logstate == TCP_LOG_STATE_HEAD ||
	    tp->t_logstate == TCP_LOG_STATE_TAIL ||
	    tp->t_logstate == TCP_LOG_STATE_CONTINUAL ||
	    tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO ||
	    tp->t_logstate == TCP_LOG_STATE_TAIL_AUTO,
	    ("%s called with unexpected tp->t_logstate (%d)", __func__,
	    tp->t_logstate));

	/*
	 * Get the serial number. We do this early so it will
	 * increment even if we end up skipping the log entry for some
	 * reason.
	 */
	logsn = tp->t_logsn++;

	/*
	 * Can we get a new log entry? If so, increment the lognum counter
	 * here.
	 */
retry:
	if (tp->t_lognum < tp->t_loglimit) {
		if ((log_entry = uma_zalloc(tcp_log_zone, M_NOWAIT)) != NULL)
			tp->t_lognum++;
	} else
		log_entry = NULL;

	/* Do we need to try to reuse? */
	if (log_entry == NULL) {
		/*
		 * Sacrifice auto-logged sessions without a log ID if
		 * tcp_log_auto_all is false. (If they don't have a log
		 * ID by now, it is probable that either they won't get one
		 * or we are resource-constrained.)
		 */
		if (tp->t_lib == NULL && (tp->t_flags2 & TF2_LOG_AUTO) &&
		    !tcp_log_auto_all) {
			if (tcp_log_state_change(tp, TCP_LOG_STATE_CLEAR)) {
#ifdef INVARIANTS
				panic("%s:%d: tcp_log_state_change() failed "
				    "to set tp %p to TCP_LOG_STATE_CLEAR",
				    __func__, __LINE__, tp);
#endif
				tp->t_logstate = TCP_LOG_STATE_OFF;
			}
			return (NULL);
		}

		/*
		 * If we are in TCP_LOG_STATE_HEAD_AUTO state, try to dump
		 * the buffers. If successful, deactivate tracing. Otherwise,
		 * leave it active so we will retry.
		 */
		if (tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO &&
		    !tcp_log_dump_tp_logbuf(tp, "auto-dumped from head",
		    M_NOWAIT, false)) {
			tp->t_logstate = TCP_LOG_STATE_OFF;
			return (NULL);
		} else if ((tp->t_logstate == TCP_LOG_STATE_CONTINUAL) &&
		    !tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
		    M_NOWAIT, false)) {
			if (attempt_count == 0) {
				attempt_count++;
				goto retry;
			}
#ifdef TCPLOG_DEBUG_COUNTERS
			counter_u64_add(tcp_log_que_fail4, 1);
#endif
			return (NULL);
		} else if (tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO)
			return (NULL);

		/* If in HEAD state, just deactivate the tracing and return. */
		if (tp->t_logstate == TCP_LOG_STATE_HEAD) {
			tp->t_logstate = TCP_LOG_STATE_OFF;
			return (NULL);
		}

		/*
		 * Get a buffer to reuse. If that fails, just give up.
		 * (We can't log anything without a buffer in which to
		 * put it.)
		 *
		 * Note that we don't change the t_lognum counter
		 * here. Because we are re-using the buffer, the total
		 * number won't change.
		 */
		if ((log_entry = STAILQ_FIRST(&tp->t_logs)) == NULL)
			return (NULL);
		STAILQ_REMOVE_HEAD(&tp->t_logs, tlm_queue);
		tcp_log_entry_refcnt_rem(log_entry);
	}

	KASSERT(log_entry != NULL,
	    ("%s: log_entry unexpectedly NULL", __func__));

	/* Extract the log buffer and verbose buffer pointers. */
	log_buf = &log_entry->tlm_buf;
	log_verbose = &log_entry->tlm_v;

	/* Basic entries. */
	if (itv == NULL)
		getmicrouptime(&log_buf->tlb_tv);
	else
		memcpy(&log_buf->tlb_tv, itv, sizeof(struct timeval));
	log_buf->tlb_ticks = ticks;
	log_buf->tlb_sn = logsn;
	log_buf->tlb_stackid = tp->t_fb->tfb_id;
	log_buf->tlb_eventid = eventid;
	log_buf->tlb_eventflags = 0;
	log_buf->tlb_errno = errornum;

	/* Socket buffers */
	if (rxbuf != NULL) {
		log_buf->tlb_eventflags |= TLB_FLAG_RXBUF;
		log_buf->tlb_rxbuf.tls_sb_acc = rxbuf->sb_acc;
		log_buf->tlb_rxbuf.tls_sb_ccc = rxbuf->sb_ccc;
		log_buf->tlb_rxbuf.tls_sb_spare = 0;
	}
	if (txbuf != NULL) {
		log_buf->tlb_eventflags |= TLB_FLAG_TXBUF;
		log_buf->tlb_txbuf.tls_sb_acc = txbuf->sb_acc;
		log_buf->tlb_txbuf.tls_sb_ccc = txbuf->sb_ccc;
		log_buf->tlb_txbuf.tls_sb_spare = 0;
	}
	/* Copy values from tp to the log entry. */
#define	COPY_STAT(f)	log_buf->tlb_ ## f = tp->f
#define	COPY_STAT_T(f)	log_buf->tlb_ ## f = tp->t_ ## f
	COPY_STAT_T(state);
	COPY_STAT_T(starttime);
	COPY_STAT(iss);
	COPY_STAT(flags);
	COPY_STAT(snd_una);
	COPY_STAT(snd_max);
	COPY_STAT(snd_cwnd);
	COPY_STAT(snd_nxt);
	COPY_STAT(snd_recover);
	COPY_STAT(snd_wnd);
	COPY_STAT(snd_ssthresh);
	COPY_STAT_T(srtt);
	COPY_STAT_T(rttvar);
	COPY_STAT(rcv_up);
	COPY_STAT(rcv_adv);
	COPY_STAT(rcv_nxt);
	COPY_STAT(rcv_wnd);
	COPY_STAT_T(dupacks);
	COPY_STAT_T(segqlen);
	COPY_STAT(snd_numholes);
	COPY_STAT(snd_scale);
	COPY_STAT(rcv_scale);
#undef COPY_STAT
#undef COPY_STAT_T
	log_buf->tlb_flex1 = 0;
	log_buf->tlb_flex2 = 0;
	/* Copy stack-specific info. */
	if (stackinfo != NULL) {
		memcpy(&log_buf->tlb_stackinfo, stackinfo,
		    sizeof(log_buf->tlb_stackinfo));
		log_buf->tlb_eventflags |= TLB_FLAG_STACKINFO;
	}

	/* The packet */
	log_buf->tlb_len = len;
	if (th != NULL) {
		int optlen;

		log_buf->tlb_eventflags |= TLB_FLAG_HDR;
		log_buf->tlb_th = *th;
		if (th_hostorder)
			tcp_fields_to_net(&log_buf->tlb_th);
		optlen = (th->th_off << 2) - sizeof (struct tcphdr);
		if (optlen > 0)
			memcpy(log_buf->tlb_opts, th + 1, optlen);
	}

	/* Verbose information */
	if (func != NULL) {
		log_buf->tlb_eventflags |= TLB_FLAG_VERBOSE;
		if (output_caller != NULL)
			strlcpy(log_verbose->tlv_snd_frm, output_caller,
			    TCP_FUNC_LEN);
		else
			*log_verbose->tlv_snd_frm = 0;
		strlcpy(log_verbose->tlv_trace_func, func, TCP_FUNC_LEN);
		log_verbose->tlv_trace_line = line;
	}

	/* Insert the new log at the tail. */
	STAILQ_INSERT_TAIL(&tp->t_logs, log_entry, tlm_queue);
	tcp_log_entry_refcnt_add(log_entry);
	return (log_buf);
}

/*
 * Change the logging state for a TCPCB. Returns 0 on success or an
 * error code on failure.
 */
int
tcp_log_state_change(struct tcpcb *tp, int state)
{
	struct tcp_log_mem *log_entry;

	INP_WLOCK_ASSERT(tp->t_inpcb);
	switch(state) {
	case TCP_LOG_STATE_CLEAR:
		while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
			tcp_log_remove_log_head(tp, log_entry);
		/* FALLTHROUGH */

	case TCP_LOG_STATE_OFF:
		tp->t_logstate = TCP_LOG_STATE_OFF;
		break;

	case TCP_LOG_STATE_TAIL:
	case TCP_LOG_STATE_HEAD:
	case TCP_LOG_STATE_CONTINUAL:
	case TCP_LOG_STATE_HEAD_AUTO:
	case TCP_LOG_STATE_TAIL_AUTO:
		tp->t_logstate = state;
		break;

	default:
		return (EINVAL);
	}
	if (tcp_disable_all_bb_logs) {
		/* We are prohibited from doing any logs */
		tp->t_logstate = TCP_LOG_STATE_OFF;
	}
	tp->t_flags2 &= ~(TF2_LOG_AUTO);

	return (0);
}

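/*
 * Typical use from userspace (via the TCP_LOG socket option, which the
 * socket layer maps onto this function):
 *
 *	int state = TCP_LOG_STATE_TAIL;
 *	setsockopt(s, IPPROTO_TCP, TCP_LOG, &state, sizeof(state));
 */
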
/* If tcp_drain() is called, flush half the log entries. */
void
tcp_log_drain(struct tcpcb *tp)
{
	struct tcp_log_mem *log_entry, *next;
	int target, skip;

	INP_WLOCK_ASSERT(tp->t_inpcb);
	if ((target = tp->t_lognum / 2) == 0)
		return;

	/*
	 * If we are logging the "head" packets, we want to discard
	 * from the tail of the queue. Otherwise, we want to discard
	 * from the head.
	 */
	if (tp->t_logstate == TCP_LOG_STATE_HEAD ||
	    tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO) {
		skip = tp->t_lognum - target;
		STAILQ_FOREACH(log_entry, &tp->t_logs, tlm_queue)
			if (!--skip)
				break;
		KASSERT(log_entry != NULL,
		    ("%s: skipped through all entries!", __func__));
		if (log_entry == NULL)
			return;
		while ((next = STAILQ_NEXT(log_entry, tlm_queue)) != NULL) {
			STAILQ_REMOVE_AFTER(&tp->t_logs, log_entry, tlm_queue);
			tcp_log_entry_refcnt_rem(next);
			tcp_log_remove_log_cleanup(tp, next);
#ifdef INVARIANTS
			target--;
#endif
		}
		KASSERT(target == 0,
		    ("%s: After removing from tail, target was %d", __func__,
		    target));
	} else if (tp->t_logstate == TCP_LOG_STATE_CONTINUAL) {
		(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
		    M_NOWAIT, false);
	} else {
		while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL &&
		    target--)
			tcp_log_remove_log_head(tp, log_entry);
		KASSERT(target <= 0,
		    ("%s: After removing from head, target was %d", __func__,
		    target));
		KASSERT(tp->t_lognum > 0,
		    ("%s: After removing from head, tp->t_lognum was %d",
		    __func__, tp->t_lognum));
		KASSERT(log_entry != NULL,
		    ("%s: After removing from head, the tailq was empty",
		    __func__));
	}
}

static int
tcp_log_copyout(struct sockopt *sopt, void *src, void *dst, size_t len)
{

	if (sopt->sopt_td != NULL)
		return (copyout(src, dst, len));
	bcopy(src, dst, len);
	return (0);
}

static int
tcp_log_logs_to_buf(struct sockopt *sopt, struct tcp_log_stailq *log_tailqp,
    struct tcp_log_buffer **end, int count)
{
	struct tcp_log_buffer *out_entry;
	struct tcp_log_mem *log_entry;
	size_t entrysize;
	int error;
#ifdef INVARIANTS
	int orig_count = count;
#endif

	/* Copy the data out. */
	error = 0;
	out_entry = (struct tcp_log_buffer *) sopt->sopt_val;
	STAILQ_FOREACH(log_entry, log_tailqp, tlm_queue) {
		count--;
		KASSERT(count >= 0,
		    ("%s:%d: Exceeded expected count (%d) processing list %p",
		    __func__, __LINE__, orig_count, log_tailqp));

#ifdef TCPLOG_DEBUG_COUNTERS
		counter_u64_add(tcp_log_que_copyout, 1);
#endif

		/*
		 * Skip copying out the header if it isn't present.
		 * Instead, copy out zeros (to ensure we don't leak info).
		 * TODO: Make sure we truly do zero everything we don't
		 * want to expose.
		 */
		if (log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_HDR)
			entrysize = sizeof(struct tcp_log_buffer);
		else
			entrysize = offsetof(struct tcp_log_buffer, tlb_th);
		error = tcp_log_copyout(sopt, &log_entry->tlm_buf, out_entry,
		    entrysize);
		if (error)
			break;
		if (!(log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_HDR)) {
			error = tcp_log_copyout(sopt, zerobuf,
			    ((uint8_t *)out_entry) + entrysize,
			    sizeof(struct tcp_log_buffer) - entrysize);
		}

		/*
		 * Copy out the verbose bit, if needed. Either way,
		 * increment the output pointer the correct amount.
		 */
		if (log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_VERBOSE) {
			error = tcp_log_copyout(sopt, &log_entry->tlm_v,
			    out_entry->tlb_verbose,
			    sizeof(struct tcp_log_verbose));
			if (error)
				break;
			out_entry = (struct tcp_log_buffer *)
			    (((uint8_t *) (out_entry + 1)) +
			    sizeof(struct tcp_log_verbose));
		} else
			out_entry++;
	}
	*end = out_entry;
	KASSERT(error || count == 0,
	    ("%s:%d: Less than expected count (%d) processing list %p"
	    " (%d remain)", __func__, __LINE__, orig_count,
	    log_tailqp, count));

	return (error);
}

/*
 * Copy out the buffer. Note that we do incremental copying, so
 * sooptcopyout() won't work. However, the goal is to produce the same
 * end result as if we copied in the entire user buffer, updated it,
 * and then used sooptcopyout() to copy it out.
 *
 * NOTE: This should be called with a write lock on the PCB; however,
 * the function will drop it after it extracts the data from the TCPCB.
 */
int
tcp_log_getlogbuf(struct sockopt *sopt, struct tcpcb *tp)
{
	struct tcp_log_stailq log_tailq;
	struct tcp_log_mem *log_entry, *log_next;
	struct tcp_log_buffer *out_entry;
	struct inpcb *inp;
	size_t outsize, entrysize;
	int error, outnum;

	INP_WLOCK_ASSERT(tp->t_inpcb);
	inp = tp->t_inpcb;

	/*
	 * Determine which log entries will fit in the buffer. As an
	 * optimization, skip this if all the entries will clearly fit
	 * in the buffer. (However, get an exact size if we are using
	 * INVARIANTS.)
	 */
#ifndef INVARIANTS
	if (sopt->sopt_valsize / (sizeof(struct tcp_log_buffer) +
	    sizeof(struct tcp_log_verbose)) >= tp->t_lognum) {
		log_entry = STAILQ_LAST(&tp->t_logs, tcp_log_mem, tlm_queue);
		log_next = NULL;
		outsize = 0;
		outnum = tp->t_lognum;
	} else {
#endif
		outsize = outnum = 0;
		log_entry = NULL;
		STAILQ_FOREACH(log_next, &tp->t_logs, tlm_queue) {
			entrysize = sizeof(struct tcp_log_buffer);
			if (log_next->tlm_buf.tlb_eventflags &
			    TLB_FLAG_VERBOSE)
				entrysize += sizeof(struct tcp_log_verbose);
			if ((sopt->sopt_valsize - outsize) < entrysize)
				break;
			outsize += entrysize;
			outnum++;
			log_entry = log_next;
		}
		KASSERT(outsize <= sopt->sopt_valsize,
		    ("%s: calculated output size (%zu) greater than available "
		    "space (%zu)", __func__, outsize, sopt->sopt_valsize));
#ifndef INVARIANTS
	}
#endif

	/*
	 * Copy traditional sooptcopyout() behavior: if sopt->sopt_val
	 * is NULL, silently skip the copy. However, in this case, we
	 * will leave the list alone and return. Functionally, this
	 * gives userspace a way to poll for an approximate buffer
	 * size they will need to get the log entries.
	 */
	if (sopt->sopt_val == NULL) {
		INP_WUNLOCK(inp);
		if (outsize == 0) {
			outsize = outnum * (sizeof(struct tcp_log_buffer) +
			    sizeof(struct tcp_log_verbose));
		}
		if (sopt->sopt_valsize > outsize)
			sopt->sopt_valsize = outsize;
		return (0);
	}

	/*
	 * Break apart the list. We'll save the ones we want to copy
	 * out locally and remove them from the TCPCB list. We can
	 * then drop the INPCB lock while we do the copyout.
	 *
	 * There are roughly three cases:
	 * 1. There was nothing to copy out. That's easy: drop the
	 *    lock and return.
	 * 2. We are copying out the entire list. Again, that's easy:
	 *    move the whole list.
	 * 3. We are copying out a partial list. That's harder. We
	 *    need to update the list book-keeping entries.
	 */
	if (log_entry != NULL && log_next == NULL) {
		/* Move entire list. */
		KASSERT(outnum == tp->t_lognum,
		    ("%s:%d: outnum (%d) should match tp->t_lognum (%d)",
		    __func__, __LINE__, outnum, tp->t_lognum));
		log_tailq = tp->t_logs;
		tp->t_lognum = 0;
		STAILQ_INIT(&tp->t_logs);
	} else if (log_entry != NULL) {
		/* Move partial list. */
		KASSERT(outnum < tp->t_lognum,
		    ("%s:%d: outnum (%d) not less than tp->t_lognum (%d)",
		    __func__, __LINE__, outnum, tp->t_lognum));
		STAILQ_FIRST(&log_tailq) = STAILQ_FIRST(&tp->t_logs);
		STAILQ_FIRST(&tp->t_logs) = STAILQ_NEXT(log_entry, tlm_queue);
		KASSERT(STAILQ_NEXT(log_entry, tlm_queue) != NULL,
		    ("%s:%d: tp->t_logs is unexpectedly shorter than expected "
		    "(tp: %p, log_tailq: %p, outnum: %d, tp->t_lognum: %d)",
		    __func__, __LINE__, tp, &log_tailq, outnum, tp->t_lognum));
		STAILQ_NEXT(log_entry, tlm_queue) = NULL;
		log_tailq.stqh_last = &STAILQ_NEXT(log_entry, tlm_queue);
		tp->t_lognum -= outnum;
	} else
		STAILQ_INIT(&log_tailq);

	/* Drop the PCB lock. */
	INP_WUNLOCK(inp);

	/* Copy the data out. */
	error = tcp_log_logs_to_buf(sopt, &log_tailq, &out_entry, outnum);

	if (error) {
		/* Restore list */
		INP_WLOCK(inp);
		if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0) {
			tp = intotcpcb(inp);

			/* Merge the two lists. */
			STAILQ_CONCAT(&log_tailq, &tp->t_logs);
			tp->t_logs = log_tailq;
			tp->t_lognum += outnum;
		}
		INP_WUNLOCK(inp);
	} else {
		/* Sanity check entries */
		KASSERT(((caddr_t)out_entry - (caddr_t)sopt->sopt_val) ==
		    outsize, ("%s: Actual output size (%zu) != "
		    "calculated output size (%zu)", __func__,
		    (size_t)((caddr_t)out_entry - (caddr_t)sopt->sopt_val),
		    outsize));

		/* Free the entries we just copied out. */
		STAILQ_FOREACH_SAFE(log_entry, &log_tailq, tlm_queue, log_next) {
			tcp_log_entry_refcnt_rem(log_entry);
			uma_zfree(tcp_log_zone, log_entry);
		}
	}

	sopt->sopt_valsize = (size_t)((caddr_t)out_entry -
	    (caddr_t)sopt->sopt_val);
	return (error);
}

static void
tcp_log_free_queue(struct tcp_log_dev_queue *param)
{
	struct tcp_log_dev_log_queue *entry;

	KASSERT(param != NULL, ("%s: called with NULL param", __func__));
	if (param == NULL)
		return;

	entry = (struct tcp_log_dev_log_queue *)param;

	/* Free the entries. */
	tcp_log_free_entries(&entry->tldl_entries, &entry->tldl_count);

	/* Free the buffer, if it is allocated. */
	if (entry->tldl_common.tldq_buf != NULL)
		free(entry->tldl_common.tldq_buf, M_TCPLOGDEV);

	/* Free the queue entry. */
	free(entry, M_TCPLOGDEV);
}
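/*
 * Design note: the tcp_log_dev code that consumes these queue entries
 * never touches the TCPCB.  Each entry instead carries two callbacks in
 * its common header: tldq_xform (tcp_log_expandlogbuf(), below) flattens
 * the log entries into one contiguous buffer once a reader is present,
 * and tldq_dtor (tcp_log_free_queue(), above) releases everything if the
 * entry is dropped instead.
 */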
static struct tcp_log_common_header *
tcp_log_expandlogbuf(struct tcp_log_dev_queue *param)
{
	struct tcp_log_dev_log_queue *entry;
	struct tcp_log_header *hdr;
	uint8_t *end;
	struct sockopt sopt;
	int error;

	entry = (struct tcp_log_dev_log_queue *)param;

	/* Take a worst-case guess at space needs. */
	sopt.sopt_valsize = sizeof(struct tcp_log_header) +
	    entry->tldl_count * (sizeof(struct tcp_log_buffer) +
	    sizeof(struct tcp_log_verbose));
	hdr = malloc(sopt.sopt_valsize, M_TCPLOGDEV, M_NOWAIT);
	if (hdr == NULL) {
#ifdef TCPLOG_DEBUG_COUNTERS
		counter_u64_add(tcp_log_que_fail5, entry->tldl_count);
#endif
		return (NULL);
	}
	sopt.sopt_val = hdr + 1;
	sopt.sopt_valsize -= sizeof(struct tcp_log_header);
	sopt.sopt_td = NULL;

	error = tcp_log_logs_to_buf(&sopt, &entry->tldl_entries,
	    (struct tcp_log_buffer **)&end, entry->tldl_count);
	if (error) {
		free(hdr, M_TCPLOGDEV);
		return (NULL);
	}

	/* Free the entries. */
	tcp_log_free_entries(&entry->tldl_entries, &entry->tldl_count);
	entry->tldl_count = 0;

	memset(hdr, 0, sizeof(struct tcp_log_header));
	hdr->tlh_version = TCP_LOG_BUF_VER;
	hdr->tlh_type = TCP_LOG_DEV_TYPE_BBR;
	hdr->tlh_length = end - (uint8_t *)hdr;
	hdr->tlh_ie = entry->tldl_ie;
	hdr->tlh_af = entry->tldl_af;
	getboottime(&hdr->tlh_offset);
	strlcpy(hdr->tlh_id, entry->tldl_id, TCP_LOG_ID_LEN);
	strlcpy(hdr->tlh_tag, entry->tldl_tag, TCP_LOG_TAG_LEN);
	strlcpy(hdr->tlh_reason, entry->tldl_reason, TCP_LOG_REASON_LEN);
	return ((struct tcp_log_common_header *)hdr);
}
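/*
 * For reference, a sketch of the record tcp_log_expandlogbuf() builds
 * (drawn from the assignments above; verbose blocks appear only for
 * entries with TLB_FLAG_VERBOSE set):
 *
 *	+--------------------------+ <- hdr
 *	| struct tcp_log_header    |
 *	+--------------------------+ <- hdr + 1 (sopt.sopt_val)
 *	| struct tcp_log_buffer    |
 *	| [struct tcp_log_verbose] |
 *	| struct tcp_log_buffer    |
 *	| ...                      |
 *	+--------------------------+ <- end (tlh_length bytes from hdr)
 */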
/*
 * Queue the tcpcb's log buffer for transmission via the log buffer facility.
 *
 * NOTE: This should be called with a write lock on the PCB.
 *
 * how should be M_WAITOK or M_NOWAIT. If M_WAITOK, the function will drop
 * and reacquire the INP lock if it needs to do so.
 *
 * If force is false, this will only dump auto-logged sessions if
 * tcp_log_auto_all is true or if there is a log ID defined for the session.
 */
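/*
 * Usage sketch (illustrative only, not a real call site): a caller that
 * wants to flush a session's traces while holding the INP write lock
 * might do:
 *
 *	error = tcp_log_dump_tp_logbuf(tp, "close", M_NOWAIT, false);
 *
 * The "close" reason string here is hypothetical; callers pass any short
 * tag, which ends up in the dump header to say why the dump happened.
 */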
int
tcp_log_dump_tp_logbuf(struct tcpcb *tp, char *reason, int how, bool force)
{
	struct tcp_log_dev_log_queue *entry;
	struct inpcb *inp;
#ifdef TCPLOG_DEBUG_COUNTERS
	int num_entries;
#endif

	inp = tp->t_inpcb;
	INP_WLOCK_ASSERT(inp);

	/* If there are no log entries, there is nothing to do. */
	if (tp->t_lognum == 0)
		return (0);

	/* Check for a log ID. */
	if (tp->t_lib == NULL && (tp->t_flags2 & TF2_LOG_AUTO) &&
	    !tcp_log_auto_all && !force) {
		struct tcp_log_mem *log_entry;

		/*
		 * We needed a log ID and none was found. Free the log entries
		 * and return success. Also, cancel further logging. If the
		 * session doesn't have a log ID by now, we'll assume it isn't
		 * going to get one.
		 */
		while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
			tcp_log_remove_log_head(tp, log_entry);
		KASSERT(tp->t_lognum == 0,
		    ("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
		    __func__, tp->t_lognum));
		tp->t_logstate = TCP_LOG_STATE_OFF;
		return (0);
	}
	/*
	 * Allocate memory. If we must wait, we'll need to drop the locks
	 * and reacquire them (and do all the related business that goes
	 * along with that).
	 */
	entry = malloc(sizeof(struct tcp_log_dev_log_queue), M_TCPLOGDEV,
	    M_NOWAIT);
	if (entry == NULL && (how & M_NOWAIT)) {
#ifdef TCPLOG_DEBUG_COUNTERS
		counter_u64_add(tcp_log_que_fail3, 1);
#endif
		return (ENOBUFS);
	}
	if (entry == NULL) {
		INP_WUNLOCK(inp);
		entry = malloc(sizeof(struct tcp_log_dev_log_queue),
		    M_TCPLOGDEV, M_WAITOK);
		INP_WLOCK(inp);
		/*
		 * Note that this check is slightly overly-restrictive in
		 * that the TCB can survive either of these events.
		 * However, there is currently not a good way to ensure
		 * that is the case. So, if we hit this M_WAIT path, we
		 * may end up dropping some entries. That seems like a
		 * small price to pay for safety.
		 */
		if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
			free(entry, M_TCPLOGDEV);
#ifdef TCPLOG_DEBUG_COUNTERS
			counter_u64_add(tcp_log_que_fail2, 1);
#endif
			return (ECONNRESET);
		}
		tp = intotcpcb(inp);
		if (tp->t_lognum == 0) {
			free(entry, M_TCPLOGDEV);
			return (0);
		}
	}
	/* Fill in the unique parts of the queue entry. */
	if (tp->t_lib != NULL) {
		strlcpy(entry->tldl_id, tp->t_lib->tlb_id, TCP_LOG_ID_LEN);
		strlcpy(entry->tldl_tag, tp->t_lib->tlb_tag, TCP_LOG_TAG_LEN);
	} else {
		strlcpy(entry->tldl_id, "UNKNOWN", TCP_LOG_ID_LEN);
		strlcpy(entry->tldl_tag, "UNKNOWN", TCP_LOG_TAG_LEN);
	}
	if (reason != NULL)
		strlcpy(entry->tldl_reason, reason, TCP_LOG_REASON_LEN);
	else
		strlcpy(entry->tldl_reason, "UNKNOWN", TCP_LOG_REASON_LEN);
	entry->tldl_ie = inp->inp_inc.inc_ie;
	if (inp->inp_inc.inc_flags & INC_ISIPV6)
		entry->tldl_af = AF_INET6;
	else
		entry->tldl_af = AF_INET;
	entry->tldl_entries = tp->t_logs;
	entry->tldl_count = tp->t_lognum;

	/* Fill in the common parts of the queue entry. */
	entry->tldl_common.tldq_buf = NULL;
	entry->tldl_common.tldq_xform = tcp_log_expandlogbuf;
	entry->tldl_common.tldq_dtor = tcp_log_free_queue;

	/* Clear the log data from the TCPCB. */
#ifdef TCPLOG_DEBUG_COUNTERS
	num_entries = tp->t_lognum;
#endif
	tp->t_lognum = 0;
	STAILQ_INIT(&tp->t_logs);

	/* Add the entry. If no one is listening, free the entry. */
	if (tcp_log_dev_add_log((struct tcp_log_dev_queue *)entry)) {
		tcp_log_free_queue((struct tcp_log_dev_queue *)entry);
#ifdef TCPLOG_DEBUG_COUNTERS
		counter_u64_add(tcp_log_que_fail1, num_entries);
	} else
		counter_u64_add(tcp_log_queued, num_entries);
#else
	}
#endif
	return (0);
}
/*
 * Queue the log_id_node's log buffers for transmission via the log buffer
 * facility.
 *
 * NOTE: This should be called with the bucket locked and referenced.
 *
 * how should be M_WAITOK or M_NOWAIT. If M_WAITOK, the function will drop
 * and reacquire the bucket lock if it needs to do so. (The caller must
 * ensure that the tln is no longer on any lists so no one else will mess
 * with this while the lock is dropped!)
 */
static int
tcp_log_dump_node_logbuf(struct tcp_log_id_node *tln, char *reason, int how)
{
	struct tcp_log_dev_log_queue *entry;
	struct tcp_log_id_bucket *tlb;

	tlb = tln->tln_bucket;
	TCPID_BUCKET_LOCK_ASSERT(tlb);
	KASSERT(tlb->tlb_refcnt > 0,
	    ("%s:%d: Called with unreferenced bucket (tln=%p, tlb=%p)",
	    __func__, __LINE__, tln, tlb));
	KASSERT(tln->tln_closed,
	    ("%s:%d: Called for node with tln_closed==false (tln=%p)",
	    __func__, __LINE__, tln));

	/* If there are no log entries, there is nothing to do. */
	if (tln->tln_count == 0)
		return (0);

	/*
	 * Allocate memory. If we must wait, we'll need to drop the locks
	 * and reacquire them (and do all the related business that goes
	 * along with that).
	 */
	entry = malloc(sizeof(struct tcp_log_dev_log_queue), M_TCPLOGDEV,
	    M_NOWAIT);
	if (entry == NULL && (how & M_NOWAIT))
		return (ENOBUFS);
	if (entry == NULL) {
		TCPID_BUCKET_UNLOCK(tlb);
		entry = malloc(sizeof(struct tcp_log_dev_log_queue),
		    M_TCPLOGDEV, M_WAITOK);
		TCPID_BUCKET_LOCK(tlb);
	}

	/* Fill in the common parts of the queue entry. */
	entry->tldl_common.tldq_buf = NULL;
	entry->tldl_common.tldq_xform = tcp_log_expandlogbuf;
	entry->tldl_common.tldq_dtor = tcp_log_free_queue;

	/* Fill in the unique parts of the queue entry. */
	strlcpy(entry->tldl_id, tlb->tlb_id, TCP_LOG_ID_LEN);
	strlcpy(entry->tldl_tag, tlb->tlb_tag, TCP_LOG_TAG_LEN);
	if (reason != NULL)
		strlcpy(entry->tldl_reason, reason, TCP_LOG_REASON_LEN);
	else
		strlcpy(entry->tldl_reason, "UNKNOWN", TCP_LOG_REASON_LEN);
	entry->tldl_ie = tln->tln_ie;
	entry->tldl_entries = tln->tln_entries;
	entry->tldl_count = tln->tln_count;
	entry->tldl_af = tln->tln_af;

	/* Add the entry. If no one is listening, free the entry. */
	if (tcp_log_dev_add_log((struct tcp_log_dev_queue *)entry))
		tcp_log_free_queue((struct tcp_log_dev_queue *)entry);

	return (0);
}
/*
 * Queue the log buffers for all sessions in a bucket for transmission via
 * the log buffer facility.
 *
 * NOTE: This should be called with a locked bucket; however, the function
 * will drop the lock.
 */
#define	LOCAL_SAVE	10
static void
tcp_log_dumpbucketlogs(struct tcp_log_id_bucket *tlb, char *reason)
{
	struct tcp_log_id_node local_entries[LOCAL_SAVE];
	struct inpcb *inp;
	struct tcpcb *tp;
	struct tcp_log_id_node *cur_tln, *prev_tln, *tmp_tln;
	int i, num_local_entries, tree_locked;
	bool expireq_locked;

	TCPID_BUCKET_LOCK_ASSERT(tlb);

	/*
	 * Take a reference on the bucket to keep it from disappearing until
	 * we are done.
	 */
	TCPID_BUCKET_REF(tlb);

	/*
	 * We'll try to create these without dropping locks. However, we
	 * might very well need to drop locks to get memory. If that's the
	 * case, we'll save up to 10 on the stack, and sacrifice the rest.
	 * (Otherwise, we need to worry about finding our place again in a
	 * potentially changed list. It just doesn't seem worth the trouble
	 * to do that.)
	 */
	expireq_locked = false;
	num_local_entries = 0;
	prev_tln = NULL;
	tree_locked = TREE_UNLOCKED;
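	/*
	 * In pseudo-code, the fallback strategy the loop below implements:
	 * any node whose M_NOWAIT dump fails is copied into
	 * local_entries[0..LOCAL_SAVE-1] and retried with M_WAITOK once all
	 * locks have been dropped; after the stack array fills up, further
	 * failing nodes simply have their entries freed.
	 */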
	SLIST_FOREACH_SAFE(cur_tln, &tlb->tlb_head, tln_list, tmp_tln) {
		/*
		 * If this isn't associated with a TCPCB, we can pull it off
		 * the list now. We need to be careful that the expire timer
		 * hasn't already taken ownership (tln_expiretime == SBT_MAX).
		 * If so, we let the expire timer code free the data.
		 */
		if (cur_tln->tln_closed) {
no_inp:
			/*
			 * Get the expireq lock so we can get a consistent
			 * read of tln_expiretime and so we can remove this
			 * from the expireq.
			 */
			if (!expireq_locked) {
				TCPLOG_EXPIREQ_LOCK();
				expireq_locked = true;
			}

			/*
			 * We ignore entries with tln_expiretime == SBT_MAX.
			 * The expire timer code already owns those.
			 */
			KASSERT(cur_tln->tln_expiretime > (sbintime_t) 0,
			    ("%s:%d: node on the expire queue without positive "
			    "expire time", __func__, __LINE__));
			if (cur_tln->tln_expiretime == SBT_MAX) {
				prev_tln = cur_tln;
				continue;
			}

			/* Remove the entry from the expireq. */
			STAILQ_REMOVE(&tcp_log_expireq_head, cur_tln,
			    tcp_log_id_node, tln_expireq);

			/* Remove the entry from the bucket. */
			if (prev_tln != NULL)
				SLIST_REMOVE_AFTER(prev_tln, tln_list);
			else
				SLIST_REMOVE_HEAD(&tlb->tlb_head, tln_list);

			/*
			 * Drop the INP and bucket reference counts. Due to
			 * lock-ordering rules, we need to drop the expire
			 * queue lock.
			 */
			TCPLOG_EXPIREQ_UNLOCK();
			expireq_locked = false;

			/* Drop the INP reference. */
			INP_WLOCK(cur_tln->tln_inp);
			if (!in_pcbrele_wlocked(cur_tln->tln_inp))
				INP_WUNLOCK(cur_tln->tln_inp);

			if (tcp_log_unref_bucket(tlb, &tree_locked, NULL)) {
#ifdef INVARIANTS
				panic("%s: Bucket refcount unexpectedly 0.",
				    __func__);
#endif
				/*
				 * Recover as best we can: free the entry we
				 * own.
				 */
				tcp_log_free_entries(&cur_tln->tln_entries,
				    &cur_tln->tln_count);
				uma_zfree(tcp_log_node_zone, cur_tln);
				goto done;
			}

			if (tcp_log_dump_node_logbuf(cur_tln, reason,
			    M_NOWAIT)) {
				/*
				 * If we have space, save the entries locally.
				 * Otherwise, free them.
				 */
				if (num_local_entries < LOCAL_SAVE) {
					local_entries[num_local_entries] =
					    *cur_tln;
					num_local_entries++;
				} else {
					tcp_log_free_entries(
					    &cur_tln->tln_entries,
					    &cur_tln->tln_count);
				}
			}

			/* No matter what, we are done with the node now. */
			uma_zfree(tcp_log_node_zone, cur_tln);

			/*
			 * Because we removed this entry from the list, prev_tln
			 * (which tracks the previous entry still on the tlb
			 * list) remains unchanged.
			 */
			continue;
		}
		/*
		 * If we get to this point, the session data is still held in
		 * the TCPCB. So, we need to pull the data out of that.
		 *
		 * We will need to drop the expireq lock so we can lock the INP.
		 * We can then try to extract the data the "easy" way. If that
		 * fails, we'll save the log entries for later.
		 */
		if (expireq_locked) {
			TCPLOG_EXPIREQ_UNLOCK();
			expireq_locked = false;
		}

		/* Lock the INP and then re-check the state. */
		inp = cur_tln->tln_inp;
		INP_WLOCK(inp);

		/*
		 * If we caught this while it was transitioning, the data
		 * might have moved from the TCPCB to the tln (signified by
		 * setting tln_closed to true). If so, treat this like an
		 * inactive connection.
		 */
		if (cur_tln->tln_closed) {
			/*
			 * It looks like we may have caught this connection
			 * while it was transitioning from active to inactive.
			 * Treat this like an inactive connection.
			 */
			INP_WUNLOCK(inp);
			goto no_inp;
		}

		/*
		 * Try to dump the data from the tp without dropping the lock.
		 * If this fails, try to save off the data locally.
		 */
		tp = cur_tln->tln_tp;
		if (tcp_log_dump_tp_logbuf(tp, reason, M_NOWAIT, true) &&
		    num_local_entries < LOCAL_SAVE) {
			tcp_log_move_tp_to_node(tp,
			    &local_entries[num_local_entries]);
			local_entries[num_local_entries].tln_closed = 1;
			KASSERT(local_entries[num_local_entries].tln_bucket ==
			    tlb, ("%s: %d: bucket mismatch for node %p",
			    __func__, __LINE__, cur_tln));
			num_local_entries++;
		}
		INP_WUNLOCK(inp);

		/*
		 * We are going to leave the current tln on the list. It will
		 * become the previous tln.
		 */
		prev_tln = cur_tln;
	}
	/* Drop our locks, if any. */
	KASSERT(tree_locked == TREE_UNLOCKED,
	    ("%s: %d: tree unexpectedly locked", __func__, __LINE__));
	switch (tree_locked) {
	case TREE_WLOCKED:
		TCPID_TREE_WUNLOCK();
		tree_locked = TREE_UNLOCKED;
		break;
	case TREE_RLOCKED:
		TCPID_TREE_RUNLOCK();
		tree_locked = TREE_UNLOCKED;
		break;
	}
	if (expireq_locked) {
		TCPLOG_EXPIREQ_UNLOCK();
		expireq_locked = false;
	}

	/*
	 * Try again for any saved entries. tcp_log_dump_node_logbuf() is
	 * guaranteed to free the log entries within the node. And, since
	 * the node itself is on our stack, we don't need to free it.
	 */
	for (i = 0; i < num_local_entries; i++)
		tcp_log_dump_node_logbuf(&local_entries[i], reason, M_WAITOK);

	/* Drop our reference. */
	if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
		TCPID_BUCKET_UNLOCK(tlb);

done:
	/* Drop our locks, if any. */
	switch (tree_locked) {
	case TREE_WLOCKED:
		TCPID_TREE_WUNLOCK();
		break;
	case TREE_RLOCKED:
		TCPID_TREE_RUNLOCK();
		break;
	}
	if (expireq_locked)
		TCPLOG_EXPIREQ_UNLOCK();
}
#undef LOCAL_SAVE

/*
 * Queue the log buffers for all sessions in a bucket for transmission via
 * the log buffer facility.
 *
 * NOTE: This should be called with a locked INP; however, the function
 * will drop the lock.
 */
void
tcp_log_dump_tp_bucket_logbufs(struct tcpcb *tp, char *reason)
{
	struct tcp_log_id_bucket *tlb;
	int tree_locked;

	/* Figure out our bucket and lock it. */
	INP_WLOCK_ASSERT(tp->t_inpcb);
	tlb = tp->t_lib;
	if (tlb == NULL) {
		/*
		 * No bucket; treat this like a request to dump a single
		 * session's traces.
		 */
		(void)tcp_log_dump_tp_logbuf(tp, reason, M_WAITOK, true);
		INP_WUNLOCK(tp->t_inpcb);
		return;
	}
	TCPID_BUCKET_REF(tlb);
	INP_WUNLOCK(tp->t_inpcb);
	TCPID_BUCKET_LOCK(tlb);

	/* If we are the last reference, we have nothing more to do here. */
	tree_locked = TREE_UNLOCKED;
	if (tcp_log_unref_bucket(tlb, &tree_locked, NULL)) {
		switch (tree_locked) {
		case TREE_WLOCKED:
			TCPID_TREE_WUNLOCK();
			break;
		case TREE_RLOCKED:
			TCPID_TREE_RUNLOCK();
			break;
		}
		return;
	}

	/* Turn this over to tcp_log_dumpbucketlogs() to finish the work. */
	tcp_log_dumpbucketlogs(tlb, reason);
}
/*
 * Mark the end of a flow with the current stack. A stack can add
 * stack-specific info to this trace event by overriding this
 * function (see bbr_log_flowend() for example).
 */
void
tcp_log_flowend(struct tcpcb *tp)
{
	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
		struct socket *so = tp->t_inpcb->inp_socket;
		TCP_LOG_EVENT(tp, NULL, &so->so_rcv, &so->so_snd,
		    TCP_LOG_FLOWEND, 0, 0, NULL, false);
	}
}
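/*
 * Illustration (hypothetical; modeled on the bbr_log_flowend() pattern
 * referenced above): a stack layers its own flow-end routine on top of
 * this one, logging stack-private state before emitting the common
 * FLOWEND event:
 *
 *	static void
 *	foo_log_flowend(struct tcpcb *tp)
 *	{
 *		foo_log_private_state(tp);	(hypothetical helper)
 *		tcp_log_flowend(tp);
 *	}
 */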