3 * SPDX-License-Identifier: BSD-2-Clause
5 * Copyright (c) 2016-2018 Netflix, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
37 #include <sys/kernel.h>
39 #include <sys/malloc.h>
40 #include <sys/mutex.h>
41 #include <sys/qmath.h>
42 #include <sys/queue.h>
43 #include <sys/refcount.h>
44 #include <sys/rwlock.h>
45 #include <sys/socket.h>
46 #include <sys/socketvar.h>
47 #include <sys/sysctl.h>
49 #include <sys/stats.h> /* Must come after qmath.h and tree.h */
50 #include <sys/counter.h>
51 #include <dev/tcp_log/tcp_log_dev.h>
54 #include <net/if_var.h>
57 #include <netinet/in.h>
58 #include <netinet/in_pcb.h>
59 #include <netinet/in_var.h>
60 #include <netinet/tcp_var.h>
61 #include <netinet/tcp_log_buf.h>
62 #include <netinet/tcp_seq.h>
63 #include <netinet/tcp_hpts.h>
65 /* Default expiry time */
66 #define TCP_LOG_EXPIRE_TIME ((sbintime_t)60 * SBT_1S)
68 /* Max interval at which to run the expiry timer */
69 #define TCP_LOG_EXPIRE_INTVL ((sbintime_t)5 * SBT_1S)
72 static uma_zone_t tcp_log_id_bucket_zone, tcp_log_id_node_zone, tcp_log_zone;
73 static int tcp_log_session_limit = TCP_LOG_BUF_DEFAULT_SESSION_LIMIT;
74 static uint32_t tcp_log_version = TCP_LOG_BUF_VER;
75 RB_HEAD(tcp_log_id_tree, tcp_log_id_bucket);
76 static struct tcp_log_id_tree tcp_log_id_head;
77 static STAILQ_HEAD(, tcp_log_id_node) tcp_log_expireq_head =
78 STAILQ_HEAD_INITIALIZER(tcp_log_expireq_head);
79 static struct mtx tcp_log_expireq_mtx;
80 static struct callout tcp_log_expireq_callout;
81 static u_long tcp_log_auto_ratio = 0;
82 static volatile u_long tcp_log_auto_ratio_cur = 0;
83 static uint32_t tcp_log_auto_mode = TCP_LOG_STATE_TAIL;
84 static bool tcp_log_auto_all = false;
85 static uint32_t tcp_disable_all_bb_logs = 0;
87 RB_PROTOTYPE_STATIC(tcp_log_id_tree, tcp_log_id_bucket, tlb_rb, tcp_log_id_cmp)
89 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, bb, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
90 "TCP Black Box controls");
92 SYSCTL_NODE(_net_inet_tcp_bb, OID_AUTO, tp, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
93 "TCP Black Box Trace Point controls");
95 SYSCTL_BOOL(_net_inet_tcp_bb, OID_AUTO, log_verbose, CTLFLAG_RW, &tcp_log_verbose,
96 0, "Force verbose logging for TCP traces");
98 SYSCTL_INT(_net_inet_tcp_bb, OID_AUTO, log_session_limit,
99 CTLFLAG_RW, &tcp_log_session_limit, 0,
100 "Maximum number of events maintained for each TCP session");
102 uint32_t tcp_trace_point_config = 0;
103 SYSCTL_U32(_net_inet_tcp_bb_tp, OID_AUTO, number, CTLFLAG_RW,
104 &tcp_trace_point_config, TCP_LOG_STATE_HEAD_AUTO,
105 "What is the trace point number to activate (0=none, 0xffffffff = all)?");
107 uint32_t tcp_trace_point_bb_mode = TCP_LOG_STATE_CONTINUAL;
108 SYSCTL_U32(_net_inet_tcp_bb_tp, OID_AUTO, bbmode, CTLFLAG_RW,
109 &tcp_trace_point_bb_mode, TCP_LOG_STATE_HEAD_AUTO,
110 "What is BB logging mode that is activated?");
112 int32_t tcp_trace_point_count = 0;
113 SYSCTL_U32(_net_inet_tcp_bb_tp, OID_AUTO, count, CTLFLAG_RW,
114 &tcp_trace_point_count, TCP_LOG_STATE_HEAD_AUTO,
115 "How many connections will have BB logging turned on that hit the tracepoint?");
119 SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_global_limit, CTLFLAG_RW,
120 &tcp_log_zone, "Maximum number of events maintained for all TCP sessions");
122 SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_global_entries, CTLFLAG_RD,
123 &tcp_log_zone, "Current number of events maintained for all TCP sessions");
125 SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_id_limit, CTLFLAG_RW,
126 &tcp_log_id_bucket_zone, "Maximum number of log IDs");
128 SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_id_entries, CTLFLAG_RD,
129 &tcp_log_id_bucket_zone, "Current number of log IDs");
131 SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_id_tcpcb_limit, CTLFLAG_RW,
132 &tcp_log_id_node_zone, "Maximum number of tcpcbs with log IDs");
134 SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_id_tcpcb_entries, CTLFLAG_RD,
135 &tcp_log_id_node_zone, "Current number of tcpcbs with log IDs");
137 SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, log_version, CTLFLAG_RD, &tcp_log_version,
138 0, "Version of log formats exported");
140 SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, disable_all, CTLFLAG_RW,
141 &tcp_disable_all_bb_logs, 0,
142 "Disable all BB logging for all connections");
144 SYSCTL_ULONG(_net_inet_tcp_bb, OID_AUTO, log_auto_ratio, CTLFLAG_RW,
145 &tcp_log_auto_ratio, 0, "Do auto capturing for 1 out of N sessions");
147 SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, log_auto_mode, CTLFLAG_RW,
148 &tcp_log_auto_mode, 0,
149 "Logging mode for auto-selected sessions (default is TCP_LOG_STATE_TAIL)");
151 SYSCTL_BOOL(_net_inet_tcp_bb, OID_AUTO, log_auto_all, CTLFLAG_RW,
152 &tcp_log_auto_all, 0,
153 "Auto-select from all sessions (rather than just those with IDs)");
155 #ifdef TCPLOG_DEBUG_COUNTERS
156 counter_u64_t tcp_log_queued;
157 counter_u64_t tcp_log_que_fail1;
158 counter_u64_t tcp_log_que_fail2;
159 counter_u64_t tcp_log_que_fail3;
160 counter_u64_t tcp_log_que_fail4;
161 counter_u64_t tcp_log_que_fail5;
162 counter_u64_t tcp_log_que_copyout;
163 counter_u64_t tcp_log_que_read;
164 counter_u64_t tcp_log_que_freed;
166 SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, queued, CTLFLAG_RD,
167 &tcp_log_queued, "Number of entries queued");
168 SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail1, CTLFLAG_RD,
169 &tcp_log_que_fail1, "Number of entries queued but fail 1");
170 SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail2, CTLFLAG_RD,
171 &tcp_log_que_fail2, "Number of entries queued but fail 2");
172 SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail3, CTLFLAG_RD,
173 &tcp_log_que_fail3, "Number of entries queued but fail 3");
174 SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail4, CTLFLAG_RD,
175 &tcp_log_que_fail4, "Number of entries queued but fail 4");
176 SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail5, CTLFLAG_RD,
177 &tcp_log_que_fail5, "Number of entries queued but fail 5");
178 SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, copyout, CTLFLAG_RD,
179 &tcp_log_que_copyout, "Number of entries copied out");
180 SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, read, CTLFLAG_RD,
181 &tcp_log_que_read, "Number of entries read from the queue");
182 SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, freed, CTLFLAG_RD,
183 &tcp_log_que_freed, "Number of entries freed after reading");
187 #define TCPLOG_DEBUG_RINGBUF
190 /* Number of requests to consider a PCB ID "active". */
190 #define ACTIVE_REQUEST_COUNT 10
193 /* Statistic tracking for "active" PCB IDs. */
193 static counter_u64_t tcp_log_pcb_ids_cur;
194 static counter_u64_t tcp_log_pcb_ids_tot;
196 SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, pcb_ids_cur, CTLFLAG_RD,
197 &tcp_log_pcb_ids_cur, "Number of pcb IDs allocated in the system");
198 SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, pcb_ids_tot, CTLFLAG_RD,
199 &tcp_log_pcb_ids_tot, "Total number of pcb IDs that have been allocated");
201 struct tcp_log_mem
202 {
203 STAILQ_ENTRY(tcp_log_mem) tlm_queue;
204 struct tcp_log_buffer tlm_buf;
205 struct tcp_log_verbose tlm_v;
206 #ifdef TCPLOG_DEBUG_RINGBUF
207 volatile int tlm_refcnt;
208 #endif
209 };
211 /* 60 bytes for the header plus 16 bytes of padding. */
212 static uint8_t zerobuf[76];
221 * A. You need a lock on the Tree to add/remove buckets.
222 * B. You need a lock on the bucket to add/remove nodes from the bucket.
223 * C. To change information in a node, you need the INP lock if the tln_closed
224 * field is false. Otherwise, you need the bucket lock. (Note that the
225 * tln_closed field can change at any point, so you need to recheck the
226 * entry after acquiring the INP lock.)
227 * D. To remove a node from the bucket, you must have that entry locked,
228 * according to the criteria of Rule C. Also, the node must not be on
229 * the expiry queue.
230 * E. The exception to C is the expiry queue fields, which are locked by
231 * the TCPLOG_EXPIREQ lock.
233 * Buckets have a reference count. Each node is a reference. Further,
234 * other callers may add reference counts to keep a bucket from disappearing.
235 * You can add a reference as long as you own a lock sufficient to keep the
236 * bucket from disappearing. For example, a common use is:
237 * a. Have a locked INP, but need to lock the TCPID_BUCKET.
238 * b. Add a refcount on the bucket. (Safe because the INP lock prevents
239 * the TCPID_BUCKET from going away.)
240 * c. Drop the INP lock.
241 * d. Acquire a lock on the TCPID_BUCKET.
242 * e. Acquire a lock on the INP.
243 * f. Drop the refcount on the bucket.
244 * (At this point, the bucket may disappear.)
246 * Expire queue lock:
247 * You can acquire this with either the bucket or INP lock. Don't reverse it.
248 * When the expire code has committed to freeing a node, it resets the expiry
249 * time to SBT_MAX. That is the signal to everyone else that they should
250 * leave that node alone.
251 */
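/*
 * A minimal sketch of the a-f recipe above (illustrative only; assumes
 * "inp" is write-locked, tp->t_lib is non-NULL, and "tree_locked"
 * tracks the tree lock state as elsewhere in this file):
 *
 *	struct tcp_log_id_bucket *tlb = tp->t_lib;
 *	TCPID_BUCKET_REF(tlb);		(b) INP lock pins the bucket
 *	INP_WUNLOCK(inp);		(c)
 *	TCPID_BUCKET_LOCK(tlb);		(d)
 *	INP_WLOCK(inp);			(e)
 *	if (!tcp_log_unref_bucket(tlb, &tree_locked, inp))	(f)
 *		TCPID_BUCKET_UNLOCK(tlb);
 */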
252 static struct rwlock tcp_id_tree_lock;
253 #define TCPID_TREE_WLOCK() rw_wlock(&tcp_id_tree_lock)
254 #define TCPID_TREE_RLOCK() rw_rlock(&tcp_id_tree_lock)
255 #define TCPID_TREE_UPGRADE() rw_try_upgrade(&tcp_id_tree_lock)
256 #define TCPID_TREE_WUNLOCK() rw_wunlock(&tcp_id_tree_lock)
257 #define TCPID_TREE_RUNLOCK() rw_runlock(&tcp_id_tree_lock)
258 #define TCPID_TREE_WLOCK_ASSERT() rw_assert(&tcp_id_tree_lock, RA_WLOCKED)
259 #define TCPID_TREE_RLOCK_ASSERT() rw_assert(&tcp_id_tree_lock, RA_RLOCKED)
260 #define TCPID_TREE_UNLOCK_ASSERT() rw_assert(&tcp_id_tree_lock, RA_UNLOCKED)
262 #define TCPID_BUCKET_LOCK_INIT(tlb) mtx_init(&((tlb)->tlb_mtx), "tcp log id bucket", NULL, MTX_DEF)
263 #define TCPID_BUCKET_LOCK_DESTROY(tlb) mtx_destroy(&((tlb)->tlb_mtx))
264 #define TCPID_BUCKET_LOCK(tlb) mtx_lock(&((tlb)->tlb_mtx))
265 #define TCPID_BUCKET_UNLOCK(tlb) mtx_unlock(&((tlb)->tlb_mtx))
266 #define TCPID_BUCKET_LOCK_ASSERT(tlb) mtx_assert(&((tlb)->tlb_mtx), MA_OWNED)
267 #define TCPID_BUCKET_UNLOCK_ASSERT(tlb) mtx_assert(&((tlb)->tlb_mtx), MA_NOTOWNED)
269 #define TCPID_BUCKET_REF(tlb) refcount_acquire(&((tlb)->tlb_refcnt))
270 #define TCPID_BUCKET_UNREF(tlb) refcount_release(&((tlb)->tlb_refcnt))
272 #define TCPLOG_EXPIREQ_LOCK() mtx_lock(&tcp_log_expireq_mtx)
273 #define TCPLOG_EXPIREQ_UNLOCK() mtx_unlock(&tcp_log_expireq_mtx)
275 SLIST_HEAD(tcp_log_id_head, tcp_log_id_node);
277 struct tcp_log_id_bucket
278 {
279 /*
280 * tlb_id must be first. This lets us use strcmp on
281 * (struct tcp_log_id_bucket *) and (char *) interchangeably.
283 char tlb_id[TCP_LOG_ID_LEN];
284 char tlb_tag[TCP_LOG_TAG_LEN];
285 RB_ENTRY(tcp_log_id_bucket) tlb_rb;
286 struct tcp_log_id_head tlb_head;
287 struct mtx tlb_mtx;
288 volatile u_int tlb_refcnt;
289 volatile u_int tlb_reqcnt;
290 uint32_t tlb_loglimit;
291 int8_t tlb_logstate;
292 };
294 struct tcp_log_id_node
295 {
296 SLIST_ENTRY(tcp_log_id_node) tln_list;
297 STAILQ_ENTRY(tcp_log_id_node) tln_expireq; /* Locked by the expireq lock */
298 sbintime_t tln_expiretime; /* Locked by the expireq lock */
301 * If INP is NULL, that means the connection has closed. We've
302 * saved the connection endpoint information and the log entries
303 * in the tln_ie and tln_entries members. We've also saved a pointer
304 * to the enclosing bucket here. If INP is not NULL, the information is
305 * in the PCB and not here.
307 struct inpcb *tln_inp;
308 struct tcpcb *tln_tp;
309 struct tcp_log_id_bucket *tln_bucket;
310 struct in_endpoints tln_ie;
311 struct tcp_log_stailq tln_entries;
312 int tln_count, tln_af;
313 volatile int tln_closed;
314 };
317 enum tree_lock_state {
318 TREE_UNLOCKED = 0,
319 TREE_RLOCKED,
320 TREE_WLOCKED,
321 };
323 /* Do we want to select this session for auto-logging? */
324 static bool
325 tcp_log_selectauto(void)
326 {
328 /*
329 * If we are doing auto-capturing, figure out whether we will capture
330 * this session.
331 */
332 if (tcp_log_auto_ratio &&
333 (tcp_disable_all_bb_logs == 0) &&
334 (atomic_fetchadd_long(&tcp_log_auto_ratio_cur, 1) %
335 tcp_log_auto_ratio) == 0)
336 return (true);
337 return (false);
338 }
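/*
 * Worked example: with tcp_log_auto_ratio == 100, the pre-increment
 * counter value returned by atomic_fetchadd_long() is tested modulo
 * 100, so sessions that see counter values 0, 100, 200, ... are
 * selected -- i.e., 1 out of every 100 sessions.
 */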
341 tcp_log_id_cmp(struct tcp_log_id_bucket *a, struct tcp_log_id_bucket *b)
343 KASSERT(a != NULL, ("tcp_log_id_cmp: argument a is unexpectedly NULL"));
344 KASSERT(b != NULL, ("tcp_log_id_cmp: argument b is unexpectedly NULL"));
345 return strncmp(a->tlb_id, b->tlb_id, TCP_LOG_ID_LEN);
348 RB_GENERATE_STATIC(tcp_log_id_tree, tcp_log_id_bucket, tlb_rb, tcp_log_id_cmp)
351 tcp_log_id_validate_tree_lock(int tree_locked)
355 switch (tree_locked) {
356 case TREE_WLOCKED:
357 TCPID_TREE_WLOCK_ASSERT();
358 break;
359 case TREE_RLOCKED:
360 TCPID_TREE_RLOCK_ASSERT();
361 break;
362 case TREE_UNLOCKED:
363 TCPID_TREE_UNLOCK_ASSERT();
364 break;
365 default:
366 kassert_panic("%s:%d: unknown tree lock state", __func__,
367 __LINE__);
373 tcp_log_remove_bucket(struct tcp_log_id_bucket *tlb)
376 TCPID_TREE_WLOCK_ASSERT();
377 KASSERT(SLIST_EMPTY(&tlb->tlb_head),
378 ("%s: Attempt to remove non-empty bucket", __func__));
379 if (RB_REMOVE(tcp_log_id_tree, &tcp_log_id_head, tlb) == NULL) {
381 kassert_panic("%s:%d: error removing element from tree",
385 TCPID_BUCKET_LOCK_DESTROY(tlb);
386 counter_u64_add(tcp_log_pcb_ids_cur, (int64_t)-1);
387 uma_zfree(tcp_log_id_bucket_zone, tlb);
391 * Call with a referenced and locked bucket.
392 * Will return true if the bucket was freed; otherwise, false.
393 * tlb: The bucket to unreference.
394 * tree_locked: A pointer to the state of the tree lock. If the tree lock
395 * state changes, the function will update it.
396 * inp: If not NULL and the function needs to drop the inp lock to relock the
397 * tree, it will do so. (The caller must ensure inp will not become invalid,
398 * probably by holding a reference to it.)
401 tcp_log_unref_bucket(struct tcp_log_id_bucket *tlb, int *tree_locked,
405 KASSERT(tlb != NULL, ("%s: called with NULL tlb", __func__));
406 KASSERT(tree_locked != NULL, ("%s: called with NULL tree_locked",
409 tcp_log_id_validate_tree_lock(*tree_locked);
412 * Did we hold the last reference on the tlb? If so, we may need
413 * to free it. (Note that we can realistically only execute the
414 * loop twice: once without a write lock and once with a write
417 while (TCPID_BUCKET_UNREF(tlb)) {
419 * We need a write lock on the tree to free this.
420 * If we can upgrade the tree lock, this is "easy". If we
421 * can't upgrade the tree lock, we need to do this the
422 * "hard" way: unwind all our locks and relock everything.
423 * In the meantime, anything could have changed. We even
424 * need to validate that we still need to free the bucket.
426 if (*tree_locked == TREE_RLOCKED && TCPID_TREE_UPGRADE())
427 *tree_locked = TREE_WLOCKED;
428 else if (*tree_locked != TREE_WLOCKED) {
429 TCPID_BUCKET_REF(tlb);
432 TCPID_BUCKET_UNLOCK(tlb);
433 if (*tree_locked == TREE_RLOCKED)
434 TCPID_TREE_RUNLOCK();
435 TCPID_TREE_WLOCK();
436 *tree_locked = TREE_WLOCKED;
437 TCPID_BUCKET_LOCK(tlb);
444 * We have an empty bucket and a write lock on the tree.
445 * Remove the empty bucket.
447 tcp_log_remove_bucket(tlb);
454 * Call with a locked bucket. This function will release the lock on the
455 * bucket before returning.
457 * The caller is responsible for freeing the tp->t_lin/tln node!
459 * Note: either tp, or both tlb and tln, must be supplied.
461 * inp: A pointer to the inp. If the function needs to drop the inp lock to
462 * acquire the tree write lock, it will do so. (The caller must ensure inp
463 * will not become invalid, probably by holding a reference to it.)
464 * tp: A pointer to the tcpcb. (optional; if specified, tlb and tln are ignored)
465 * tlb: A pointer to the bucket. (optional; ignored if tp is specified)
466 * tln: A pointer to the node. (optional; ignored if tp is specified)
467 * tree_locked: A pointer to the state of the tree lock. If the tree lock
468 * state changes, the function will update it.
470 * Will return true if the INP lock was reacquired; otherwise, false.
473 tcp_log_remove_id_node(struct inpcb *inp, struct tcpcb *tp,
474 struct tcp_log_id_bucket *tlb, struct tcp_log_id_node *tln,
477 int orig_tree_locked;
479 KASSERT(tp != NULL || (tlb != NULL && tln != NULL),
480 ("%s: called with tp=%p, tlb=%p, tln=%p", __func__,
482 KASSERT(tree_locked != NULL, ("%s: called with NULL tree_locked",
488 KASSERT(tlb != NULL, ("%s: unexpectedly NULL tlb", __func__));
489 KASSERT(tln != NULL, ("%s: unexpectedly NULL tln", __func__));
492 tcp_log_id_validate_tree_lock(*tree_locked);
493 TCPID_BUCKET_LOCK_ASSERT(tlb);
496 * Remove the node, clear the log bucket and node from the TCPCB, and
497 * decrement the bucket refcount. In the process, if this is the
498 * last reference, the bucket will be freed.
500 SLIST_REMOVE(&tlb->tlb_head, tln, tcp_log_id_node, tln_list);
505 orig_tree_locked = *tree_locked;
506 if (!tcp_log_unref_bucket(tlb, tree_locked, inp))
507 TCPID_BUCKET_UNLOCK(tlb);
508 return (*tree_locked != orig_tree_locked);
511 #define RECHECK_INP_CLEAN(cleanup) do { \
512 if (inp->inp_flags & INP_DROPPED) { \
513 rv = ECONNRESET; \
514 cleanup; \
515 goto done; \
516 } \
517 tp = intotcpcb(inp); \
518 } while (0)
520 #define RECHECK_INP() RECHECK_INP_CLEAN(/* noop */)
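/*
 * Typical use (a sketch; assumes the surrounding function declares an
 * "int rv" and provides a "done:" label, as the functions below do):
 * after re-acquiring the INP lock, revalidate the connection before
 * touching tp again, since the inp may have been dropped while the
 * lock was not held.
 *
 *	INP_WLOCK(inp);
 *	RECHECK_INP();
 *	(tp is valid here; a dropped inp jumps to done with rv set)
 */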
523 tcp_log_grow_tlb(char *tlb_id, struct tcpcb *tp)
526 INP_WLOCK_ASSERT(tptoinpcb(tp));
529 if (V_tcp_perconn_stats_enable == 2 && tp->t_stats == NULL)
530 (void)tcp_stats_sample_rollthedice(tp, tlb_id, strlen(tlb_id));
535 tcp_log_increment_reqcnt(struct tcp_log_id_bucket *tlb)
538 atomic_fetchadd_int(&tlb->tlb_reqcnt, 1);
542 tcp_log_apply_ratio(struct tcpcb *tp, int ratio)
544 struct tcp_log_id_bucket *tlb;
545 struct inpcb *inp = tptoinpcb(tp);
546 uint32_t hash, ratio_hash_thresh;
550 tree_locked = TREE_UNLOCKED;
553 INP_WLOCK_ASSERT(inp);
558 if (ratio)
559 ratio_hash_thresh = max(1, UINT32_MAX / ratio);
560 else
561 ratio_hash_thresh = 0;
562 TCPID_BUCKET_REF(tlb);
563 INP_WUNLOCK(inp);
564 TCPID_BUCKET_LOCK(tlb);
566 hash = hash32_buf(tlb->tlb_id, strlen(tlb->tlb_id), 0);
567 if (hash > ratio_hash_thresh && tp->_t_logstate == TCP_LOG_STATE_OFF &&
568 tlb->tlb_logstate == TCP_LOG_STATE_OFF) {
570 * Ratio decision not to log this log ID (and this connection by
571 * way of association). We only apply a log ratio log disable
572 * decision if it would not interfere with a log enable decision
573 * made elsewhere e.g. tcp_log_selectauto() or setsockopt().
575 tlb->tlb_logstate = TCP_LOG_STATE_RATIO_OFF;
578 (void)tcp_log_state_change(tp, TCP_LOG_STATE_OFF);
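/*
 * Worked example of the threshold math above: with ratio == 4,
 * ratio_hash_thresh = UINT32_MAX / 4. Since hash32_buf() spreads IDs
 * roughly uniformly over the 32-bit range, about 1 in 4 log IDs hash
 * at or below the threshold and keep logging; the other 3 in 4 are
 * moved to TCP_LOG_STATE_RATIO_OFF. Hashing the ID (rather than
 * drawing a random number per connection) keeps the decision
 * consistent across all connections sharing the same log ID.
 */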
583 INP_UNLOCK_ASSERT(inp);
584 if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
585 TCPID_BUCKET_UNLOCK(tlb);
587 if (tree_locked == TREE_WLOCKED) {
588 TCPID_TREE_WLOCK_ASSERT();
589 TCPID_TREE_WUNLOCK();
590 } else if (tree_locked == TREE_RLOCKED) {
591 TCPID_TREE_RLOCK_ASSERT();
592 TCPID_TREE_RUNLOCK();
594 TCPID_TREE_UNLOCK_ASSERT();
600 * Associate the specified tag with a particular TCP log ID.
601 * Called with INPCB locked. Returns with it unlocked.
602 * Returns 0 on success or EOPNOTSUPP if the connection has no TCP log ID.
605 tcp_log_set_tag(struct tcpcb *tp, char *tag)
607 struct inpcb *inp = tptoinpcb(tp);
608 struct tcp_log_id_bucket *tlb;
611 INP_WLOCK_ASSERT(inp);
613 tree_locked = TREE_UNLOCKED;
620 TCPID_BUCKET_REF(tlb);
621 INP_WUNLOCK(inp);
622 TCPID_BUCKET_LOCK(tlb);
623 strlcpy(tlb->tlb_tag, tag, TCP_LOG_TAG_LEN);
624 if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
625 TCPID_BUCKET_UNLOCK(tlb);
627 if (tree_locked == TREE_WLOCKED) {
628 TCPID_TREE_WLOCK_ASSERT();
629 TCPID_TREE_WUNLOCK();
630 } else if (tree_locked == TREE_RLOCKED) {
631 TCPID_TREE_RLOCK_ASSERT();
632 TCPID_TREE_RUNLOCK();
634 TCPID_TREE_UNLOCK_ASSERT();
640 * Set the TCP log ID for a TCPCB.
641 * Called with INPCB locked. Returns with it unlocked.
644 tcp_log_set_id(struct tcpcb *tp, char *id)
646 struct tcp_log_id_bucket *tlb, *tmp_tlb;
647 struct tcp_log_id_node *tln;
648 struct inpcb *inp = tptoinpcb(tp);
650 bool bucket_locked, same;
654 tree_locked = TREE_UNLOCKED;
655 bucket_locked = false;
658 INP_WLOCK_ASSERT(inp);
659 /* See if the ID is unchanged. */
660 same = ((tp->t_lib != NULL && !strcmp(tp->t_lib->tlb_id, id)) ||
661 (tp->t_lib == NULL && *id == 0));
662 if (tp->_t_logstate && STAILQ_FIRST(&tp->t_logs) && !same) {
664 * There are residual logs left; we may
665 * be changing IDs, so dump what we can.
666 */
667 switch (tp->_t_logstate) {
668 case TCP_LOG_STATE_HEAD_AUTO:
669 (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from head at id switch",
672 case TCP_LOG_STATE_TAIL_AUTO:
673 (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from tail at id switch",
676 case TCP_LOG_STATE_CONTINUAL:
677 (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual at id switch",
680 case TCP_LOG_VIA_BBPOINTS:
681 (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from bbpoints at id switch",
687 if (tp->t_lib != NULL) {
688 tcp_log_increment_reqcnt(tp->t_lib);
689 if ((tp->t_lib->tlb_logstate > TCP_LOG_STATE_OFF) &&
690 (tp->t_log_state_set == 0)) {
691 /* Clone in any logging */
693 tp->_t_logstate = tp->t_lib->tlb_logstate;
695 if ((tp->t_lib->tlb_loglimit) &&
696 (tp->t_log_state_set == 0)) {
697 /* We also have a limit set */
699 tp->t_loglimit = tp->t_lib->tlb_loglimit;
707 * If the TCPCB had a previous ID, we need to extricate it from
708 * its old bucket.
709 *
710 * Drop the TCPCB lock and lock the tree and the bucket.
711 * Because this is called in the socket context, we (theoretically)
712 * don't need to worry about the INPCB completely going away
713 * while we are gone.
714 */
715 if (tp->t_lib != NULL) {
716 tlb = tp->t_lib;
717 TCPID_BUCKET_REF(tlb);
720 if (tree_locked == TREE_UNLOCKED) {
721 TCPID_TREE_RLOCK();
722 tree_locked = TREE_RLOCKED;
723 }
724 TCPID_BUCKET_LOCK(tlb);
725 bucket_locked = true;
729 * Unreference the bucket. If our bucket went away, it is no
730 * longer locked or valid.
732 if (tcp_log_unref_bucket(tlb, &tree_locked, inp)) {
733 bucket_locked = false;
737 /* Validate the INP. */
741 * Evaluate whether the bucket changed while we were unlocked.
743 * Possible scenarios here:
744 * 1. Bucket is unchanged and the same one we started with.
745 * 2. The TCPCB no longer has a bucket and our bucket was
746 * freed.
747 * 3. The TCPCB has a new bucket, whether or not ours was freed.
748 * 4. The TCPCB no longer has a bucket and our bucket was
749 * not freed.
751 * In cases 2-4, we will start over. In case 1, we will
752 * proceed here to remove the bucket.
754 if (tlb == NULL || tp->t_lib != tlb) {
755 KASSERT(bucket_locked || tlb == NULL,
756 ("%s: bucket_locked (%d) and tlb (%p) are "
757 "inconsistent", __func__, bucket_locked, tlb));
760 TCPID_BUCKET_UNLOCK(tlb);
761 bucket_locked = false;
768 * Store the (struct tcp_log_id_node) for reuse. Then, remove
769 * it from the bucket. In the process, we may end up relocking.
770 * If so, we need to validate that the INP is still valid, and
771 * the TCPCB entries match what we expect.
773 * We will clear tlb and change the bucket_locked state just
774 * before calling tcp_log_remove_id_node(), since that function
775 * will unlock the bucket.
777 if (tln != NULL)
778 uma_zfree(tcp_log_id_node_zone, tln);
779 tln = tp->t_lin;
780 tlb = NULL;
781 bucket_locked = false;
782 if (tcp_log_remove_id_node(inp, tp, NULL, NULL, &tree_locked)) {
786 * If the TCPCB moved to a new bucket while we had
787 * dropped the lock, restart.
789 if (tp->t_lib != NULL || tp->t_lin != NULL)
790 goto restart;
794 * Yay! We successfully removed the TCPCB from its old
795 * bucket.
796 *
797 * On to bigger and better things...
801 /* At this point, the TCPCB should not be in any bucket. */
802 KASSERT(tp->t_lib == NULL, ("%s: tp->t_lib is not NULL", __func__));
805 * If the new ID is not empty, we need to now assign this TCPCB to a
809 /* Get a new tln, if we don't already have one to reuse. */
811 tln = uma_zalloc(tcp_log_id_node_zone,
822 * Drop the INP lock for a bit. We don't need it, and dropping
823 * it prevents lock order reversals.
827 /* Make sure we have at least a read lock on the tree. */
828 tcp_log_id_validate_tree_lock(tree_locked);
829 if (tree_locked == TREE_UNLOCKED) {
830 TCPID_TREE_RLOCK();
831 tree_locked = TREE_RLOCKED;
832 }
836 * Remember how we constructed (struct tcp_log_id_bucket) so
837 * we can safely cast the id to it for the purposes of finding.
839 KASSERT(tlb == NULL, ("%s:%d tlb unexpectedly non-NULL",
840 __func__, __LINE__));
841 tmp_tlb = RB_FIND(tcp_log_id_tree, &tcp_log_id_head,
842 (struct tcp_log_id_bucket *) id);
845 * If we didn't find a matching bucket, we need to add a new
846 * one. This requires a write lock. But, of course, we will
847 * need to recheck some things when we re-acquire the lock.
849 if (tmp_tlb == NULL && tree_locked != TREE_WLOCKED) {
850 tree_locked = TREE_WLOCKED;
851 if (!TCPID_TREE_UPGRADE()) {
852 TCPID_TREE_RUNLOCK();
853 TCPID_TREE_WLOCK();
856 * The tree may have changed while we were
857 * unlocked.
858 */
863 /* If we need to add a new bucket, do it now. */
864 if (tmp_tlb == NULL) {
865 /* Allocate new bucket. */
866 tlb = uma_zalloc(tcp_log_id_bucket_zone, M_NOWAIT);
871 counter_u64_add(tcp_log_pcb_ids_cur, 1);
872 counter_u64_add(tcp_log_pcb_ids_tot, 1);
874 if ((tcp_log_auto_all == false) &&
875 tcp_log_auto_mode &&
876 tcp_log_selectauto()) {
877 /* Save off the log state */
878 tlb->tlb_logstate = tcp_log_auto_mode;
880 tlb->tlb_logstate = TCP_LOG_STATE_OFF;
881 tlb->tlb_loglimit = 0;
882 tlb->tlb_tag[0] = '\0'; /* Default to an empty tag. */
885 * Copy the ID to the bucket.
886 * NB: Don't use strlcpy() unless you are sure
887 * we've always validated NULL termination.
889 * TODO: When I'm done writing this, see if we
890 * have correctly validated NULL termination and
891 * can use strlcpy(). :-)
893 strncpy(tlb->tlb_id, id, TCP_LOG_ID_LEN - 1);
894 tlb->tlb_id[TCP_LOG_ID_LEN - 1] = '\0';
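/*
 * A minimal illustration of the strncpy()/NUL dance above: if "id" is
 * TCP_LOG_ID_LEN - 1 bytes or longer, strncpy() alone would leave the
 * destination unterminated, so the explicit store guarantees tlb_id is
 * always a valid C string for strcmp()/strlen():
 *
 *	char dst[TCP_LOG_ID_LEN];
 *	strncpy(dst, src, TCP_LOG_ID_LEN - 1);
 *	dst[TCP_LOG_ID_LEN - 1] = '\0';
 */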
897 * Take the refcount for the first node and go ahead
898 * and lock this. Note that we zero the tlb_mtx
899 * structure, since 0xdeadc0de flips the right bits
900 * for the code to think that this mutex has already
901 * been initialized. :-(
903 SLIST_INIT(&tlb->tlb_head);
904 refcount_init(&tlb->tlb_refcnt, 1);
906 memset(&tlb->tlb_mtx, 0, sizeof(struct mtx));
907 TCPID_BUCKET_LOCK_INIT(tlb);
908 TCPID_BUCKET_LOCK(tlb);
909 bucket_locked = true;
911 #define FREE_NEW_TLB() do { \
912 TCPID_BUCKET_LOCK_DESTROY(tlb); \
913 uma_zfree(tcp_log_id_bucket_zone, tlb); \
914 counter_u64_add(tcp_log_pcb_ids_cur, (int64_t)-1); \
915 counter_u64_add(tcp_log_pcb_ids_tot, (int64_t)-1); \
916 bucket_locked = false; \
917 } while (0)
920 * Relock the INP and make sure we are still
924 RECHECK_INP_CLEAN(FREE_NEW_TLB());
925 if (tp->t_lib != NULL) {
930 /* Add the new bucket to the tree. */
931 tmp_tlb = RB_INSERT(tcp_log_id_tree, &tcp_log_id_head,
933 KASSERT(tmp_tlb == NULL,
934 ("%s: Unexpected conflicting bucket (%p) while "
935 "adding new bucket (%p)", __func__, tmp_tlb, tlb));
938 * If we found a conflicting bucket, free the new
939 * one we made and fall through to use the existing
940 * bucket.
941 */
942 if (tmp_tlb != NULL) {
949 /* If we found an existing bucket, use it. */
950 if (tmp_tlb != NULL) {
951 tlb = tmp_tlb;
952 TCPID_BUCKET_LOCK(tlb);
953 bucket_locked = true;
956 * Relock the INP and make sure we are still
959 INP_UNLOCK_ASSERT(inp);
962 if (tp->t_lib != NULL) {
963 TCPID_BUCKET_UNLOCK(tlb);
964 bucket_locked = false;
969 /* Take a reference on the bucket. */
970 TCPID_BUCKET_REF(tlb);
972 /* Record the request. */
973 tcp_log_increment_reqcnt(tlb);
976 tcp_log_grow_tlb(tlb->tlb_id, tp);
978 /* Add the new node to the list. */
979 SLIST_INSERT_HEAD(&tlb->tlb_head, tln, tln_list);
980 tp->t_lib = tlb;
981 tp->t_lin = tln;
982 if (tp->t_lib->tlb_logstate > TCP_LOG_STATE_OFF) {
983 /* Clone in any logging */
985 tp->_t_logstate = tp->t_lib->tlb_logstate;
987 if (tp->t_lib->tlb_loglimit) {
988 /* The loglimit too */
990 tp->t_loglimit = tp->t_lib->tlb_loglimit;
998 /* Unlock things, as needed, and return. */
1001 INP_UNLOCK_ASSERT(inp);
1002 if (bucket_locked) {
1003 TCPID_BUCKET_LOCK_ASSERT(tlb);
1004 TCPID_BUCKET_UNLOCK(tlb);
1005 } else if (tlb != NULL)
1006 TCPID_BUCKET_UNLOCK_ASSERT(tlb);
1007 if (tree_locked == TREE_WLOCKED) {
1008 TCPID_TREE_WLOCK_ASSERT();
1009 TCPID_TREE_WUNLOCK();
1010 } else if (tree_locked == TREE_RLOCKED) {
1011 TCPID_TREE_RLOCK_ASSERT();
1012 TCPID_TREE_RUNLOCK();
1014 TCPID_TREE_UNLOCK_ASSERT();
1016 uma_zfree(tcp_log_id_node_zone, tln);
1021 * Get the TCP log ID for a TCPCB.
1022 * Called with INPCB locked.
1023 * 'buf' must point to a buffer that is at least TCP_LOG_ID_LEN bytes long.
1024 * Returns number of bytes copied.
1027 tcp_log_get_id(struct tcpcb *tp, char *buf)
1031 INP_LOCK_ASSERT(tptoinpcb(tp));
1032 if (tp->t_lib != NULL) {
1033 len = strlcpy(buf, tp->t_lib->tlb_id, TCP_LOG_ID_LEN);
1034 KASSERT(len < TCP_LOG_ID_LEN,
1035 ("%s:%d: tp->t_lib->tlb_id too long (%zu)",
1036 __func__, __LINE__, len));
1045 * Get the tag associated with the TCPCB's log ID.
1046 * Called with INPCB locked. Returns with it unlocked.
1047 * 'buf' must point to a buffer that is at least TCP_LOG_TAG_LEN bytes long.
1048 * Returns number of bytes copied.
1051 tcp_log_get_tag(struct tcpcb *tp, char *buf)
1053 struct inpcb *inp = tptoinpcb(tp);
1054 struct tcp_log_id_bucket *tlb;
1058 INP_WLOCK_ASSERT(inp);
1060 tree_locked = TREE_UNLOCKED;
1064 TCPID_BUCKET_REF(tlb);
1065 INP_WUNLOCK(inp);
1066 TCPID_BUCKET_LOCK(tlb);
1067 len = strlcpy(buf, tlb->tlb_tag, TCP_LOG_TAG_LEN);
1068 KASSERT(len < TCP_LOG_TAG_LEN,
1069 ("%s:%d: tp->t_lib->tlb_tag too long (%zu)",
1070 __func__, __LINE__, len));
1071 if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
1072 TCPID_BUCKET_UNLOCK(tlb);
1074 if (tree_locked == TREE_WLOCKED) {
1075 TCPID_TREE_WLOCK_ASSERT();
1076 TCPID_TREE_WUNLOCK();
1077 } else if (tree_locked == TREE_RLOCKED) {
1078 TCPID_TREE_RLOCK_ASSERT();
1079 TCPID_TREE_RUNLOCK();
1081 TCPID_TREE_UNLOCK_ASSERT();
1092 * Get number of connections with the same log ID.
1093 * Log ID is taken from given TCPCB.
1094 * Called with INPCB locked.
1097 tcp_log_get_id_cnt(struct tcpcb *tp)
1100 INP_WLOCK_ASSERT(tptoinpcb(tp));
1101 return ((tp->t_lib == NULL) ? 0 : tp->t_lib->tlb_refcnt);
1104 #ifdef TCPLOG_DEBUG_RINGBUF
1106 * Functions/macros to increment/decrement reference count for a log
1107 * entry. This should catch when we do a double-free/double-remove or
1111 _tcp_log_entry_refcnt_add(struct tcp_log_mem *log_entry, const char *func,
1116 refcnt = atomic_fetchadd_int(&log_entry->tlm_refcnt, 1);
1117 if (refcnt != 0)
1118 panic("%s:%d: log_entry(%p)->tlm_refcnt is %d (expected 0)",
1119 func, line, log_entry, refcnt);
1121 #define tcp_log_entry_refcnt_add(l) \
1122 _tcp_log_entry_refcnt_add((l), __func__, __LINE__)
1125 _tcp_log_entry_refcnt_rem(struct tcp_log_mem *log_entry, const char *func,
1130 refcnt = atomic_fetchadd_int(&log_entry->tlm_refcnt, -1);
1131 if (refcnt != 1)
1132 panic("%s:%d: log_entry(%p)->tlm_refcnt is %d (expected 1)",
1133 func, line, log_entry, refcnt);
1135 #define tcp_log_entry_refcnt_rem(l) \
1136 _tcp_log_entry_refcnt_rem((l), __func__, __LINE__)
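/*
 * Example of the bug class these wrappers catch: removing the same
 * entry twice drops tlm_refcnt from 1 to 0 and then from 0 to -1, and
 * the second call panics because it expected to see 1:
 *
 *	tcp_log_entry_refcnt_rem(log_entry);	refcnt: 1 -> 0
 *	tcp_log_entry_refcnt_rem(log_entry);	refcnt: 0 -> -1, panic
 */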
1138 #else /* !TCPLOG_DEBUG_RINGBUF */
1140 #define tcp_log_entry_refcnt_add(l)
1141 #define tcp_log_entry_refcnt_rem(l)
1146 * Cleanup after removing a log entry, but only decrement the count if we
1147 * are running INVARIANTS.
1150 tcp_log_free_log_common(struct tcp_log_mem *log_entry, int *count __unused)
1153 uma_zfree(tcp_log_zone, log_entry);
1154 #ifdef INVARIANTS
1155 (*count)--;
1156 KASSERT(*count >= 0,
1157 ("%s: count unexpectedly negative", __func__));
1158 #endif
1162 tcp_log_free_entries(struct tcp_log_stailq *head, int *count)
1164 struct tcp_log_mem *log_entry;
1166 /* Free the entries. */
1167 while ((log_entry = STAILQ_FIRST(head)) != NULL) {
1168 STAILQ_REMOVE_HEAD(head, tlm_queue);
1169 tcp_log_entry_refcnt_rem(log_entry);
1170 tcp_log_free_log_common(log_entry, count);
1174 /* Cleanup after removing a log entry. */
1176 tcp_log_remove_log_cleanup(struct tcpcb *tp, struct tcp_log_mem *log_entry)
1178 uma_zfree(tcp_log_zone, log_entry);
1179 tp->t_lognum--;
1180 KASSERT(tp->t_lognum >= 0,
1181 ("%s: tp->t_lognum unexpectedly negative", __func__));
1184 /* Remove a log entry from the head of a list. */
1186 tcp_log_remove_log_head(struct tcpcb *tp, struct tcp_log_mem *log_entry)
1189 KASSERT(log_entry == STAILQ_FIRST(&tp->t_logs),
1190 ("%s: attempt to remove non-HEAD log entry", __func__));
1191 STAILQ_REMOVE_HEAD(&tp->t_logs, tlm_queue);
1192 tcp_log_entry_refcnt_rem(log_entry);
1193 tcp_log_remove_log_cleanup(tp, log_entry);
1196 #ifdef TCPLOG_DEBUG_RINGBUF
1198 * Initialize the log entry's reference count, which we want to
1199 * survive allocations.
1202 tcp_log_zone_init(void *mem, int size, int flags __unused)
1204 struct tcp_log_mem *tlm;
1206 KASSERT(size >= sizeof(struct tcp_log_mem),
1207 ("%s: unexpectedly short (%d) allocation", __func__, size));
1208 tlm = (struct tcp_log_mem *)mem;
1209 tlm->tlm_refcnt = 0;
1214 * Double check that the refcnt is zero on allocation and return.
1217 tcp_log_zone_ctor(void *mem, int size, void *args __unused, int flags __unused)
1219 struct tcp_log_mem *tlm;
1221 KASSERT(size >= sizeof(struct tcp_log_mem),
1222 ("%s: unexpectedly short (%d) allocation", __func__, size));
1223 tlm = (struct tcp_log_mem *)mem;
1224 if (tlm->tlm_refcnt != 0)
1225 panic("%s:%d: tlm(%p)->tlm_refcnt is %d (expected 0)",
1226 __func__, __LINE__, tlm, tlm->tlm_refcnt);
1231 tcp_log_zone_dtor(void *mem, int size, void *args __unused)
1233 struct tcp_log_mem *tlm;
1235 KASSERT(size >= sizeof(struct tcp_log_mem),
1236 ("%s: unexpectedly short (%d) allocation", __func__, size));
1237 tlm = (struct tcp_log_mem *)mem;
1238 if (tlm->tlm_refcnt != 0)
1239 panic("%s:%d: tlm(%p)->tlm_refcnt is %d (expected 0)",
1240 __func__, __LINE__, tlm, tlm->tlm_refcnt);
1242 #endif /* TCPLOG_DEBUG_RINGBUF */
1244 /* Do global initialization. */
1245 void
1246 tcp_log_init(void)
1247 {
1249 tcp_log_zone = uma_zcreate("tcp_log", sizeof(struct tcp_log_mem),
1250 #ifdef TCPLOG_DEBUG_RINGBUF
1251 tcp_log_zone_ctor, tcp_log_zone_dtor, tcp_log_zone_init,
1255 NULL, UMA_ALIGN_PTR, 0);
1256 (void)uma_zone_set_max(tcp_log_zone, TCP_LOG_BUF_DEFAULT_GLOBAL_LIMIT);
1257 tcp_log_id_bucket_zone = uma_zcreate("tcp_log_id_bucket",
1258 sizeof(struct tcp_log_id_bucket), NULL, NULL, NULL, NULL,
1260 tcp_log_id_node_zone = uma_zcreate("tcp_log_id_node",
1261 sizeof(struct tcp_log_id_node), NULL, NULL, NULL, NULL,
1263 #ifdef TCPLOG_DEBUG_COUNTERS
1264 tcp_log_queued = counter_u64_alloc(M_WAITOK);
1265 tcp_log_que_fail1 = counter_u64_alloc(M_WAITOK);
1266 tcp_log_que_fail2 = counter_u64_alloc(M_WAITOK);
1267 tcp_log_que_fail3 = counter_u64_alloc(M_WAITOK);
1268 tcp_log_que_fail4 = counter_u64_alloc(M_WAITOK);
1269 tcp_log_que_fail5 = counter_u64_alloc(M_WAITOK);
1270 tcp_log_que_copyout = counter_u64_alloc(M_WAITOK);
1271 tcp_log_que_read = counter_u64_alloc(M_WAITOK);
1272 tcp_log_que_freed = counter_u64_alloc(M_WAITOK);
1274 tcp_log_pcb_ids_cur = counter_u64_alloc(M_WAITOK);
1275 tcp_log_pcb_ids_tot = counter_u64_alloc(M_WAITOK);
1277 rw_init_flags(&tcp_id_tree_lock, "TCP ID tree", RW_NEW);
1278 mtx_init(&tcp_log_expireq_mtx, "TCP log expireq", NULL, MTX_DEF);
1279 callout_init(&tcp_log_expireq_callout, 1);
1282 /* Do per-TCPCB initialization. */
1284 tcp_log_tcpcbinit(struct tcpcb *tp)
1287 /* A new TCPCB should start out zero-initialized. */
1288 STAILQ_INIT(&tp->t_logs);
1291 * If we are doing auto-capturing, figure out whether we will capture
1292 * this session.
1293 */
1294 tp->t_loglimit = tcp_log_session_limit;
1295 if ((tcp_log_auto_all == true) &&
1296 tcp_log_auto_mode &&
1297 tcp_log_selectauto()) {
1298 tp->_t_logstate = tcp_log_auto_mode;
1299 tp->t_flags2 |= TF2_LOG_AUTO;
1303 /* Expiry timer callout: remove entries whose time has come. */
1305 tcp_log_expire(void *unused __unused)
1307 struct tcp_log_id_bucket *tlb;
1308 struct tcp_log_id_node *tln;
1309 sbintime_t expiry_limit;
1312 TCPLOG_EXPIREQ_LOCK();
1313 if (callout_pending(&tcp_log_expireq_callout)) {
1314 /* Callout was reset. */
1315 TCPLOG_EXPIREQ_UNLOCK();
1320 * Process entries until we reach one that expires too far in the
1321 * future. Look one second in the future.
1323 expiry_limit = getsbinuptime() + SBT_1S;
1324 tree_locked = TREE_UNLOCKED;
1326 while ((tln = STAILQ_FIRST(&tcp_log_expireq_head)) != NULL &&
1327 tln->tln_expiretime <= expiry_limit) {
1328 if (!callout_active(&tcp_log_expireq_callout)) {
1330 * Callout was stopped. I guess we should
1331 * just quit at this point.
1333 TCPLOG_EXPIREQ_UNLOCK();
1338 * Remove the node from the head of the list and unlock
1339 * the list. Change the expiry time to SBT_MAX as a signal
1340 * to other threads that we now own this.
1342 STAILQ_REMOVE_HEAD(&tcp_log_expireq_head, tln_expireq);
1343 tln->tln_expiretime = SBT_MAX;
1344 TCPLOG_EXPIREQ_UNLOCK();
1347 * Remove the node from the bucket.
1349 tlb = tln->tln_bucket;
1350 TCPID_BUCKET_LOCK(tlb);
1351 if (tcp_log_remove_id_node(NULL, NULL, tlb, tln, &tree_locked)) {
1352 tcp_log_id_validate_tree_lock(tree_locked);
1353 if (tree_locked == TREE_WLOCKED)
1354 TCPID_TREE_WUNLOCK();
1355 else
1356 TCPID_TREE_RUNLOCK();
1357 tree_locked = TREE_UNLOCKED;
1360 /* Drop the INP reference. */
1361 INP_WLOCK(tln->tln_inp);
1362 if (!in_pcbrele_wlocked(tln->tln_inp))
1363 INP_WUNLOCK(tln->tln_inp);
1365 /* Free the log records. */
1366 tcp_log_free_entries(&tln->tln_entries, &tln->tln_count);
1368 /* Free the node. */
1369 uma_zfree(tcp_log_id_node_zone, tln);
1371 /* Relock the expiry queue. */
1372 TCPLOG_EXPIREQ_LOCK();
1376 * We've expired all the entries we can. Do we need to reschedule
1377 * the callout?
1378 */
1379 callout_deactivate(&tcp_log_expireq_callout);
1382 * Get max(now + TCP_LOG_EXPIRE_INTVL, tln->tln_expiretime) and
1383 * set the next callout to that. (This helps ensure we generally
1384 * run the callout no more often than desired.)
1386 expiry_limit = getsbinuptime() + TCP_LOG_EXPIRE_INTVL;
1387 if (expiry_limit < tln->tln_expiretime)
1388 expiry_limit = tln->tln_expiretime;
1389 callout_reset_sbt(&tcp_log_expireq_callout, expiry_limit,
1390 SBT_1S, tcp_log_expire, NULL, C_ABSOLUTE);
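/*
 * Concrete example of the rescheduling math above: if now is T and the
 * next node expires at T + 2s, the callout is still set for T + 5s
 * (TCP_LOG_EXPIRE_INTVL), since max(T + 5s, T + 2s) = T + 5s; that node
 * is simply processed a little late on the next run. Only when the
 * next node expires beyond T + 5s does its own expiry time win.
 */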
1394 TCPLOG_EXPIREQ_UNLOCK();
1399 * Move log data from the TCPCB to a new node. This will reset the TCPCB log
1400 * entries and log count; however, it will not touch other things from the
1401 * TCPCB (e.g. t_lin, t_lib).
1403 * NOTE: Must hold a lock on the INP.
1406 tcp_log_move_tp_to_node(struct tcpcb *tp, struct tcp_log_id_node *tln)
1408 struct inpcb *inp = tptoinpcb(tp);
1410 INP_WLOCK_ASSERT(inp);
1412 tln->tln_ie = inp->inp_inc.inc_ie;
1413 if (inp->inp_inc.inc_flags & INC_ISIPV6)
1414 tln->tln_af = AF_INET6;
1415 else
1416 tln->tln_af = AF_INET;
1417 tln->tln_entries = tp->t_logs;
1418 tln->tln_count = tp->t_lognum;
1419 tln->tln_bucket = tp->t_lib;
1421 /* Clear information from the PCB. */
1422 STAILQ_INIT(&tp->t_logs);
1423 tp->t_lognum = 0;
1426 /* Do per-TCPCB cleanup */
1428 tcp_log_tcpcbfini(struct tcpcb *tp)
1430 struct tcp_log_id_node *tln, *tln_first;
1431 struct tcp_log_mem *log_entry;
1432 sbintime_t callouttime;
1435 INP_WLOCK_ASSERT(tptoinpcb(tp));
1436 if (tp->_t_logstate) {
1437 union tcp_log_stackspecific log;
1439 #ifdef TCP_ACCOUNTING
1440 struct tcp_log_buffer *lgb;
1443 memset(&log, 0, sizeof(log));
1444 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
1445 for (i = 0; i < TCP_NUM_CNT_COUNTERS; i++) {
1446 log.u_raw.u64_flex[i] = tp->tcp_cnt_counters[i];
1448 lgb = tcp_log_event(tp, NULL,
1451 TCP_LOG_ACCOUNTING, 0,
1452 0, &log, false, NULL, NULL, 0, &tv);
1454 lgb->tlb_flex1 = TCP_NUM_CNT_COUNTERS;
1458 for (i = 0; i < TCP_NUM_CNT_COUNTERS; i++) {
1459 log.u_raw.u64_flex[i] = tp->tcp_proc_time[i];
1461 lgb = tcp_log_event(tp, NULL,
1464 TCP_LOG_ACCOUNTING, 0,
1465 0, &log, false, NULL, NULL, 0, &tv);
1467 lgb->tlb_flex1 = TCP_NUM_CNT_COUNTERS;
1473 log.u_bbr.timeStamp = tcp_get_usecs(&tv);
1474 log.u_bbr.cur_del_rate = tp->t_end_info;
1475 (void)tcp_log_event(tp, NULL,
1479 0, &log, false, NULL, NULL, 0, &tv);
1482 * If we were gathering packets to be automatically dumped, try to do
1483 * it now. If this succeeds, the log information in the TCPCB will be
1484 * cleared. Otherwise, we'll handle the log information as we do
1485 * for any other closing connection.
1486 */
1487 switch (tp->_t_logstate) {
1488 case TCP_LOG_STATE_HEAD_AUTO:
1489 (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from head",
1492 case TCP_LOG_STATE_TAIL_AUTO:
1493 (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from tail",
1496 case TCP_LOG_VIA_BBPOINTS:
1497 (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from bbpoints",
1500 case TCP_LOG_STATE_CONTINUAL:
1501 (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
1507 * There are two ways we could keep logs: per-socket or per-ID. If
1508 * we are tracking logs with an ID, then the logs survive the
1509 * destruction of the TCPCB.
1511 * If the TCPCB is associated with an ID node, move the logs from the
1512 * TCPCB to the ID node. In theory, this is safe, for reasons which I
1513 * will now explain for my own benefit when I next need to figure out
1514 * this code.
1515 *
1516 * We own the INP lock. Therefore, no one else can change the contents
1517 * of this node (Rule C). Further, no one can remove this node from
1518 * the bucket while we hold the lock (Rule D). Basically, no one can
1519 * mess with this node. That leaves two states in which we could be:
1521 * 1. Another thread is currently waiting to acquire the INP lock, with
1522 * plans to do something with this node. When we drop the INP lock,
1523 * they will have a chance to do that. They will recheck the
1524 * tln_closed field (see note to Rule C) and then acquire the
1525 * bucket lock before proceeding further.
1527 * 2. Another thread will try to acquire a lock at some point in the
1528 * future. If they try to acquire a lock before we set the
1529 * tln_closed field, they will follow state #1. If they try to
1530 * acquire a lock after we set the tln_closed field, they will be
1531 * able to make changes to the node, at will, following Rule C.
1533 * Therefore, we currently own this node and can make any changes
1534 * we want. But, as soon as we set the tln_closed field to true, we
1535 * have effectively dropped our lock on the node. (For this reason, we
1536 * also need to make sure our writes are ordered correctly. An atomic
1537 * operation with "release" semantics should be sufficient.)
1540 if (tp->t_lin != NULL) {
1541 struct inpcb *inp = tptoinpcb(tp);
1543 /* Copy the relevant information to the log entry. */
1544 tln = tp->t_lin;
1545 KASSERT(tln->tln_inp == inp,
1546 ("%s: Mismatched inp (tln->tln_inp=%p, tp inpcb=%p)",
1547 __func__, tln->tln_inp, inp));
1548 tcp_log_move_tp_to_node(tp, tln);
1550 /* Clear information from the PCB. */
1551 tp->t_lin = NULL;
1552 tp->t_lib = NULL;
1555 * Take a reference on the INP. This ensures that the INP
1556 * remains valid while the node is on the expiry queue. This
1557 * ensures the INP is valid for other threads that may be
1558 * racing to lock this node when we move it to the expiry
1559 * queue.
1560 */
1561 in_pcbref(inp);
1564 * Store the entry on the expiry list. The exact behavior
1565 * depends on whether we have entries to keep. If so, we
1566 * put the entry at the tail of the list and expire in
1567 * TCP_LOG_EXPIRE_TIME. Otherwise, we expire "now" and put
1568 * the entry at the head of the list. (Handling the cleanup
1569 * via the expiry timer lets us avoid locking messiness here.)
1571 tln->tln_expiretime = getsbinuptime();
1572 TCPLOG_EXPIREQ_LOCK();
1573 if (tln->tln_count) {
1574 tln->tln_expiretime += TCP_LOG_EXPIRE_TIME;
1575 if (STAILQ_EMPTY(&tcp_log_expireq_head) &&
1576 !callout_active(&tcp_log_expireq_callout)) {
1578 * We are adding the first entry and a callout
1579 * is not currently scheduled; therefore, we
1580 * need to schedule one.
1582 callout_reset_sbt(&tcp_log_expireq_callout,
1583 tln->tln_expiretime, SBT_1S, tcp_log_expire,
1586 STAILQ_INSERT_TAIL(&tcp_log_expireq_head, tln,
1589 callouttime = tln->tln_expiretime +
1590 TCP_LOG_EXPIRE_INTVL;
1591 tln_first = STAILQ_FIRST(&tcp_log_expireq_head);
1593 if ((tln_first == NULL ||
1594 callouttime < tln_first->tln_expiretime) &&
1595 (callout_pending(&tcp_log_expireq_callout) ||
1596 !callout_active(&tcp_log_expireq_callout))) {
1598 * The list is empty, or we want to run the
1599 * expire code before the first entry's timer
1600 * fires. Also, we are in a case where a callout
1601 * is not actively running. We want to reset
1602 * the callout to occur sooner.
1604 callout_reset_sbt(&tcp_log_expireq_callout,
1605 callouttime, SBT_1S, tcp_log_expire, NULL,
1610 * Insert to the head, or just after the head, as
1611 * appropriate. (This might result in small
1612 * mis-orderings as a bunch of "expire now" entries
1613 * gather at the start of the list, but that should
1614 * not produce big problems, since the expire timer
1615 * will walk through all of them.)
1617 if (tln_first == NULL ||
1618 tln->tln_expiretime < tln_first->tln_expiretime)
1619 STAILQ_INSERT_HEAD(&tcp_log_expireq_head, tln,
1622 STAILQ_INSERT_AFTER(&tcp_log_expireq_head,
1623 tln_first, tln, tln_expireq);
1625 TCPLOG_EXPIREQ_UNLOCK();
1628 * We are done messing with the tln. After this point, we
1629 * can't touch it. (Note that the "release" semantics should
1630 * be included with the TCPLOG_EXPIREQ_UNLOCK() call above.
1631 * Therefore, they should be unnecessary here. However, it
1632 * seems like a good idea to include them anyway, since we
1633 * really are releasing a lock here.)
1635 atomic_store_rel_int(&tln->tln_closed, 1);
1637 /* Remove log entries. */
1638 while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
1639 tcp_log_remove_log_head(tp, log_entry);
1640 KASSERT(tp->t_lognum == 0,
1641 ("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
1642 __func__, tp->t_lognum));
1646 * Change the log state to off (just in case anything tries to sneak
1647 * in a last-minute log).
1649 tp->_t_logstate = TCP_LOG_STATE_OFF;
1653 tcp_log_purge_tp_logbuf(struct tcpcb *tp)
1655 struct tcp_log_mem *log_entry;
1657 INP_WLOCK_ASSERT(tptoinpcb(tp));
1658 if (tp->t_lognum == 0)
1659 return;
1661 while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
1662 tcp_log_remove_log_head(tp, log_entry);
1663 KASSERT(tp->t_lognum == 0,
1664 ("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
1665 __func__, tp->t_lognum));
1666 tp->_t_logstate = TCP_LOG_STATE_OFF;
1670 * This logs an event for a TCP socket. Normally, this is called via
1671 * TCP_LOG_EVENT or TCP_LOG_EVENT_VERBOSE. See the documentation for
1672 * those macros for the meanings of the arguments.
1673 */
1675 struct tcp_log_buffer *
1676 tcp_log_event(struct tcpcb *tp, struct tcphdr *th, struct sockbuf *rxbuf,
1677 struct sockbuf *txbuf, uint8_t eventid, int errornum, uint32_t len,
1678 union tcp_log_stackspecific *stackinfo, int th_hostorder,
1679 const char *output_caller, const char *func, int line, const struct timeval *itv)
1681 struct tcp_log_mem *log_entry;
1682 struct tcp_log_buffer *log_buf;
1683 int attempt_count = 0;
1684 struct tcp_log_verbose *log_verbose;
1687 KASSERT((func == NULL && line == 0) || (func != NULL && line > 0),
1688 ("%s called with inconsistent func (%p) and line (%d) arguments",
1689 __func__, func, line));
1691 INP_WLOCK_ASSERT(tptoinpcb(tp));
1692 if (tcp_disable_all_bb_logs) {
1694 * The global BB logging shutdown
1695 * switch has been thrown. Call
1696 * the purge function, which frees
1697 * the logs and turns off logging
1698 * for this connection.
1700 tcp_log_purge_tp_logbuf(tp);
1703 KASSERT(tp->_t_logstate == TCP_LOG_STATE_HEAD ||
1704 tp->_t_logstate == TCP_LOG_STATE_TAIL ||
1705 tp->_t_logstate == TCP_LOG_STATE_CONTINUAL ||
1706 tp->_t_logstate == TCP_LOG_STATE_HEAD_AUTO ||
1707 tp->_t_logstate == TCP_LOG_VIA_BBPOINTS ||
1708 tp->_t_logstate == TCP_LOG_STATE_TAIL_AUTO,
1709 ("%s called with unexpected tp->_t_logstate (%d)", __func__,
1713 * Get the serial number. We do this early so it will
1714 * increment even if we end up skipping the log entry for some
1715 * reason.
1716 */
1717 logsn = tp->t_logsn++;
1720 * Can we get a new log entry? If so, increment the lognum counter
1721 * here.
1722 */
1724 if (tp->t_lognum < tp->t_loglimit) {
1725 if ((log_entry = uma_zalloc(tcp_log_zone, M_NOWAIT)) != NULL)
1726 tp->t_lognum++;
1727 } else
1728 log_entry = NULL;
1730 /* Do we need to try to reuse? */
1731 if (log_entry == NULL) {
1733 * Sacrifice auto-logged sessions without a log ID if
1734 * tcp_log_auto_all is false. (If they don't have a log
1735 * ID by now, it is probable that either they won't get one
1736 * or we are resource-constrained.)
1738 if (tp->t_lib == NULL && (tp->t_flags2 & TF2_LOG_AUTO) &&
1739 !tcp_log_auto_all) {
1740 if (tcp_log_state_change(tp, TCP_LOG_STATE_CLEAR)) {
1742 panic("%s:%d: tcp_log_state_change() failed "
1743 "to set tp %p to TCP_LOG_STATE_CLEAR",
1744 __func__, __LINE__, tp);
1746 tp->_t_logstate = TCP_LOG_STATE_OFF;
1751 * If we are in TCP_LOG_STATE_HEAD_AUTO state, try to dump
1752 * the buffers. If successful, deactivate tracing. Otherwise,
1753 * leave it active so we will retry.
1755 if (tp->_t_logstate == TCP_LOG_STATE_HEAD_AUTO &&
1756 !tcp_log_dump_tp_logbuf(tp, "auto-dumped from head",
1758 tp->_t_logstate = TCP_LOG_STATE_OFF;
1760 } else if ((tp->_t_logstate == TCP_LOG_STATE_CONTINUAL) &&
1761 !tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
1763 if (attempt_count == 0) {
1767 #ifdef TCPLOG_DEBUG_COUNTERS
1768 counter_u64_add(tcp_log_que_fail4, 1);
1772 } else if ((tp->_t_logstate == TCP_LOG_VIA_BBPOINTS) &&
1773 !tcp_log_dump_tp_logbuf(tp, "auto-dumped from bbpoints",
1775 if (attempt_count == 0) {
1779 #ifdef TCPLOG_DEBUG_COUNTERS
1780 counter_u64_add(tcp_log_que_fail4, 1);
1783 } else if (tp->_t_logstate == TCP_LOG_STATE_HEAD_AUTO)
1786 /* If in HEAD state, just deactivate the tracing and return. */
1787 if (tp->_t_logstate == TCP_LOG_STATE_HEAD) {
1788 tp->_t_logstate = TCP_LOG_STATE_OFF;
1792 * Get a buffer to reuse. If that fails, just give up.
1793 * (We can't log anything without a buffer in which to
1794 * put it.)
1795 *
1796 * Note that we don't change the t_lognum counter
1797 * here. Because we are re-using the buffer, the total
1798 * number won't change.
1800 if ((log_entry = STAILQ_FIRST(&tp->t_logs)) == NULL)
1801 return (NULL);
1802 STAILQ_REMOVE_HEAD(&tp->t_logs, tlm_queue);
1803 tcp_log_entry_refcnt_rem(log_entry);
1806 KASSERT(log_entry != NULL,
1807 ("%s: log_entry unexpectedly NULL", __func__));
1809 /* Extract the log buffer and verbose buffer pointers. */
1810 log_buf = &log_entry->tlm_buf;
1811 log_verbose = &log_entry->tlm_v;
1813 /* Basic entries. */
1814 if (itv == NULL)
1815 microuptime(&log_buf->tlb_tv);
1816 else
1817 memcpy(&log_buf->tlb_tv, itv, sizeof(struct timeval));
1818 log_buf->tlb_ticks = ticks;
1819 log_buf->tlb_sn = logsn;
1820 log_buf->tlb_stackid = tp->t_fb->tfb_id;
1821 log_buf->tlb_eventid = eventid;
1822 log_buf->tlb_eventflags = 0;
1823 log_buf->tlb_errno = errornum;
1825 /* Socket buffers */
1826 if (rxbuf != NULL) {
1827 log_buf->tlb_eventflags |= TLB_FLAG_RXBUF;
1828 log_buf->tlb_rxbuf.tls_sb_acc = rxbuf->sb_acc;
1829 log_buf->tlb_rxbuf.tls_sb_ccc = rxbuf->sb_ccc;
1830 log_buf->tlb_rxbuf.tls_sb_spare = 0;
1831 } else {
1832 log_buf->tlb_rxbuf.tls_sb_acc = 0;
1833 log_buf->tlb_rxbuf.tls_sb_ccc = 0;
1835 if (txbuf != NULL) {
1836 log_buf->tlb_eventflags |= TLB_FLAG_TXBUF;
1837 log_buf->tlb_txbuf.tls_sb_acc = txbuf->sb_acc;
1838 log_buf->tlb_txbuf.tls_sb_ccc = txbuf->sb_ccc;
1839 log_buf->tlb_txbuf.tls_sb_spare = 0;
1840 } else {
1841 log_buf->tlb_txbuf.tls_sb_acc = 0;
1842 log_buf->tlb_txbuf.tls_sb_ccc = 0;
1844 /* Copy values from tp to the log entry. */
1845 #define COPY_STAT(f) log_buf->tlb_ ## f = tp->f
1846 #define COPY_STAT_T(f) log_buf->tlb_ ## f = tp->t_ ## f
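/*
 * For example, COPY_STAT(snd_cwnd) expands to
 *	log_buf->tlb_snd_cwnd = tp->snd_cwnd;
 * while COPY_STAT_T(rttvar) expands to
 *	log_buf->tlb_rttvar = tp->t_rttvar;
 * i.e., the _T variant is for tcpcb fields that carry the t_ prefix.
 */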
1848 COPY_STAT_T(starttime);
1853 COPY_STAT(snd_cwnd);
1855 COPY_STAT(snd_recover);
1857 COPY_STAT(snd_ssthresh);
1859 COPY_STAT_T(rttvar);
1864 COPY_STAT_T(dupacks);
1865 COPY_STAT_T(segqlen);
1866 COPY_STAT(snd_numholes);
1867 COPY_STAT(snd_scale);
1868 COPY_STAT(rcv_scale);
1869 COPY_STAT_T(flags2);
1870 COPY_STAT_T(fbyte_in);
1871 COPY_STAT_T(fbyte_out);
1874 /* Copy stack-specific info. */
1875 if (stackinfo != NULL) {
1876 memcpy(&log_buf->tlb_stackinfo, stackinfo,
1877 sizeof(log_buf->tlb_stackinfo));
1878 log_buf->tlb_eventflags |= TLB_FLAG_STACKINFO;
1882 log_buf->tlb_len = len;
1883 if (th != NULL) {
1884 int optlen;
1886 log_buf->tlb_eventflags |= TLB_FLAG_HDR;
1887 log_buf->tlb_th = *th;
1888 if (th_hostorder)
1889 tcp_fields_to_net(&log_buf->tlb_th);
1890 optlen = (th->th_off << 2) - sizeof (struct tcphdr);
1891 if (optlen > 0)
1892 memcpy(log_buf->tlb_opts, th + 1, optlen);
1893 } else
1894 memset(&log_buf->tlb_th, 0, sizeof(*th));
1897 /* Verbose information */
1898 if (func != NULL) {
1899 log_buf->tlb_eventflags |= TLB_FLAG_VERBOSE;
1900 if (output_caller != NULL)
1901 strlcpy(log_verbose->tlv_snd_frm, output_caller,
1902 TCP_FUNC_LEN);
1903 else
1904 *log_verbose->tlv_snd_frm = 0;
1905 strlcpy(log_verbose->tlv_trace_func, func, TCP_FUNC_LEN);
1906 log_verbose->tlv_trace_line = line;
1909 /* Insert the new log at the tail. */
1910 STAILQ_INSERT_TAIL(&tp->t_logs, log_entry, tlm_queue);
1911 tcp_log_entry_refcnt_add(log_entry);
1916 * Change the logging state for a TCPCB. Returns 0 on success or an
1917 * error code on failure.
1920 tcp_log_state_change(struct tcpcb *tp, int state)
1922 struct tcp_log_mem *log_entry;
1925 INP_WLOCK_ASSERT(tptoinpcb(tp));
1927 switch (state) {
1928 case TCP_LOG_STATE_CLEAR:
1929 while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
1930 tcp_log_remove_log_head(tp, log_entry);
1933 case TCP_LOG_STATE_OFF:
1934 tp->_t_logstate = TCP_LOG_STATE_OFF;
1937 case TCP_LOG_STATE_TAIL:
1938 case TCP_LOG_STATE_HEAD:
1939 case TCP_LOG_STATE_CONTINUAL:
1940 case TCP_LOG_VIA_BBPOINTS:
1941 case TCP_LOG_STATE_HEAD_AUTO:
1942 case TCP_LOG_STATE_TAIL_AUTO:
1944 * When the RATIO_OFF state is set for the bucket, the log ID
1945 * this tp is associated with has been probabilistically opted
1946 * out of logging per tcp_log_apply_ratio().
1948 if (tp->t_lib == NULL ||
1949 tp->t_lib->tlb_logstate != TCP_LOG_STATE_RATIO_OFF) {
1950 tp->_t_logstate = state;
1953 tp->_t_logstate = TCP_LOG_STATE_OFF;
1960 if (tcp_disable_all_bb_logs) {
1961 /* We are prohibited from doing any logs */
1962 tp->_t_logstate = TCP_LOG_STATE_OFF;
1965 tp->t_flags2 &= ~(TF2_LOG_AUTO);
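/*
 * Typical in-kernel use (a sketch; mirrors how the socket option path
 * calls this function with the INP write-locked):
 *
 *	INP_WLOCK(inp);
 *	error = tcp_log_state_change(tp, TCP_LOG_STATE_TAIL);
 *	INP_WUNLOCK(inp);
 */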
1970 /* If tcp_drain() is called, flush half the log entries. */
1972 tcp_log_drain(struct tcpcb *tp)
1974 struct tcp_log_mem *log_entry, *next;
1977 INP_WLOCK_ASSERT(tptoinpcb(tp));
1978 if ((target = tp->t_lognum / 2) == 0)
1982 * XXXRRS: At this point I don't think doing this is
1983 * wise. All that a drain call means is that we are
1984 * hitting one of the system mbuf limits. BB logging,
1985 * or freeing of the log entries, will not create any
1986 * more mbufs and really has nothing to do with the
1987 * system running out of mbufs. For now I am changing
1988 * this to free any "AUTO" sessions by dumping them
1989 * out. But this should either be changed so that it
1990 * gets called when we hit the BB limit, or it should
1991 * just not get called (one of the two), since I don't
1992 * think the mbuf <-> BB log cleanup is the right
1993 * thing to do here.
1996 * If we are logging the "head" packets, we want to discard
1997 * from the tail of the queue. Otherwise, we want to discard
1998 * from the head.
1999 */
2000 if (tp->_t_logstate == TCP_LOG_STATE_HEAD) {
2001 skip = tp->t_lognum - target;
2002 STAILQ_FOREACH(log_entry, &tp->t_logs, tlm_queue)
2003 if (!--skip)
2004 break;
2005 KASSERT(log_entry != NULL,
2006 ("%s: skipped through all entries!", __func__));
2007 if (log_entry == NULL)
2009 while ((next = STAILQ_NEXT(log_entry, tlm_queue)) != NULL) {
2010 STAILQ_REMOVE_AFTER(&tp->t_logs, log_entry, tlm_queue);
2011 tcp_log_entry_refcnt_rem(next);
2012 tcp_log_remove_log_cleanup(tp, next);
2017 KASSERT(target == 0,
2018 ("%s: After removing from tail, target was %d", __func__,
2020 } else if (tp->_t_logstate == TCP_LOG_STATE_HEAD_AUTO) {
2021 (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from head at drain",
2023 } else if (tp->_t_logstate == TCP_LOG_STATE_TAIL_AUTO) {
2024 (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from tail at drain",
2026 } else if (tp->_t_logstate == TCP_LOG_VIA_BBPOINTS) {
2027 (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from bbpoints",
2029 } else if (tp->_t_logstate == TCP_LOG_STATE_CONTINUAL) {
2030 (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
2033 while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL &&
2034 target--)
2035 tcp_log_remove_log_head(tp, log_entry);
2036 KASSERT(target <= 0,
2037 ("%s: After removing from head, target was %d", __func__,
2039 KASSERT(tp->t_lognum > 0,
2040 ("%s: After removing from head, tp->t_lognum was %d",
2042 KASSERT(log_entry != NULL,
2043 ("%s: After removing from head, the tailq was empty",
2049 tcp_log_copyout(struct sockopt *sopt, void *src, void *dst, size_t len)
2052 if (sopt->sopt_td != NULL)
2053 return (copyout(src, dst, len));
2054 bcopy(src, dst, len);
2059 tcp_log_logs_to_buf(struct sockopt *sopt, struct tcp_log_stailq *log_tailqp,
2060 struct tcp_log_buffer **end, int count)
2062 struct tcp_log_buffer *out_entry;
2063 struct tcp_log_mem *log_entry;
2067 int orig_count = count;
2070 /* Copy the data out. */
2072 out_entry = (struct tcp_log_buffer *) sopt->sopt_val;
2073 STAILQ_FOREACH(log_entry, log_tailqp, tlm_queue) {
2074 count--;
2075 KASSERT(count >= 0,
2076 ("%s:%d: Exceeded expected count (%d) processing list %p",
2077 __func__, __LINE__, orig_count, log_tailqp));
2079 #ifdef TCPLOG_DEBUG_COUNTERS
2080 counter_u64_add(tcp_log_que_copyout, 1);
2084 * Skip copying out the header if it isn't present.
2085 * Instead, copy out zeros (to ensure we don't leak info).
2086 * TODO: Make sure we truly do zero everything we don't
2087 * copy out.
2089 if (log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_HDR)
2090 entrysize = sizeof(struct tcp_log_buffer);
2091 else
2092 entrysize = offsetof(struct tcp_log_buffer, tlb_th);
2093 error = tcp_log_copyout(sopt, &log_entry->tlm_buf, out_entry,
2094 entrysize);
2095 if (error)
2096 break;
2097 if (!(log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_HDR)) {
2098 error = tcp_log_copyout(sopt, zerobuf,
2099 ((uint8_t *)out_entry) + entrysize,
2100 sizeof(struct tcp_log_buffer) - entrysize);
2101 if (error)
2102 break;
2103 }
2104 * Copy out the verbose bit, if needed. Either way,
2105 * increment the output pointer by the correct amount.
2107 if (log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_VERBOSE) {
2108 error = tcp_log_copyout(sopt, &log_entry->tlm_v,
2109 out_entry->tlb_verbose,
2110 sizeof(struct tcp_log_verbose));
2111 if (error)
2112 break;
2113 out_entry = (struct tcp_log_buffer *)
2114 (((uint8_t *) (out_entry + 1)) +
2115 sizeof(struct tcp_log_verbose));
2116 } else
2117 out_entry++;
2118 }
2120 KASSERT(error || count == 0,
2121 ("%s:%d: Less than expected count (%d) processing list %p"
2122 " (%d remain)", __func__, __LINE__, orig_count,
2123 log_tailqp, count));
2124 *end = out_entry;
2125 return (error);
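/*
 * Hedged sketch of how a consumer could walk the packed records this
 * function produces: each record is a fixed-size struct tcp_log_buffer,
 * followed by a struct tcp_log_verbose only when TLB_FLAG_VERBOSE is
 * set. Only the structure layout from <netinet/tcp_log_buf.h> is
 * assumed.
 */
#include <netinet/tcp_log_buf.h>
#include <stdint.h>

static const struct tcp_log_buffer *
next_record(const struct tcp_log_buffer *cur)
{
	const uint8_t *p = (const uint8_t *)(cur + 1);

	/* Verbose data, when present, immediately follows the buffer. */
	if (cur->tlb_eventflags & TLB_FLAG_VERBOSE)
		p += sizeof(struct tcp_log_verbose);
	return ((const struct tcp_log_buffer *)p);
}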
2129 * Copy out the buffer. Note that we do incremental copying, so
2130 * sooptcopyout() won't work. However, the goal is to produce the same
2131 * end result as if we copied in the entire user buffer, updated it,
2132 * and then used sooptcopyout() to copy it out.
2134 * NOTE: This should be called with a write lock on the PCB; however,
2135 * the function will drop it after it extracts the data from the TCPCB.
2138 tcp_log_getlogbuf(struct sockopt *sopt, struct tcpcb *tp)
2140 struct tcp_log_stailq log_tailq;
2141 struct tcp_log_mem *log_entry, *log_next;
2142 struct tcp_log_buffer *out_entry;
2143 struct inpcb *inp = tptoinpcb(tp);
2144 size_t outsize, entrysize;
2145 int error, outnum;
2147 INP_WLOCK_ASSERT(inp);
2150 * Determine which log entries will fit in the buffer. As an
2151 * optimization, skip this if all the entries will clearly fit
2152 * in the buffer. (However, get an exact size if we are using
2153 * INVARIANTS.)
2156 if (sopt->sopt_valsize / (sizeof(struct tcp_log_buffer) +
2157 sizeof(struct tcp_log_verbose)) >= tp->t_lognum) {
2158 log_entry = STAILQ_LAST(&tp->t_logs, tcp_log_mem, tlm_queue);
2159 log_next = NULL;
2160 outsize = 0;
2161 outnum = tp->t_lognum;
2164 outsize = outnum = 0;
2165 log_entry = NULL;
2166 STAILQ_FOREACH(log_next, &tp->t_logs, tlm_queue) {
2167 entrysize = sizeof(struct tcp_log_buffer);
2168 if (log_next->tlm_buf.tlb_eventflags &
2169 TLB_FLAG_VERBOSE)
2170 entrysize += sizeof(struct tcp_log_verbose);
2171 if ((sopt->sopt_valsize - outsize) < entrysize)
2172 break;
2173 outsize += entrysize;
2174 outnum++;
2175 log_entry = log_next;
2177 KASSERT(outsize <= sopt->sopt_valsize,
2178 ("%s: calculated output size (%zu) greater than available"
2179 "space (%zu)", __func__, outsize, sopt->sopt_valsize));
2185 * Copy traditional sooptcopyout() behavior: if sopt->sopt_val
2186 * is NULL, silently skip the copy. However, in this case, we
2187 * will leave the list alone and return. Functionally, this
2188 * gives userspace a way to poll for an approximate buffer
2189 * size they will need to get the log entries.
2191 if (sopt->sopt_val == NULL) {
2192 error = 0;
2193 if (outsize == 0)
2194 outsize = outnum * (sizeof(struct tcp_log_buffer) +
2195 sizeof(struct tcp_log_verbose));
2197 if (sopt->sopt_valsize > outsize)
2198 sopt->sopt_valsize = outsize;
2199 return (error);
2200 }
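/*
 * Hedged userspace sketch of the polling behavior described above: a
 * first getsockopt() call with a NULL buffer trims the passed-in size
 * down to the approximate size needed, and a second call fetches (and
 * consumes) the entries. Assumes the TCP_LOGBUF socket option from
 * <netinet/tcp.h>.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdlib.h>

static void *
fetch_tcp_log(int fd, socklen_t *lenp)
{
	socklen_t len = 1U << 24;	/* generous bound; the kernel trims it */
	void *buf;

	/* Poll: a NULL buffer only reports the size, leaving the log alone. */
	if (getsockopt(fd, IPPROTO_TCP, TCP_LOGBUF, NULL, &len) == -1 ||
	    len == 0)
		return (NULL);
	if ((buf = malloc(len)) == NULL)
		return (NULL);
	/* Fetch the entries; this removes them from the connection. */
	if (getsockopt(fd, IPPROTO_TCP, TCP_LOGBUF, buf, &len) == -1) {
		free(buf);
		return (NULL);
	}
	*lenp = len;
	return (buf);
}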
2203 * Break apart the list. We'll save the ones we want to copy
2204 * out locally and remove them from the TCPCB list. We can
2205 * then drop the INPCB lock while we do the copyout.
2207 * There are roughly three cases:
2208 * 1. There was nothing to copy out. That's easy: drop the
2209 * lock and return.
2210 * 2. We are copying out the entire list. Again, that's easy:
2211 * move the whole list.
2212 * 3. We are copying out a partial list. That's harder. We
2213 * need to update the list book-keeping entries.
2215 if (log_entry != NULL && log_next == NULL) {
2216 /* Move entire list. */
2217 KASSERT(outnum == tp->t_lognum,
2218 ("%s:%d: outnum (%d) should match tp->t_lognum (%d)",
2219 __func__, __LINE__, outnum, tp->t_lognum));
2220 log_tailq = tp->t_logs;
2221 tp->t_lognum = 0;
2222 STAILQ_INIT(&tp->t_logs);
2223 } else if (log_entry != NULL) {
2224 /* Move partial list. */
2225 KASSERT(outnum < tp->t_lognum,
2226 ("%s:%d: outnum (%d) not less than tp->t_lognum (%d)",
2227 __func__, __LINE__, outnum, tp->t_lognum));
2228 STAILQ_FIRST(&log_tailq) = STAILQ_FIRST(&tp->t_logs);
2229 STAILQ_FIRST(&tp->t_logs) = STAILQ_NEXT(log_entry, tlm_queue);
2230 KASSERT(STAILQ_NEXT(log_entry, tlm_queue) != NULL,
2231 ("%s:%d: tp->t_logs is unexpectedly shorter than expected"
2232 "(tp: %p, log_tailq: %p, outnum: %d, tp->t_lognum: %d)",
2233 __func__, __LINE__, tp, &log_tailq, outnum, tp->t_lognum));
2234 STAILQ_NEXT(log_entry, tlm_queue) = NULL;
2235 log_tailq.stqh_last = &STAILQ_NEXT(log_entry, tlm_queue);
2236 tp->t_lognum -= outnum;
2237 } else
2238 STAILQ_INIT(&log_tailq);
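/*
 * Minimal sketch of case 3 above (hypothetical types, plain userspace
 * C): detach everything up to and including "last" from "src" into the
 * empty list "dst" by rewriting the STAILQ head and tail pointers, the
 * same book-keeping performed here on tp->t_logs and log_tailq.
 */
#include <sys/queue.h>
#include <stddef.h>

struct ent {
	STAILQ_ENTRY(ent) link;
};
STAILQ_HEAD(entq, ent);

static void
split_after(struct entq *src, struct entq *dst, struct ent *last)
{
	STAILQ_FIRST(dst) = STAILQ_FIRST(src);
	STAILQ_FIRST(src) = STAILQ_NEXT(last, link);
	if (STAILQ_FIRST(src) == NULL)
		src->stqh_last = &STAILQ_FIRST(src);	/* src is now empty */
	STAILQ_NEXT(last, link) = NULL;
	dst->stqh_last = &STAILQ_NEXT(last, link);
}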
2240 /* Drop the PCB lock. */
2241 INP_WUNLOCK(inp);
2243 /* Copy the data out. */
2244 error = tcp_log_logs_to_buf(sopt, &log_tailq, &out_entry, outnum);
2246 if (error) {
2247 /* Restore list */
2248 INP_WLOCK(inp);
2249 if ((inp->inp_flags & INP_DROPPED) == 0) {
2250 tp = intotcpcb(inp);
2252 /* Merge the two lists. */
2253 STAILQ_CONCAT(&log_tailq, &tp->t_logs);
2254 tp->t_logs = log_tailq;
2255 tp->t_lognum += outnum;
2256 }
2257 INP_WUNLOCK(inp);
2258 } else {
2259 /* Sanity check entries */
2260 KASSERT(((caddr_t)out_entry - (caddr_t)sopt->sopt_val) ==
2261 outsize, ("%s: Actual output size (%zu) != "
2262 "calculated output size (%zu)", __func__,
2263 (size_t)((caddr_t)out_entry - (caddr_t)sopt->sopt_val),
2264 outsize));
2266 /* Free the entries we just copied out. */
2267 STAILQ_FOREACH_SAFE(log_entry, &log_tailq, tlm_queue, log_next) {
2268 tcp_log_entry_refcnt_rem(log_entry);
2269 uma_zfree(tcp_log_zone, log_entry);
2273 sopt->sopt_valsize = (size_t)((caddr_t)out_entry -
2274 (caddr_t)sopt->sopt_val);
2276 return (error);
2279 tcp_log_free_queue(struct tcp_log_dev_queue *param)
2281 struct tcp_log_dev_log_queue *entry;
2283 KASSERT(param != NULL, ("%s: called with NULL param", __func__));
2287 entry = (struct tcp_log_dev_log_queue *)param;
2289 /* Free the entries. */
2290 tcp_log_free_entries(&entry->tldl_entries, &entry->tldl_count);
2292 /* Free the buffer, if it is allocated. */
2293 if (entry->tldl_common.tldq_buf != NULL)
2294 free(entry->tldl_common.tldq_buf, M_TCPLOGDEV);
2296 /* Free the queue entry. */
2297 free(entry, M_TCPLOGDEV);
2300 static struct tcp_log_common_header *
2301 tcp_log_expandlogbuf(struct tcp_log_dev_queue *param)
2303 struct tcp_log_dev_log_queue *entry;
2304 struct tcp_log_header *hdr;
2305 uint8_t *end;
2306 struct sockopt sopt;
2307 int error;
2309 entry = (struct tcp_log_dev_log_queue *)param;
2311 /* Take a worst-case guess at space needs. */
2312 sopt.sopt_valsize = sizeof(struct tcp_log_header) +
2313 entry->tldl_count * (sizeof(struct tcp_log_buffer) +
2314 sizeof(struct tcp_log_verbose));
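/*
 * Worked example of the guess above (sizes illustrative only): with
 * tldl_count == 100 entries, the allocation reserves
 * sizeof(struct tcp_log_header) + 100 * (sizeof(struct tcp_log_buffer)
 * + sizeof(struct tcp_log_verbose)) bytes, even though entries without
 * TLB_FLAG_VERBOSE use less; tlh_length below records the bytes
 * actually written.
 */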
2315 hdr = malloc(sopt.sopt_valsize, M_TCPLOGDEV, M_NOWAIT);
2316 if (hdr == NULL) {
2317 #ifdef TCPLOG_DEBUG_COUNTERS
2318 counter_u64_add(tcp_log_que_fail5, entry->tldl_count);
2319 #endif
2320 return (NULL);
2321 }
2322 sopt.sopt_val = hdr + 1;
2323 sopt.sopt_valsize -= sizeof(struct tcp_log_header);
2324 sopt.sopt_td = NULL;
2326 error = tcp_log_logs_to_buf(&sopt, &entry->tldl_entries,
2327 (struct tcp_log_buffer **)&end, entry->tldl_count);
2328 if (error) {
2329 free(hdr, M_TCPLOGDEV);
2330 return (NULL);
2331 }
2333 /* Free the entries. */
2334 tcp_log_free_entries(&entry->tldl_entries, &entry->tldl_count);
2335 entry->tldl_count = 0;
2337 memset(hdr, 0, sizeof(struct tcp_log_header));
2338 hdr->tlh_version = TCP_LOG_BUF_VER;
2339 hdr->tlh_type = TCP_LOG_DEV_TYPE_BBR;
2340 hdr->tlh_length = end - (uint8_t *)hdr;
2341 hdr->tlh_ie = entry->tldl_ie;
2342 hdr->tlh_af = entry->tldl_af;
2343 getboottime(&hdr->tlh_offset);
2344 strlcpy(hdr->tlh_id, entry->tldl_id, TCP_LOG_ID_LEN);
2345 strlcpy(hdr->tlh_tag, entry->tldl_tag, TCP_LOG_TAG_LEN);
2346 strlcpy(hdr->tlh_reason, entry->tldl_reason, TCP_LOG_REASON_LEN);
2347 return ((struct tcp_log_common_header *)hdr);
2351 * Queue the tcpcb's log buffer for transmission via the log buffer facility.
2353 * NOTE: This should be called with a write lock on the PCB.
2355 * how should be M_WAITOK or M_NOWAIT. If M_WAITOK, the function will drop
2356 * and reacquire the INP lock if it needs to do so.
2358 * If force is false, this will only dump auto-logged sessions if
2359 * tcp_log_auto_all is true or if there is a log ID defined for the session.
2362 tcp_log_dump_tp_logbuf(struct tcpcb *tp, char *reason, int how, bool force)
2364 struct tcp_log_dev_log_queue *entry;
2365 struct inpcb *inp = tptoinpcb(tp);
2366 #ifdef TCPLOG_DEBUG_COUNTERS
2367 int num_entries;
2368 #endif
2370 INP_WLOCK_ASSERT(inp);
2372 /* If there are no log entries, there is nothing to do. */
2373 if (tp->t_lognum == 0)
2374 return (0);
2376 /* Check for a log ID. */
2377 if (tp->t_lib == NULL && (tp->t_flags2 & TF2_LOG_AUTO) &&
2378 !tcp_log_auto_all && !force) {
2379 struct tcp_log_mem *log_entry;
2382 * We needed a log ID and none was found. Free the log entries
2383 * and return success. Also, cancel further logging. If the
2384 * session doesn't have a log ID by now, we'll assume it isn't
2385 * going to get one.
2387 while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
2388 tcp_log_remove_log_head(tp, log_entry);
2389 KASSERT(tp->t_lognum == 0,
2390 ("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
2391 __func__, tp->t_lognum));
2392 tp->_t_logstate = TCP_LOG_STATE_OFF;
2393 return (0);
2394 }
2397 * Allocate memory. If we must wait, we'll need to drop the locks
2398 * and reacquire them (and do all the related business that goes
2399 * along with that).
2401 entry = malloc(sizeof(struct tcp_log_dev_log_queue), M_TCPLOGDEV,
2402 M_NOWAIT);
2403 if (entry == NULL && (how & M_NOWAIT)) {
2404 #ifdef TCPLOG_DEBUG_COUNTERS
2405 counter_u64_add(tcp_log_que_fail3, 1);
2406 #endif
2407 return (ENOBUFS);
2408 }
2409 if (entry == NULL) {
2410 INP_WUNLOCK(inp);
2411 entry = malloc(sizeof(struct tcp_log_dev_log_queue),
2412 M_TCPLOGDEV, M_WAITOK);
2413 INP_WLOCK(inp);
2415 * Note that this check is slightly overly restrictive in
2416 * that the TCB can survive either of these events.
2417 * However, there is currently not a good way to ensure
2418 * that is the case. So, if we hit this M_WAIT path, we
2419 * may end up dropping some entries. That seems like a
2420 * small price to pay for safety.
2422 if (inp->inp_flags & INP_DROPPED) {
2423 free(entry, M_TCPLOGDEV);
2424 #ifdef TCPLOG_DEBUG_COUNTERS
2425 counter_u64_add(tcp_log_que_fail2, 1);
2427 return (ECONNRESET);
2428 }
2429 tp = intotcpcb(inp);
2430 if (tp->t_lognum == 0) {
2431 free(entry, M_TCPLOGDEV);
2432 return (0);
2433 }
2434 }
2436 /* Fill in the unique parts of the queue entry. */
2437 if (tp->t_lib != NULL) {
2438 strlcpy(entry->tldl_id, tp->t_lib->tlb_id, TCP_LOG_ID_LEN);
2439 strlcpy(entry->tldl_tag, tp->t_lib->tlb_tag, TCP_LOG_TAG_LEN);
2440 } else {
2441 strlcpy(entry->tldl_id, "UNKNOWN", TCP_LOG_ID_LEN);
2442 strlcpy(entry->tldl_tag, "UNKNOWN", TCP_LOG_TAG_LEN);
2445 strlcpy(entry->tldl_reason, reason, TCP_LOG_REASON_LEN);
2446 else
2447 strlcpy(entry->tldl_reason, "UNKNOWN", TCP_LOG_ID_LEN);
2448 entry->tldl_ie = inp->inp_inc.inc_ie;
2449 if (inp->inp_inc.inc_flags & INC_ISIPV6)
2450 entry->tldl_af = AF_INET6;
2451 else
2452 entry->tldl_af = AF_INET;
2453 entry->tldl_entries = tp->t_logs;
2454 entry->tldl_count = tp->t_lognum;
2456 /* Fill in the common parts of the queue entry. */
2457 entry->tldl_common.tldq_buf = NULL;
2458 entry->tldl_common.tldq_xform = tcp_log_expandlogbuf;
2459 entry->tldl_common.tldq_dtor = tcp_log_free_queue;
2461 /* Clear the log data from the TCPCB. */
2462 #ifdef TCPLOG_DEBUG_COUNTERS
2463 num_entries = tp->t_lognum;
2464 #endif
2465 tp->t_lognum = 0;
2466 STAILQ_INIT(&tp->t_logs);
2468 /* Add the entry. If no one is listening, free the entry. */
2469 if (tcp_log_dev_add_log((struct tcp_log_dev_queue *)entry)) {
2470 tcp_log_free_queue((struct tcp_log_dev_queue *)entry);
2471 #ifdef TCPLOG_DEBUG_COUNTERS
2472 counter_u64_add(tcp_log_que_fail1, num_entries);
2473 } else
2474 counter_u64_add(tcp_log_queued, num_entries);
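/*
 * Hedged userspace sketch: this dump path is typically driven via
 * setsockopt(TCP_LOGDUMP) with a free-form reason string (assumes the
 * option from <netinet/tcp.h>); the string is what lands in
 * tldl_reason above.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>

static int
dump_tcp_log(int fd, const char *reason)
{
	return (setsockopt(fd, IPPROTO_TCP, TCP_LOGDUMP, reason,
	    strlen(reason) + 1));
}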
2481 * Queue the log_id_node's log buffers for transmission via the log buffer
2482 * facility.
2484 * NOTE: This should be called with the bucket locked and referenced.
2486 * how should be M_WAITOK or M_NOWAIT. If M_WAITOK, the function will drop
2487 * and reacquire the bucket lock if it needs to do so. (The caller must
2488 * ensure that the tln is no longer on any lists so no one else will mess
2489 * with this while the lock is dropped!)
2492 tcp_log_dump_node_logbuf(struct tcp_log_id_node *tln, char *reason, int how)
2494 struct tcp_log_dev_log_queue *entry;
2495 struct tcp_log_id_bucket *tlb;
2497 tlb = tln->tln_bucket;
2498 TCPID_BUCKET_LOCK_ASSERT(tlb);
2499 KASSERT(tlb->tlb_refcnt > 0,
2500 ("%s:%d: Called with unreferenced bucket (tln=%p, tlb=%p)",
2501 __func__, __LINE__, tln, tlb));
2502 KASSERT(tln->tln_closed,
2503 ("%s:%d: Called for node with tln_closed==false (tln=%p)",
2504 __func__, __LINE__, tln));
2506 /* If there are no log entries, there is nothing to do. */
2507 if (tln->tln_count == 0)
2508 return (0);
2511 * Allocate memory. If we must wait, we'll need to drop the locks
2512 * and reacquire them (and do all the related business that goes
2513 * along with that).
2515 entry = malloc(sizeof(struct tcp_log_dev_log_queue), M_TCPLOGDEV,
2516 M_NOWAIT);
2517 if (entry == NULL && (how & M_NOWAIT))
2518 return (ENOBUFS);
2519 if (entry == NULL) {
2520 TCPID_BUCKET_UNLOCK(tlb);
2521 entry = malloc(sizeof(struct tcp_log_dev_log_queue),
2522 M_TCPLOGDEV, M_WAITOK);
2523 TCPID_BUCKET_LOCK(tlb);
2526 /* Fill in the common parts of the queue entry. */
2527 entry->tldl_common.tldq_buf = NULL;
2528 entry->tldl_common.tldq_xform = tcp_log_expandlogbuf;
2529 entry->tldl_common.tldq_dtor = tcp_log_free_queue;
2531 /* Fill in the unique parts of the queue entry. */
2532 strlcpy(entry->tldl_id, tlb->tlb_id, TCP_LOG_ID_LEN);
2533 strlcpy(entry->tldl_tag, tlb->tlb_tag, TCP_LOG_TAG_LEN);
2535 strlcpy(entry->tldl_reason, reason, TCP_LOG_REASON_LEN);
2536 else
2537 strlcpy(entry->tldl_reason, "UNKNOWN", TCP_LOG_ID_LEN);
2538 entry->tldl_ie = tln->tln_ie;
2539 entry->tldl_entries = tln->tln_entries;
2540 entry->tldl_count = tln->tln_count;
2541 entry->tldl_af = tln->tln_af;
2543 /* Add the entry. If no one is listening, free the entry. */
2544 if (tcp_log_dev_add_log((struct tcp_log_dev_queue *)entry))
2545 tcp_log_free_queue((struct tcp_log_dev_queue *)entry);
2551 * Queue the log buffers for all sessions in a bucket for transmission via
2552 * the log buffer facility.
2554 * NOTE: This should be called with a locked bucket; however, the function
2555 * will drop the lock.
2557 #define LOCAL_SAVE 10
2559 tcp_log_dumpbucketlogs(struct tcp_log_id_bucket *tlb, char *reason)
2561 struct tcp_log_id_node local_entries[LOCAL_SAVE];
2562 struct inpcb *inp;
2563 struct tcpcb *tp;
2564 struct tcp_log_id_node *cur_tln, *prev_tln, *tmp_tln;
2565 int i, num_local_entries, tree_locked;
2566 bool expireq_locked;
2568 TCPID_BUCKET_LOCK_ASSERT(tlb);
2571 * Take a reference on the bucket to keep it from disappearing until
2572 * we are done.
2574 TCPID_BUCKET_REF(tlb);
2577 * We'll try to create these without dropping locks. However, we
2578 * might very well need to drop locks to get memory. If that's the
2579 * case, we'll save up to 10 on the stack, and sacrifice the rest.
2580 * (Otherwise, we need to worry about finding our place again in a
2581 * potentially changed list. It just doesn't seem worth the trouble
2582 * to do that.)
2584 expireq_locked = false;
2585 num_local_entries = 0;
2586 prev_tln = NULL;
2587 tree_locked = TREE_UNLOCKED;
2588 SLIST_FOREACH_SAFE(cur_tln, &tlb->tlb_head, tln_list, tmp_tln) {
2590 * If this isn't associated with a TCPCB, we can pull it off
2591 * the list now. We need to be careful that the expire timer
2592 * hasn't already taken ownership (tln_expiretime == SBT_MAX).
2593 * If so, we let the expire timer code free the data.
2595 if (cur_tln->tln_closed) {
2598 * Get the expireq lock so we can get a consistent
2599 * read of tln_expiretime and so we can remove this
2600 * entry from the expireq.
2602 if (!expireq_locked) {
2603 TCPLOG_EXPIREQ_LOCK();
2604 expireq_locked = true;
2608 * We ignore entries with tln_expiretime == SBT_MAX.
2609 * The expire timer code already owns those.
2611 KASSERT(cur_tln->tln_expiretime > (sbintime_t) 0,
2612 ("%s:%d: node on the expire queue without positive "
2613 "expire time", __func__, __LINE__));
2614 if (cur_tln->tln_expiretime == SBT_MAX) {
2615 prev_tln = cur_tln;
2616 continue;
2617 }
2619 /* Remove the entry from the expireq. */
2620 STAILQ_REMOVE(&tcp_log_expireq_head, cur_tln,
2621 tcp_log_id_node, tln_expireq);
2623 /* Remove the entry from the bucket. */
2624 if (prev_tln != NULL)
2625 SLIST_REMOVE_AFTER(prev_tln, tln_list);
2627 SLIST_REMOVE_HEAD(&tlb->tlb_head, tln_list);
2630 * Drop the INP and bucket reference counts. Due to
2631 * lock-ordering rules, we need to drop the expire
2632 * queue lock first.
2634 TCPLOG_EXPIREQ_UNLOCK();
2635 expireq_locked = false;
2637 /* Drop the INP reference. */
2638 INP_WLOCK(cur_tln->tln_inp);
2639 if (!in_pcbrele_wlocked(cur_tln->tln_inp))
2640 INP_WUNLOCK(cur_tln->tln_inp);
2642 if (tcp_log_unref_bucket(tlb, &tree_locked, NULL)) {
2643 #ifdef INVARIANTS
2644 panic("%s: Bucket refcount unexpectedly 0.",
2648 * Recover as best we can: free the entry we
2649 * own.
2651 tcp_log_free_entries(&cur_tln->tln_entries,
2652 &cur_tln->tln_count);
2653 uma_zfree(tcp_log_id_node_zone, cur_tln);
2657 if (tcp_log_dump_node_logbuf(cur_tln, reason,
2658 M_NOWAIT)) {
2660 * If we have space, save the entries locally.
2661 * Otherwise, free them.
2663 if (num_local_entries < LOCAL_SAVE) {
2664 local_entries[num_local_entries] =
2665 *cur_tln;
2666 num_local_entries++;
2668 tcp_log_free_entries(
2669 &cur_tln->tln_entries,
2670 &cur_tln->tln_count);
2674 /* No matter what, we are done with the node now. */
2675 uma_zfree(tcp_log_id_node_zone, cur_tln);
2678 * Because we removed this entry from the list, prev_tln
2679 * (which tracks the previous entry still on the tlb
2680 * list) remains unchanged.
2682 continue;
2683 }
2686 * If we get to this point, the session data is still held in
2687 * the TCPCB. So, we need to pull the data out of that.
2689 * We will need to drop the expireq lock so we can lock the INP.
2690 * We can then try to extract the data the "easy" way. If that
2691 * fails, we'll save the log entries for later.
2693 if (expireq_locked) {
2694 TCPLOG_EXPIREQ_UNLOCK();
2695 expireq_locked = false;
2698 /* Lock the INP and then re-check the state. */
2699 inp = cur_tln->tln_inp;
2700 INP_WLOCK(inp);
2702 * If we caught this while it was transitioning, the data
2703 * might have moved from the TCPCB to the tln (signified by
2704 * setting tln_closed to true). If so, treat this like an
2705 * inactive connection.
2707 if (cur_tln->tln_closed) {
2709 * It looks like we may have caught this connection
2710 * while it was transitioning from active to inactive.
2711 * Treat this like an inactive connection.
2718 * Try to dump the data from the tp without dropping the lock.
2719 * If this fails, try to save off the data locally.
2721 tp = cur_tln->tln_tp;
2722 if (tcp_log_dump_tp_logbuf(tp, reason, M_NOWAIT, true) &&
2723 num_local_entries < LOCAL_SAVE) {
2724 tcp_log_move_tp_to_node(tp,
2725 &local_entries[num_local_entries]);
2726 local_entries[num_local_entries].tln_closed = 1;
2727 KASSERT(local_entries[num_local_entries].tln_bucket ==
2728 tlb, ("%s: %d: bucket mismatch for node %p",
2729 __func__, __LINE__, cur_tln));
2730 num_local_entries++;
2731 }
2732 INP_WUNLOCK(inp);
2736 * We are going to leave the current tln on the list. It will
2737 * become the previous tln.
2739 prev_tln = cur_tln;
2740 }
2742 /* Drop our locks, if any. */
2743 KASSERT(tree_locked == TREE_UNLOCKED,
2744 ("%s: %d: tree unexpectedly locked", __func__, __LINE__));
2745 switch (tree_locked) {
2746 case TREE_WLOCKED:
2747 TCPID_TREE_WUNLOCK();
2748 tree_locked = TREE_UNLOCKED;
2749 break;
2750 case TREE_RLOCKED:
2751 TCPID_TREE_RUNLOCK();
2752 tree_locked = TREE_UNLOCKED;
2753 break;
2754 }
2755 if (expireq_locked) {
2756 TCPLOG_EXPIREQ_UNLOCK();
2757 expireq_locked = false;
2761 * Try again for any saved entries. tcp_log_dump_node_logbuf() is
2762 * guaranteed to free the log entries within the node. And, since
2763 * the node itself is on our stack, we don't need to free it.
2765 for (i = 0; i < num_local_entries; i++)
2766 tcp_log_dump_node_logbuf(&local_entries[i], reason, M_WAITOK);
2768 /* Drop our reference. */
2769 if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
2770 TCPID_BUCKET_UNLOCK(tlb);
2773 /* Drop our locks, if any. */
2774 switch (tree_locked) {
2775 case TREE_WLOCKED:
2776 TCPID_TREE_WUNLOCK();
2777 break;
2778 case TREE_RLOCKED:
2779 TCPID_TREE_RUNLOCK();
2780 break;
2781 }
2782 if (expireq_locked)
2783 TCPLOG_EXPIREQ_UNLOCK();
2788 * Queue the log buffers for all sessions in a bucket for transmission via
2789 * the log buffer facility.
2791 * NOTE: This should be called with a locked INP; however, the function
2792 * will drop the lock.
2795 tcp_log_dump_tp_bucket_logbufs(struct tcpcb *tp, char *reason)
2797 struct inpcb *inp = tptoinpcb(tp);
2798 struct tcp_log_id_bucket *tlb;
2799 int tree_locked;
2801 /* Figure out our bucket and lock it. */
2802 INP_WLOCK_ASSERT(inp);
2803 tlb = tp->t_lib;
2804 if (tlb == NULL) {
2806 * No bucket; treat this like a request to dump a single
2807 * session's traces.
2809 (void)tcp_log_dump_tp_logbuf(tp, reason, M_WAITOK, true);
2810 INP_WUNLOCK(inp);
2811 return;
2812 }
2813 TCPID_BUCKET_REF(tlb);
2814 INP_WUNLOCK(inp);
2815 TCPID_BUCKET_LOCK(tlb);
2817 /* If we are the last reference, we have nothing more to do here. */
2818 tree_locked = TREE_UNLOCKED;
2819 if (tcp_log_unref_bucket(tlb, &tree_locked, NULL)) {
2820 switch (tree_locked) {
2821 case TREE_WLOCKED:
2822 TCPID_TREE_WUNLOCK();
2823 break;
2824 case TREE_RLOCKED:
2825 TCPID_TREE_RUNLOCK();
2826 break;
2827 }
2828 return;
2829 }
2831 /* Turn this over to tcp_log_dumpbucketlogs() to finish the work. */
2832 tcp_log_dumpbucketlogs(tlb, reason);
2836 * Mark the end of a flow with the current stack. A stack can add
2837 * stack-specific info to this trace event by overriding this
2838 * function (see bbr_log_flowend() for example).
2841 tcp_log_flowend(struct tcpcb *tp)
2843 if (tp->_t_logstate != TCP_LOG_STATE_OFF) {
2844 struct socket *so = tptosocket(tp);
2845 TCP_LOG_EVENT(tp, NULL, &so->so_rcv, &so->so_snd,
2846 TCP_LOG_FLOWEND, 0, 0, NULL, false);
2851 tcp_log_sendfile(struct socket *so, off_t offset, size_t nbytes, int flags)
2853 struct inpcb *inp;
2854 struct tcpcb *tp;
2855 #ifdef TCP_REQUEST_TRK
2856 struct tcp_sendfile_track *ent;
2857 int i, fnd;
2860 inp = sotoinpcb(so);
2861 KASSERT(inp != NULL, ("tcp_log_sendfile: inp == NULL"));
2863 /* quick check to see if logging is enabled for this connection */
2864 tp = intotcpcb(inp);
2865 if ((inp->inp_flags & INP_DROPPED) ||
2866 (tp->_t_logstate == TCP_LOG_STATE_OFF)) {
2867 return;
2868 }
2870 INP_WLOCK(inp);
2871 /* double check log state now that we have the lock */
2872 if (inp->inp_flags & INP_DROPPED)
2873 goto done;
2874 if (tp->_t_logstate != TCP_LOG_STATE_OFF) {
2875 struct timeval tv;
2876 tcp_log_eventspecific_t log;
2878 microuptime(&tv);
2879 log.u_sf.offset = offset;
2880 log.u_sf.length = nbytes;
2881 log.u_sf.flags = flags;
2883 TCP_LOG_EVENTP(tp, NULL,
2884 &tptosocket(tp)->so_rcv,
2885 &tptosocket(tp)->so_snd,
2886 TCP_LOG_SENDFILE, 0, 0, &log, false, &tv);
2888 #ifdef TCP_REQUEST_TRK
2889 if (tp->t_tcpreq_req == 0) {
2890 /* No HTTP requests to track */
2891 goto done;
2892 }
2893 fnd = 0;
2894 if (tp->t_tcpreq_closed == 0) {
2895 /* No closed-end request to track */
2896 goto skip_closed_req;
2897 }
2898 for (i = 0; i < MAX_TCP_TRK_REQ; i++) {
2899 /* Let's see if this one can be found */
2900 ent = &tp->t_tcpreq_info[i];
2901 if (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) {
2902 /* Not used */
2903 continue;
2904 }
2905 if (ent->flags & TCP_TRK_TRACK_FLG_OPEN) {
2906 /* This pass does not consider open requests */
2907 continue;
2908 }
2909 if (ent->flags & TCP_TRK_TRACK_FLG_COMP) {
2910 /* Don't look at what we have completed */
2911 continue;
2912 }
2913 /* If we reach here, it is an allocated closed-end request */
2914 if ((ent->start == offset) ||
2915 ((offset > ent->start) && (offset < ent->end))) {
2916 /* It is within this request. */
2917 fnd = 1;
2918 }
2919 if (fnd) {
2921 * It is at or past the end, so it is complete.
2923 ent->flags |= TCP_TRK_TRACK_FLG_SEQV;
2925 * When an entry completes we can take (snd_una + sb_ccc) and know where
2926 * the end of the range really is. Note that this works since two
2927 * requests must be sequential and sendfile is now complete for *this* request.
2928 * We must use sb_ccc since the data may still be in-flight in TLS.
2930 * We always cautiously move the end_seq only if our calculations
2931 * show it happened (just in case sendfile calls in here at the wrong
2932 * place). Once we go COMP we will stop coming here and hopefully be
2933 * left with the correct end_seq.
2935 if (SEQ_GT((tp->snd_una + so->so_snd.sb_ccc), ent->end_seq))
2936 ent->end_seq = tp->snd_una + so->so_snd.sb_ccc;
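/*
 * Worked example of the update above (illustrative numbers): with
 * snd_una == 1000 and sb_ccc == 4000, every byte of this request has
 * already been assigned a sequence number below 5000, so end_seq may
 * safely advance to 1000 + 4000 == 5000. SEQ_GT() keeps the update
 * monotonic even across 32-bit sequence-number wraparound.
 */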
2937 if ((offset + nbytes) >= ent->end) {
2938 ent->flags |= TCP_TRK_TRACK_FLG_COMP;
2939 tcp_req_log_req_info(tp, ent, i, TCP_TRK_REQ_LOG_COMPLETE, offset, nbytes);
2940 } else {
2941 tcp_req_log_req_info(tp, ent, i, TCP_TRK_REQ_LOG_MOREYET, offset, nbytes);
2942 }
2943 /* We assume that sendfile never sends overlapping requests */
2944 goto done;
2945 }
2946 }
2948 skip_closed_req:
2949 /* OK, now let's look for open requests */
2950 for (i = 0; i < MAX_TCP_TRK_REQ; i++) {
2951 ent = &tp->t_tcpreq_info[i];
2952 if (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) {
2953 /* Not used */
2954 continue;
2955 }
2956 if ((ent->flags & TCP_TRK_TRACK_FLG_OPEN) == 0)
2957 continue;
2958 /* If we reach here, it is an allocated open request */
2959 if (ent->start == offset) {
2960 /* It begins this request */
2961 ent->start_seq = tp->snd_una +
2962 tptosocket(tp)->so_snd.sb_ccc;
2963 ent->flags |= TCP_TRK_TRACK_FLG_SEQV;
2964 break;
2965 } else if (offset > ent->start) {
2966 ent->flags |= TCP_TRK_TRACK_FLG_SEQV;