/*-
 * Copyright (c) 2015
 *	Jonathan Looney. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/eventhandler.h>
#include <machine/atomic.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_pcap.h>
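
/*
 * This is the same calculation the kernel's M_LEADINGSPACE() performs,
 * minus the M_WRITABLE() check: it simply reports how many bytes sit
 * between the start of the data buffer and the current data pointer.
 */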
#define M_LEADINGSPACE_NOWRITE(m) \
    ((m)->m_data - M_START(m))

int tcp_pcap_aggressive_free = 1;
static int tcp_pcap_clusters_referenced_cur = 0;
static int tcp_pcap_clusters_referenced_max = 0;

SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_pcap_aggressive_free,
    CTLFLAG_RW, &tcp_pcap_aggressive_free, 0,
    "Free saved packets when the memory system comes under pressure");
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_pcap_clusters_referenced_cur,
    CTLFLAG_RD, &tcp_pcap_clusters_referenced_cur, 0,
    "Number of clusters currently referenced on TCP PCAP queues");
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_pcap_clusters_referenced_max,
    CTLFLAG_RW, &tcp_pcap_clusters_referenced_max, 0,
    "Maximum number of clusters allowed to be referenced on TCP PCAP "
    "queues");

static int tcp_pcap_alloc_reuse_ext = 0;
static int tcp_pcap_alloc_reuse_mbuf = 0;
static int tcp_pcap_alloc_new_mbuf = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_pcap_alloc_reuse_ext,
    CTLFLAG_RD, &tcp_pcap_alloc_reuse_ext, 0,
    "Number of mbufs with external storage reused for the TCP PCAP "
    "functionality");
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_pcap_alloc_reuse_mbuf,
    CTLFLAG_RD, &tcp_pcap_alloc_reuse_mbuf, 0,
    "Number of mbufs with internal storage reused for the TCP PCAP "
    "functionality");
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_pcap_alloc_new_mbuf,
    CTLFLAG_RD, &tcp_pcap_alloc_new_mbuf, 0,
    "Number of new mbufs allocated for the TCP PCAP functionality");

VNET_DEFINE(int, tcp_pcap_packets) = 0;
#define V_tcp_pcap_packets VNET(tcp_pcap_packets)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_pcap_packets,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_pcap_packets), 0,
    "Default number of packets saved per direction per TCPCB");

/* Initialize the values. */
static void
tcp_pcap_max_set(void)
{

    tcp_pcap_clusters_referenced_max = nmbclusters / 4;
}

void
tcp_pcap_init(void)
{

    tcp_pcap_max_set();
    EVENTHANDLER_REGISTER(nmbclusters_change, tcp_pcap_max_set,
        NULL, EVENTHANDLER_PRI_ANY);
}

/*
 * If we are below the maximum allowed cluster references,
 * increment the reference count and return TRUE. Otherwise,
 * leave the reference count alone and return FALSE.
 */
static __inline bool
tcp_pcap_take_cluster_reference(void)
{
    /*
     * Optimistically take the reference and roll it back if that
     * pushed us past the limit. Concurrent callers may therefore
     * briefly observe a counter slightly above the maximum.
     */
    if (atomic_fetchadd_int(&tcp_pcap_clusters_referenced_cur, 1) >=
        tcp_pcap_clusters_referenced_max) {
        atomic_add_int(&tcp_pcap_clusters_referenced_cur, -1);
        return (FALSE);
    }
    return (TRUE);
}

/*
 * For all the external entries in m, apply the given adjustment.
 * This can be used to adjust the counter when an mbuf chain is
 * copied or freed.
 */
static __inline void
tcp_pcap_adj_cluster_reference(struct mbuf *m, int adj)
{

    while (m) {
        if (m->m_flags & M_EXT)
            atomic_add_int(&tcp_pcap_clusters_referenced_cur, adj);

        m = m->m_next;
    }
}

/*
 * Free all mbufs in a chain, decrementing the reference count as
 * necessary.
 *
 * Functions in this file should use this instead of m_freem() when
 * they are freeing mbuf chains that may contain clusters that were
 * already included in tcp_pcap_clusters_referenced_cur.
 */
static void
tcp_pcap_m_freem(struct mbuf *mb)
{

    while (mb != NULL) {
        if (mb->m_flags & M_EXT)
            atomic_subtract_int(&tcp_pcap_clusters_referenced_cur,
                1);
        mb = m_free(mb);
    }
}

/*
 * Copy data from m to n, where n cannot fit all the data we might
 * want from m.
 *
 * Prioritize data like this:
 * 1. TCP header
 * 2. IP header
 * 3. Data
 */
static void
tcp_pcap_copy_bestfit(struct tcphdr *th, struct mbuf *m, struct mbuf *n)
{
    struct mbuf *m_cur = m;
    int bytes_to_copy=0, trailing_data, skip=0, tcp_off;

    /* Below, we assume these will be non-NULL. */
    KASSERT(th, ("%s: called with th == NULL", __func__));
    KASSERT(m, ("%s: called with m == NULL", __func__));
    KASSERT(n, ("%s: called with n == NULL", __func__));

    /* We assume this initialization occurred elsewhere. */
    KASSERT(n->m_len == 0, ("%s: called with n->m_len=%d (expected 0)",
        __func__, n->m_len));
    KASSERT(n->m_data == M_START(n),
        ("%s: called with n->m_data != M_START(n)", __func__));

    /*
     * Calculate the size of the TCP header. We use this often
     * enough that it is worth just calculating at the start.
     * (th_off counts 32-bit words, hence the shift.)
     */
    tcp_off = th->th_off << 2;

    /* Trim off leading empty mbufs. */
    while (m && m->m_len == 0)
        m = m->m_next;

    if (m) {
        m_cur = m;
    }
    else {
        /*
         * No data? Highly unusual. We would expect to at
         * least see a TCP header in the mbuf.
         * As we have a pointer to the TCP header, I guess
         * we should just copy that. (???)
         */
        bytes_to_copy = tcp_off;
        if (bytes_to_copy > M_SIZE(n))
            bytes_to_copy = M_SIZE(n);
        bcopy(th, n->m_data, bytes_to_copy);
        n->m_len = bytes_to_copy;
        return;
    }

    /*
     * Find TCP header. Record the total number of bytes up to,
     * and including, the TCP header.
     */
    while (m_cur) {
        if ((caddr_t) th >= (caddr_t) m_cur->m_data &&
            (caddr_t) th < (caddr_t) (m_cur->m_data + m_cur->m_len))
            break;
        bytes_to_copy += m_cur->m_len;
        m_cur = m_cur->m_next;
    }
    KASSERT(m_cur, ("%s: ran off the end of the chain without finding "
        "the TCP header", __func__));
    bytes_to_copy += (caddr_t) th - (caddr_t) m_cur->m_data;
    bytes_to_copy += tcp_off;

    /*
     * If we already want to copy more bytes than we can hold
     * in the destination mbuf, skip leading bytes and copy
     * what we can.
     *
     * Otherwise, consider trailing data.
     */
    if (bytes_to_copy > M_SIZE(n)) {
        skip = bytes_to_copy - M_SIZE(n);
        bytes_to_copy = M_SIZE(n);
    }
    else {
        /*
         * Determine how much trailing data is in the chain.
         * We start with the length of this mbuf (the one
         * containing th) and subtract the size of the TCP
         * header (tcp_off) and the size of the data prior
         * to th (th - m_cur->m_data).
         *
         * This *should not* be negative, as the TCP code
         * should put the whole TCP header in a single
         * mbuf. But, it isn't a problem if it is. We will
         * simply work off our negative balance as we look
         * at subsequent mbufs.
         */
        trailing_data = m_cur->m_len - tcp_off;
        trailing_data -= (caddr_t) th - (caddr_t) m_cur->m_data;
        m_cur = m_cur->m_next;
        while (m_cur) {
            trailing_data += m_cur->m_len;
            m_cur = m_cur->m_next;
        }
        if ((bytes_to_copy + trailing_data) > M_SIZE(n))
            bytes_to_copy = M_SIZE(n);
        else
            bytes_to_copy += trailing_data;
    }

    m_copydata(m, skip, bytes_to_copy, n->m_data);
    n->m_len = bytes_to_copy;
}
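
/*
 * Illustration (hypothetical numbers): if the chain holds 40 bytes of
 * headers ahead of th and a 20-byte TCP header, bytes_to_copy starts
 * at 60. With M_SIZE(n) == 200, up to 140 bytes of trailing payload
 * are also copied; with M_SIZE(n) == 48, the leading 12 bytes are
 * skipped, so the TCP header itself is kept intact at the tail.
 */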

void
tcp_pcap_add(struct tcphdr *th, struct mbuf *m, struct mbufq *queue)
{
    struct mbuf *n = NULL, *mhead;

    KASSERT(th, ("%s: called with th == NULL", __func__));
    KASSERT(m, ("%s: called with m == NULL", __func__));
    KASSERT(queue, ("%s: called with queue == NULL", __func__));

    /* We only care about data packets. */
    while (m && m->m_type != MT_DATA)
        m = m->m_next;

    /* We only need to do something if we still have an mbuf. */
    if (!m)
        return;

    /* If we are not saving mbufs, return now. */
    if (queue->mq_maxlen == 0)
        return;

    /*
     * Check to see if we will need to recycle mbufs.
     *
     * If we need to get rid of mbufs to stay below
     * our packet count, try to reuse the mbuf. Once
     * we already have a new mbuf (n), then we can
     * simply free subsequent mbufs.
     *
     * Note that most of the logic in here is to deal
     * with the reuse. If we are fine with constant
     * mbuf allocs/deallocs, we could ditch this logic.
     * But, it only seems to make sense to reuse
     * mbufs we already have.
     */
    while (mbufq_full(queue)) {
        mhead = mbufq_dequeue(queue);

        /* Once we have a reusable mbuf (n), just free the rest. */
        if (n != NULL) {
            tcp_pcap_m_freem(mhead);
            continue;
        }

        /*
         * If this held an external cluster, try to
         * detach the cluster. But, if we held the
         * last reference, go through the normal
         * free-ing process.
         */
        if (mhead->m_flags & M_EXTPG) {
            /* Don't mess around with these. */
            tcp_pcap_m_freem(mhead);
            continue;
        } else if (mhead->m_flags & M_EXT) {
            switch (mhead->m_ext.ext_type) {
            case EXT_SFBUF:
                /* Don't mess around with these. */
                tcp_pcap_m_freem(mhead);
                continue;
            default:
                if (atomic_fetchadd_int(
                    mhead->m_ext.ext_cnt, -1) == 1)
                {
                    /*
                     * We held the last reference
                     * on this cluster. Restore
                     * the reference count and put
                     * it back in the pool.
                     */
                    *(mhead->m_ext.ext_cnt) = 1;
                    tcp_pcap_m_freem(mhead);
                    continue;
                }
                /*
                 * We were able to cleanly free the
                 * reference.
                 */
                atomic_subtract_int(
                    &tcp_pcap_clusters_referenced_cur, 1);
                tcp_pcap_alloc_reuse_ext++;
                break;
            }
        } else
            tcp_pcap_alloc_reuse_mbuf++;

        n = mhead;
        tcp_pcap_m_freem(n->m_next);
        m_init(n, M_NOWAIT, MT_DATA, 0);
    }

    /* Check to see if we need to get a new mbuf. */
    if (!n) {
        if (!(n = m_get(M_NOWAIT, MT_DATA)))
            return;
        tcp_pcap_alloc_new_mbuf++;
    }

    /*
     * What are we dealing with? If a cluster, attach it. Otherwise,
     * try to copy the data from the beginning of the mbuf to the
     * end of data. (There may be data between the start of the data
     * area and the current data pointer. We want to get this, because
     * it may contain header information that is useful.)
     * In cases where that isn't possible, settle for what we can
     * get.
     */
    if ((m->m_flags & (M_EXT|M_EXTPG)) &&
        tcp_pcap_take_cluster_reference()) {
        n->m_data = m->m_data;
        n->m_len = m->m_len;
        mb_dupcl(n, m);
    }
    else if (((m->m_data + m->m_len) - M_START(m)) <= M_SIZE(n)) {
        /*
         * At this point, n is guaranteed to be a normal mbuf
         * with no cluster and no packet header. Because the
         * logic in this code block requires this, the assert
         * is here to catch any instances where someone
         * changes the logic to invalidate that assumption.
         */
        KASSERT((n->m_flags & (M_EXT | M_PKTHDR)) == 0,
            ("%s: Unexpected flags (%#x) for mbuf",
            __func__, n->m_flags));
        n->m_data = n->m_dat + M_LEADINGSPACE_NOWRITE(m);
        n->m_len = m->m_len;
        if (m->m_flags & M_EXTPG)
            m_copydata(m, 0, m->m_len, n->m_data);
        else
            bcopy(M_START(m), n->m_dat,
                m->m_len + M_LEADINGSPACE_NOWRITE(m));
    }
    else {
        /*
         * This is the case where we need to "settle for what
         * we can get". The most probable way to this code
         * path is that we've already taken references to the
         * maximum number of mbuf clusters we can, and the data
         * is too long to fit in an mbuf's internal storage.
         * Try for a "best fit".
         */
        tcp_pcap_copy_bestfit(th, m, n);

        /* Don't try to get additional data. */
        goto add_to_queue;
    }

    if (m->m_next) {
        n->m_next = m_copym(m->m_next, 0, M_COPYALL, M_NOWAIT);
        tcp_pcap_adj_cluster_reference(n->m_next, 1);
    }

add_to_queue:
    /* Add the new mbuf to the list. */
    if (mbufq_enqueue(queue, n)) {
        /* This shouldn't happen. If INVARIANTS is defined, panic. */
        KASSERT(0, ("%s: mbufq was unexpectedly full!", __func__));
        tcp_pcap_m_freem(n);
    }
}

void
tcp_pcap_drain(struct mbufq *queue)
{
    struct mbuf *m;

    while ((m = mbufq_dequeue(queue)))
        tcp_pcap_m_freem(m);
}

void
tcp_pcap_tcpcb_init(struct tcpcb *tp)
{
    mbufq_init(&(tp->t_inpkts), V_tcp_pcap_packets);
    mbufq_init(&(tp->t_outpkts), V_tcp_pcap_packets);
}
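
/*
 * The next two helpers manage the per-socket packet limits (in
 * FreeBSD, these are driven by the TCP_PCAP_OUT and TCP_PCAP_IN
 * socket options). Shrinking a limit frees the oldest saved packets
 * immediately.
 */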

void
tcp_pcap_set_sock_max(struct mbufq *queue, int newval)
{
    queue->mq_maxlen = newval;
    while (queue->mq_len > queue->mq_maxlen)
        tcp_pcap_m_freem(mbufq_dequeue(queue));
}

int
tcp_pcap_get_sock_max(struct mbufq *queue)
{
    return (queue->mq_maxlen);
}