/*-
 * Copyright (c) 2015
 *	Jonathan Looney. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/eventhandler.h>
#include <machine/atomic.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_pcap.h>

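/*
 * The number of bytes between the start of the mbuf's storage and its
 * current data pointer. Unlike M_LEADINGSPACE(), this does not require
 * the mbuf to be writable, since we never write into that space.
 */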
#define M_LEADINGSPACE_NOWRITE(m)					\
	((m)->m_data - M_START(m))

int tcp_pcap_aggressive_free = 1;
static int tcp_pcap_clusters_referenced_cur = 0;
static int tcp_pcap_clusters_referenced_max = 0;

SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_pcap_aggressive_free,
	CTLFLAG_RW, &tcp_pcap_aggressive_free, 0,
	"Free saved packets when the memory system comes under pressure");
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_pcap_clusters_referenced_cur,
	CTLFLAG_RD, &tcp_pcap_clusters_referenced_cur, 0,
	"Number of clusters currently referenced on TCP PCAP queues");
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_pcap_clusters_referenced_max,
	CTLFLAG_RW, &tcp_pcap_clusters_referenced_max, 0,
	"Maximum number of clusters allowed to be referenced on TCP PCAP "
	"queues");

static int tcp_pcap_alloc_reuse_ext = 0;
static int tcp_pcap_alloc_reuse_mbuf = 0;
static int tcp_pcap_alloc_new_mbuf = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_pcap_alloc_reuse_ext,
	CTLFLAG_RD, &tcp_pcap_alloc_reuse_ext, 0,
	"Number of mbufs with external storage reused for the TCP PCAP "
	"functionality");
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_pcap_alloc_reuse_mbuf,
	CTLFLAG_RD, &tcp_pcap_alloc_reuse_mbuf, 0,
	"Number of mbufs with internal storage reused for the TCP PCAP "
	"functionality");
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_pcap_alloc_new_mbuf,
	CTLFLAG_RD, &tcp_pcap_alloc_new_mbuf, 0,
	"Number of new mbufs allocated for the TCP PCAP functionality");

VNET_DEFINE(int, tcp_pcap_packets) = 0;
#define V_tcp_pcap_packets	VNET(tcp_pcap_packets)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_pcap_packets,
	CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_pcap_packets), 0,
	"Default number of packets saved per direction per TCPCB");

/* Cap cluster references at one quarter of the system-wide cluster pool. */
static void
tcp_pcap_max_set(void)
{

	tcp_pcap_clusters_referenced_max = nmbclusters / 4;
}

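/*
 * Set the initial limit and re-evaluate it whenever nmbclusters changes,
 * so the cap continues to track the system's cluster budget.
 */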
void
tcp_pcap_init(void)
{

	tcp_pcap_max_set();
	EVENTHANDLER_REGISTER(nmbclusters_change, tcp_pcap_max_set,
	    NULL, EVENTHANDLER_PRI_ANY);
}

/*
 * If we are below the maximum allowed cluster references,
 * increment the reference count and return TRUE. Otherwise,
 * leave the reference count alone and return FALSE.
 */
static __inline bool
tcp_pcap_take_cluster_reference(void)
{
	/*
	 * Optimistically increment, then back out if that pushed us past
	 * the limit. The counter can therefore transiently overshoot the
	 * maximum, which is acceptable for this advisory cap.
	 */
	if (atomic_fetchadd_int(&tcp_pcap_clusters_referenced_cur, 1) >=
	    tcp_pcap_clusters_referenced_max) {
		atomic_add_int(&tcp_pcap_clusters_referenced_cur, -1);
		return (FALSE);
	}
	return (TRUE);
}

/*
 * For all the external entries in m, apply the given adjustment.
 * This can be used to adjust the counter when an mbuf chain is
 * copied or freed.
 */
static __inline void
tcp_pcap_adj_cluster_reference(struct mbuf *m, int adj)
{

	while (m) {
		if (m->m_flags & M_EXT)
			atomic_add_int(&tcp_pcap_clusters_referenced_cur, adj);

		m = m->m_next;
	}
}

/*
 * Free all mbufs in a chain, decrementing the reference count as
 * necessary.
 *
 * Functions in this file should use this instead of m_freem() when
 * they are freeing mbuf chains that may contain clusters that were
 * already included in tcp_pcap_clusters_referenced_cur.
 */
static void
tcp_pcap_m_freem(struct mbuf *mb)
{

	while (mb != NULL) {
		if (mb->m_flags & M_EXT)
			atomic_subtract_int(&tcp_pcap_clusters_referenced_cur,
			    1);
		mb = m_free(mb);
	}
}

/*
 * Copy data from m to n, where n cannot fit all the data we might
 * want from m.
 *
 * Prioritize data like this:
 * 1. TCP header
 * 2. IP header
 * 3. Data
 */
static void
tcp_pcap_copy_bestfit(struct tcphdr *th, struct mbuf *m, struct mbuf *n)
{
	struct mbuf *m_cur = m;
	int bytes_to_copy = 0, trailing_data, skip = 0, tcp_off;

	/* Below, we assume these will be non-NULL. */
	KASSERT(th, ("%s: called with th == NULL", __func__));
	KASSERT(m, ("%s: called with m == NULL", __func__));
	KASSERT(n, ("%s: called with n == NULL", __func__));

	/* We assume this initialization occurred elsewhere. */
	KASSERT(n->m_len == 0, ("%s: called with n->m_len=%d (expected 0)",
	    __func__, n->m_len));
	KASSERT(n->m_data == M_START(n),
	    ("%s: called with n->m_data != M_START(n)", __func__));

	/*
	 * Calculate the size of the TCP header. We use this often
	 * enough that it is worth just calculating it once, at the start.
	 */
	tcp_off = th->th_off << 2;
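	/*
	 * th_off is in units of 32-bit words, so the shift converts to
	 * bytes: for example, th_off == 5 (no options) yields a 20-byte
	 * header, and the maximum th_off == 15 yields 60 bytes.
	 */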

	/* Trim off leading empty mbufs. */
	while (m && m->m_len == 0)
		m = m->m_next;

	if (m) {
		m_cur = m;
	}
	else {
		/*
		 * No data? Highly unusual. We would expect to at
		 * least see a TCP header in the mbuf. Since we have
		 * a pointer to the TCP header, settle for copying
		 * just that.
		 */
		bytes_to_copy = tcp_off;
		if (bytes_to_copy > M_SIZE(n))
			bytes_to_copy = M_SIZE(n);
		bcopy(th, n->m_data, bytes_to_copy);
		n->m_len = bytes_to_copy;
		return;
	}

	/*
	 * Find TCP header. Record the total number of bytes up to,
	 * and including, the TCP header.
	 */
	while (m_cur) {
		if ((caddr_t) th >= (caddr_t) m_cur->m_data &&
		    (caddr_t) th < (caddr_t) (m_cur->m_data + m_cur->m_len))
			break;
		bytes_to_copy += m_cur->m_len;
		m_cur = m_cur->m_next;
	}

	/* Bytes in this mbuf that precede th, plus the header itself. */
	bytes_to_copy += (caddr_t) th - (caddr_t) m_cur->m_data;
	bytes_to_copy += tcp_off;

	/*
	 * If we already want to copy more bytes than we can hold
	 * in the destination mbuf, skip leading bytes and copy
	 * what we can.
	 *
	 * Otherwise, consider trailing data.
	 */
	if (bytes_to_copy > M_SIZE(n)) {
		skip = bytes_to_copy - M_SIZE(n);
		bytes_to_copy = M_SIZE(n);
	}
	else {
		/*
		 * Determine how much trailing data is in the chain.
		 * We start with the length of this mbuf (the one
		 * containing th) and subtract the size of the TCP
		 * header (tcp_off) and the size of the data prior
		 * to th (th - m_cur->m_data).
		 *
		 * This *should not* be negative, as the TCP code
		 * should put the whole TCP header in a single
		 * mbuf. But, it isn't a problem if it is. We will
		 * simply work off our negative balance as we look
		 * at subsequent mbufs.
		 */
		trailing_data = m_cur->m_len - tcp_off;
		trailing_data -= (caddr_t) th - (caddr_t) m_cur->m_data;
		m_cur = m_cur->m_next;
		while (m_cur) {
			trailing_data += m_cur->m_len;
			m_cur = m_cur->m_next;
		}
		if ((bytes_to_copy + trailing_data) > M_SIZE(n))
			bytes_to_copy = M_SIZE(n);
		else
			bytes_to_copy += trailing_data;
	}

	m_copydata(m, skip, bytes_to_copy, n->m_data);
	n->m_len = bytes_to_copy;
}

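/*
 * Save a copy of the given packet on the queue, reusing a recycled mbuf
 * when the queue is already at its maximum depth. Where possible, the
 * copy shares the original's external storage rather than duplicating
 * the payload.
 */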
void
tcp_pcap_add(struct tcphdr *th, struct mbuf *m, struct mbufq *queue)
{
	struct mbuf *n = NULL, *mhead;

	KASSERT(th, ("%s: called with th == NULL", __func__));
	KASSERT(m, ("%s: called with m == NULL", __func__));
	KASSERT(queue, ("%s: called with queue == NULL", __func__));

	/* We only care about data packets. */
	while (m && m->m_type != MT_DATA)
		m = m->m_next;

	/* We only need to do something if we still have an mbuf. */
	if (!m)
		return;

	/* If we are not saving mbufs, return now. */
	if (queue->mq_maxlen == 0)
		return;

	/*
	 * Check to see if we will need to recycle mbufs.
	 *
	 * If we need to get rid of mbufs to stay below
	 * our packet count, try to reuse the mbuf. Once
	 * we already have a new mbuf (n), then we can
	 * simply free subsequent mbufs.
	 *
	 * Note that most of the logic in here is to deal
	 * with the reuse. If we are fine with constant
	 * mbuf allocs/deallocs, we could ditch this logic.
	 * But, it only seems to make sense to reuse
	 * mbufs we already have.
	 */
	while (mbufq_full(queue)) {
		mhead = mbufq_dequeue(queue);

		if (n) {
			/* We already recycled an mbuf; just free this one. */
			tcp_pcap_m_freem(mhead);
		}
		else {
			/*
			 * If this held an external cluster, try to
			 * detach the cluster. But, if we held the
			 * last reference, go through the normal
			 * free-ing process.
			 */
			if (mhead->m_flags & M_EXTPG) {
				/* Don't mess around with these. */
				tcp_pcap_m_freem(mhead);
				continue;
			} else if (mhead->m_flags & M_EXT) {
				switch (mhead->m_ext.ext_type) {
				case EXT_SFBUF:
					/* Don't mess around with these. */
					tcp_pcap_m_freem(mhead);
					continue;
				default:
					if (atomic_fetchadd_int(
					    mhead->m_ext.ext_cnt, -1) == 1)
					{
						/*
						 * We held the last reference
						 * on this cluster. Restore
						 * the reference count and put
						 * it back in the pool.
						 */
						*(mhead->m_ext.ext_cnt) = 1;
						tcp_pcap_m_freem(mhead);
						continue;
					}
					/*
					 * We were able to cleanly free the
					 * reference.
					 */
					atomic_subtract_int(
					    &tcp_pcap_clusters_referenced_cur,
					    1);
					tcp_pcap_alloc_reuse_ext++;
					break;
				}
			} else {
				tcp_pcap_alloc_reuse_mbuf++;
			}

			n = mhead;
			tcp_pcap_m_freem(n->m_next);
			m_init(n, M_NOWAIT, MT_DATA, 0);
		}
	}

	/* Check to see if we need to get a new mbuf. */
	if (!n) {
		if (!(n = m_get(M_NOWAIT, MT_DATA)))
			return;
		tcp_pcap_alloc_new_mbuf++;
	}

	/*
	 * What are we dealing with? If a cluster, attach it. Otherwise,
	 * try to copy the data from the beginning of the mbuf to the
	 * end of data. (There may be data between the start of the data
	 * area and the current data pointer. We want to get this, because
	 * it may contain header information that is useful.)
	 * In cases where that isn't possible, settle for what we can
	 * get.
	 */
	if ((m->m_flags & (M_EXT|M_EXTPG)) &&
	    tcp_pcap_take_cluster_reference()) {
		n->m_data = m->m_data;
		n->m_len = m->m_len;
		mb_dupcl(n, m);
	}
	else if (((m->m_data + m->m_len) - M_START(m)) <= M_SIZE(n)) {
		/*
		 * At this point, n is guaranteed to be a normal mbuf
		 * with no cluster and no packet header. Because the
		 * logic in this code block requires this, the assert
		 * is here to catch any instances where someone
		 * changes the logic to invalidate that assumption.
		 */
		KASSERT((n->m_flags & (M_EXT | M_PKTHDR)) == 0,
		    ("%s: Unexpected flags (%#x) for mbuf",
		    __func__, n->m_flags));
		n->m_data = n->m_dat + M_LEADINGSPACE_NOWRITE(m);
		n->m_len = m->m_len;
		if (m->m_flags & M_EXTPG)
			m_copydata(m, 0, m->m_len, n->m_data);
		else
			bcopy(M_START(m), n->m_dat,
			    m->m_len + M_LEADINGSPACE_NOWRITE(m));
	}
	else {
		/*
		 * This is the case where we need to "settle for what
		 * we can get". The most probable way to reach this code
		 * path is that we've already taken references to the
		 * maximum number of mbuf clusters we can, and the data
		 * is too long to fit in an mbuf's internal storage.
		 * Try for a "best fit".
		 */
		tcp_pcap_copy_bestfit(th, m, n);

		/* Don't try to get additional data. */
		goto add_to_queue;
	}

	if (m->m_next) {
		n->m_next = m_copym(m->m_next, 0, M_COPYALL, M_NOWAIT);
		tcp_pcap_adj_cluster_reference(n->m_next, 1);
	}

add_to_queue:
	/* Add the new mbuf to the list. */
	if (mbufq_enqueue(queue, n)) {
		/* This shouldn't happen. If INVARIANTS is defined, panic. */
		KASSERT(0, ("%s: mbufq was unexpectedly full!", __func__));
		tcp_pcap_m_freem(n);
	}
}

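/* Discard all packets saved on the queue, releasing any cluster references. */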
void
tcp_pcap_drain(struct mbufq *queue)
{
	struct mbuf *m;

	while ((m = mbufq_dequeue(queue)))
		tcp_pcap_m_freem(m);
}

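/* Initialize a connection's capture queues to the VNET default depth. */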
void
tcp_pcap_tcpcb_init(struct tcpcb *tp)
{
	mbufq_init(&(tp->t_inpkts), V_tcp_pcap_packets);
	mbufq_init(&(tp->t_outpkts), V_tcp_pcap_packets);
}

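/*
 * Change a queue's maximum depth, freeing the oldest saved packets if the
 * queue now exceeds the new limit.
 */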
void
tcp_pcap_set_sock_max(struct mbufq *queue, int newval)
{
	queue->mq_maxlen = newval;
	while (queue->mq_len > queue->mq_maxlen)
		tcp_pcap_m_freem(mbufq_dequeue(queue));
}

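/* Report the queue's current maximum depth. */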
int
tcp_pcap_get_sock_max(struct mbufq *queue)
{
	return queue->mq_maxlen;
}
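
/*
 * Usage sketch (informational, not part of this file): when capture is
 * enabled, the TCP input and output paths call tcp_pcap_add() with each
 * segment, and the TCP_PCAP_OUT/TCP_PCAP_IN socket options adjust the
 * per-socket queue depths through tcp_pcap_set_sock_max() and
 * tcp_pcap_get_sock_max().
 */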