* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 1990, 1991, 1993
* The Regents of the University of California. All rights reserved.
* Copyright (c) 2019 Andrey V. Elsukov <ae@FreeBSD.org>
* This code is derived from the Stanford/CMU enet packet filter,
* (net/enet.c) distributed as part of 4.3BSD, and code contributed
* to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
* Berkeley Laboratory.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* @(#)bpf.c 8.4 (Berkeley) 1/9/95
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_netgraph.h"
#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/event.h>
#include <sys/socket.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/bpf_buffer.h>
#include <net/bpf_jitter.h>
#include <net/bpf_zerocopy.h>
#include <net/bpfdesc.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <net80211/ieee80211_freebsd.h>
#include <security/mac/mac_framework.h>
MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
static struct bpf_if_ext dead_bpf_if = {
.bif_dlist = CK_LIST_HEAD_INITIALIZER()
#define bif_next bif_ext.bif_next
#define bif_dlist bif_ext.bif_dlist
struct bpf_if_ext bif_ext; /* public members */
u_int bif_dlt; /* link layer type */
u_int bif_hdrlen; /* length of link header */
struct bpfd_list bif_wlist; /* writer-only list */
struct ifnet *bif_ifp; /* corresponding interface */
struct bpf_if **bif_bpf; /* Pointer to pointer to us */
volatile u_int bif_refcnt;
struct epoch_context epoch_ctx;
CTASSERT(offsetof(struct bpf_if, bif_ext) == 0);
struct bpf_program_buffer {
struct epoch_context epoch_ctx;
bpf_jit_filter *func;
#if defined(DEV_BPF) || defined(NETGRAPH_BPF)
#define PRINET 26 /* interruptible */
#define SIZEOF_BPF_HDR(type) \
(offsetof(type, bh_hdrlen) + sizeof(((type *)0)->bh_hdrlen))
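/*
 * Editor's note: SIZEOF_BPF_HDR() measures a header type up to and
 * including its bh_hdrlen member, deliberately excluding any tail padding
 * the compiler adds after it. For a header whose last member is a 16-bit
 * bh_hdrlen it evaluates to offsetof(type, bh_hdrlen) + 2, even when
 * sizeof(type) is larger because of padding.
 */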
#ifdef COMPAT_FREEBSD32
#include <sys/mount.h>
#include <compat/freebsd32/freebsd32.h>
#define BPF_ALIGNMENT32 sizeof(int32_t)
#define BPF_WORDALIGN32(x) roundup2(x, BPF_ALIGNMENT32)
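/*
 * Editor's example: BPF_WORDALIGN32() rounds a record length up to the
 * 4-byte boundary used by 32-bit compat streams, e.g.
 * BPF_WORDALIGN32(5) == 8 and BPF_WORDALIGN32(8) == 8.
 */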
* 32-bit version of structure prepended to each packet. We use this header
* instead of the standard one for 32-bit streams. We mark a stream as
* 32-bit the first time we see a 32-bit compat ioctl request.
struct timeval32 bh_tstamp; /* time stamp */
uint32_t bh_caplen; /* length of captured portion */
uint32_t bh_datalen; /* original length of packet */
uint16_t bh_hdrlen; /* length of bpf header (this struct
plus alignment padding) */
struct bpf_program32 {
struct bpf_dltlist32 {
#define BIOCSETF32 _IOW('B', 103, struct bpf_program32)
#define BIOCSRTIMEOUT32 _IOW('B', 109, struct timeval32)
#define BIOCGRTIMEOUT32 _IOR('B', 110, struct timeval32)
#define BIOCGDLTLIST32 _IOWR('B', 121, struct bpf_dltlist32)
#define BIOCSETWF32 _IOW('B', 123, struct bpf_program32)
#define BIOCSETFNR32 _IOW('B', 130, struct bpf_program32)
#define BPF_LOCK() sx_xlock(&bpf_sx)
#define BPF_UNLOCK() sx_xunlock(&bpf_sx)
#define BPF_LOCK_ASSERT() sx_assert(&bpf_sx, SA_XLOCKED)
* bpf_iflist is a list of BPF interface structures, each corresponding to a
* specific DLT. The same network interface might have several BPF interface
* structures registered by different layers in the stack (e.g., 802.11
* frames, Ethernet frames, etc.).
CK_LIST_HEAD(bpf_iflist, bpf_if);
static struct bpf_iflist bpf_iflist;
static struct sx bpf_sx; /* bpf global lock */
static int bpf_bpfd_cnt;
static void bpfif_ref(struct bpf_if *);
static void bpfif_rele(struct bpf_if *);
static void bpfd_ref(struct bpf_d *);
static void bpfd_rele(struct bpf_d *);
static void bpf_attachd(struct bpf_d *, struct bpf_if *);
static void bpf_detachd(struct bpf_d *);
static void bpf_detachd_locked(struct bpf_d *, bool);
static void bpfd_free(epoch_context_t);
static int bpf_movein(struct uio *, int, struct ifnet *, struct mbuf **,
struct sockaddr *, int *, struct bpf_d *);
static int bpf_setif(struct bpf_d *, struct ifreq *);
static void bpf_timed_out(void *);
bpf_wakeup(struct bpf_d *);
static void catchpacket(struct bpf_d *, u_char *, u_int, u_int,
void (*)(struct bpf_d *, caddr_t, u_int, void *, u_int),
static void reset_d(struct bpf_d *);
static int bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
static int bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int bpf_setdlt(struct bpf_d *, u_int);
static void filt_bpfdetach(struct knote *);
static int filt_bpfread(struct knote *, long);
static void bpf_drvinit(void *);
static int bpf_stats_sysctl(SYSCTL_HANDLER_ARGS);
SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl");
int bpf_maxinsns = BPF_MAXINSNS;
SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW,
&bpf_maxinsns, 0, "Maximum bpf program instructions");
static int bpf_zerocopy_enable = 0;
SYSCTL_INT(_net_bpf, OID_AUTO, zerocopy_enable, CTLFLAG_RW,
&bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions");
static SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_MPSAFE | CTLFLAG_RW,
bpf_stats_sysctl, "bpf statistics portal");
VNET_DEFINE_STATIC(int, bpf_optimize_writers) = 0;
#define V_bpf_optimize_writers VNET(bpf_optimize_writers)
SYSCTL_INT(_net_bpf, OID_AUTO, optimize_writers, CTLFLAG_VNET | CTLFLAG_RW,
&VNET_NAME(bpf_optimize_writers), 0,
"Do not send packets until BPF program is set");
static d_open_t bpfopen;
static d_read_t bpfread;
static d_write_t bpfwrite;
static d_ioctl_t bpfioctl;
static d_poll_t bpfpoll;
static d_kqfilter_t bpfkqfilter;
static struct cdevsw bpf_cdevsw = {
.d_version = D_VERSION,
.d_kqfilter = bpfkqfilter,
static struct filterops bpfread_filtops = {
.f_detach = filt_bpfdetach,
.f_event = filt_bpfread,
* LOCKING MODEL USED BY BPF
* 1) global lock (BPF_LOCK). Sx, used to protect some global counters,
* every bpf_iflist change, and to serialize ioctl access to bpf descriptors.
* 2) Descriptor lock. Mutex, used to protect BPF buffers and various
* structure fields used by bpf_*tap* code.
* Lock order: global lock, then descriptor lock.
* There are several possible consumers:
* 1. The kernel registers an interface pointer with bpfattach().
* Each call allocates a new bpf_if structure, references the ifnet pointer
* and links the bpf_if into the bpf_iflist chain. This is protected with the global
* 2. A userland application issues ioctl() calls on a bpf_d descriptor.
* All such calls are serialized with the global lock. BPF filters can be
* changed, but the pointer to the old filter will be freed using epoch_call().
* Thus it should be safe for bpf_tap/bpf_mtap* code to access the
* filter pointers, even if a change happens during bpf_tap execution.
* Destroying a bpf_d descriptor is also done using epoch_call().
* 3. A userland application can write packets into a bpf_d descriptor.
* Here we need to be sure that the ifnet won't disappear during bpfwrite().
* 4. The kernel invokes bpf_tap/bpf_mtap* functions. The access to
* bif_dlist is protected with a net_epoch_preempt section. So, it should
* be safe to access the bpf_d descriptor inside the section.
* 5. The kernel invokes bpfdetach() on interface destruction. All lists
* are modified with the global lock held and the actual free() is done using
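/*
 * Editor's sketch (guarded out of the build; illustrative only) of the
 * reader-side pattern item 4 above describes: bpf_tap-style consumers walk
 * bif_dlist inside a network epoch section, so a descriptor freed via
 * epoch_call() cannot be reclaimed while a reader is still inside the
 * section.
 */
#if 0
static void
example_epoch_reader(struct bpf_if *bp)
{
	struct epoch_tracker et;
	struct bpf_d *d;

	NET_EPOCH_ENTER(et);
	CK_LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		/* d may be dereferenced safely here; its free is deferred. */
	}
	NET_EPOCH_EXIT(et);
}
#endif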
bpfif_free(epoch_context_t ctx)
bp = __containerof(ctx, struct bpf_if, epoch_ctx);
if_rele(bp->bif_ifp);
bpfif_ref(struct bpf_if *bp)
refcount_acquire(&bp->bif_refcnt);
bpfif_rele(struct bpf_if *bp)
if (!refcount_release(&bp->bif_refcnt))
epoch_call(net_epoch_preempt, &bp->epoch_ctx, bpfif_free);
bpfd_ref(struct bpf_d *d)
refcount_acquire(&d->bd_refcnt);
bpfd_rele(struct bpf_d *d)
if (!refcount_release(&d->bd_refcnt))
epoch_call(net_epoch_preempt, &d->epoch_ctx, bpfd_free);
static struct bpf_program_buffer*
bpf_program_buffer_alloc(size_t size, int flags)
return (malloc(sizeof(struct bpf_program_buffer) + size,
bpf_program_buffer_free(epoch_context_t ctx)
struct bpf_program_buffer *ptr;
ptr = __containerof(ctx, struct bpf_program_buffer, epoch_ctx);
if (ptr->func != NULL)
bpf_destroy_jit_filter(ptr->func);
* Wrapper functions for various buffering methods. If the set of buffer
* modes expands, we will probably want to introduce a switch data structure
* similar to protosw, etc.
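/*
 * Editor's sketch of the protosw-style switch structure the comment above
 * contemplates. Hypothetical: no such ops table exists today; the code
 * below dispatches on bd_bufmode with explicit switch statements instead.
 */
#if 0
struct bpf_bufops {
	void	(*bops_append_bytes)(struct bpf_d *, caddr_t, u_int,
		    void *, u_int);
	void	(*bops_append_mbuf)(struct bpf_d *, caddr_t, u_int,
		    void *, u_int);
	int	(*bops_canfreebuf)(struct bpf_d *);
	int	(*bops_canwritebuf)(struct bpf_d *);
	void	(*bops_buf_reclaimed)(struct bpf_d *);
};
#endif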
bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
switch (d->bd_bufmode) {
case BPF_BUFMODE_BUFFER:
return (bpf_buffer_append_bytes(d, buf, offset, src, len));
case BPF_BUFMODE_ZBUF:
counter_u64_add(d->bd_zcopy, 1);
return (bpf_zerocopy_append_bytes(d, buf, offset, src, len));
panic("bpf_buf_append_bytes");
bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
switch (d->bd_bufmode) {
case BPF_BUFMODE_BUFFER:
return (bpf_buffer_append_mbuf(d, buf, offset, src, len));
case BPF_BUFMODE_ZBUF:
counter_u64_add(d->bd_zcopy, 1);
return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len));
panic("bpf_buf_append_mbuf");
* This function gets called when the free buffer is re-assigned.
bpf_buf_reclaimed(struct bpf_d *d)
switch (d->bd_bufmode) {
case BPF_BUFMODE_BUFFER:
case BPF_BUFMODE_ZBUF:
bpf_zerocopy_buf_reclaimed(d);
panic("bpf_buf_reclaimed");
* If the buffer mechanism has a way to decide that a held buffer can be made
* free, then it is exposed via the bpf_canfreebuf() interface. (1) is
* returned if the buffer can be discarded, (0) is returned if it cannot.
bpf_canfreebuf(struct bpf_d *d)
switch (d->bd_bufmode) {
case BPF_BUFMODE_ZBUF:
return (bpf_zerocopy_canfreebuf(d));
* Allow the buffer model to indicate that the current store buffer is
* immutable, regardless of the appearance of space. Return (1) if the
* buffer is writable, and (0) if not.
bpf_canwritebuf(struct bpf_d *d)
switch (d->bd_bufmode) {
case BPF_BUFMODE_ZBUF:
return (bpf_zerocopy_canwritebuf(d));
* Notify buffer model that an attempt to write to the store buffer has
* resulted in a dropped packet, in which case the buffer may be considered
bpf_buffull(struct bpf_d *d)
switch (d->bd_bufmode) {
case BPF_BUFMODE_ZBUF:
bpf_zerocopy_buffull(d);
* Notify the buffer model that a buffer has moved into the hold position.
bpf_bufheld(struct bpf_d *d)
switch (d->bd_bufmode) {
case BPF_BUFMODE_ZBUF:
bpf_zerocopy_bufheld(d);
bpf_free(struct bpf_d *d)
switch (d->bd_bufmode) {
case BPF_BUFMODE_BUFFER:
return (bpf_buffer_free(d));
case BPF_BUFMODE_ZBUF:
return (bpf_zerocopy_free(d));
panic("bpf_buf_free");
bpf_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio)
if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
return (bpf_buffer_uiomove(d, buf, len, uio));
bpf_ioctl_sblen(struct bpf_d *d, u_int *i)
if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
return (bpf_buffer_ioctl_sblen(d, i));
bpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
return (bpf_zerocopy_ioctl_getzmax(td, d, i));
bpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz));
bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
return (bpf_zerocopy_ioctl_setzbuf(td, d, bz));
* General BPF functions.
bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp,
struct sockaddr *sockp, int *hdrlen, struct bpf_d *d)
const struct ieee80211_bpf_params *p;
struct ether_header *eh;
* Build a sockaddr based on the data link layer type.
* We do this at this level because the ethernet header
* is copied directly into the data field of the sockaddr.
* In the case of SLIP, there is no header and the packet
* is forwarded as is.
* Also, we are careful to leave room at the front of the mbuf
* for the link level header.
sockp->sa_family = AF_INET;
sockp->sa_family = AF_UNSPEC;
/* XXX Would MAXLINKHDR be better? */
hlen = ETHER_HDR_LEN;
sockp->sa_family = AF_IMPLINK;
sockp->sa_family = AF_UNSPEC;
* null interface types require a 4-byte pseudo header which
* corresponds to the address family of the packet.
sockp->sa_family = AF_UNSPEC;
case DLT_ATM_RFC1483:
* en atm driver requires 4-byte atm pseudo header.
* though it isn't standard, vpi:vci needs to be
sockp->sa_family = AF_UNSPEC;
hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
sockp->sa_family = AF_UNSPEC;
hlen = 4; /* This should match PPP_HDRLEN */
case DLT_IEEE802_11: /* IEEE 802.11 wireless */
sockp->sa_family = AF_IEEE80211;
case DLT_IEEE802_11_RADIO: /* IEEE 802.11 wireless w/ phy params */
sockp->sa_family = AF_IEEE80211;
sockp->sa_len = 12; /* XXX != 0 */
hlen = sizeof(struct ieee80211_bpf_params);
len = uio->uio_resid;
if (len < hlen || len - hlen > ifp->if_mtu)
m = m_get2(len, M_WAITOK, MT_DATA, M_PKTHDR);
m->m_pkthdr.len = m->m_len = len;
error = uiomove(mtod(m, u_char *), len, uio);
slen = bpf_filter(d->bd_wfilter, mtod(m, u_char *), len, len);
/* Check for multicast destination */
eh = mtod(m, struct ether_header *);
if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost,
ETHER_ADDR_LEN) == 0)
m->m_flags |= M_BCAST;
m->m_flags |= M_MCAST;
if (d->bd_hdrcmplt == 0) {
memcpy(eh->ether_shost, IF_LLADDR(ifp),
sizeof(eh->ether_shost));
* Make room for link header, and copy it to sockaddr
if (sockp->sa_family == AF_IEEE80211) {
* Collect true length from the parameter header
* NB: sockp is known to be zero'd so if we do a
* short copy unspecified parameters will be
* NB: packet may not be aligned after stripping
p = mtod(m, const struct ieee80211_bpf_params *);
if (hlen > sizeof(sockp->sa_data)) {
bcopy(mtod(m, const void *), sockp->sa_data, hlen);
* Attach descriptor to the bpf interface, i.e. make d listen on bp,
* then reset its buffers and counters with reset_d().
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
* Save the sysctl value to protect from a sysctl change
op_w = V_bpf_optimize_writers || d->bd_writer;
if (d->bd_bif != NULL)
bpf_detachd_locked(d, false);
* Point d at bp, and add d to the interface's list.
* Since there are many applications using BPF for
* sending raw packets only (dhcpd and cdpd are good examples)
* we can delay adding d to the list of active listeners until
* some filter is configured.
* Hold a reference to the bpf_if while the descriptor uses this interface.
/* Add to writers-only list */
CK_LIST_INSERT_HEAD(&bp->bif_wlist, d, bd_next);
* We decrement bd_writer on every filter set operation.
* The first BIOCSETF is done by pcap_open_live() to set up
* the snap length. After that the application usually sets its own
CK_LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
CTR3(KTR_NET, "%s: bpf_attach called by pid %d, adding to %s list",
__func__, d->bd_pid, d->bd_writer ? "writer" : "active");
EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
* Check if we need to upgrade our descriptor @d from write-only mode.
bpf_check_upgrade(u_long cmd, struct bpf_d *d, struct bpf_insn *fcode,
int is_snap, need_upgrade;
* Check if we've already upgraded or the new filter is empty.
if (d->bd_writer == 0 || fcode == NULL)
* Check if cmd looks like a snaplen setting from
* pcap_bpf.c:pcap_open_live().
* Note we're not checking the .k value here:
* while pcap_open_live() definitely sets it to a non-zero value,
* we'd prefer to treat the k=0 (deny ALL) case the same way, i.e.
* do not consider upgrading immediately.
if (cmd == BIOCSETF && flen == 1 &&
fcode[0].code == (BPF_RET | BPF_K))
* We're setting the first filter and it doesn't look like
* setting snaplen. We're probably using bpf directly.
* Upgrade immediately.
* Do not require an upgrade on the first BIOCSETF
* (used to set snaplen) by pcap_open_live().
if (--d->bd_writer == 0) {
* The first snaplen filter has already
* been set. This is probably a catch-all
"%s: filter function set by pid %d, "
"bd_writer counter %d, snap %d upgrade %d",
__func__, d->bd_pid, d->bd_writer,
is_snap, need_upgrade);
return (need_upgrade);
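/*
 * Editor's example (values hypothetical): the snaplen program recognized
 * above is the single instruction pcap_open_live() installs first, e.g.
 *
 *	static struct bpf_insn snap_prog[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 65535),
 *	};
 *
 * with bf_len == 1. A lone BPF_RET|BPF_K instruction accepts (or, when
 * k == 0, drops) every packet, so it does not by itself upgrade a
 * writer-only descriptor.
 */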
* Detach a file from its interface.
bpf_detachd(struct bpf_d *d)
bpf_detachd_locked(d, false);
bpf_detachd_locked(struct bpf_d *d, bool detached_ifp)
CTR2(KTR_NET, "%s: detach required by pid %d", __func__, d->bd_pid);
/* Check if descriptor is attached */
if ((bp = d->bd_bif) == NULL)
* Remove d from the interface's descriptor list
* and wait until the bpf_[m]tap*() functions finish their possible work.
CK_LIST_REMOVE(d, bd_next);
/* Save bd_writer value */
error = d->bd_writer;
* Notify the descriptor that it's detached, so that any
* sleepers wake up and get ENXIO.
/* Call event handler iff d is attached */
EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0);
* Check if this descriptor had requested promiscuous mode.
* If so and the ifnet is not detached, turn it off.
if (d->bd_promisc && !detached_ifp) {
CURVNET_SET(ifp->if_vnet);
error = ifpromisc(ifp, 0);
if (error != 0 && error != ENXIO) {
* ENXIO can happen if a pccard is unplugged.
* Something is really wrong if we were able to put
* the driver into promiscuous mode, but can't
if_printf(bp->bif_ifp,
"bpf_detach: ifpromisc failed (%d)\n", error);
* Close the descriptor by detaching it from its interface,
* deallocating its buffers, and marking it free.
struct bpf_d *d = data;
if (d->bd_state == BPF_WAITING)
callout_stop(&d->bd_callout);
d->bd_state = BPF_IDLE;
funsetown(&d->bd_sigio);
mac_bpfdesc_destroy(d);
seldrain(&d->bd_sel);
knlist_destroy(&d->bd_sel.si_note);
callout_drain(&d->bd_callout);
* Open Ethernet device. Returns ENXIO for an illegal minor device number,
* EBUSY if the file is open by another process.
bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
error = devfs_set_cdevpriv(d, bpf_dtor);
d->bd_rcount = counter_u64_alloc(M_WAITOK);
d->bd_dcount = counter_u64_alloc(M_WAITOK);
d->bd_fcount = counter_u64_alloc(M_WAITOK);
d->bd_wcount = counter_u64_alloc(M_WAITOK);
d->bd_wfcount = counter_u64_alloc(M_WAITOK);
d->bd_wdcount = counter_u64_alloc(M_WAITOK);
d->bd_zcopy = counter_u64_alloc(M_WAITOK);
* For historical reasons, perform a one-time initialization call to
* the buffer routines, even though we're not yet committed to a
* particular buffer method.
if ((flags & FREAD) == 0)
d->bd_hbuf_in_use = 0;
d->bd_bufmode = BPF_BUFMODE_BUFFER;
d->bd_direction = BPF_D_INOUT;
BPF_PID_REFRESH(d, td);
mac_bpfdesc_create(td->td_ucred, d);
mtx_init(&d->bd_lock, devtoname(dev), "bpf cdev lock", MTX_DEF);
callout_init_mtx(&d->bd_callout, &d->bd_lock, 0);
knlist_init_mtx(&d->bd_sel.si_note, &d->bd_lock);
* bpfread - read next chunk of packets from buffers
bpfread(struct cdev *dev, struct uio *uio, int ioflag)
error = devfs_get_cdevpriv((void **)&d);
* Restrict application to use a buffer the same size as
if (uio->uio_resid != d->bd_bufsize)
non_block = ((ioflag & O_NONBLOCK) != 0);
BPF_PID_REFRESH_CUR(d);
if (d->bd_bufmode != BPF_BUFMODE_BUFFER) {
return (EOPNOTSUPP);
if (d->bd_state == BPF_WAITING)
callout_stop(&d->bd_callout);
timed_out = (d->bd_state == BPF_TIMED_OUT);
d->bd_state = BPF_IDLE;
while (d->bd_hbuf_in_use) {
error = mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
PRINET|PCATCH, "bd_hbuf", 0);
* If the hold buffer is empty, then do a timed sleep, which
* ends when the timeout expires or when enough packets
* have arrived to fill the store buffer.
while (d->bd_hbuf == NULL) {
if (d->bd_slen != 0) {
* One or more packets arrived since the previous
* read or while we were asleep.
if (d->bd_immediate || non_block || timed_out) {
* Rotate the buffers and return what's here
* if we are in immediate mode, the non-blocking
* flag is set, or this descriptor timed out.
* No data is available; check to see if the bpf device
* is still pointed at a real interface. If not, return
* ENXIO so that the userland process knows to rebind
* it before using it again.
if (d->bd_bif == NULL) {
return (EWOULDBLOCK);
error = msleep(d, &d->bd_lock, PRINET|PCATCH,
"bpf", d->bd_rtout);
if (error == EINTR || error == ERESTART) {
if (error == EWOULDBLOCK) {
* On a timeout, return what's in the buffer,
* which may be nothing. If there is something
* in the store buffer, we can rotate the buffers.
* We filled up the buffer in between
* getting the timeout and arriving
* here, so we don't need to rotate.
if (d->bd_slen == 0) {
* At this point, we know we have something in the hold slot.
d->bd_hbuf_in_use = 1;
* Move data from the hold buffer into user space.
* We know the entire buffer is transferred since
* we checked above that the read buffer is bpf_bufsize bytes.
* We do not have to worry about simultaneous reads because
* we waited for sole access to the hold buffer above.
error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio);
KASSERT(d->bd_hbuf != NULL, ("bpfread: lost bd_hbuf"));
d->bd_fbuf = d->bd_hbuf;
bpf_buf_reclaimed(d);
d->bd_hbuf_in_use = 0;
wakeup(&d->bd_hbuf_in_use);
* If there are processes sleeping on this descriptor, wake them up.
static __inline void
bpf_wakeup(struct bpf_d *d)
BPFD_LOCK_ASSERT(d);
if (d->bd_state == BPF_WAITING) {
callout_stop(&d->bd_callout);
d->bd_state = BPF_IDLE;
if (d->bd_async && d->bd_sig && d->bd_sigio)
pgsigio(&d->bd_sigio, d->bd_sig, 0);
selwakeuppri(&d->bd_sel, PRINET);
KNOTE_LOCKED(&d->bd_sel.si_note, 0);
bpf_timed_out(void *arg)
struct bpf_d *d = (struct bpf_d *)arg;
BPFD_LOCK_ASSERT(d);
if (callout_pending(&d->bd_callout) ||
!callout_active(&d->bd_callout))
if (d->bd_state == BPF_WAITING) {
d->bd_state = BPF_TIMED_OUT;
if (d->bd_slen != 0)
bpf_ready(struct bpf_d *d)
BPFD_LOCK_ASSERT(d);
if (!bpf_canfreebuf(d) && d->bd_hlen != 0)
if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
struct sockaddr dst;
struct epoch_tracker et;
struct mbuf *m, *mc;
error = devfs_get_cdevpriv((void **)&d);
NET_EPOCH_ENTER(et);
BPF_PID_REFRESH_CUR(d);
counter_u64_add(d->bd_wcount, 1);
if ((bp = d->bd_bif) == NULL) {
if ((ifp->if_flags & IFF_UP) == 0) {
if (uio->uio_resid == 0)
bzero(&dst, sizeof(dst));
* Take an extra reference, unlock d and exit the epoch section,
* since bpf_movein() can sleep.
error = bpf_movein(uio, (int)bp->bif_dlt, ifp,
&m, &dst, &hlen, d);
counter_u64_add(d->bd_wdcount, 1);
* Check that the descriptor is still attached to the interface.
* This can happen on bpfdetach(). To avoid accessing a detached
* ifnet, free the mbuf and return ENXIO.
if (d->bd_bif == NULL) {
counter_u64_add(d->bd_wdcount, 1);
counter_u64_add(d->bd_wfcount, 1);
dst.sa_family = pseudo_AF_HDRCMPLT;
if (d->bd_feedback) {
mc = m_dup(m, M_NOWAIT);
mc->m_pkthdr.rcvif = ifp;
/* Set M_PROMISC for outgoing packets to be discarded. */
if (d->bd_direction == BPF_D_INOUT)
m->m_flags |= M_PROMISC;
m->m_pkthdr.len -= hlen;
m->m_data += hlen; /* XXX */
CURVNET_SET(ifp->if_vnet);
mac_bpfdesc_create_mbuf(d, m);
mac_bpfdesc_create_mbuf(d, mc);
bzero(&ro, sizeof(ro));
ro.ro_prepend = (u_char *)&dst.sa_data;
ro.ro_flags = RT_HAS_HEADER;
/* Avoid possible recursion on BPFD_LOCK(). */
NET_EPOCH_ENTER(et);
error = (*ifp->if_output)(ifp, m, &dst, &ro);
counter_u64_add(d->bd_wdcount, 1);
(*ifp->if_input)(ifp, mc);
counter_u64_add(d->bd_wdcount, 1);
* Reset a descriptor by flushing its packet buffer and clearing the receive
* and drop counts. This is doable for kernel-only buffers, but with
* zero-copy buffers, we can't write to (or rotate) buffers that are
* currently owned by userspace. It would be nice if we could encapsulate
* this logic in the buffer code rather than here.
reset_d(struct bpf_d *d)
BPFD_LOCK_ASSERT(d);
while (d->bd_hbuf_in_use)
mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock, PRINET,
if ((d->bd_hbuf != NULL) &&
(d->bd_bufmode != BPF_BUFMODE_ZBUF || bpf_canfreebuf(d))) {
/* Free the hold buffer. */
d->bd_fbuf = d->bd_hbuf;
bpf_buf_reclaimed(d);
if (bpf_canwritebuf(d))
counter_u64_zero(d->bd_rcount);
counter_u64_zero(d->bd_dcount);
counter_u64_zero(d->bd_fcount);
counter_u64_zero(d->bd_wcount);
counter_u64_zero(d->bd_wfcount);
counter_u64_zero(d->bd_wdcount);
counter_u64_zero(d->bd_zcopy);
* FIONREAD Check for read packet available.
* BIOCGBLEN Get buffer len [for read()].
* BIOCSETF Set read filter.
* BIOCSETFNR Set read filter without resetting descriptor.
* BIOCSETWF Set write filter.
* BIOCFLUSH Flush read packet buffer.
* BIOCPROMISC Put interface into promiscuous mode.
* BIOCGDLT Get link layer type.
* BIOCGETIF Get interface name.
* BIOCSETIF Set interface.
* BIOCSRTIMEOUT Set read timeout.
* BIOCGRTIMEOUT Get read timeout.
* BIOCGSTATS Get packet stats.
* BIOCIMMEDIATE Set immediate mode.
* BIOCVERSION Get filter language version.
* BIOCGHDRCMPLT Get "header already complete" flag.
* BIOCSHDRCMPLT Set "header already complete" flag.
* BIOCGDIRECTION Get packet direction flag.
* BIOCSDIRECTION Set packet direction flag.
* BIOCGTSTAMP Get time stamp format and resolution.
* BIOCSTSTAMP Set time stamp format and resolution.
* BIOCLOCK Set "locked" flag.
* BIOCFEEDBACK Set packet feedback mode.
* BIOCSETZBUF Set current zero-copy buffer locations.
* BIOCGETZMAX Get maximum zero-copy buffer size.
* BIOCROTZBUF Force rotation of zero-copy buffer.
* BIOCSETBUFMODE Set buffer mode.
* BIOCGETBUFMODE Get current buffer mode.
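/*
 * Editor's sketch (userland, not part of the kernel build): the typical
 * opening sequence a capture program issues against the ioctls listed
 * above. Error handling is abbreviated and "em0" is a placeholder
 * interface name.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <net/bpf.h>
#include <fcntl.h>
#include <string.h>

static int
example_bpf_open(void)
{
	struct ifreq ifr;
	u_int imm = 1, blen;
	int fd;

	if ((fd = open("/dev/bpf", O_RDWR)) < 0)
		return (-1);
	if (ioctl(fd, BIOCGBLEN, &blen) < 0)	/* buffer size for read() */
		return (-1);
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
	if (ioctl(fd, BIOCSETIF, &ifr) < 0 ||	/* bind to an interface */
	    ioctl(fd, BIOCIMMEDIATE, &imm) < 0)	/* deliver packets ASAP */
		return (-1);
	return (fd);
}
#endif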
bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
error = devfs_get_cdevpriv((void **)&d);
* Refresh PID associated with this descriptor.
BPF_PID_REFRESH(d, td);
if (d->bd_state == BPF_WAITING)
callout_stop(&d->bd_callout);
d->bd_state = BPF_IDLE;
if (d->bd_locked == 1) {
#ifdef COMPAT_FREEBSD32
case BIOCGDLTLIST32:
#if defined(COMPAT_FREEBSD32) && defined(__amd64__)
case BIOCGRTIMEOUT32:
#if defined(COMPAT_FREEBSD32) && defined(__amd64__)
case BIOCSRTIMEOUT32:
#ifdef COMPAT_FREEBSD32
* If we see a 32-bit compat ioctl, mark the stream as 32-bit so
* that it will get 32-bit packet headers.
case BIOCGDLTLIST32:
case BIOCGRTIMEOUT32:
case BIOCSRTIMEOUT32:
if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
CURVNET_SET(TD_TO_VNET(td));
* Check for read packet available.
while (d->bd_hbuf_in_use)
mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
PRINET, "bd_hbuf", 0);
* Get buffer len [for read()].
*(u_int *)addr = d->bd_bufsize;
* Set buffer length.
error = bpf_ioctl_sblen(d, (u_int *)addr);
* Set link layer read filter.
#ifdef COMPAT_FREEBSD32
error = bpf_setf(d, (struct bpf_program *)addr, cmd);
* Flush read packet buffer.
* Put interface into promiscuous mode.
if (d->bd_bif == NULL) {
* No interface attached yet.
if (d->bd_promisc == 0) {
error = ifpromisc(d->bd_bif->bif_ifp, 1);
* Get current data link type.
if (d->bd_bif == NULL)
*(u_int *)addr = d->bd_bif->bif_dlt;
* Get a list of supported data link types.
#ifdef COMPAT_FREEBSD32
case BIOCGDLTLIST32:
struct bpf_dltlist32 *list32;
struct bpf_dltlist dltlist;
list32 = (struct bpf_dltlist32 *)addr;
dltlist.bfl_len = list32->bfl_len;
dltlist.bfl_list = PTRIN(list32->bfl_list);
if (d->bd_bif == NULL)
error = bpf_getdltlist(d, &dltlist);
list32->bfl_len = dltlist.bfl_len;
if (d->bd_bif == NULL)
error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
* Set data link type.
if (d->bd_bif == NULL)
error = bpf_setdlt(d, *(u_int *)addr);
* Get interface name.
if (d->bd_bif == NULL)
struct ifnet *const ifp = d->bd_bif->bif_ifp;
struct ifreq *const ifr = (struct ifreq *)addr;
strlcpy(ifr->ifr_name, ifp->if_xname,
sizeof(ifr->ifr_name));
int alloc_buf, size;
* Behavior here depends on the buffering model. If
* we're using kernel memory buffers, then we can
* allocate them here. If we're using zero-copy,
* then the user process must have registered buffers
* by the time we get here.
if (d->bd_bufmode == BPF_BUFMODE_BUFFER &&
size = d->bd_bufsize;
error = bpf_buffer_ioctl_sblen(d, &size);
error = bpf_setif(d, (struct ifreq *)addr);
#if defined(COMPAT_FREEBSD32) && defined(__amd64__)
case BIOCSRTIMEOUT32:
struct timeval *tv = (struct timeval *)addr;
#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
struct timeval32 *tv32;
struct timeval tv64;
if (cmd == BIOCSRTIMEOUT32) {
tv32 = (struct timeval32 *)addr;
tv->tv_sec = tv32->tv_sec;
tv->tv_usec = tv32->tv_usec;
tv = (struct timeval *)addr;
* Subtract 1 tick from tvtohz() since this isn't
if ((error = itimerfix(tv)) == 0)
d->bd_rtout = tvtohz(tv) - 1;
#if defined(COMPAT_FREEBSD32) && defined(__amd64__)
case BIOCGRTIMEOUT32:
#if defined(COMPAT_FREEBSD32) && defined(__amd64__)
struct timeval32 *tv32;
struct timeval tv64;
if (cmd == BIOCGRTIMEOUT32)
tv = (struct timeval *)addr;
tv->tv_sec = d->bd_rtout / hz;
tv->tv_usec = (d->bd_rtout % hz) * tick;
#if defined(COMPAT_FREEBSD32) && defined(__amd64__)
if (cmd == BIOCGRTIMEOUT32) {
tv32 = (struct timeval32 *)addr;
tv32->tv_sec = tv->tv_sec;
tv32->tv_usec = tv->tv_usec;
struct bpf_stat *bs = (struct bpf_stat *)addr;
/* XXXCSJP overflow */
bs->bs_recv = (u_int)counter_u64_fetch(d->bd_rcount);
bs->bs_drop = (u_int)counter_u64_fetch(d->bd_dcount);
* Set immediate mode.
d->bd_immediate = *(u_int *)addr;
struct bpf_version *bv = (struct bpf_version *)addr;
bv->bv_major = BPF_MAJOR_VERSION;
bv->bv_minor = BPF_MINOR_VERSION;
* Get "header already complete" flag.
*(u_int *)addr = d->bd_hdrcmplt;
* Set "header already complete" flag.
d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
* Get packet direction flag.
case BIOCGDIRECTION:
*(u_int *)addr = d->bd_direction;
* Set packet direction flag.
case BIOCSDIRECTION:
direction = *(u_int *)addr;
switch (direction) {
d->bd_direction = direction;
* Get packet timestamp format and resolution.
*(u_int *)addr = d->bd_tstamp;
* Set packet timestamp format and resolution.
func = *(u_int *)addr;
if (BPF_T_VALID(func))
d->bd_tstamp = func;
d->bd_feedback = *(u_int *)addr;
case FIONBIO: /* Non-blocking I/O */
case FIOASYNC: /* Send signal on receive packets */
d->bd_async = *(int *)addr;
* XXX: Add some sort of locking here?
* fsetown() can sleep.
error = fsetown(*(int *)addr, &d->bd_sigio);
*(int *)addr = fgetown(&d->bd_sigio);
/* This is deprecated, FIOSETOWN should be used instead. */
error = fsetown(-(*(int *)addr), &d->bd_sigio);
/* This is deprecated, FIOGETOWN should be used instead. */
*(int *)addr = -fgetown(&d->bd_sigio);
case BIOCSRSIG: /* Set receive signal */
sig = *(u_int *)addr;
*(u_int *)addr = d->bd_sig;
case BIOCGETBUFMODE:
*(u_int *)addr = d->bd_bufmode;
case BIOCSETBUFMODE:
* Allow the buffering mode to be changed as long as we
* haven't yet committed to a particular mode. Our
* definition of commitment, for now, is whether or not a
* buffer has been allocated or an interface attached, since
* that's the point where things get tricky.
switch (*(u_int *)addr) {
case BPF_BUFMODE_BUFFER:
case BPF_BUFMODE_ZBUF:
if (bpf_zerocopy_enable)
if (d->bd_sbuf != NULL || d->bd_hbuf != NULL ||
d->bd_fbuf != NULL || d->bd_bif != NULL) {
d->bd_bufmode = *(u_int *)addr;
error = bpf_ioctl_getzmax(td, d, (size_t *)addr);
error = bpf_ioctl_setzbuf(td, d, (struct bpf_zbuf *)addr);
error = bpf_ioctl_rotzbuf(td, d, (struct bpf_zbuf *)addr);
* Set d's packet filter program to fp. If this file already has a filter,
* free it and replace it. Returns EINVAL for bogus requests.
* Note we use the global lock here to serialize bpf_setf() and bpf_setif().
bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
#ifdef COMPAT_FREEBSD32
struct bpf_program fp_swab;
struct bpf_program32 *fp32;
struct bpf_program_buffer *fcode;
struct bpf_insn *filter;
bpf_jit_filter *jfunc;
#ifdef COMPAT_FREEBSD32
fp32 = (struct bpf_program32 *)fp;
fp_swab.bf_len = fp32->bf_len;
(struct bpf_insn *)(uintptr_t)fp32->bf_insns;
* Check the new filter's validity before acquiring any locks.
* Allocate memory for the new filter, if needed.
if (flen > bpf_maxinsns || (fp->bf_insns == NULL && flen != 0))
size = flen * sizeof(*fp->bf_insns);
/* We're setting up a new filter. Copy and check the actual data. */
fcode = bpf_program_buffer_alloc(size, M_WAITOK);
filter = (struct bpf_insn *)fcode->buffer;
if (copyin(fp->bf_insns, filter, size) != 0 ||
!bpf_validate(filter, flen)) {
if (cmd != BIOCSETWF) {
* The filter is copied inside fcode and is
jfunc = bpf_jitter(filter, flen);
track_event = false;
/* Set up new filter. */
if (cmd == BIOCSETWF) {
if (d->bd_wfilter != NULL) {
fcode = __containerof((void *)d->bd_wfilter,
struct bpf_program_buffer, buffer);
d->bd_wfilter = filter;
if (d->bd_rfilter != NULL) {
fcode = __containerof((void *)d->bd_rfilter,
struct bpf_program_buffer, buffer);
fcode->func = d->bd_bfilter;
d->bd_rfilter = filter;
d->bd_bfilter = jfunc;
if (cmd == BIOCSETF)
if (bpf_check_upgrade(cmd, d, filter, flen) != 0) {
* The filter can be set several times without
* specifying an interface. In this case just mark d
if (d->bd_bif != NULL) {
* Remove the descriptor from the writers-only list
* and add it to the active readers list.
CK_LIST_REMOVE(d, bd_next);
CK_LIST_INSERT_HEAD(&d->bd_bif->bif_dlist,
"%s: upgrade required by pid %d",
__func__, d->bd_pid);
epoch_call(net_epoch_preempt, &fcode->epoch_ctx,
bpf_program_buffer_free);
EVENTHANDLER_INVOKE(bpf_track,
d->bd_bif->bif_ifp, d->bd_bif->bif_dlt, 1);
* Detach a file from its current interface (if attached at all) and attach
* to the interface indicated by the name stored in ifr.
* Return an errno or 0.
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
struct ifnet *theywant;
theywant = ifunit(ifr->ifr_name);
if (theywant == NULL || theywant->if_bpf == NULL)
bp = theywant->if_bpf;
* At this point, we expect the buffer is already allocated. If not,
switch (d->bd_bufmode) {
case BPF_BUFMODE_BUFFER:
case BPF_BUFMODE_ZBUF:
if (d->bd_sbuf == NULL)
panic("bpf_setif: bufmode %d", d->bd_bufmode);
if (bp != d->bd_bif)
* Support for select() and poll() system calls.
* Return true iff the specific operation will not block indefinitely.
* Otherwise, return false but make a note that a selwakeup() must be done.
bpfpoll(struct cdev *dev, int events, struct thread *td)
if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL)
(POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));
* Refresh PID associated with this descriptor.
revents = events & (POLLOUT | POLLWRNORM);
BPF_PID_REFRESH(d, td);
if (events & (POLLIN | POLLRDNORM)) {
revents |= events & (POLLIN | POLLRDNORM);
selrecord(td, &d->bd_sel);
/* Start the read timeout if necessary. */
if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
callout_reset(&d->bd_callout, d->bd_rtout,
d->bd_state = BPF_WAITING;
* Support for kevent() system call. Register EVFILT_READ filters and
* reject all others.
bpfkqfilter(struct cdev *dev, struct knote *kn)
if (devfs_get_cdevpriv((void **)&d) != 0 ||
kn->kn_filter != EVFILT_READ)
* Refresh PID associated with this descriptor.
BPF_PID_REFRESH_CUR(d);
kn->kn_fop = &bpfread_filtops;
knlist_add(&d->bd_sel.si_note, kn, 1);
filt_bpfdetach(struct knote *kn)
struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
knlist_remove(&d->bd_sel.si_note, kn, 0);
filt_bpfread(struct knote *kn, long hint)
struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
BPFD_LOCK_ASSERT(d);
ready = bpf_ready(d);
kn->kn_data = d->bd_slen;
* Ignore the hold buffer if it is being copied to user space.
if (!d->bd_hbuf_in_use && d->bd_hbuf)
kn->kn_data += d->bd_hlen;
} else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
callout_reset(&d->bd_callout, d->bd_rtout,
d->bd_state = BPF_WAITING;
#define BPF_TSTAMP_NONE 0
#define BPF_TSTAMP_FAST 1
#define BPF_TSTAMP_NORMAL 2
#define BPF_TSTAMP_EXTERN 3
bpf_ts_quality(int tstype)
if (tstype == BPF_T_NONE)
return (BPF_TSTAMP_NONE);
if ((tstype & BPF_T_FAST) != 0)
return (BPF_TSTAMP_FAST);
return (BPF_TSTAMP_NORMAL);
bpf_gettime(struct bintime *bt, int tstype, struct mbuf *m)
quality = bpf_ts_quality(tstype);
if (quality == BPF_TSTAMP_NONE)
tag = m_tag_locate(m, MTAG_BPF, MTAG_BPF_TIMESTAMP, NULL);
*bt = *(struct bintime *)(tag + 1);
return (BPF_TSTAMP_EXTERN);
if (quality == BPF_TSTAMP_NORMAL)
* Incoming linkage from device drivers. Process the packet pkt, of length
* pktlen, which is stored in a contiguous buffer. The packet is parsed
* by each process' filter, and if accepted, stashed into the corresponding
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
struct epoch_tracker et;
gottime = BPF_TSTAMP_NONE;
NET_EPOCH_ENTER(et);
CK_LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
counter_u64_add(d->bd_rcount, 1);
* NB: We don't call BPF_CHECK_DIRECTION() here since there
* is no way for the caller to indicate to us whether this
* packet is inbound or outbound. In the bpf_mtap() routines,
* we use the interface pointers on the mbuf to figure it out.
bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
slen = (*(bf->func))(pkt, pktlen, pktlen);
slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
* The filter matches; let's acquire the write lock.
counter_u64_add(d->bd_fcount, 1);
if (gottime < bpf_ts_quality(d->bd_tstamp))
gottime = bpf_gettime(&bt, d->bd_tstamp,
if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
catchpacket(d, pkt, pktlen, slen,
bpf_append_bytes, &bt);
#define BPF_CHECK_DIRECTION(d, r, i) \
(((d)->bd_direction == BPF_D_IN && (r) != (i)) || \
((d)->bd_direction == BPF_D_OUT && (r) == (i)))
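/*
 * Editor's note: in BPF_CHECK_DIRECTION() the argument (r) is the mbuf's
 * receiving interface and (i) is the interface the descriptor is attached
 * to, so the macro is true when the packet must be skipped: an outgoing
 * packet ((r) != (i), e.g. a NULL rcvif) on a BPF_D_IN descriptor, or an
 * incoming packet ((r) == (i)) on a BPF_D_OUT descriptor.
 */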
* Incoming linkage from device drivers, when the packet is in an mbuf chain.
* The locking model is explained in bpf_tap().
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
struct epoch_tracker et;
/* Skip outgoing duplicate packets. */
if ((m->m_flags & M_PROMISC) != 0 && m_rcvif(m) == NULL) {
m->m_flags &= ~M_PROMISC;
pktlen = m_length(m, NULL);
gottime = BPF_TSTAMP_NONE;
NET_EPOCH_ENTER(et);
CK_LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
if (BPF_CHECK_DIRECTION(d, m_rcvif(m), bp->bif_ifp))
counter_u64_add(d->bd_rcount, 1);
bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
/* XXX We cannot handle multiple mbufs. */
if (bf != NULL && m->m_next == NULL)
slen = (*(bf->func))(mtod(m, u_char *), pktlen,
slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
counter_u64_add(d->bd_fcount, 1);
if (gottime < bpf_ts_quality(d->bd_tstamp))
gottime = bpf_gettime(&bt, d->bd_tstamp, m);
if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
catchpacket(d, (u_char *)m, pktlen, slen,
bpf_append_mbuf, &bt);
* Incoming linkage from device drivers, when the packet is in
* an mbuf chain and is to be prepended by a contiguous header.
bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
struct epoch_tracker et;
/* Skip outgoing duplicate packets. */
if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
m->m_flags &= ~M_PROMISC;
pktlen = m_length(m, NULL);
* Craft an on-stack mbuf suitable for passing to bpf_filter.
* Note that we cut corners here; we only set up what's
* absolutely needed--this mbuf should never go anywhere else.
gottime = BPF_TSTAMP_NONE;
NET_EPOCH_ENTER(et);
CK_LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
counter_u64_add(d->bd_rcount, 1);
slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
counter_u64_add(d->bd_fcount, 1);
if (gottime < bpf_ts_quality(d->bd_tstamp))
gottime = bpf_gettime(&bt, d->bd_tstamp, m);
if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
catchpacket(d, (u_char *)&mb, pktlen, slen,
bpf_append_mbuf, &bt);
#undef BPF_CHECK_DIRECTION
#undef BPF_TSTAMP_NONE
#undef BPF_TSTAMP_FAST
#undef BPF_TSTAMP_NORMAL
#undef BPF_TSTAMP_EXTERN
bpf_hdrlen(struct bpf_d *d)
hdrlen = d->bd_bif->bif_hdrlen;
#ifndef BURN_BRIDGES
if (d->bd_tstamp == BPF_T_NONE ||
BPF_T_FORMAT(d->bd_tstamp) == BPF_T_MICROTIME)
#ifdef COMPAT_FREEBSD32
hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr32);
hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr);
hdrlen += SIZEOF_BPF_HDR(struct bpf_xhdr);
#ifdef COMPAT_FREEBSD32
hdrlen = BPF_WORDALIGN32(hdrlen);
hdrlen = BPF_WORDALIGN(hdrlen);
return (hdrlen - d->bd_bif->bif_hdrlen);
bpf_bintime2ts(struct bintime *bt, struct bpf_ts *ts, int tstype)
struct bintime bt2, boottimebin;
struct timespec tsn;
if ((tstype & BPF_T_MONOTONIC) == 0) {
getboottimebin(&boottimebin);
bintime_add(&bt2, &boottimebin);
switch (BPF_T_FORMAT(tstype)) {
case BPF_T_MICROTIME:
bintime2timeval(bt, &tsm);
ts->bt_sec = tsm.tv_sec;
ts->bt_frac = tsm.tv_usec;
case BPF_T_NANOTIME:
bintime2timespec(bt, &tsn);
ts->bt_sec = tsn.tv_sec;
ts->bt_frac = tsn.tv_nsec;
ts->bt_sec = bt->sec;
ts->bt_frac = bt->frac;
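/*
 * Editor's note on the formats handled above: BPF_T_MICROTIME fills
 * bt_sec/bt_frac with seconds and microseconds, BPF_T_NANOTIME with
 * seconds and nanoseconds, and the default case stores the raw bintime.
 * When BPF_T_MONOTONIC is not set, the boot time is added first,
 * converting the uptime-based bintime into wall-clock time.
 */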
* Move the packet data from interface memory (pkt) into the
* store buffer. "cpfn" is the routine called to do the actual data
* transfer. bpf_append_bytes is passed in to copy contiguous chunks, while
* bpf_append_mbuf is passed in to copy mbuf chains. In the latter case,
* pkt is really an mbuf.
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
void (*cpfn)(struct bpf_d *, caddr_t, u_int, void *, u_int),
struct bpf_xhdr hdr;
#ifndef BURN_BRIDGES
struct bpf_hdr hdr_old;
#ifdef COMPAT_FREEBSD32
struct bpf_hdr32 hdr32_old;
int caplen, curlen, hdrlen, totlen;
BPFD_LOCK_ASSERT(d);
* Detect whether user space has released a buffer back to us, and if
* so, move it from being a hold buffer to a free buffer. This may
* not be the best place to do it (for example, we might only want to
* run this check if we need the space), but for now it's a reliable
if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) {
d->bd_fbuf = d->bd_hbuf;
bpf_buf_reclaimed(d);
* Figure out how many bytes to move. If the packet is
* greater or equal to the snapshot length, transfer that
* much. Otherwise, transfer the whole packet (unless
* we hit the buffer size limit).
hdrlen = bpf_hdrlen(d);
totlen = hdrlen + min(snaplen, pktlen);
if (totlen > d->bd_bufsize)
totlen = d->bd_bufsize;
* Round up the end of the previous packet to the next longword.
* Drop the packet if there's no room and no hope of room.
* If the packet would overflow the storage buffer or the storage
* buffer is considered immutable by the buffer model, try to rotate
* the buffer and wakeup pending processes.
#ifdef COMPAT_FREEBSD32
curlen = BPF_WORDALIGN32(d->bd_slen);
curlen = BPF_WORDALIGN(d->bd_slen);
if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) {
if (d->bd_fbuf == NULL) {
* There's no room in the store buffer, and no
* prospect of room, so drop the packet. Notify the
counter_u64_add(d->bd_dcount, 1);
KASSERT(!d->bd_hbuf_in_use, ("hold buffer is in use"));
} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
* Immediate mode is set, or the read timeout has already
* expired during a select call. A packet arrived, so the
* reader should be woken up.
caplen = totlen - hdrlen;
tstype = d->bd_tstamp;
do_timestamp = tstype != BPF_T_NONE;
#ifndef BURN_BRIDGES
if (tstype == BPF_T_NONE || BPF_T_FORMAT(tstype) == BPF_T_MICROTIME) {
bpf_bintime2ts(bt, &ts, tstype);
#ifdef COMPAT_FREEBSD32
if (d->bd_compat32) {
bzero(&hdr32_old, sizeof(hdr32_old));
hdr32_old.bh_tstamp.tv_sec = ts.bt_sec;
hdr32_old.bh_tstamp.tv_usec = ts.bt_frac;
hdr32_old.bh_datalen = pktlen;
hdr32_old.bh_hdrlen = hdrlen;
hdr32_old.bh_caplen = caplen;
bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr32_old,
bzero(&hdr_old, sizeof(hdr_old));
hdr_old.bh_tstamp.tv_sec = ts.bt_sec;
hdr_old.bh_tstamp.tv_usec = ts.bt_frac;
hdr_old.bh_datalen = pktlen;
hdr_old.bh_hdrlen = hdrlen;
hdr_old.bh_caplen = caplen;
bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr_old,
* Append the bpf header. Note we append the actual header size, but
* move forward the length of the header plus padding.
bzero(&hdr, sizeof(hdr));
bpf_bintime2ts(bt, &hdr.bh_tstamp, tstype);
hdr.bh_datalen = pktlen;
hdr.bh_hdrlen = hdrlen;
hdr.bh_caplen = caplen;
bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr));
* Copy the packet data into the store buffer and update its length.
#ifndef BURN_BRIDGES
(*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, caplen);
d->bd_slen = curlen + totlen;
* Free buffers currently in use by a descriptor.
bpfd_free(epoch_context_t ctx)
struct bpf_program_buffer *p;
* We don't need to lock out interrupts since this descriptor has
* been detached from its interface and it hasn't yet been marked
d = __containerof(ctx, struct bpf_d, epoch_ctx);
if (d->bd_rfilter != NULL) {
p = __containerof((void *)d->bd_rfilter,
struct bpf_program_buffer, buffer);
p->func = d->bd_bfilter;
bpf_program_buffer_free(&p->epoch_ctx);
if (d->bd_wfilter != NULL) {
p = __containerof((void *)d->bd_wfilter,
struct bpf_program_buffer, buffer);
bpf_program_buffer_free(&p->epoch_ctx);
mtx_destroy(&d->bd_lock);
counter_u64_free(d->bd_rcount);
counter_u64_free(d->bd_dcount);
counter_u64_free(d->bd_fcount);
counter_u64_free(d->bd_wcount);
counter_u64_free(d->bd_wfcount);
counter_u64_free(d->bd_wdcount);
counter_u64_free(d->bd_zcopy);
* Attach an interface to bpf. dlt is the link layer type; hdrlen is the
* fixed size of the link header (variable length headers not yet supported).
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
* Attach an interface to bpf. ifp is a pointer to the structure
* defining the interface to be attached, dlt is the link layer type,
* and hdrlen is the fixed size of the link header (variable length
* headers are not yet supported).
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen,
struct bpf_if **driverp)
KASSERT(*driverp == NULL,
("bpfattach2: driverp already initialized"));
bp = malloc(sizeof(*bp), M_BPF, M_WAITOK | M_ZERO);
CK_LIST_INIT(&bp->bif_dlist);
CK_LIST_INIT(&bp->bif_wlist);
bp->bif_hdrlen = hdrlen;
bp->bif_bpf = driverp;
* Reference the ifnet pointer, so it won't be freed until
CK_LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
if (bootverbose && IS_DEFAULT_VNET(curvnet))
if_printf(ifp, "bpf attached\n");
* When moving interfaces between vnet instances we need a way to
* query the dlt and hdrlen before detach so we can re-attach the if_bpf
* after the vmove. We unfortunately have no device driver infrastructure
* to query the interface for these values after creation/attach, thus
* add this as a workaround.
bpf_get_bp_params(struct bpf_if *bp, u_int *bif_dlt, u_int *bif_hdrlen)
if (bif_dlt == NULL && bif_hdrlen == NULL)
if (bif_dlt != NULL)
*bif_dlt = bp->bif_dlt;
if (bif_hdrlen != NULL)
*bif_hdrlen = bp->bif_hdrlen;
* Detach bpf from an interface. This involves detaching each descriptor
* associated with the interface. Notify each descriptor as it's detached
* so that any sleepers wake up and get ENXIO.
bpfdetach(struct ifnet *ifp)
struct bpf_if *bp, *bp_temp;
/* Find all bpf_if struct's which reference ifp and detach them. */
CK_LIST_FOREACH_SAFE(bp, &bpf_iflist, bif_next, bp_temp) {
if (ifp != bp->bif_ifp)
CK_LIST_REMOVE(bp, bif_next);
*bp->bif_bpf = (struct bpf_if *)&dead_bpf_if;
"%s: scheduling free for encap %d (%p) for if %p",
__func__, bp->bif_dlt, bp, ifp);
/* Detach common descriptors */
while ((d = CK_LIST_FIRST(&bp->bif_dlist)) != NULL) {
bpf_detachd_locked(d, true);
/* Detach writer-only descriptors */
while ((d = CK_LIST_FIRST(&bp->bif_wlist)) != NULL) {
bpf_detachd_locked(d, true);
* Get a list of the available data link types of the interface.
bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
ifp = d->bd_bif->bif_ifp;
CK_LIST_FOREACH(bp, &bpf_iflist, bif_next) {
if (bp->bif_ifp == ifp)
if (bfl->bfl_list == NULL) {
if (n1 > bfl->bfl_len)
lst = malloc(n1 * sizeof(u_int), M_TEMP, M_WAITOK);
CK_LIST_FOREACH(bp, &bpf_iflist, bif_next) {
if (bp->bif_ifp != ifp)
lst[n++] = bp->bif_dlt;
error = copyout(lst, bfl->bfl_list, sizeof(u_int) * n);
* Set the data link type of a BPF instance.
bpf_setdlt(struct bpf_d *d, u_int dlt)
int error, opromisc;
MPASS(d->bd_bif != NULL);
* It is safe to check bd_bif without BPFD_LOCK; it cannot be
* changed while we hold the global lock.
if (d->bd_bif->bif_dlt == dlt)
ifp = d->bd_bif->bif_ifp;
CK_LIST_FOREACH(bp, &bpf_iflist, bif_next) {
if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
opromisc = d->bd_promisc;
error = ifpromisc(bp->bif_ifp, 1);
if_printf(bp->bif_ifp, "%s: ifpromisc failed (%d)\n",
bpf_drvinit(void *unused)
sx_init(&bpf_sx, "bpf global lock");
CK_LIST_INIT(&bpf_iflist);
dev = make_dev(&bpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bpf");
/* For compatibility */
make_dev_alias(dev, "bpf0");
* Zero out the various packet counters associated with all of the bpf
* descriptors. At some point, we will probably want to get a bit more
* granular and allow the user to specify descriptors to be zeroed.
bpf_zero_counters(void)
* We are protected by the global lock here; interfaces and
* descriptors cannot be deleted while we hold it.
CK_LIST_FOREACH(bp, &bpf_iflist, bif_next) {
CK_LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
counter_u64_zero(bd->bd_rcount);
counter_u64_zero(bd->bd_dcount);
counter_u64_zero(bd->bd_fcount);
counter_u64_zero(bd->bd_wcount);
counter_u64_zero(bd->bd_wfcount);
counter_u64_zero(bd->bd_zcopy);
* Fill filter statistics.
bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
bzero(d, sizeof(*d));
d->bd_structsize = sizeof(*d);
d->bd_immediate = bd->bd_immediate;
d->bd_promisc = bd->bd_promisc;
d->bd_hdrcmplt = bd->bd_hdrcmplt;
d->bd_direction = bd->bd_direction;
d->bd_feedback = bd->bd_feedback;
d->bd_async = bd->bd_async;
d->bd_rcount = counter_u64_fetch(bd->bd_rcount);
d->bd_dcount = counter_u64_fetch(bd->bd_dcount);
d->bd_fcount = counter_u64_fetch(bd->bd_fcount);
d->bd_sig = bd->bd_sig;
d->bd_slen = bd->bd_slen;
d->bd_hlen = bd->bd_hlen;
d->bd_bufsize = bd->bd_bufsize;
d->bd_pid = bd->bd_pid;
strlcpy(d->bd_ifname,
bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
d->bd_locked = bd->bd_locked;
d->bd_wcount = counter_u64_fetch(bd->bd_wcount);
d->bd_wdcount = counter_u64_fetch(bd->bd_wdcount);
d->bd_wfcount = counter_u64_fetch(bd->bd_wfcount);
d->bd_zcopy = counter_u64_fetch(bd->bd_zcopy);
d->bd_bufmode = bd->bd_bufmode;
* Handle `netstat -B' stats request.
bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
static const struct xbpf_d zerostats;
struct xbpf_d *xbdbuf, *xbd, tempstats;
* XXX This is not technically correct. It is possible for
* non-privileged users to open bpf devices. It would make sense
* if the users who opened the devices were able to retrieve
* the statistics for them, too.
error = priv_check(req->td, PRIV_NET_BPF);
* Check to see if the user is requesting that the counters be
* zeroed out. Explicitly check that the supplied data is zeroed,
* as we aren't allowing the user to set the counters currently.
if (req->newptr != NULL) {
if (req->newlen != sizeof(tempstats))
memset(&tempstats, 0, sizeof(tempstats));
error = SYSCTL_IN(req, &tempstats, sizeof(tempstats));
if (bcmp(&tempstats, &zerostats, sizeof(tempstats)) != 0)
bpf_zero_counters();
if (req->oldptr == NULL)
return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
if (bpf_bpfd_cnt == 0)
return (SYSCTL_OUT(req, 0, 0));
xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
free(xbdbuf, M_BPF);
CK_LIST_FOREACH(bp, &bpf_iflist, bif_next) {
/* Send writers-only first */
CK_LIST_FOREACH(bd, &bp->bif_wlist, bd_next) {
xbd = &xbdbuf[index++];
bpfstats_fill_xbpf(xbd, bd);
CK_LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
xbd = &xbdbuf[index++];
bpfstats_fill_xbpf(xbd, bd);
error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
free(xbdbuf, M_BPF);
SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, bpf_drvinit, NULL);
#else /* !DEV_BPF && !NETGRAPH_BPF */
* NOP stubs to allow bpf-using drivers to load and function.
* A 'better' implementation would allow the core bpf functionality
* to be loaded at runtime.
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m)
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
*driverp = (struct bpf_if *)&dead_bpf_if;
bpfdetach(struct ifnet *ifp)
bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
return -1; /* "no filter" behaviour */
bpf_validate(const struct bpf_insn *f, int len)
return 0; /* false */
#endif /* !DEV_BPF && !NETGRAPH_BPF */
bpf_show_bpf_if(struct bpf_if *bpf_if)
db_printf("%p:\n", bpf_if);
#define BPF_DB_PRINTF(f, e) db_printf(" %s = " f "\n", #e, bpf_if->e);
/* bif_ext.bif_next */
/* bif_ext.bif_dlist */
BPF_DB_PRINTF("%#x", bif_dlt);
BPF_DB_PRINTF("%u", bif_hdrlen);
BPF_DB_PRINTF("%p", bif_ifp);
BPF_DB_PRINTF("%p", bif_bpf);
BPF_DB_PRINTF("%u", bif_refcnt);
DB_SHOW_COMMAND(bpf_if, db_show_bpf_if)
db_printf("usage: show bpf_if <struct bpf_if *>\n");
bpf_show_bpf_if((struct bpf_if *)addr);