/*-
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.4 (Berkeley) 1/9/95
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_netgraph.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/uio.h>
#include <sys/event.h>
#include <sys/poll.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpf_buffer.h>
#include <net/bpf_jitter.h>
#include <net/bpf_zerocopy.h>
#include <net/bpfdesc.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <net80211/ieee80211_freebsd.h>

#include <security/mac/mac_framework.h>

MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
#if defined(DEV_BPF) || defined(NETGRAPH_BPF)

#define PRINET  26			/* interruptible */

/*
 * bpf_iflist is a list of BPF interface structures, each corresponding to a
 * specific DLT.  The same network interface might have several BPF interface
 * structures registered by different layers in the stack (i.e., 802.11
 * frames, ethernet frames, etc).
 */
static LIST_HEAD(, bpf_if)	bpf_iflist;
static struct mtx	bpf_mtx;		/* bpf global lock */
static int		bpf_bpfd_cnt;
static void	bpf_attachd(struct bpf_d *, struct bpf_if *);
static void	bpf_detachd(struct bpf_d *);
static void	bpf_freed(struct bpf_d *);
static int	bpf_movein(struct uio *, int, struct ifnet *, struct mbuf **,
		    struct sockaddr *, int *, struct bpf_insn *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static void	bpf_timed_out(void *);
static __inline void
		bpf_wakeup(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
		    void (*)(struct bpf_d *, caddr_t, u_int, void *, u_int),
		    struct timeval *);
static void	reset_d(struct bpf_d *);
static int	bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int	bpf_setdlt(struct bpf_d *, u_int);
static void	filt_bpfdetach(struct knote *);
static int	filt_bpfread(struct knote *, long);
static void	bpf_drvinit(void *);
static int	bpf_stats_sysctl(SYSCTL_HANDLER_ARGS);
SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl");
int bpf_maxinsns = BPF_MAXINSNS;
SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW,
    &bpf_maxinsns, 0, "Maximum bpf program instructions");
static int bpf_zerocopy_enable = 0;
SYSCTL_INT(_net_bpf, OID_AUTO, zerocopy_enable, CTLFLAG_RW,
    &bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions");
SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_MPSAFE | CTLFLAG_RW,
    bpf_stats_sysctl, "bpf statistics portal");
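
/*
 * Illustrative usage sketch (not part of this file): given the OID names
 * declared above, these knobs would typically be inspected and tuned from
 * userland with sysctl(8), e.g.:
 *
 *	# sysctl net.bpf.maxinsns
 *	# sysctl net.bpf.zerocopy_enable=1
 */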
static	d_open_t	bpfopen;
static	d_read_t	bpfread;
static	d_write_t	bpfwrite;
static	d_ioctl_t	bpfioctl;
static	d_poll_t	bpfpoll;
static	d_kqfilter_t	bpfkqfilter;

static struct cdevsw bpf_cdevsw = {
	.d_version =	D_VERSION,
	.d_kqfilter =	bpfkqfilter,
};

static struct filterops bpfread_filtops =
	{ 1, NULL, filt_bpfdetach, filt_bpfread };
/*
 * Wrapper functions for various buffering methods.  If the set of buffer
 * modes expands, we will probably want to introduce a switch data structure
 * similar to protosw, etc.
 */
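/*
 * Purely illustrative sketch of what such a protosw-style switch could look
 * like; the structure name and field names below are hypothetical and do not
 * exist in this file:
 *
 *	struct bpf_bufops {
 *		void	(*bo_append_bytes)(struct bpf_d *, caddr_t, u_int,
 *			    void *, u_int);
 *		void	(*bo_append_mbuf)(struct bpf_d *, caddr_t, u_int,
 *			    void *, u_int);
 *		int	(*bo_canfreebuf)(struct bpf_d *);
 *		int	(*bo_canwritebuf)(struct bpf_d *);
 *	};
 *
 * Each buffer mode would supply one static instance, and the wrappers below
 * would collapse to an indirect call through a per-descriptor pointer.
 */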
void
bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
    u_int len)
{

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_append_bytes(d, buf, offset, src, len));

	case BPF_BUFMODE_ZBUF:
		d->bd_zcopy++;
		return (bpf_zerocopy_append_bytes(d, buf, offset, src, len));

	default:
		panic("bpf_buf_append_bytes");
	}
}

void
bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
    u_int len)
{

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_append_mbuf(d, buf, offset, src, len));

	case BPF_BUFMODE_ZBUF:
		d->bd_zcopy++;
		return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len));

	default:
		panic("bpf_buf_append_mbuf");
	}
}
/*
 * This function gets called when the free buffer is re-assigned.
 */
void
bpf_buf_reclaimed(struct bpf_d *d)
{

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return;

	case BPF_BUFMODE_ZBUF:
		bpf_zerocopy_buf_reclaimed(d);
		return;

	default:
		panic("bpf_buf_reclaimed");
	}
}
/*
 * If the buffer mechanism has a way to decide that a held buffer can be made
 * free, then it is exposed via the bpf_canfreebuf() interface.  (1) is
 * returned if the buffer can be discarded, (0) is returned if it cannot.
 */
static int
bpf_canfreebuf(struct bpf_d *d)
{

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		return (bpf_zerocopy_canfreebuf(d));
	}
	return (0);
}
/*
 * Allow the buffer model to indicate that the current store buffer is
 * immutable, regardless of the appearance of space.  Return (1) if the
 * buffer is writable, and (0) if not.
 */
static int
bpf_canwritebuf(struct bpf_d *d)
{

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		return (bpf_zerocopy_canwritebuf(d));
	}
	return (1);
}
/*
 * Notify buffer model that an attempt to write to the store buffer has
 * resulted in a dropped packet, in which case the buffer may be considered
 * full.
 */
void
bpf_buffull(struct bpf_d *d)
{

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		bpf_zerocopy_buffull(d);
		break;
	}
}
/*
 * Notify the buffer model that a buffer has moved into the hold position.
 */
void
bpf_bufheld(struct bpf_d *d)
{

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		bpf_zerocopy_bufheld(d);
		break;
	}
}
static void
bpf_free(struct bpf_d *d)
{

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_free(d));

	case BPF_BUFMODE_ZBUF:
		return (bpf_zerocopy_free(d));

	default:
		panic("bpf_buf_free");
	}
}
static int
bpf_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio)
{

	if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
		return (EOPNOTSUPP);
	return (bpf_buffer_uiomove(d, buf, len, uio));
}

static int
bpf_ioctl_sblen(struct bpf_d *d, u_int *i)
{

	if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
		return (EOPNOTSUPP);
	return (bpf_buffer_ioctl_sblen(d, i));
}

static int
bpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
{

	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
		return (EOPNOTSUPP);
	return (bpf_zerocopy_ioctl_getzmax(td, d, i));
}

static int
bpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
{

	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
		return (EOPNOTSUPP);
	return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz));
}

static int
bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
{

	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
		return (EOPNOTSUPP);
	return (bpf_zerocopy_ioctl_setzbuf(td, d, bz));
}
/*
 * General BPF functions.
 */
static int
bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp,
    struct sockaddr *sockp, int *hdrlen, struct bpf_insn *wfilter)
{
	const struct ieee80211_bpf_params *p;
	struct ether_header *eh;
	struct mbuf *m;
	int error;
	int len;
	int hlen;
	int slen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = ETHER_HDR_LEN;
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
		break;

	case DLT_RAW:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_NULL:
		/*
		 * null interface types require a 4 byte pseudo header which
		 * corresponds to the address family of the packet.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;
		break;

	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */
		break;

	case DLT_IEEE802_11:		/* IEEE 802.11 wireless */
		sockp->sa_family = AF_IEEE80211;
		hlen = 0;
		break;

	case DLT_IEEE802_11_RADIO:	/* IEEE 802.11 wireless w/ phy params */
		sockp->sa_family = AF_IEEE80211;
		sockp->sa_len = 12;	/* XXX != 0 */
		hlen = sizeof(struct ieee80211_bpf_params);
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;

	if (len - hlen > ifp->if_mtu)
		return (EMSGSIZE);

	if ((unsigned)len > MJUM16BYTES)
		return (EIO);

	if (len <= MHLEN)
		MGETHDR(m, M_WAIT, MT_DATA);
	else if (len <= MCLBYTES)
		m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
	else
		m = m_getjcl(M_WAIT, MT_DATA, M_PKTHDR,
#if (MJUMPAGESIZE > MCLBYTES)
		    len <= MJUMPAGESIZE ? MJUMPAGESIZE :
#endif
		    (len <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES));
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;

	if (m->m_len < hlen) {
		error = EPERM;
		goto bad;
	}

	error = uiomove(mtod(m, u_char *), len, uio);
	if (error)
		goto bad;

	slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
	if (slen == 0) {
		error = EPERM;
		goto bad;
	}

	/* Check for multicast destination */
	switch (linktype) {
	case DLT_EN10MB:
		eh = mtod(m, struct ether_header *);
		if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
			if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost,
			    ETHER_ADDR_LEN) == 0)
				m->m_flags |= M_BCAST;
			else
				m->m_flags |= M_MCAST;
		}
		break;
	}

	/*
	 * Make room for link header, and copy it to sockaddr
	 */
	if (hlen != 0) {
		if (sockp->sa_family == AF_IEEE80211) {
			/*
			 * Collect true length from the parameter header
			 * NB: sockp is known to be zero'd so if we do a
			 *     short copy unspecified parameters will be
			 *     zero.
			 * NB: packet may not be aligned after stripping
			 *     bpf params
			 * XXX check ibp_vers
			 */
			p = mtod(m, const struct ieee80211_bpf_params *);
			hlen = p->ibp_len;
			if (hlen > sizeof(sockp->sa_data)) {
				error = EINVAL;
				goto bad;
			}
		}
		bcopy(m->m_data, sockp->sa_data, hlen);
	}
	*hdrlen = hlen;

	return (0);
bad:
	m_freem(m);
	return (error);
}
/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 */
static void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);

	bpf_bpfd_cnt++;

	EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
}
/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(struct bpf_d *d)
{
	int error;
	struct bpf_if *bp;
	struct ifnet *ifp;

	bp = d->bd_bif;
	ifp = d->bd_bif->bif_ifp;

	/*
	 * Remove d from the interface's descriptor list.
	 */
	LIST_REMOVE(d, bd_next);

	bpf_bpfd_cnt--;
	d->bd_bif = NULL;

	EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0);

	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		CURVNET_SET(ifp->if_vnet);
		error = ifpromisc(ifp, 0);
		CURVNET_RESTORE();
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			if_printf(bp->bif_ifp,
				"bpf_detach: ifpromisc failed (%d)\n", error);
		}
	}
}
/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
static void
bpf_dtor(void *data)
{
	struct bpf_d *d = data;

	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	funsetown(&d->bd_sigio);
	mtx_lock(&bpf_mtx);
	if (d->bd_bif)
		bpf_detachd(d);
	mtx_unlock(&bpf_mtx);
	selwakeuppri(&d->bd_sel, PRINET);
	mac_bpfdesc_destroy(d);
	knlist_destroy(&d->bd_sel.si_note);
	callout_drain(&d->bd_callout);
	bpf_freed(d);
	free(d, M_BPF);
}
/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
static	int
bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct bpf_d *d;
	int error;

	d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
	error = devfs_set_cdevpriv(d, bpf_dtor);
	if (error != 0) {
		free(d, M_BPF);
		return (error);
	}

	/*
	 * For historical reasons, perform a one-time initialization call to
	 * the buffer routines, even though we're not yet committed to a
	 * particular buffer method.
	 */
	bpf_buffer_init(d);
	d->bd_bufmode = BPF_BUFMODE_BUFFER;
	d->bd_sig = SIGIO;
	d->bd_direction = BPF_D_INOUT;
	d->bd_pid = td->td_proc->p_pid;
	mac_bpfdesc_create(td->td_ucred, d);
	mtx_init(&d->bd_mtx, devtoname(dev), "bpf cdev lock", MTX_DEF);
	callout_init_mtx(&d->bd_callout, &d->bd_mtx, 0);
	knlist_init_mtx(&d->bd_sel.si_note, &d->bd_mtx);

	return (0);
}
/*
 * bpfread - read next chunk of packets from buffers
 */
static	int
bpfread(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d;
	int error;
	int non_block;
	int timed_out;

	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	non_block = ((ioflag & O_NONBLOCK) != 0);

	BPFD_LOCK(d);
	d->bd_pid = curthread->td_proc->p_pid;
	if (d->bd_bufmode != BPF_BUFMODE_BUFFER) {
		BPFD_UNLOCK(d);
		return (EOPNOTSUPP);
	}
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == NULL) {
		if (d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 */
			if (d->bd_immediate || non_block || timed_out) {
				/*
				 * Rotate the buffers and return what's here
				 * if we are in immediate mode, non-blocking
				 * flag is set, or this descriptor timed out.
				 */
				ROTATE_BUFFERS(d);
				break;
			}
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			BPFD_UNLOCK(d);
			return (ENXIO);
		}

		if (non_block) {
			BPFD_UNLOCK(d);
			return (EWOULDBLOCK);
		}
		error = msleep(d, &d->bd_mtx, PRINET|PCATCH,
		     "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			BPFD_UNLOCK(d);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				BPFD_UNLOCK(d);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	BPFD_UNLOCK(d);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 *
	 * XXXRW: More synchronization needed here: what if a second thread
	 * issues a read on the same fd at the same time?  Don't want this
	 * getting invalidated.
	 */
	error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio);

	BPFD_LOCK(d);
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
	bpf_buf_reclaimed(d);
	BPFD_UNLOCK(d);

	return (error);
}
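
/*
 * Illustrative usage sketch (not part of this file): since the check above
 * forces read() to use a buffer exactly bd_bufsize bytes long, a userland
 * consumer walks the returned records roughly as follows:
 *
 *	u_char *p = buf;
 *	while (p < buf + nread) {
 *		struct bpf_hdr *bh = (struct bpf_hdr *)p;
 *		handle(p + bh->bh_hdrlen, bh->bh_caplen);
 *		p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
 *	}
 *
 * where handle() is a hypothetical per-packet callback.
 */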
/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static __inline void
bpf_wakeup(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);
	if (d->bd_state == BPF_WAITING) {
		callout_stop(&d->bd_callout);
		d->bd_state = BPF_IDLE;
	}
	wakeup(d);
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(&d->bd_sigio, d->bd_sig, 0);

	selwakeuppri(&d->bd_sel, PRINET);
	KNOTE_LOCKED(&d->bd_sel.si_note, 0);
}
static void
bpf_timed_out(void *arg)
{
	struct bpf_d *d = (struct bpf_d *)arg;

	BPFD_LOCK_ASSERT(d);

	if (callout_pending(&d->bd_callout) || !callout_active(&d->bd_callout))
		return;
	if (d->bd_state == BPF_WAITING) {
		d->bd_state = BPF_TIMED_OUT;
		if (d->bd_slen != 0)
			bpf_wakeup(d);
	}
}
static int
bpf_ready(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	if (!bpf_canfreebuf(d) && d->bd_hlen != 0)
		return (1);
	if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
	    d->bd_slen != 0)
		return (1);
	return (0);
}
static int
bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d;
	struct ifnet *ifp;
	struct mbuf *m, *mc;
	struct sockaddr dst;
	int error, hlen;

	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	d->bd_pid = curthread->td_proc->p_pid;
	d->bd_wcount++;
	if (d->bd_bif == NULL) {
		d->bd_wdcount++;
		return (ENXIO);
	}

	ifp = d->bd_bif->bif_ifp;

	if ((ifp->if_flags & IFF_UP) == 0) {
		d->bd_wdcount++;
		return (ENETDOWN);
	}

	if (uio->uio_resid == 0) {
		d->bd_wdcount++;
		return (0);
	}

	bzero(&dst, sizeof(dst));
	m = NULL;
	hlen = 0;
	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp,
	    &m, &dst, &hlen, d->bd_wfilter);
	if (error) {
		d->bd_wdcount++;
		return (error);
	}
	d->bd_wfcount++;
	if (d->bd_hdrcmplt)
		dst.sa_family = pseudo_AF_HDRCMPLT;

	if (d->bd_feedback) {
		mc = m_dup(m, M_DONTWAIT);
		if (mc != NULL)
			mc->m_pkthdr.rcvif = ifp;
		/* Set M_PROMISC for outgoing packets to be discarded. */
		if (d->bd_direction == BPF_D_INOUT)
			m->m_flags |= M_PROMISC;
	} else
		mc = NULL;

	m->m_pkthdr.len -= hlen;
	m->m_len -= hlen;
	m->m_data += hlen;	/* XXX */

	CURVNET_SET(ifp->if_vnet);
	mac_bpfdesc_create_mbuf(d, m);
	if (mc != NULL)
		mac_bpfdesc_create_mbuf(d, mc);

	error = (*ifp->if_output)(ifp, m, &dst, NULL);
	if (error)
		d->bd_wdcount++;

	if (mc != NULL) {
		if (error == 0)
			(*ifp->if_input)(ifp, mc);
		else
			m_freem(mc);
	}
	CURVNET_RESTORE();

	return (error);
}
/*
 * Reset a descriptor by flushing its packet buffer and clearing the receive
 * and drop counts.  This is doable for kernel-only buffers, but with
 * zero-copy buffers, we can't write to (or rotate) buffers that are
 * currently owned by userspace.  It would be nice if we could encapsulate
 * this logic in the buffer code rather than here.
 */
static void
reset_d(struct bpf_d *d)
{

	mtx_assert(&d->bd_mtx, MA_OWNED);

	if ((d->bd_hbuf != NULL) &&
	    (d->bd_bufmode != BPF_BUFMODE_ZBUF || bpf_canfreebuf(d))) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
		d->bd_hlen = 0;
		bpf_buf_reclaimed(d);
	}
	if (bpf_canwritebuf(d))
		d->bd_slen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
	d->bd_fcount = 0;
	d->bd_wcount = 0;
	d->bd_wdcount = 0;
	d->bd_wfcount = 0;
	d->bd_zcopy = 0;
}
/*
 * FIONREAD		Check for read packet available.
 * SIOCGIFADDR		Get interface address - convenient hook to driver.
 * BIOCGBLEN		Get buffer len [for read()].
 * BIOCSETF		Set read filter.
 * BIOCSETFNR		Set read filter without resetting descriptor.
 * BIOCSETWF		Set write filter.
 * BIOCFLUSH		Flush read packet buffer.
 * BIOCPROMISC		Put interface into promiscuous mode.
 * BIOCGDLT		Get link layer type.
 * BIOCGETIF		Get interface name.
 * BIOCSETIF		Set interface.
 * BIOCSRTIMEOUT	Set read timeout.
 * BIOCGRTIMEOUT	Get read timeout.
 * BIOCGSTATS		Get packet stats.
 * BIOCIMMEDIATE	Set immediate mode.
 * BIOCVERSION		Get filter language version.
 * BIOCGHDRCMPLT	Get "header already complete" flag
 * BIOCSHDRCMPLT	Set "header already complete" flag
 * BIOCGDIRECTION	Get packet direction flag
 * BIOCSDIRECTION	Set packet direction flag
 * BIOCLOCK		Set "locked" flag
 * BIOCFEEDBACK		Set packet feedback mode.
 * BIOCSETZBUF		Set current zero-copy buffer locations.
 * BIOCGETZMAX		Get maximum zero-copy buffer size.
 * BIOCROTZBUF		Force rotation of zero-copy buffer
 * BIOCSETBUFMODE	Set buffer mode.
 * BIOCGETBUFMODE	Get current buffer mode.
 */
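/*
 * Illustrative usage sketch (not part of this file): a typical capture setup
 * from userland drives the ioctls above roughly as follows ("em0" is just an
 * example interface name):
 *
 *	int fd = open("/dev/bpf", O_RDONLY);
 *	struct ifreq ifr;
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);	(bind to an interface)
 *	u_int one = 1;
 *	ioctl(fd, BIOCIMMEDIATE, &one);	(deliver packets as they arrive)
 *	u_int bufsize;
 *	ioctl(fd, BIOCGBLEN, &bufsize);	(read() buffer must be this size)
 *
 * after which read(fd, buf, bufsize) returns bpf_hdr-framed packets.
 */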
static	int
bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct bpf_d *d;
	int error;

	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	/*
	 * Refresh PID associated with this descriptor.
	 */
	BPFD_LOCK(d);
	d->bd_pid = td->td_proc->p_pid;
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);
	if (d->bd_locked == 1) {
		switch (cmd) {
		case BIOCGBLEN:
		case BIOCFLUSH:
		case BIOCGDLT:
		case BIOCGDLTLIST:
		case BIOCGETIF:
		case BIOCGRTIMEOUT:
		case BIOCGSTATS:
		case BIOCVERSION:
		case BIOCGRSIG:
		case BIOCGHDRCMPLT:
		case BIOCFEEDBACK:
		case FIONREAD:
		case BIOCLOCK:
		case BIOCSRTIMEOUT:
		case BIOCIMMEDIATE:
		case TIOCGPGRP:
		case BIOCROTZBUF:
			break;
		default:
			return (EPERM);
		}
	}
	CURVNET_SET(TD_TO_VNET(td));

	error = 0;
	switch (cmd) {

	default:
		error = EINVAL;
		break;
	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == NULL)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
		error = bpf_ioctl_sblen(d, (u_int *)addr);
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
	case BIOCSETFNR:
	case BIOCSETWF:
		error = bpf_setf(d, (struct bpf_program *)addr, cmd);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == NULL) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		break;

	/*
	 * Get current data link type.
	 */
	case BIOCGDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get a list of supported data link types.
	 */
	case BIOCGDLTLIST:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
		break;

	/*
	 * Set data link type.
	 */
	case BIOCSDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)addr);
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			strlcpy(ifr->ifr_name, ifp->if_xname,
			    sizeof(ifr->ifr_name));
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			/* XXXCSJP overflow */
			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get packet direction flag
	 */
	case BIOCGDIRECTION:
		*(u_int *)addr = d->bd_direction;
		break;

	/*
	 * Set packet direction flag
	 */
	case BIOCSDIRECTION:
		{
			u_int	direction;

			direction = *(u_int *)addr;
			switch (direction) {
			case BPF_D_IN:
			case BPF_D_INOUT:
			case BPF_D_OUT:
				d->bd_direction = direction;
				break;
			default:
				error = EINVAL;
			}
		}
		break;

	case BIOCFEEDBACK:
		d->bd_feedback = *(u_int *)addr;
		break;

	case BIOCLOCK:
		d->bd_locked = 1;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(&d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(&d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}

	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;

	case BIOCGETBUFMODE:
		*(u_int *)addr = d->bd_bufmode;
		break;

	case BIOCSETBUFMODE:
		/*
		 * Allow the buffering mode to be changed as long as we
		 * haven't yet committed to a particular mode.  Our
		 * definition of commitment, for now, is whether or not a
		 * buffer has been allocated or an interface attached, since
		 * that's the point where things get tricky.
		 */
		switch (*(u_int *)addr) {
		case BPF_BUFMODE_BUFFER:
			break;

		case BPF_BUFMODE_ZBUF:
			if (bpf_zerocopy_enable)
				break;
			/* FALLSTHROUGH */

		default:
			CURVNET_RESTORE();
			return (EINVAL);
		}

		BPFD_LOCK(d);
		if (d->bd_sbuf != NULL || d->bd_hbuf != NULL ||
		    d->bd_fbuf != NULL || d->bd_bif != NULL) {
			BPFD_UNLOCK(d);
			CURVNET_RESTORE();
			return (EBUSY);
		}
		d->bd_bufmode = *(u_int *)addr;
		BPFD_UNLOCK(d);
		break;

	case BIOCGETZMAX:
		return (bpf_ioctl_getzmax(td, d, (size_t *)addr));

	case BIOCSETZBUF:
		return (bpf_ioctl_setzbuf(td, d, (struct bpf_zbuf *)addr));

	case BIOCROTZBUF:
		return (bpf_ioctl_rotzbuf(td, d, (struct bpf_zbuf *)addr));
	}
	CURVNET_RESTORE();
	return (error);
}
/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
{
	struct bpf_insn *fcode, *old;
	u_int wfilter, flen, size;
	bpf_jit_filter *ofunc;

	if (cmd == BIOCSETWF) {
		old = d->bd_wfilter;
		wfilter = 1;
		ofunc = NULL;
	} else {
		wfilter = 0;
		old = d->bd_rfilter;
		ofunc = d->bd_bfilter;
	}
	if (fp->bf_insns == NULL) {
		if (fp->bf_len != 0)
			return (EINVAL);
		if (wfilter)
			d->bd_wfilter = NULL;
		else {
			d->bd_rfilter = NULL;
			d->bd_bfilter = NULL;
			if (cmd == BIOCSETF)
				reset_d(d);
		}
		if (old != NULL)
			free((caddr_t)old, M_BPF);
		if (ofunc != NULL)
			bpf_destroy_jit_filter(ofunc);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > bpf_maxinsns)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		if (wfilter)
			d->bd_wfilter = fcode;
		else {
			d->bd_rfilter = fcode;
			d->bd_bfilter = bpf_jitter(fcode, flen);
			if (cmd == BIOCSETF)
				reset_d(d);
		}
		if (old != NULL)
			free((caddr_t)old, M_BPF);
		if (ofunc != NULL)
			bpf_destroy_jit_filter(ofunc);

		return (0);
	}
	free((caddr_t)fcode, M_BPF);
	return (EINVAL);
}
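
/*
 * Illustrative usage sketch (not part of this file): the program copied in
 * above is built in userland from an array of bpf_insn, e.g. a minimal
 * accept-all filter that snaps the first 96 bytes of each packet:
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 96),
 *	};
 *	struct bpf_program prog = { 1, insns };
 *	ioctl(fd, BIOCSETF, &prog);
 *
 * bpf_validate() is what rejects malformed programs, and bf_len is bounded
 * by the net.bpf.maxinsns sysctl declared earlier.
 */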
/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == NULL || theywant->if_bpf == NULL)
		return (ENXIO);

	bp = theywant->if_bpf;

	/*
	 * Behavior here depends on the buffering model.  If we're using
	 * kernel memory buffers, then we can allocate them here.  If we're
	 * using zero-copy, then the user process must have registered
	 * buffers by the time we get here.  If not, return an error.
	 *
	 * XXXRW: There are locking issues here with multi-threaded use: what
	 * if two threads try to set the interface at once?
	 */
	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		if (d->bd_sbuf == NULL)
			bpf_buffer_alloc(d);
		KASSERT(d->bd_sbuf != NULL, ("bpf_setif: bd_sbuf NULL"));
		break;

	case BPF_BUFMODE_ZBUF:
		if (d->bd_sbuf == NULL)
			return (EINVAL);
		break;

	default:
		panic("bpf_setif: bufmode %d", d->bd_bufmode);
	}
	if (bp != d->bd_bif) {
		if (d->bd_bif)
			/*
			 * Detach if attached to something else.
			 */
			bpf_detachd(d);

		bpf_attachd(d, bp);
	}
	BPFD_LOCK(d);
	reset_d(d);
	BPFD_UNLOCK(d);
	return (0);
}
/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
static int
bpfpoll(struct cdev *dev, int events, struct thread *td)
{
	struct bpf_d *d;
	int revents;

	if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL)
		return (events &
		    (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));

	/*
	 * Refresh PID associated with this descriptor.
	 */
	revents = events & (POLLOUT | POLLWRNORM);
	BPFD_LOCK(d);
	d->bd_pid = td->td_proc->p_pid;
	if (events & (POLLIN | POLLRDNORM)) {
		if (bpf_ready(d))
			revents |= events & (POLLIN | POLLRDNORM);
		else {
			selrecord(td, &d->bd_sel);
			/* Start the read timeout if necessary. */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
				    bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}
	BPFD_UNLOCK(d);
	return (revents);
}
/*
 * Support for kevent() system call.  Register EVFILT_READ filters and
 * reject all others.
 */
int
bpfkqfilter(struct cdev *dev, struct knote *kn)
{
	struct bpf_d *d;

	if (devfs_get_cdevpriv((void **)&d) != 0 ||
	    kn->kn_filter != EVFILT_READ)
		return (1);

	/*
	 * Refresh PID associated with this descriptor.
	 */
	BPFD_LOCK(d);
	d->bd_pid = curthread->td_proc->p_pid;
	kn->kn_fop = &bpfread_filtops;
	kn->kn_hook = d;
	knlist_add(&d->bd_sel.si_note, kn, 1);
	BPFD_UNLOCK(d);

	return (0);
}

static void
filt_bpfdetach(struct knote *kn)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;

	knlist_remove(&d->bd_sel.si_note, kn, 0);
}
static int
filt_bpfread(struct knote *kn, long hint)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
	int ready;

	BPFD_LOCK_ASSERT(d);
	ready = bpf_ready(d);
	if (ready) {
		kn->kn_data = d->bd_slen;
		if (d->bd_hbuf)
			kn->kn_data += d->bd_hlen;
	} else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
		callout_reset(&d->bd_callout, d->bd_rtout,
		    bpf_timed_out, d);
		d->bd_state = BPF_WAITING;
	}

	return (ready);
}
/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
	bpf_jit_filter *bf;
	struct bpf_d *d;
	u_int slen;
	int gottime;
	struct timeval tv;

	gottime = 0;
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		++d->bd_rcount;
		/*
		 * NB: We dont call BPF_CHECK_DIRECTION() here since there is no
		 * way for the caller to indicate to us whether this packet
		 * is inbound or outbound.  In the bpf_mtap() routines, we use
		 * the interface pointers on the mbuf to figure it out.
		 */
		bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
		if (bf != NULL)
			slen = (*(bf->func))(pkt, pktlen, pktlen);
		else
			slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
		if (slen != 0) {
			d->bd_fcount++;
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
				catchpacket(d, pkt, pktlen, slen,
				    bpf_append_bytes, &tv);
		}
	}
}
#define	BPF_CHECK_DIRECTION(d, r, i)				\
	    (((d)->bd_direction == BPF_D_IN && (r) != (i)) ||	\
	    ((d)->bd_direction == BPF_D_OUT && (r) == (i)))
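
/*
 * Added explanatory note: the macro evaluates true when the packet should be
 * skipped for descriptor d.  Inbound packets carry m->m_pkthdr.rcvif equal
 * to the tapped interface (r == i), so BPF_D_OUT descriptors skip them;
 * outbound packets are handed down with an rcvif of NULL (r != i), so
 * BPF_D_IN descriptors skip those instead.
 */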
/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
	bpf_jit_filter *bf;
	struct bpf_d *d;
	u_int pktlen, slen;
	int gottime;
	struct timeval tv;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	gottime = 0;
	pktlen = m_length(m, NULL);

	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
			continue;
		++d->bd_rcount;
		bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
		/* XXX We cannot handle multiple mbufs. */
		if (bf != NULL && m->m_next == NULL)
			slen = (*(bf->func))(mtod(m, u_char *), pktlen, pktlen);
		else
			slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
		if (slen != 0) {
			d->bd_fcount++;
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
				catchpacket(d, (u_char *)m, pktlen, slen,
				    bpf_append_mbuf, &tv);
		}
	}
}
/*
 * Incoming linkage from device drivers, when packet is in
 * an mbuf chain and to be prepended by a contiguous header.
 */
void
bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
{
	struct mbuf mb;
	struct bpf_d *d;
	u_int pktlen, slen;
	int gottime;
	struct timeval tv;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	gottime = 0;
	pktlen = m_length(m, NULL);
	/*
	 * Craft on-stack mbuf suitable for passing to bpf_filter.
	 * Note that we cut corners here; we only setup what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	mb.m_next = m;
	mb.m_data = data;
	mb.m_len = dlen;
	pktlen += dlen;

	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
			continue;
		++d->bd_rcount;
		slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
		if (slen != 0) {
			d->bd_fcount++;
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
				catchpacket(d, (u_char *)&mb, pktlen, slen,
				    bpf_append_mbuf, &tv);
		}
	}
}

#undef	BPF_CHECK_DIRECTION
/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  "cpfn" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_append_mbuf is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
    void (*cpfn)(struct bpf_d *, caddr_t, u_int, void *, u_int),
    struct timeval *tv)
{
	struct bpf_hdr hdr;
	int totlen, curlen;
	int hdrlen = d->bd_bif->bif_hdrlen;
	int do_wakeup = 0;

	BPFD_LOCK_ASSERT(d);

	/*
	 * Detect whether user space has released a buffer back to us, and if
	 * so, move it from being a hold buffer to a free buffer.  This may
	 * not be the best place to do it (for example, we might only want to
	 * run this check if we need the space), but for now it's a reliable
	 * spot to do it.
	 */
	if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) {
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
		d->bd_hlen = 0;
		bpf_buf_reclaimed(d);
	}

	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 *
	 * Drop the packet if there's no room and no hope of room
	 * If the packet would overflow the storage buffer or the storage
	 * buffer is considered immutable by the buffer model, try to rotate
	 * the buffer and wakeup pending processes.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) {
		if (d->bd_fbuf == NULL) {
			/*
			 * There's no room in the store buffer, and no
			 * prospect of room, so drop the packet.  Notify the
			 * buffer model.
			 */
			bpf_buffull(d);
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		do_wakeup = 1;
		curlen = 0;
	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
		/*
		 * Immediate mode is set, or the read timeout has already
		 * expired during a select call.  A packet arrived, so the
		 * reader should be woken up.
		 */
		do_wakeup = 1;

	/*
	 * Append the bpf header.  Note we append the actual header size, but
	 * move forward the length of the header plus padding.
	 */
	bzero(&hdr, sizeof(hdr));
	hdr.bh_tstamp = *tv;
	hdr.bh_datalen = pktlen;
	hdr.bh_hdrlen = hdrlen;
	hdr.bh_caplen = totlen - hdrlen;
	bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr));

	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, hdr.bh_caplen);
	d->bd_slen = curlen + totlen;

	if (do_wakeup)
		bpf_wakeup(d);
}
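
/*
 * Added worked example (illustrative, not from this file): on a 32-bit
 * machine where BPF_WORDALIGN() rounds to 4 bytes and the padded header is
 * 18 bytes, a 60-byte Ethernet packet captured with snaplen >= 60 lands in
 * the store buffer as
 *
 *	[bpf_hdr + pad: 18][packet: 60]  ->  bd_slen = 78
 *
 * and the next record begins at BPF_WORDALIGN(78) = 80, so every record
 * stays longword aligned.
 */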
/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(struct bpf_d *d)
{

	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it yet hasn't been marked
	 * free.
	 */
	bpf_free(d);
	if (d->bd_rfilter != NULL) {
		free((caddr_t)d->bd_rfilter, M_BPF);
		if (d->bd_bfilter != NULL)
			bpf_destroy_jit_filter(d->bd_bfilter);
	}
	if (d->bd_wfilter != NULL)
		free((caddr_t)d->bd_wfilter, M_BPF);
	mtx_destroy(&d->bd_mtx);
}
/*
 * Attach an interface to bpf.  dlt is the link layer type; hdrlen is the
 * fixed size of the link header (variable length headers not yet supported).
 */
void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}
/*
 * Attach an interface to bpf.  ifp is a pointer to the structure
 * defining the interface to be attached, dlt is the link layer type,
 * and hdrlen is the fixed size of the link header (variable length
 * headers are not yet supported).
 */
void
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{
	struct bpf_if *bp;

	bp = malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
	if (bp == NULL)
		panic("bpfattach");

	LIST_INIT(&bp->bif_dlist);
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;
	mtx_init(&bp->bif_mtx, "bpf interface lock", NULL, MTX_DEF);
	KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized"));
	*driverp = bp;

	mtx_lock(&bpf_mtx);
	LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
	mtx_unlock(&bpf_mtx);

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

	if (bootverbose)
		if_printf(ifp, "bpf attached\n");
}
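
/*
 * Added worked example (illustrative, not from this file): for DLT_EN10MB on
 * a 32-bit machine, hdrlen is ETHER_HDR_LEN (14) and SIZEOF_BPF_HDR is 18,
 * so bif_hdrlen = BPF_WORDALIGN(14 + 18) - 14 = 32 - 14 = 18.  Each captured
 * record then carries 18 bytes of bpf header plus 14 bytes of Ethernet
 * header, and 18 + 14 = 32 keeps the network layer header longword aligned.
 */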
/*
 * Detach bpf from an interface.  This involves detaching each descriptor
 * associated with the interface, and leaving bd_bif NULL.  Notify each
 * descriptor as it's detached so that any sleepers wake up and get
 * ENXIO.
 */
void
bpfdetach(struct ifnet *ifp)
{
	struct bpf_if	*bp;
	struct bpf_d	*d;

	/* Locate BPF interface information */
	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (ifp == bp->bif_ifp)
			break;
	}

	/* Interface wasn't attached */
	if ((bp == NULL) || (bp->bif_ifp == NULL)) {
		mtx_unlock(&bpf_mtx);
		printf("bpfdetach: %s was not attached\n", ifp->if_xname);
		return;
	}

	LIST_REMOVE(bp, bif_next);
	mtx_unlock(&bpf_mtx);

	while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
		bpf_detachd(d);
		bpf_wakeup(d);
	}

	mtx_destroy(&bp->bif_mtx);
	free(bp, M_BPF);
}
/*
 * Get a list of available data link type of the interface.
 */
static int
bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
{
	int n, error;
	struct ifnet *ifp;
	struct bpf_if *bp;

	ifp = d->bd_bif->bif_ifp;
	n = 0;
	error = 0;
	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp != ifp)
			continue;
		if (bfl->bfl_list != NULL) {
			if (n >= bfl->bfl_len) {
				mtx_unlock(&bpf_mtx);
				return (ENOMEM);
			}
			error = copyout(&bp->bif_dlt,
			    bfl->bfl_list + n, sizeof(u_int));
		}
		n++;
	}
	mtx_unlock(&bpf_mtx);
	bfl->bfl_len = n;
	return (error);
}
/*
 * Set the data link type of a BPF instance.
 */
static int
bpf_setdlt(struct bpf_d *d, u_int dlt)
{
	int error, opromisc;
	struct ifnet *ifp;
	struct bpf_if *bp;

	if (d->bd_bif->bif_dlt == dlt)
		return (0);
	ifp = d->bd_bif->bif_ifp;
	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
			break;
	}
	mtx_unlock(&bpf_mtx);
	if (bp != NULL) {
		opromisc = d->bd_promisc;
		bpf_detachd(d);
		bpf_attachd(d, bp);
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		if (opromisc) {
			error = ifpromisc(bp->bif_ifp, 1);
			if (error)
				if_printf(bp->bif_ifp,
					"bpf_setdlt: ifpromisc failed (%d)\n",
					error);
			else
				d->bd_promisc = 1;
		}
	}
	return (bp == NULL ? EINVAL : 0);
}
static void
bpf_drvinit(void *unused)
{
	struct cdev *dev;

	mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
	LIST_INIT(&bpf_iflist);

	dev = make_dev(&bpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bpf");
	/* For compatibility */
	make_dev_alias(dev, "bpf0");
}
/*
 * Zero out the various packet counters associated with all of the bpf
 * descriptors.  At some point, we will probably want to get a bit more
 * granular and allow the user to specify descriptors to be zeroed.
 */
static void
bpf_zero_counters(void)
{
	struct bpf_if *bp;
	struct bpf_d *bd;

	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
			bd->bd_rcount = 0;
			bd->bd_dcount = 0;
			bd->bd_fcount = 0;
			bd->bd_wcount = 0;
			bd->bd_wdcount = 0;
			bd->bd_wfcount = 0;
			bd->bd_zcopy = 0;
		}
	}
	mtx_unlock(&bpf_mtx);
}
static void
bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
{

	bzero(d, sizeof(*d));
	BPFD_LOCK_ASSERT(bd);
	d->bd_structsize = sizeof(*d);
	d->bd_immediate = bd->bd_immediate;
	d->bd_promisc = bd->bd_promisc;
	d->bd_hdrcmplt = bd->bd_hdrcmplt;
	d->bd_direction = bd->bd_direction;
	d->bd_feedback = bd->bd_feedback;
	d->bd_async = bd->bd_async;
	d->bd_rcount = bd->bd_rcount;
	d->bd_dcount = bd->bd_dcount;
	d->bd_fcount = bd->bd_fcount;
	d->bd_sig = bd->bd_sig;
	d->bd_slen = bd->bd_slen;
	d->bd_hlen = bd->bd_hlen;
	d->bd_bufsize = bd->bd_bufsize;
	d->bd_pid = bd->bd_pid;
	strlcpy(d->bd_ifname,
	    bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
	d->bd_locked = bd->bd_locked;
	d->bd_wcount = bd->bd_wcount;
	d->bd_wdcount = bd->bd_wdcount;
	d->bd_wfcount = bd->bd_wfcount;
	d->bd_zcopy = bd->bd_zcopy;
	d->bd_bufmode = bd->bd_bufmode;
}
static int
bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct xbpf_d *xbdbuf, *xbd, zerostats;
	int index, error;
	struct bpf_if *bp;
	struct bpf_d *bd;

	/*
	 * XXX This is not technically correct. It is possible for non
	 * privileged users to open bpf devices. It would make sense
	 * if the users who opened the devices were able to retrieve
	 * the statistics for them, too.
	 */
	error = priv_check(req->td, PRIV_NET_BPF);
	if (error)
		return (error);
	/*
	 * Check to see if the user is requesting that the counters be
	 * zeroed out.  Explicitly check that the supplied data is zeroed,
	 * as we aren't allowing the user to set the counters currently.
	 */
	if (req->newptr != NULL) {
		if (req->newlen != sizeof(zerostats))
			return (EINVAL);
		bzero(&zerostats, sizeof(zerostats));
		xbd = (struct xbpf_d *)req->newptr;
		if (bcmp(xbd, &zerostats, sizeof(*xbd)) != 0)
			return (EINVAL);
		bpf_zero_counters();
		return (0);
	}
	if (req->oldptr == NULL)
		return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
	if (bpf_bpfd_cnt == 0)
		return (SYSCTL_OUT(req, 0, 0));
	xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
	mtx_lock(&bpf_mtx);
	if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
		mtx_unlock(&bpf_mtx);
		free(xbdbuf, M_BPF);
		return (ENOMEM);
	}
	index = 0;
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
			xbd = &xbdbuf[index++];
			BPFD_LOCK(bd);
			bpfstats_fill_xbpf(xbd, bd);
			BPFD_UNLOCK(bd);
		}
	}
	mtx_unlock(&bpf_mtx);
	error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
	free(xbdbuf, M_BPF);
	return (error);
}
SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, bpf_drvinit, NULL);
#else /* !DEV_BPF && !NETGRAPH_BPF */
/*
 * NOP stubs to allow bpf-using drivers to load and function.
 *
 * A 'better' implementation would allow the core bpf functionality
 * to be loaded at runtime.
 */
static struct bpf_if bp_null;

void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
}

void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
}

void
bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m)
{
}

void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}

void
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{

	*driverp = &bp_null;
}

void
bpfdetach(struct ifnet *ifp)
{
}

u_int
bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
{

	return -1;	/* "no filter" behaviour */
}

int
bpf_validate(const struct bpf_insn *f, int len)
{

	return 0;		/* false */
}

#endif /* !DEV_BPF && !NETGRAPH_BPF */