/*-
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.4 (Berkeley) 1/9/95
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bpf.h"
#include "opt_netgraph.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/uio.h>

#include <sys/event.h>
#include <sys/file.h>
#include <sys/poll.h>

#include <sys/socket.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpf_buffer.h>
#ifdef BPF_JITTER
#include <net/bpf_jitter.h>
#endif
#include <net/bpf_zerocopy.h>
#include <net/bpfdesc.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <net80211/ieee80211_freebsd.h>

#include <security/mac/mac_framework.h>
MALLOC_DEFINE(M_BPF, "BPF", "BPF data");

#if defined(DEV_BPF) || defined(NETGRAPH_BPF)

#define PRINET	26			/* interruptible */

/*
 * bpf_iflist is a list of BPF interface structures, each corresponding to a
 * specific DLT.  The same network interface might have several BPF interface
 * structures registered by different layers in the stack (e.g., 802.11
 * frames, Ethernet frames, etc.).
 */
static LIST_HEAD(, bpf_if)	bpf_iflist;
static struct mtx	bpf_mtx;		/* bpf global lock */
static int		bpf_bpfd_cnt;
static void	bpf_attachd(struct bpf_d *, struct bpf_if *);
static void	bpf_detachd(struct bpf_d *);
static void	bpf_freed(struct bpf_d *);
static int	bpf_movein(struct uio *, int, struct ifnet *, struct mbuf **,
		    struct sockaddr *, int *, struct bpf_insn *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static void	bpf_timed_out(void *);
static __inline void
		bpf_wakeup(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
		    void (*)(struct bpf_d *, caddr_t, u_int, void *, u_int),
		    struct timeval *);
static void	reset_d(struct bpf_d *);
static int	bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int	bpf_setdlt(struct bpf_d *, u_int);
static void	filt_bpfdetach(struct knote *);
static int	filt_bpfread(struct knote *, long);
static void	bpf_drvinit(void *);
static int	bpf_stats_sysctl(SYSCTL_HANDLER_ARGS);
SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl");
int bpf_maxinsns = BPF_MAXINSNS;
SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW,
    &bpf_maxinsns, 0, "Maximum bpf program instructions");
static int bpf_zerocopy_enable = 0;
SYSCTL_INT(_net_bpf, OID_AUTO, zerocopy_enable, CTLFLAG_RW,
    &bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions");
SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_MPSAFE | CTLFLAG_RW,
    bpf_stats_sysctl, "bpf statistics portal");
static	d_open_t	bpfopen;
static	d_read_t	bpfread;
static	d_write_t	bpfwrite;
static	d_ioctl_t	bpfioctl;
static	d_poll_t	bpfpoll;
static	d_kqfilter_t	bpfkqfilter;

static struct cdevsw bpf_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	bpfopen,
	.d_read =	bpfread,
	.d_write =	bpfwrite,
	.d_ioctl =	bpfioctl,
	.d_poll =	bpfpoll,
	.d_name =	"bpf",
	.d_kqfilter =	bpfkqfilter,
};

static struct filterops bpfread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_bpfdetach,
	.f_event = filt_bpfread,
};
/*
 * Wrapper functions for various buffering methods.  If the set of buffer
 * modes expands, we will probably want to introduce a switch data structure
 * similar to protosw, et al.
 */
static void
bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
    u_int len)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_append_bytes(d, buf, offset, src, len));

	case BPF_BUFMODE_ZBUF:
		d->bd_zcopy++;
		return (bpf_zerocopy_append_bytes(d, buf, offset, src, len));

	default:
		panic("bpf_buf_append_bytes");
	}
}

static void
bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
    u_int len)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_append_mbuf(d, buf, offset, src, len));

	case BPF_BUFMODE_ZBUF:
		d->bd_zcopy++;
		return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len));

	default:
		panic("bpf_buf_append_mbuf");
	}
}
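/*
 * As a sketch of the switch structure mentioned above (hypothetical, not
 * part of the current implementation; the bpf_bufops name and its members
 * are invented for illustration):
 *
 *	struct bpf_bufops {
 *		void (*bo_append_bytes)(struct bpf_d *, caddr_t, u_int,
 *		    void *, u_int);
 *		void (*bo_append_mbuf)(struct bpf_d *, caddr_t, u_int,
 *		    void *, u_int);
 *	};
 *
 * Each buffer mode would supply one instance, and the switch statements
 * above would collapse into indirect calls through a per-descriptor
 * d->bd_bufops pointer.
 */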
/*
 * This function gets called when the free buffer is re-assigned.
 */
static void
bpf_buf_reclaimed(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return;

	case BPF_BUFMODE_ZBUF:
		bpf_zerocopy_buf_reclaimed(d);
		return;

	default:
		panic("bpf_buf_reclaimed");
	}
}

/*
 * If the buffer mechanism has a way to decide that a held buffer can be made
 * free, then it is exposed via the bpf_canfreebuf() interface.  (1) is
 * returned if the buffer can be discarded, (0) is returned if it cannot.
 */
static int
bpf_canfreebuf(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		return (bpf_zerocopy_canfreebuf(d));
	}
	return (0);
}

/*
 * Allow the buffer model to indicate that the current store buffer is
 * immutable, regardless of the appearance of space.  Return (1) if the
 * buffer is writable, and (0) if not.
 */
static int
bpf_canwritebuf(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		return (bpf_zerocopy_canwritebuf(d));
	}
	return (1);
}

/*
 * Notify buffer model that an attempt to write to the store buffer has
 * resulted in a dropped packet, in which case the buffer may be considered
 * full.
 */
static void
bpf_buffull(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		bpf_zerocopy_buffull(d);
		break;
	}
}

/*
 * Notify the buffer model that a buffer has moved into the hold position.
 */
void
bpf_bufheld(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		bpf_zerocopy_bufheld(d);
		break;
	}
}

static void
bpf_free(struct bpf_d *d)
{

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_free(d));

	case BPF_BUFMODE_ZBUF:
		return (bpf_zerocopy_free(d));

	default:
		panic("bpf_buf_free");
	}
}
static int
bpf_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio)
{

	if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
		return (EOPNOTSUPP);
	return (bpf_buffer_uiomove(d, buf, len, uio));
}

static int
bpf_ioctl_sblen(struct bpf_d *d, u_int *i)
{

	if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
		return (EOPNOTSUPP);
	return (bpf_buffer_ioctl_sblen(d, i));
}

static int
bpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
{

	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
		return (EOPNOTSUPP);
	return (bpf_zerocopy_ioctl_getzmax(td, d, i));
}

static int
bpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
{

	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
		return (EOPNOTSUPP);
	return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz));
}

static int
bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
{

	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
		return (EOPNOTSUPP);
	return (bpf_zerocopy_ioctl_setzbuf(td, d, bz));
}
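/*
 * Illustrative userland setup for a zero-copy session, driving the three
 * wrappers above (error handling omitted; assumes the
 * net.bpf.zerocopy_enable sysctl is set and that the buffer size is
 * page-aligned and no larger than the BIOCGETZMAX limit):
 *
 *	u_int mode = BPF_BUFMODE_ZBUF;
 *	size_t zmax;
 *	struct bpf_zbuf bz;
 *
 *	ioctl(fd, BIOCSETBUFMODE, &mode);
 *	ioctl(fd, BIOCGETZMAX, &zmax);
 *	bz.bz_buflen = getpagesize();
 *	bz.bz_bufa = mmap(NULL, bz.bz_buflen, PROT_READ | PROT_WRITE,
 *	    MAP_ANON, -1, 0);
 *	bz.bz_bufb = mmap(NULL, bz.bz_buflen, PROT_READ | PROT_WRITE,
 *	    MAP_ANON, -1, 0);
 *	ioctl(fd, BIOCSETZBUF, &bz);
 */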
/*
 * General BPF functions.
 */
static int
bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp,
    struct sockaddr *sockp, int *hdrlen, struct bpf_insn *wfilter)
{
	const struct ieee80211_bpf_params *p;
	struct ether_header *eh;
	struct mbuf *m;
	int error;
	int len;
	int hlen;
	int slen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = ETHER_HDR_LEN;
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
		break;

	case DLT_RAW:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_NULL:
		/*
		 * null interface types require a 4 byte pseudo header which
		 * corresponds to the address family of the packet.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;
		break;

	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */
		break;

	case DLT_IEEE802_11:		/* IEEE 802.11 wireless */
		sockp->sa_family = AF_IEEE80211;
		hlen = 0;
		break;

	case DLT_IEEE802_11_RADIO:	/* IEEE 802.11 wireless w/ phy params */
		sockp->sa_family = AF_IEEE80211;
		sockp->sa_len = 12;	/* XXX != 0 */
		hlen = sizeof(struct ieee80211_bpf_params);
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;

	if (len - hlen > ifp->if_mtu)
		return (EMSGSIZE);

	if ((unsigned)len > MJUM16BYTES)
		return (EIO);

	if (len <= MHLEN)
		MGETHDR(m, M_WAIT, MT_DATA);
	else if (len <= MCLBYTES)
		m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
	else
		m = m_getjcl(M_WAIT, MT_DATA, M_PKTHDR,
#if (MJUMPAGESIZE > MCLBYTES)
		    len <= MJUMPAGESIZE ? MJUMPAGESIZE :
#endif
		    (len <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES));
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;

	if (m->m_len < hlen) {
		error = EPERM;
		goto bad;
	}

	error = uiomove(mtod(m, u_char *), len, uio);
	if (error)
		goto bad;

	slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
	if (slen == 0) {
		error = EPERM;
		goto bad;
	}

	/* Check for multicast destination */
	switch (linktype) {
	case DLT_EN10MB:
		eh = mtod(m, struct ether_header *);
		if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
			if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost,
			    ETHER_ADDR_LEN) == 0)
				m->m_flags |= M_BCAST;
			else
				m->m_flags |= M_MCAST;
		}
		break;
	}

	/*
	 * Make room for link header, and copy it to sockaddr
	 */
	if (hlen != 0) {
		if (sockp->sa_family == AF_IEEE80211) {
			/*
			 * Collect true length from the parameter header
			 * NB: sockp is known to be zero'd so if we do a
			 *     short copy unspecified parameters will be
			 *     found to have zero value.
			 * NB: packet may not be aligned after stripping
			 *     bpf params
			 * XXX check ibp_vers
			 */
			p = mtod(m, const struct ieee80211_bpf_params *);
			hlen = p->ibp_len;
			if (hlen > sizeof(sockp->sa_data)) {
				error = EINVAL;
				goto bad;
			}
		}
		bcopy(m->m_data, sockp->sa_data, hlen);
	}
	*hdrlen = hlen;

	return (0);
bad:
	m_freem(m);
	return (error);
}
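/*
 * Worked example of the header handling above for DLT_EN10MB: a 100-byte
 * write() gives len = 100 and hlen = ETHER_HDR_LEN (14).  The first 14
 * bytes land in sockp->sa_data via the bcopy() above, and bpfwrite() then
 * strips them from the mbuf, so the driver's if_output() routine sees an
 * 86-byte payload plus the link-level header in the sockaddr.
 */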
/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 */
static void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	BPFIF_LOCK(bp);
	d->bd_bif = bp;
	LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);

	bpf_bpfd_cnt++;
	BPFIF_UNLOCK(bp);

	EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(struct bpf_d *d)
{
	int error;
	struct bpf_if *bp;
	struct ifnet *ifp;

	bp = d->bd_bif;
	BPFIF_LOCK(bp);
	BPFD_LOCK(d);
	ifp = d->bd_bif->bif_ifp;

	/*
	 * Remove d from the interface's descriptor list.
	 */
	LIST_REMOVE(d, bd_next);

	bpf_bpfd_cnt--;
	d->bd_bif = NULL;
	BPFD_UNLOCK(d);
	BPFIF_UNLOCK(bp);

	EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0);

	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		CURVNET_SET(ifp->if_vnet);
		error = ifpromisc(ifp, 0);
		CURVNET_RESTORE();
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged.
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			if_printf(bp->bif_ifp,
			    "bpf_detach: ifpromisc failed (%d)\n", error);
		}
	}
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
static void
bpf_dtor(void *data)
{
	struct bpf_d *d = data;

	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);
	funsetown(&d->bd_sigio);
	mtx_lock(&bpf_mtx);
	if (d->bd_bif)
		bpf_detachd(d);
	mtx_unlock(&bpf_mtx);
	selwakeuppri(&d->bd_sel, PRINET);
#ifdef MAC
	mac_bpfdesc_destroy(d);
#endif /* MAC */
	knlist_destroy(&d->bd_sel.si_note);
	callout_drain(&d->bd_callout);
	bpf_freed(d);
	free(d, M_BPF);
}

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static	int
bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct bpf_d *d;
	int error;

	d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
	error = devfs_set_cdevpriv(d, bpf_dtor);
	if (error != 0) {
		free(d, M_BPF);
		return (error);
	}

	/*
	 * For historical reasons, perform a one-time initialization call to
	 * the buffer routines, even though we're not yet committed to a
	 * particular buffer method.
	 */
	bpf_buffer_init(d);
	d->bd_bufmode = BPF_BUFMODE_BUFFER;
	d->bd_sig = SIGIO;
	d->bd_direction = BPF_D_INOUT;
	d->bd_pid = td->td_proc->p_pid;
#ifdef MAC
	mac_bpfdesc_init(d);
	mac_bpfdesc_create(td->td_ucred, d);
#endif
	mtx_init(&d->bd_mtx, devtoname(dev), "bpf cdev lock", MTX_DEF);
	callout_init_mtx(&d->bd_callout, &d->bd_mtx, 0);
	knlist_init_mtx(&d->bd_sel.si_note, &d->bd_mtx);

	return (0);
}
/*
 * bpfread - read next chunk of packets from buffers
 */
static	int
bpfread(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d;
	int error;
	int non_block;
	int timed_out;

	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	non_block = ((ioflag & O_NONBLOCK) != 0);

	BPFD_LOCK(d);
	d->bd_pid = curthread->td_proc->p_pid;
	if (d->bd_bufmode != BPF_BUFMODE_BUFFER) {
		BPFD_UNLOCK(d);
		return (EOPNOTSUPP);
	}
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == NULL) {
		if (d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 */
			if (d->bd_immediate || non_block || timed_out) {
				/*
				 * Rotate the buffers and return what's here
				 * if we are in immediate mode, non-blocking
				 * flag is set, or this descriptor timed out.
				 */
				ROTATE_BUFFERS(d);
				break;
			}
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			BPFD_UNLOCK(d);
			return (ENXIO);
		}

		if (non_block) {
			BPFD_UNLOCK(d);
			return (EWOULDBLOCK);
		}
		error = msleep(d, &d->bd_mtx, PRINET|PCATCH,
		    "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			BPFD_UNLOCK(d);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				BPFD_UNLOCK(d);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	BPFD_UNLOCK(d);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 *
	 * XXXRW: More synchronization needed here: what if a second thread
	 * issues a read on the same fd at the same time?  Don't want this
	 * getting invalidated.
	 */
	error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio);

	BPFD_LOCK(d);
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
	bpf_buf_reclaimed(d);
	BPFD_UNLOCK(d);

	return (error);
}
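/*
 * Illustrative userland counterpart to the read path above (buf must be
 * exactly the descriptor's buffer size, per the check at the top of
 * bpfread(); handle_packet() is a placeholder):
 *
 *	ssize_t cc = read(fd, buf, buflen);
 *	char *p = buf;
 *
 *	while (p < buf + cc) {
 *		struct bpf_hdr *bh = (struct bpf_hdr *)p;
 *
 *		handle_packet(p + bh->bh_hdrlen, bh->bh_caplen);
 *		p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
 *	}
 */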
/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static __inline void
bpf_wakeup(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);
	if (d->bd_state == BPF_WAITING) {
		callout_stop(&d->bd_callout);
		d->bd_state = BPF_IDLE;
	}
	wakeup(d);
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(&d->bd_sigio, d->bd_sig, 0);

	selwakeuppri(&d->bd_sel, PRINET);
	KNOTE_LOCKED(&d->bd_sel.si_note, 0);
}

static void
bpf_timed_out(void *arg)
{
	struct bpf_d *d = (struct bpf_d *)arg;

	BPFD_LOCK_ASSERT(d);

	if (callout_pending(&d->bd_callout) || !callout_active(&d->bd_callout))
		return;
	if (d->bd_state == BPF_WAITING) {
		d->bd_state = BPF_TIMED_OUT;
		if (d->bd_slen != 0)
			bpf_wakeup(d);
	}
}

static int
bpf_ready(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	if (!bpf_canfreebuf(d) && d->bd_hlen != 0)
		return (1);
	if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
	    d->bd_slen != 0)
		return (1);
	return (0);
}
static int
bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d;
	struct ifnet *ifp;
	struct mbuf *m, *mc;
	struct sockaddr dst;
	int error, hlen;

	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	d->bd_pid = curthread->td_proc->p_pid;
	d->bd_wcount++;
	if (d->bd_bif == NULL) {
		d->bd_wdcount++;
		return (ENXIO);
	}

	ifp = d->bd_bif->bif_ifp;

	if ((ifp->if_flags & IFF_UP) == 0) {
		d->bd_wdcount++;
		return (ENETDOWN);
	}

	if (uio->uio_resid == 0) {
		d->bd_wdcount++;
		return (0);
	}

	bzero(&dst, sizeof(dst));
	m = NULL;
	hlen = 0;
	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp,
	    &m, &dst, &hlen, d->bd_wfilter);
	if (error) {
		d->bd_wdcount++;
		return (error);
	}
	d->bd_wfcount++;
	if (d->bd_hdrcmplt)
		dst.sa_family = pseudo_AF_HDRCMPLT;

	if (d->bd_feedback) {
		mc = m_dup(m, M_DONTWAIT);
		if (mc != NULL)
			mc->m_pkthdr.rcvif = ifp;
		/* Set M_PROMISC for outgoing packets to be discarded. */
		if (d->bd_direction == BPF_D_INOUT)
			m->m_flags |= M_PROMISC;
	} else
		mc = NULL;

	m->m_pkthdr.len -= hlen;
	m->m_len -= hlen;
	m->m_data += hlen;	/* XXX */

	CURVNET_SET(ifp->if_vnet);
#ifdef MAC
	BPFD_LOCK(d);
	mac_bpfdesc_create_mbuf(d, m);
	if (mc != NULL)
		mac_bpfdesc_create_mbuf(d, mc);
	BPFD_UNLOCK(d);
#endif

	error = (*ifp->if_output)(ifp, m, &dst, NULL);
	if (error)
		d->bd_wdcount++;

	if (mc != NULL) {
		if (error == 0)
			(*ifp->if_input)(ifp, mc);
		else
			m_freem(mc);
	}
	CURVNET_RESTORE();

	return (error);
}
/*
 * Reset a descriptor by flushing its packet buffer and clearing the receive
 * and drop counts.  This is doable for kernel-only buffers, but with
 * zero-copy buffers, we can't write to (or rotate) buffers that are
 * currently owned by userspace.  It would be nice if we could encapsulate
 * this logic in the buffer code rather than here.
 */
static void
reset_d(struct bpf_d *d)
{

	mtx_assert(&d->bd_mtx, MA_OWNED);

	if ((d->bd_hbuf != NULL) &&
	    (d->bd_bufmode != BPF_BUFMODE_ZBUF || bpf_canfreebuf(d))) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
		d->bd_hlen = 0;
		bpf_buf_reclaimed(d);
	}
	if (bpf_canwritebuf(d))
		d->bd_slen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
	d->bd_fcount = 0;
	d->bd_wcount = 0;
	d->bd_wdcount = 0;
	d->bd_wfcount = 0;
	d->bd_zcopy = 0;
}
/*
 * FIONREAD		Check for read packet available.
 * SIOCGIFADDR		Get interface address - convenient hook to driver.
 * BIOCGBLEN		Get buffer len [for read()].
 * BIOCSETF		Set read filter.
 * BIOCSETFNR		Set read filter without resetting descriptor.
 * BIOCSETWF		Set write filter.
 * BIOCFLUSH		Flush read packet buffer.
 * BIOCPROMISC		Put interface into promiscuous mode.
 * BIOCGDLT		Get link layer type.
 * BIOCGETIF		Get interface name.
 * BIOCSETIF		Set interface.
 * BIOCSRTIMEOUT	Set read timeout.
 * BIOCGRTIMEOUT	Get read timeout.
 * BIOCGSTATS		Get packet stats.
 * BIOCIMMEDIATE	Set immediate mode.
 * BIOCVERSION		Get filter language version.
 * BIOCGHDRCMPLT	Get "header already complete" flag.
 * BIOCSHDRCMPLT	Set "header already complete" flag.
 * BIOCGDIRECTION	Get packet direction flag.
 * BIOCSDIRECTION	Set packet direction flag.
 * BIOCLOCK		Set "locked" flag.
 * BIOCFEEDBACK		Set packet feedback mode.
 * BIOCSETZBUF		Set current zero-copy buffer locations.
 * BIOCGETZMAX		Get maximum zero-copy buffer size.
 * BIOCROTZBUF		Force rotation of zero-copy buffer.
 * BIOCSETBUFMODE	Set buffer mode.
 * BIOCGETBUFMODE	Get current buffer mode.
 */
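/*
 * A minimal userland sketch of driving these ioctls (error checks omitted;
 * "em0" is an arbitrary example interface):
 *
 *	struct ifreq ifr;
 *	u_int imm = 1, blen;
 *	int fd = open("/dev/bpf", O_RDWR);
 *
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);
 *	ioctl(fd, BIOCIMMEDIATE, &imm);
 *	ioctl(fd, BIOCGBLEN, &blen);
 */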
/* ARGSUSED */
static	int
bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct bpf_d *d;
	int error;

	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	/*
	 * Refresh PID associated with this descriptor.
	 */
	BPFD_LOCK(d);
	d->bd_pid = td->td_proc->p_pid;
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);

	if (d->bd_locked == 1) {
		switch (cmd) {
		case BIOCGBLEN:
		case BIOCFLUSH:
		case BIOCGDLT:
		case BIOCGDLTLIST:
		case BIOCGETIF:
		case BIOCGRTIMEOUT:
		case BIOCGSTATS:
		case BIOCVERSION:
		case BIOCGRSIG:
		case BIOCGHDRCMPLT:
		case BIOCFEEDBACK:
		case FIONREAD:
		case BIOCLOCK:
		case BIOCSRTIMEOUT:
		case BIOCIMMEDIATE:
		case TIOCGPGRP:
		case BIOCROTZBUF:
			break;
		default:
			return (EPERM);
		}
	}
	CURVNET_SET(TD_TO_VNET(td));

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			BPFD_LOCK(d);
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			BPFD_UNLOCK(d);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == NULL)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
		error = bpf_ioctl_sblen(d, (u_int *)addr);
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
	case BIOCSETFNR:
	case BIOCSETWF:
		error = bpf_setf(d, (struct bpf_program *)addr, cmd);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == NULL) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		break;

	/*
	 * Get current data link type.
	 */
	case BIOCGDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get a list of supported data link types.
	 */
	case BIOCGDLTLIST:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
		break;

	/*
	 * Set data link type.
	 */
	case BIOCSDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)addr);
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			strlcpy(ifr->ifr_name, ifp->if_xname,
			    sizeof(ifr->ifr_name));
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			/* XXXCSJP overflow */
			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag.
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag.
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get packet direction flag.
	 */
	case BIOCGDIRECTION:
		*(u_int *)addr = d->bd_direction;
		break;

	/*
	 * Set packet direction flag.
	 */
	case BIOCSDIRECTION:
		{
			u_int	direction;

			direction = *(u_int *)addr;
			switch (direction) {
			case BPF_D_IN:
			case BPF_D_INOUT:
			case BPF_D_OUT:
				d->bd_direction = direction;
				break;
			default:
				error = EINVAL;
			}
		}
		break;

	case BIOCFEEDBACK:
		d->bd_feedback = *(u_int *)addr;
		break;

	case BIOCLOCK:
		d->bd_locked = 1;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(&d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(&d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;

	case BIOCGETBUFMODE:
		*(u_int *)addr = d->bd_bufmode;
		break;

	case BIOCSETBUFMODE:
		/*
		 * Allow the buffering mode to be changed as long as we
		 * haven't yet committed to a particular mode.  Our
		 * definition of commitment, for now, is whether or not a
		 * buffer has been allocated or an interface attached, since
		 * that's the point where things get tricky.
		 */
		switch (*(u_int *)addr) {
		case BPF_BUFMODE_BUFFER:
			break;

		case BPF_BUFMODE_ZBUF:
			if (bpf_zerocopy_enable)
				break;
			/* FALLSTHROUGH */

		default:
			return (EINVAL);
		}

		BPFD_LOCK(d);
		if (d->bd_sbuf != NULL || d->bd_hbuf != NULL ||
		    d->bd_fbuf != NULL || d->bd_bif != NULL) {
			BPFD_UNLOCK(d);
			return (EBUSY);
		}
		d->bd_bufmode = *(u_int *)addr;
		BPFD_UNLOCK(d);
		break;

	case BIOCGETZMAX:
		return (bpf_ioctl_getzmax(td, d, (size_t *)addr));

	case BIOCSETZBUF:
		return (bpf_ioctl_setzbuf(td, d, (struct bpf_zbuf *)addr));

	case BIOCROTZBUF:
		return (bpf_ioctl_rotzbuf(td, d, (struct bpf_zbuf *)addr));
	}
	CURVNET_RESTORE();

	return (error);
}
/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
{
	struct bpf_insn *fcode, *old;
	u_int wfilter, flen, size;
#ifdef BPF_JITTER
	bpf_jit_filter *ofunc;
#endif

	if (cmd == BIOCSETWF) {
		old = d->bd_wfilter;
		wfilter = 1;
#ifdef BPF_JITTER
		ofunc = NULL;
#endif
	} else {
		wfilter = 0;
		old = d->bd_rfilter;
#ifdef BPF_JITTER
		ofunc = d->bd_bfilter;
#endif
	}
	if (fp->bf_insns == NULL) {
		if (fp->bf_len != 0)
			return (EINVAL);
		BPFD_LOCK(d);
		if (wfilter)
			d->bd_wfilter = NULL;
		else {
			d->bd_rfilter = NULL;
#ifdef BPF_JITTER
			d->bd_bfilter = NULL;
#endif
			if (cmd == BIOCSETF)
				reset_d(d);
		}
		BPFD_UNLOCK(d);
		if (old != NULL)
			free((caddr_t)old, M_BPF);
#ifdef BPF_JITTER
		if (ofunc != NULL)
			bpf_destroy_jit_filter(ofunc);
#endif
		return (0);
	}
	flen = fp->bf_len;
	if (flen > bpf_maxinsns)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		BPFD_LOCK(d);
		if (wfilter)
			d->bd_wfilter = fcode;
		else {
			d->bd_rfilter = fcode;
#ifdef BPF_JITTER
			d->bd_bfilter = bpf_jitter(fcode, flen);
#endif
			if (cmd == BIOCSETF)
				reset_d(d);
		}
		BPFD_UNLOCK(d);
		if (old != NULL)
			free((caddr_t)old, M_BPF);
#ifdef BPF_JITTER
		if (ofunc != NULL)
			bpf_destroy_jit_filter(ofunc);
#endif

		return (0);
	}
	free((caddr_t)fcode, M_BPF);
	return (EINVAL);
}
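/*
 * Illustrative userland filter load against the code above: a
 * one-instruction program that accepts every packet, capturing at most 96
 * bytes of each (BPF_STMT and friends come from <net/bpf.h>):
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 96),
 *	};
 *	struct bpf_program prog = { 1, insns };
 *
 *	ioctl(fd, BIOCSETF, &prog);
 */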
/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == NULL || theywant->if_bpf == NULL)
		return (ENXIO);

	bp = theywant->if_bpf;

	/*
	 * Behavior here depends on the buffering model.  If we're using
	 * kernel memory buffers, then we can allocate them here.  If we're
	 * using zero-copy, then the user process must have registered
	 * buffers by the time we get here.  If not, return an error.
	 *
	 * XXXRW: There are locking issues here with multi-threaded use: what
	 * if two threads try to set the interface at once?
	 */
	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		if (d->bd_sbuf == NULL)
			bpf_buffer_alloc(d);
		KASSERT(d->bd_sbuf != NULL, ("bpf_setif: bd_sbuf NULL"));
		break;

	case BPF_BUFMODE_ZBUF:
		if (d->bd_sbuf == NULL)
			return (EINVAL);
		break;

	default:
		panic("bpf_setif: bufmode %d", d->bd_bufmode);
	}
	if (bp != d->bd_bif) {
		if (d->bd_bif)
			/*
			 * Detach if attached to something else.
			 */
			bpf_detachd(d);

		bpf_attachd(d, bp);
	}
	BPFD_LOCK(d);
	reset_d(d);
	BPFD_UNLOCK(d);
	return (0);
}
/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
static int
bpfpoll(struct cdev *dev, int events, struct thread *td)
{
	struct bpf_d *d;
	int revents;

	if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL)
		return (events &
		    (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));

	/*
	 * Refresh PID associated with this descriptor.
	 */
	revents = events & (POLLOUT | POLLWRNORM);
	BPFD_LOCK(d);
	d->bd_pid = td->td_proc->p_pid;
	if (events & (POLLIN | POLLRDNORM)) {
		if (bpf_ready(d))
			revents |= events & (POLLIN | POLLRDNORM);
		else {
			selrecord(td, &d->bd_sel);
			/* Start the read timeout if necessary. */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
				    bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}
	BPFD_UNLOCK(d);
	return (revents);
}
/*
 * Support for kevent() system call.  Register EVFILT_READ filters and
 * reject all others.
 */
static int
bpfkqfilter(struct cdev *dev, struct knote *kn)
{
	struct bpf_d *d;

	if (devfs_get_cdevpriv((void **)&d) != 0 ||
	    kn->kn_filter != EVFILT_READ)
		return (1);

	/*
	 * Refresh PID associated with this descriptor.
	 */
	BPFD_LOCK(d);
	d->bd_pid = curthread->td_proc->p_pid;
	kn->kn_fop = &bpfread_filtops;
	kn->kn_hook = d;
	knlist_add(&d->bd_sel.si_note, kn, 1);
	BPFD_UNLOCK(d);

	return (0);
}

static void
filt_bpfdetach(struct knote *kn)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;

	knlist_remove(&d->bd_sel.si_note, kn, 0);
}

static int
filt_bpfread(struct knote *kn, long hint)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
	int ready;

	BPFD_LOCK_ASSERT(d);
	ready = bpf_ready(d);
	if (ready) {
		kn->kn_data = d->bd_slen;
		if (d->bd_hbuf)
			kn->kn_data += d->bd_hlen;
	} else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
		callout_reset(&d->bd_callout, d->bd_rtout,
		    bpf_timed_out, d);
		d->bd_state = BPF_WAITING;
	}

	return (ready);
}
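/*
 * Illustrative userland registration against this filter, using the
 * standard kqueue(2)/kevent(2) interfaces:
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */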
/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
	struct bpf_d *d;
#ifdef BPF_JITTER
	bpf_jit_filter *bf;
#endif
	u_int slen;
	int gottime;
	struct timeval tv;

	gottime = 0;
	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		BPFD_LOCK(d);
		++d->bd_rcount;
		/*
		 * NB: We don't call BPF_CHECK_DIRECTION() here since there
		 * is no way for the caller to indicate to us whether this
		 * packet is inbound or outbound.  In the bpf_mtap() routines,
		 * we use the interface pointers on the mbuf to figure it out.
		 */
#ifdef BPF_JITTER
		bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
		if (bf != NULL)
			slen = (*(bf->func))(pkt, pktlen, pktlen);
		else
#endif
		slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
		if (slen != 0) {
			d->bd_fcount++;
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, pkt, pktlen, slen,
				    bpf_append_bytes, &tv);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}
#define	BPF_CHECK_DIRECTION(d, r, i)				\
	    (((d)->bd_direction == BPF_D_IN && (r) != (i)) ||	\
	    ((d)->bd_direction == BPF_D_OUT && (r) == (i)))
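/*
 * The macro is true when a packet should be skipped: r is the mbuf's
 * receive interface and i is the tap's interface, so r == i for inbound
 * packets and r != i (typically NULL) for outbound ones.  For example, a
 * BPF_D_OUT descriptor skips any packet with r == i, i.e., everything
 * received on the interface.
 */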
/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
	struct bpf_d *d;
#ifdef BPF_JITTER
	bpf_jit_filter *bf;
#endif
	u_int pktlen, slen;
	int gottime;
	struct timeval tv;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	gottime = 0;

	pktlen = m_length(m, NULL);

	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
			continue;
		BPFD_LOCK(d);
		++d->bd_rcount;
#ifdef BPF_JITTER
		bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
		/* XXX We cannot handle multiple mbufs. */
		if (bf != NULL && m->m_next == NULL)
			slen = (*(bf->func))(mtod(m, u_char *), pktlen, pktlen);
		else
#endif
		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
		if (slen != 0) {
			d->bd_fcount++;
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)m, pktlen, slen,
				    bpf_append_mbuf, &tv);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}

/*
 * Incoming linkage from device drivers, when packet is in
 * an mbuf chain and to be prepended by a contiguous header.
 */
void
bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
{
	struct mbuf mb;
	struct bpf_d *d;
	u_int pktlen, slen;
	int gottime;
	struct timeval tv;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	gottime = 0;

	pktlen = m_length(m, NULL);
	/*
	 * Craft on-stack mbuf suitable for passing to bpf_filter.
	 * Note that we cut corners here; we only set up what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	mb.m_next = m;
	mb.m_data = data;
	mb.m_len = dlen;
	pktlen += dlen;

	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
			continue;
		BPFD_LOCK(d);
		++d->bd_rcount;
		slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
		if (slen != 0) {
			d->bd_fcount++;
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)&mb, pktlen, slen,
				    bpf_append_mbuf, &tv);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}

#undef	BPF_CHECK_DIRECTION
/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  "cpfn" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_append_mbuf is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
    void (*cpfn)(struct bpf_d *, caddr_t, u_int, void *, u_int),
    struct timeval *tv)
{
	struct bpf_hdr hdr;
	int totlen, curlen;
	int hdrlen = d->bd_bif->bif_hdrlen;
	int do_wakeup = 0;

	BPFD_LOCK_ASSERT(d);

	/*
	 * Detect whether user space has released a buffer back to us, and if
	 * so, move it from being a hold buffer to a free buffer.  This may
	 * not be the best place to do it (for example, we might only want to
	 * run this check if we need the space), but for now it's a reliable
	 * spot to do it.
	 */
	if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) {
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
		d->bd_hlen = 0;
		bpf_buf_reclaimed(d);
	}

	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 *
	 * Drop the packet if there's no room and no hope of room.
	 * If the packet would overflow the storage buffer or the storage
	 * buffer is considered immutable by the buffer model, try to rotate
	 * the buffer and wake up pending processes.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) {
		if (d->bd_fbuf == NULL) {
			/*
			 * There's no room in the store buffer, and no
			 * prospect of room, so drop the packet.  Notify the
			 * buffer model.
			 */
			bpf_buffull(d);
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		do_wakeup = 1;
		curlen = 0;
	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
		/*
		 * Immediate mode is set, or the read timeout has already
		 * expired during a select call.  A packet arrived, so the
		 * reader should be woken up.
		 */
		do_wakeup = 1;

	/*
	 * Append the bpf header.  Note we append the actual header size, but
	 * move forward the length of the header plus padding.
	 */
	bzero(&hdr, sizeof(hdr));
	hdr.bh_tstamp = *tv;
	hdr.bh_datalen = pktlen;
	hdr.bh_hdrlen = hdrlen;
	hdr.bh_caplen = totlen - hdrlen;
	bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr));

	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, hdr.bh_caplen);
	d->bd_slen = curlen + totlen;

	if (do_wakeup)
		bpf_wakeup(d);
}
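/*
 * Resulting store-buffer record layout, using the values computed above:
 *
 *	+------------------+---------+----------------------+
 *	| struct bpf_hdr   | padding | captured packet data |
 *	+------------------+---------+----------------------+
 *	curlen             (to curlen + hdrlen)    curlen + totlen
 *
 * The padding exists because bif_hdrlen was rounded up in bpfattach2() so
 * that the network-layer header inside the captured data starts on a
 * longword boundary.
 */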
/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(struct bpf_d *d)
{

	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	 * free.
	 */
	bpf_free(d);
	if (d->bd_rfilter) {
		free((caddr_t)d->bd_rfilter, M_BPF);
#ifdef BPF_JITTER
		bpf_destroy_jit_filter(d->bd_bfilter);
#endif
	}
	if (d->bd_wfilter != NULL)
		free((caddr_t)d->bd_wfilter, M_BPF);
	mtx_destroy(&d->bd_mtx);
}

/*
 * Attach an interface to bpf.  dlt is the link layer type; hdrlen is the
 * fixed size of the link header (variable length headers not yet supported).
 */
void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}

/*
 * Attach an interface to bpf.  ifp is a pointer to the structure
 * defining the interface to be attached, dlt is the link layer type,
 * and hdrlen is the fixed size of the link header (variable length
 * headers are not yet supported).
 */
void
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{
	struct bpf_if *bp;

	bp = malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
	if (bp == NULL)
		panic("bpfattach");

	LIST_INIT(&bp->bif_dlist);
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;
	mtx_init(&bp->bif_mtx, "bpf interface lock", NULL, MTX_DEF);
	KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized"));
	*driverp = bp;

	mtx_lock(&bpf_mtx);
	LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
	mtx_unlock(&bpf_mtx);

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
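	/*
	 * Worked example, assuming an Ethernet attachment (hdrlen = 14), an
	 * 18-byte SIZEOF_BPF_HDR, and 8-byte longword alignment:
	 * BPF_WORDALIGN(14 + 18) = 32, so bif_hdrlen = 32 - 14 = 18 and
	 * each capture record spends 18 + 14 = 32 bytes ahead of the
	 * network-layer header, which is therefore longword aligned.
	 */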
	if (bootverbose)
		if_printf(ifp, "bpf attached\n");
}
/*
 * Detach bpf from an interface.  This involves detaching each descriptor
 * associated with the interface, and leaving bd_bif NULL.  Notify each
 * descriptor as it's detached so that any sleepers wake up and get
 * ENXIO.
 */
void
bpfdetach(struct ifnet *ifp)
{
	struct bpf_if	*bp;
	struct bpf_d	*d;

	/* Locate BPF interface information */
	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (ifp == bp->bif_ifp)
			break;
	}

	/* Interface wasn't attached */
	if ((bp == NULL) || (bp->bif_ifp == NULL)) {
		mtx_unlock(&bpf_mtx);
		printf("bpfdetach: %s was not attached\n", ifp->if_xname);
		return;
	}

	LIST_REMOVE(bp, bif_next);
	mtx_unlock(&bpf_mtx);

	while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
		bpf_detachd(d);
		BPFD_LOCK(d);
		bpf_wakeup(d);
		BPFD_UNLOCK(d);
	}

	mtx_destroy(&bp->bif_mtx);
	free(bp, M_BPF);
}

/*
 * Get a list of available data link types for the interface.
 */
static int
bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
{
	int n, error;
	struct ifnet *ifp;
	struct bpf_if *bp;

	ifp = d->bd_bif->bif_ifp;
	n = 0;
	error = 0;
	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp != ifp)
			continue;
		if (bfl->bfl_list != NULL) {
			if (n >= bfl->bfl_len) {
				mtx_unlock(&bpf_mtx);
				return (ENOMEM);
			}
			error = copyout(&bp->bif_dlt,
			    bfl->bfl_list + n, sizeof(u_int));
		}
		n++;
	}
	mtx_unlock(&bpf_mtx);
	bfl->bfl_len = n;
	return (error);
}

/*
 * Set the data link type of a BPF instance.
 */
static int
bpf_setdlt(struct bpf_d *d, u_int dlt)
{
	int error, opromisc;
	struct ifnet *ifp;
	struct bpf_if *bp;

	if (d->bd_bif->bif_dlt == dlt)
		return (0);
	ifp = d->bd_bif->bif_ifp;
	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
			break;
	}
	mtx_unlock(&bpf_mtx);
	if (bp != NULL) {
		opromisc = d->bd_promisc;
		bpf_detachd(d);
		bpf_attachd(d, bp);
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		if (opromisc) {
			error = ifpromisc(bp->bif_ifp, 1);
			if (error)
				if_printf(bp->bif_ifp,
				    "bpf_setdlt: ifpromisc failed (%d)\n",
				    error);
			else
				d->bd_promisc = 1;
		}
	}
	return (bp == NULL ? EINVAL : 0);
}

static void
bpf_drvinit(void *unused)
{
	struct cdev *dev;

	mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
	LIST_INIT(&bpf_iflist);

	dev = make_dev(&bpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bpf");
	/* For compatibility */
	make_dev_alias(dev, "bpf0");
}

/*
 * Zero out the various packet counters associated with all of the bpf
 * descriptors.  At some point, we will probably want to get a bit more
 * granular and allow the user to specify descriptors to be zeroed.
 */
static void
bpf_zero_counters(void)
{
	struct bpf_if *bp;
	struct bpf_d *bd;

	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		BPFIF_LOCK(bp);
		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
			BPFD_LOCK(bd);
			bd->bd_rcount = 0;
			bd->bd_dcount = 0;
			bd->bd_fcount = 0;
			bd->bd_wcount = 0;
			bd->bd_wdcount = 0;
			bd->bd_zcopy = 0;
			BPFD_UNLOCK(bd);
		}
		BPFIF_UNLOCK(bp);
	}
	mtx_unlock(&bpf_mtx);
}
static void
bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
{

	bzero(d, sizeof(*d));
	BPFD_LOCK_ASSERT(bd);
	d->bd_structsize = sizeof(*d);
	d->bd_immediate = bd->bd_immediate;
	d->bd_promisc = bd->bd_promisc;
	d->bd_hdrcmplt = bd->bd_hdrcmplt;
	d->bd_direction = bd->bd_direction;
	d->bd_feedback = bd->bd_feedback;
	d->bd_async = bd->bd_async;
	d->bd_rcount = bd->bd_rcount;
	d->bd_dcount = bd->bd_dcount;
	d->bd_fcount = bd->bd_fcount;
	d->bd_sig = bd->bd_sig;
	d->bd_slen = bd->bd_slen;
	d->bd_hlen = bd->bd_hlen;
	d->bd_bufsize = bd->bd_bufsize;
	d->bd_pid = bd->bd_pid;
	strlcpy(d->bd_ifname,
	    bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
	d->bd_locked = bd->bd_locked;
	d->bd_wcount = bd->bd_wcount;
	d->bd_wdcount = bd->bd_wdcount;
	d->bd_wfcount = bd->bd_wfcount;
	d->bd_zcopy = bd->bd_zcopy;
	d->bd_bufmode = bd->bd_bufmode;
}

static int
bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct xbpf_d *xbdbuf, *xbd, zerostats;
	int index, error;
	struct bpf_if *bp;
	struct bpf_d *bd;

	/*
	 * XXX This is not technically correct.  It is possible for
	 * non-privileged users to open bpf devices.  It would make sense
	 * if the users who opened the devices were able to retrieve
	 * the statistics for them, too.
	 */
	error = priv_check(req->td, PRIV_NET_BPF);
	if (error)
		return (error);
	/*
	 * Check to see if the user is requesting that the counters be
	 * zeroed out.  Explicitly check that the supplied data is zeroed,
	 * as we aren't allowing the user to set the counters currently.
	 */
	if (req->newptr != NULL) {
		if (req->newlen != sizeof(zerostats))
			return (EINVAL);
		bzero(&zerostats, sizeof(zerostats));
		xbd = (struct xbpf_d *)req->newptr;
		if (bcmp(xbd, &zerostats, sizeof(*xbd)) != 0)
			return (EINVAL);
		bpf_zero_counters();
		return (0);
	}
	if (req->oldptr == NULL)
		return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
	if (bpf_bpfd_cnt == 0)
		return (SYSCTL_OUT(req, 0, 0));
	xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
	mtx_lock(&bpf_mtx);
	if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
		mtx_unlock(&bpf_mtx);
		free(xbdbuf, M_BPF);
		return (ENOMEM);
	}
	index = 0;
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		BPFIF_LOCK(bp);
		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
			xbd = &xbdbuf[index++];
			BPFD_LOCK(bd);
			bpfstats_fill_xbpf(xbd, bd);
			BPFD_UNLOCK(bd);
		}
		BPFIF_UNLOCK(bp);
	}
	mtx_unlock(&bpf_mtx);
	error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
	free(xbdbuf, M_BPF);
	return (error);
}
SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, bpf_drvinit, NULL);
#else /* !DEV_BPF && !NETGRAPH_BPF */
/*
 * NOP stubs to allow bpf-using drivers to load and function.
 *
 * A 'better' implementation would allow the core bpf functionality
 * to be loaded at runtime.
 */
static struct bpf_if bp_null;

void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
}

void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
}

void
bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m)
{
}

void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}

void
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{

	*driverp = &bp_null;
}

void
bpfdetach(struct ifnet *ifp)
{
}

u_int
bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
{

	return -1;	/* "no filter" behaviour */
}

int
bpf_validate(const struct bpf_insn *f, int len)
{

	return 0;		/* false */
}

#endif /* !DEV_BPF && !NETGRAPH_BPF */