2 * Copyright (c) 1990, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
5 * This code is derived from the Stanford/CMU enet packet filter,
6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * @(#)bpf.c 8.4 (Berkeley) 1/9/95
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
41 #include "opt_compat.h"
42 #include "opt_netgraph.h"
44 #include <sys/types.h>
45 #include <sys/param.h>
47 #include <sys/rwlock.h>
48 #include <sys/systm.h>
50 #include <sys/fcntl.h>
52 #include <sys/malloc.h>
57 #include <sys/signalvar.h>
58 #include <sys/filio.h>
59 #include <sys/sockio.h>
60 #include <sys/ttycom.h>
63 #include <sys/event.h>
68 #include <sys/socket.h>
71 #include <net/if_var.h>
74 #include <net/bpf_buffer.h>
76 #include <net/bpf_jitter.h>
78 #include <net/bpf_zerocopy.h>
79 #include <net/bpfdesc.h>
82 #include <netinet/in.h>
83 #include <netinet/if_ether.h>
84 #include <sys/kernel.h>
85 #include <sys/sysctl.h>
87 #include <net80211/ieee80211_freebsd.h>
89 #include <security/mac/mac_framework.h>
91 MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
93 #if defined(DEV_BPF) || defined(NETGRAPH_BPF)
95 #define PRINET 26 /* interruptible */
97 #define SIZEOF_BPF_HDR(type) \
98 (offsetof(type, bh_hdrlen) + sizeof(((type *)0)->bh_hdrlen))
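/*
 * Illustrative note (an addition, not from the original file): the macro
 * above measures a header type up to and including its bh_hdrlen member,
 * so trailing structure padding is excluded.  For example, assuming
 * bh_hdrlen is a uint16_t as in struct bpf_hdr32 below:
 *
 *	SIZEOF_BPF_HDR(struct bpf_hdr32) ==
 *	    offsetof(struct bpf_hdr32, bh_hdrlen) + sizeof(uint16_t)
 */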
100 #ifdef COMPAT_FREEBSD32
101 #include <sys/mount.h>
102 #include <compat/freebsd32/freebsd32.h>
103 #define BPF_ALIGNMENT32 sizeof(int32_t)
104 #define BPF_WORDALIGN32(x) (((x)+(BPF_ALIGNMENT32-1))&~(BPF_ALIGNMENT32-1))
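/*
 * Worked example (illustrative addition): with BPF_ALIGNMENT32 == 4,
 *
 *	BPF_WORDALIGN32(5) == (5 + 3) & ~3 == 8
 *	BPF_WORDALIGN32(8) == (8 + 3) & ~3 == 8
 *
 * i.e., already-aligned values pass through unchanged.
 */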
108 * 32-bit version of structure prepended to each packet. We use this header
109 * instead of the standard one for 32-bit streams. We mark a stream as
110 * 32-bit the first time we see a 32-bit compat ioctl request.
113 struct timeval32 bh_tstamp; /* time stamp */
114 uint32_t bh_caplen; /* length of captured portion */
115 uint32_t bh_datalen; /* original length of packet */
116 uint16_t bh_hdrlen; /* length of bpf header (this struct
117 plus alignment padding) */
121 struct bpf_program32 {
126 struct bpf_dltlist32 {
131 #define BIOCSETF32 _IOW('B', 103, struct bpf_program32)
132 #define BIOCSRTIMEOUT32 _IOW('B', 109, struct timeval32)
133 #define BIOCGRTIMEOUT32 _IOR('B', 110, struct timeval32)
134 #define BIOCGDLTLIST32 _IOWR('B', 121, struct bpf_dltlist32)
135 #define BIOCSETWF32 _IOW('B', 123, struct bpf_program32)
136 #define BIOCSETFNR32 _IOW('B', 130, struct bpf_program32)
140 * bpf_iflist is a list of BPF interface structures, each corresponding to a
141 * specific DLT. The same network interface might have several BPF interface
142 * structures registered by different layers in the stack (e.g., 802.11
143 * frames, Ethernet frames, etc.).
145 static LIST_HEAD(, bpf_if) bpf_iflist, bpf_freelist;
146 static struct mtx bpf_mtx; /* bpf global lock */
147 static int bpf_bpfd_cnt;
149 static void bpf_attachd(struct bpf_d *, struct bpf_if *);
150 static void bpf_detachd(struct bpf_d *);
151 static void bpf_detachd_locked(struct bpf_d *);
152 static void bpf_freed(struct bpf_d *);
153 static int bpf_movein(struct uio *, int, struct ifnet *, struct mbuf **,
154 struct sockaddr *, int *, struct bpf_insn *);
155 static int bpf_setif(struct bpf_d *, struct ifreq *);
156 static void bpf_timed_out(void *);
158 bpf_wakeup(struct bpf_d *);
159 static void catchpacket(struct bpf_d *, u_char *, u_int, u_int,
160 void (*)(struct bpf_d *, caddr_t, u_int, void *, u_int),
162 static void reset_d(struct bpf_d *);
163 static int bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
164 static int bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
165 static int bpf_setdlt(struct bpf_d *, u_int);
166 static void filt_bpfdetach(struct knote *);
167 static int filt_bpfread(struct knote *, long);
168 static void bpf_drvinit(void *);
169 static int bpf_stats_sysctl(SYSCTL_HANDLER_ARGS);
171 SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl");
172 int bpf_maxinsns = BPF_MAXINSNS;
173 SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW,
174 &bpf_maxinsns, 0, "Maximum bpf program instructions");
175 static int bpf_zerocopy_enable = 0;
176 SYSCTL_INT(_net_bpf, OID_AUTO, zerocopy_enable, CTLFLAG_RW,
177 &bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions");
178 static SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_MPSAFE | CTLFLAG_RW,
179 bpf_stats_sysctl, "bpf statistics portal");
181 static VNET_DEFINE(int, bpf_optimize_writers) = 0;
182 #define V_bpf_optimize_writers VNET(bpf_optimize_writers)
183 SYSCTL_VNET_INT(_net_bpf, OID_AUTO, optimize_writers,
184 CTLFLAG_RW, &VNET_NAME(bpf_optimize_writers), 0,
185 "Do not send packets until BPF program is set");
187 static d_open_t bpfopen;
188 static d_read_t bpfread;
189 static d_write_t bpfwrite;
190 static d_ioctl_t bpfioctl;
191 static d_poll_t bpfpoll;
192 static d_kqfilter_t bpfkqfilter;
194 static struct cdevsw bpf_cdevsw = {
195 .d_version = D_VERSION,
202 .d_kqfilter = bpfkqfilter,
205 static struct filterops bpfread_filtops = {
207 .f_detach = filt_bpfdetach,
208 .f_event = filt_bpfread,
211 eventhandler_tag bpf_ifdetach_cookie = NULL;
214 * LOCKING MODEL USED BY BPF:
216 * 1) global lock (BPF_LOCK). Mutex, used to protect interface addition/removal,
217 * some global counters and every bpf_if reference.
218 * 2) Interface lock. Rwlock, used to protect list of BPF descriptors and their filters.
219 * 3) Descriptor lock. Mutex, used to protect BPF buffers and various structure fields
220 * used by bpf_mtap code.
224 * Global lock, interface lock, descriptor lock
226 * We have to acquire the interface lock before the descriptor main lock because
227 * of the BPF_MTAP[2] working model. In many places (like bpf_detachd) we start
228 * with a BPF descriptor (and we need to rlock it at least to get a reliable
229 * interface pointer). This gives us a potential LOR. As a result, we use the
230 * global lock to protect against bpf_if changes in every such place.
232 * Changing d->bd_bif is protected by 1) global lock, 2) interface lock and
233 * 3) descriptor main wlock.
234 * Reading bd_bif can be protected by any of these locks, typically global lock.
236 * Changing read/write BPF filter is protected by the same three locks,
237 * the same applies for reading.
239 * Sleeping in global lock is not allowed due to bpfdetach() using it.
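/*
 * Hedged sketch of the acquisition order described above (assumes the
 * usual BPF_LOCK/BPFIF_WLOCK/BPFD_LOCK macros; illustrative only):
 *
 *	BPF_LOCK();			// 1) global mutex
 *	BPFIF_WLOCK(bp);		// 2) interface rwlock
 *	BPFD_LOCK(d);			// 3) descriptor mutex
 *	...
 *	BPFD_UNLOCK(d);
 *	BPFIF_WUNLOCK(bp);
 *	BPF_UNLOCK();
 */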
243 * Wrapper functions for various buffering methods. If the set of buffer
244 * modes expands, we will probably want to introduce a switch data structure
245 * similar to protosw, etc.
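/*
 * A minimal sketch of what such a switch structure might look like;
 * "bpf_bufsw" and its members are hypothetical names for illustration,
 * not an existing interface:
 *
 *	struct bpf_bufsw {
 *		void (*bs_append_bytes)(struct bpf_d *, caddr_t, u_int,
 *		    void *, u_int);
 *		void (*bs_append_mbuf)(struct bpf_d *, caddr_t, u_int,
 *		    void *, u_int);
 *		int (*bs_canfreebuf)(struct bpf_d *);
 *	};
 */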
248 bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
254 switch (d->bd_bufmode) {
255 case BPF_BUFMODE_BUFFER:
256 return (bpf_buffer_append_bytes(d, buf, offset, src, len));
258 case BPF_BUFMODE_ZBUF:
260 return (bpf_zerocopy_append_bytes(d, buf, offset, src, len));
263 panic("bpf_buf_append_bytes");
268 bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
274 switch (d->bd_bufmode) {
275 case BPF_BUFMODE_BUFFER:
276 return (bpf_buffer_append_mbuf(d, buf, offset, src, len));
278 case BPF_BUFMODE_ZBUF:
280 return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len));
283 panic("bpf_buf_append_mbuf");
288 * This function gets called when the free buffer is re-assigned.
291 bpf_buf_reclaimed(struct bpf_d *d)
296 switch (d->bd_bufmode) {
297 case BPF_BUFMODE_BUFFER:
300 case BPF_BUFMODE_ZBUF:
301 bpf_zerocopy_buf_reclaimed(d);
305 panic("bpf_buf_reclaimed");
310 * If the buffer mechanism has a way to decide that a held buffer can be made
311 * free, then it is exposed via the bpf_canfreebuf() interface. (1) is
312 * returned if the buffer can be discarded, (0) is returned if it cannot.
315 bpf_canfreebuf(struct bpf_d *d)
320 switch (d->bd_bufmode) {
321 case BPF_BUFMODE_ZBUF:
322 return (bpf_zerocopy_canfreebuf(d));
328 * Allow the buffer model to indicate that the current store buffer is
329 * immutable, regardless of the appearance of space. Return (1) if the
330 * buffer is writable, and (0) if not.
333 bpf_canwritebuf(struct bpf_d *d)
337 switch (d->bd_bufmode) {
338 case BPF_BUFMODE_ZBUF:
339 return (bpf_zerocopy_canwritebuf(d));
345 * Notify buffer model that an attempt to write to the store buffer has
346 * resulted in a dropped packet, in which case the buffer may be considered
350 bpf_buffull(struct bpf_d *d)
355 switch (d->bd_bufmode) {
356 case BPF_BUFMODE_ZBUF:
357 bpf_zerocopy_buffull(d);
363 * Notify the buffer model that a buffer has moved into the hold position.
366 bpf_bufheld(struct bpf_d *d)
371 switch (d->bd_bufmode) {
372 case BPF_BUFMODE_ZBUF:
373 bpf_zerocopy_bufheld(d);
379 bpf_free(struct bpf_d *d)
382 switch (d->bd_bufmode) {
383 case BPF_BUFMODE_BUFFER:
384 return (bpf_buffer_free(d));
386 case BPF_BUFMODE_ZBUF:
387 return (bpf_zerocopy_free(d));
390 panic("bpf_buf_free");
395 bpf_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio)
398 if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
400 return (bpf_buffer_uiomove(d, buf, len, uio));
404 bpf_ioctl_sblen(struct bpf_d *d, u_int *i)
407 if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
409 return (bpf_buffer_ioctl_sblen(d, i));
413 bpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
416 if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
418 return (bpf_zerocopy_ioctl_getzmax(td, d, i));
422 bpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
425 if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
427 return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz));
431 bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
434 if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
436 return (bpf_zerocopy_ioctl_setzbuf(td, d, bz));
440 * General BPF functions.
443 bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp,
444 struct sockaddr *sockp, int *hdrlen, struct bpf_insn *wfilter)
446 const struct ieee80211_bpf_params *p;
447 struct ether_header *eh;
455 * Build a sockaddr based on the data link layer type.
456 * We do this at this level because the ethernet header
457 * is copied directly into the data field of the sockaddr.
458 * In the case of SLIP, there is no header and the packet
459 * is forwarded as is.
460 * Also, we are careful to leave room at the front of the mbuf
461 * for the link level header.
466 sockp->sa_family = AF_INET;
471 sockp->sa_family = AF_UNSPEC;
472 /* XXX Would MAXLINKHDR be better? */
473 hlen = ETHER_HDR_LEN;
477 sockp->sa_family = AF_IMPLINK;
482 sockp->sa_family = AF_UNSPEC;
488 * Null interface types require a 4-byte pseudo header which
489 * corresponds to the address family of the packet.
491 sockp->sa_family = AF_UNSPEC;
495 case DLT_ATM_RFC1483:
497 * en atm driver requires 4-byte atm pseudo header.
498 * though it isn't standard, vpi:vci needs to be
501 sockp->sa_family = AF_UNSPEC;
502 hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
506 sockp->sa_family = AF_UNSPEC;
507 hlen = 4; /* This should match PPP_HDRLEN */
510 case DLT_IEEE802_11: /* IEEE 802.11 wireless */
511 sockp->sa_family = AF_IEEE80211;
515 case DLT_IEEE802_11_RADIO: /* IEEE 802.11 wireless w/ phy params */
516 sockp->sa_family = AF_IEEE80211;
517 sockp->sa_len = 12; /* XXX != 0 */
518 hlen = sizeof(struct ieee80211_bpf_params);
525 len = uio->uio_resid;
526 if (len < hlen || len - hlen > ifp->if_mtu)
529 m = m_get2(len, M_WAITOK, MT_DATA, M_PKTHDR);
532 m->m_pkthdr.len = m->m_len = len;
535 error = uiomove(mtod(m, u_char *), len, uio);
539 slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
545 /* Check for multicast destination */
548 eh = mtod(m, struct ether_header *);
549 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
550 if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost,
551 ETHER_ADDR_LEN) == 0)
552 m->m_flags |= M_BCAST;
554 m->m_flags |= M_MCAST;
560 * Make room for link header, and copy it to sockaddr
563 if (sockp->sa_family == AF_IEEE80211) {
565 * Collect true length from the parameter header
566 * NB: sockp is known to be zero'd so if we do a
567 * short copy unspecified parameters will be
569 * NB: packet may not be aligned after stripping
573 p = mtod(m, const struct ieee80211_bpf_params *);
575 if (hlen > sizeof(sockp->sa_data)) {
580 bcopy(mtod(m, const void *), sockp->sa_data, hlen);
591 * Attach file to the bpf interface, i.e. make d listen on bp.
594 bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
601 * Save sysctl value to protect from sysctl change
604 op_w = V_bpf_optimize_writers;
606 if (d->bd_bif != NULL)
607 bpf_detachd_locked(d);
609 * Point d at bp, and add d to the interface's list.
610 * Since there are many applications using BPF for
611 * sending raw packets only (dhcpd and cdpd are good examples),
612 * we can delay adding d to the list of active listeners until
613 * some filter is configured.
622 /* Add to writers-only list */
623 LIST_INSERT_HEAD(&bp->bif_wlist, d, bd_next);
625 * We decrement bd_writer on every filter set operation.
626 * The first BIOCSETF is done by pcap_open_live() to set up the
627 * snap length. After that the application usually sets its own filter.
631 LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
638 CTR3(KTR_NET, "%s: bpf_attach called by pid %d, adding to %s list",
639 __func__, d->bd_pid, d->bd_writer ? "writer" : "active");
642 EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
646 * Add d to the list of active bp filters.
647 * Requires bpf_attachd() to be called before
650 bpf_upgraded(struct bpf_d *d)
659 * The filter can be set several times without specifying an interface.
660 * Mark d as a reader and exit.
672 /* Remove from writers-only list */
673 LIST_REMOVE(d, bd_next);
674 LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
675 /* Mark d as reader */
681 CTR2(KTR_NET, "%s: upgrade required by pid %d", __func__, d->bd_pid);
683 EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
687 * Detach a file from its interface.
690 bpf_detachd(struct bpf_d *d)
693 bpf_detachd_locked(d);
698 bpf_detachd_locked(struct bpf_d *d)
704 CTR2(KTR_NET, "%s: detach required by pid %d", __func__, d->bd_pid);
708 /* Check if descriptor is attached */
709 if ((bp = d->bd_bif) == NULL)
715 /* Save bd_writer value */
716 error = d->bd_writer;
719 * Remove d from the interface's descriptor list.
721 LIST_REMOVE(d, bd_next);
730 /* Call event handler iff d is attached */
732 EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0);
735 * Check if this descriptor had requested promiscuous mode.
736 * If so, turn it off.
740 CURVNET_SET(ifp->if_vnet);
741 error = ifpromisc(ifp, 0);
743 if (error != 0 && error != ENXIO) {
745 * ENXIO can happen if a pccard is unplugged.
746 * Something is really wrong if we were able to put
747 * the driver into promiscuous mode, but can't
750 if_printf(bp->bif_ifp,
751 "bpf_detach: ifpromisc failed (%d)\n", error);
757 * Close the descriptor by detaching it from its interface,
758 * deallocating its buffers, and marking it free.
763 struct bpf_d *d = data;
766 if (d->bd_state == BPF_WAITING)
767 callout_stop(&d->bd_callout);
768 d->bd_state = BPF_IDLE;
770 funsetown(&d->bd_sigio);
773 mac_bpfdesc_destroy(d);
775 seldrain(&d->bd_sel);
776 knlist_destroy(&d->bd_sel.si_note);
777 callout_drain(&d->bd_callout);
783 * Open the Ethernet device. Returns ENXIO for an illegal minor device number,
784 * EBUSY if the file is open by another process.
788 bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
793 d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
794 error = devfs_set_cdevpriv(d, bpf_dtor);
801 * For historical reasons, perform a one-time initialization call to
802 * the buffer routines, even though we're not yet committed to a
803 * particular buffer method.
806 d->bd_hbuf_in_use = 0;
807 d->bd_bufmode = BPF_BUFMODE_BUFFER;
809 d->bd_direction = BPF_D_INOUT;
810 BPF_PID_REFRESH(d, td);
813 mac_bpfdesc_create(td->td_ucred, d);
815 mtx_init(&d->bd_lock, devtoname(dev), "bpf cdev lock", MTX_DEF);
816 callout_init_mtx(&d->bd_callout, &d->bd_lock, 0);
817 knlist_init_mtx(&d->bd_sel.si_note, &d->bd_lock);
819 /* Allocate default buffers */
820 size = d->bd_bufsize;
821 bpf_buffer_ioctl_sblen(d, &size);
827 * bpfread - read next chunk of packets from buffers
830 bpfread(struct cdev *dev, struct uio *uio, int ioflag)
837 error = devfs_get_cdevpriv((void **)&d);
842 * Restrict application to use a buffer the same size as
845 if (uio->uio_resid != d->bd_bufsize)
848 non_block = ((ioflag & O_NONBLOCK) != 0);
851 BPF_PID_REFRESH_CUR(d);
852 if (d->bd_bufmode != BPF_BUFMODE_BUFFER) {
856 if (d->bd_state == BPF_WAITING)
857 callout_stop(&d->bd_callout);
858 timed_out = (d->bd_state == BPF_TIMED_OUT);
859 d->bd_state = BPF_IDLE;
860 while (d->bd_hbuf_in_use) {
861 error = mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
862 PRINET|PCATCH, "bd_hbuf", 0);
869 * If the hold buffer is empty, then do a timed sleep, which
870 * ends when the timeout expires or when enough packets
871 * have arrived to fill the store buffer.
873 while (d->bd_hbuf == NULL) {
874 if (d->bd_slen != 0) {
876 * One or more packets either arrived since the previous
877 * read or arrived while we were asleep.
879 if (d->bd_immediate || non_block || timed_out) {
881 * Rotate the buffers and return what's here
882 * if we are in immediate mode, non-blocking
883 * flag is set, or this descriptor timed out.
891 * No data is available, check to see if the bpf device
892 * is still pointed at a real interface. If not, return
893 * ENXIO so that the userland process knows to rebind
894 * it before using it again.
896 if (d->bd_bif == NULL) {
903 return (EWOULDBLOCK);
905 error = msleep(d, &d->bd_lock, PRINET|PCATCH,
907 if (error == EINTR || error == ERESTART) {
911 if (error == EWOULDBLOCK) {
913 * On a timeout, return what's in the buffer,
914 * which may be nothing. If there is something
915 * in the store buffer, we can rotate the buffers.
919 * We filled up the buffer in between
920 * getting the timeout and arriving
921 * here, so we don't need to rotate.
925 if (d->bd_slen == 0) {
934 * At this point, we know we have something in the hold slot.
936 d->bd_hbuf_in_use = 1;
940 * Move data from hold buffer into user space.
941 * We know the entire buffer is transferred since
942 * we checked above that the read buffer is bpf_bufsize bytes.
944 * We do not have to worry about simultaneous reads because
945 * we waited for sole access to the hold buffer above.
947 error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio);
950 KASSERT(d->bd_hbuf != NULL, ("bpfread: lost bd_hbuf"));
951 d->bd_fbuf = d->bd_hbuf;
954 bpf_buf_reclaimed(d);
955 d->bd_hbuf_in_use = 0;
956 wakeup(&d->bd_hbuf_in_use);
963 * If there are processes sleeping on this descriptor, wake them up.
966 bpf_wakeup(struct bpf_d *d)
970 if (d->bd_state == BPF_WAITING) {
971 callout_stop(&d->bd_callout);
972 d->bd_state = BPF_IDLE;
975 if (d->bd_async && d->bd_sig && d->bd_sigio)
976 pgsigio(&d->bd_sigio, d->bd_sig, 0);
978 selwakeuppri(&d->bd_sel, PRINET);
979 KNOTE_LOCKED(&d->bd_sel.si_note, 0);
983 bpf_timed_out(void *arg)
985 struct bpf_d *d = (struct bpf_d *)arg;
989 if (callout_pending(&d->bd_callout) || !callout_active(&d->bd_callout))
991 if (d->bd_state == BPF_WAITING) {
992 d->bd_state = BPF_TIMED_OUT;
999 bpf_ready(struct bpf_d *d)
1002 BPFD_LOCK_ASSERT(d);
1004 if (!bpf_canfreebuf(d) && d->bd_hlen != 0)
1006 if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
1013 bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
1017 struct mbuf *m, *mc;
1018 struct sockaddr dst;
1021 error = devfs_get_cdevpriv((void **)&d);
1025 BPF_PID_REFRESH_CUR(d);
1027 /* XXX: locking required */
1028 if (d->bd_bif == NULL) {
1033 ifp = d->bd_bif->bif_ifp;
1035 if ((ifp->if_flags & IFF_UP) == 0) {
1040 if (uio->uio_resid == 0) {
1045 bzero(&dst, sizeof(dst));
1048 /* XXX: bpf_movein() can sleep */
1049 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp,
1050 &m, &dst, &hlen, d->bd_wfilter);
1057 dst.sa_family = pseudo_AF_HDRCMPLT;
1059 if (d->bd_feedback) {
1060 mc = m_dup(m, M_NOWAIT);
1062 mc->m_pkthdr.rcvif = ifp;
1063 /* Set M_PROMISC for outgoing packets to be discarded. */
1064 if (d->bd_direction == BPF_D_INOUT)
1065 m->m_flags |= M_PROMISC;
1069 m->m_pkthdr.len -= hlen;
1071 m->m_data += hlen; /* XXX */
1073 CURVNET_SET(ifp->if_vnet);
1076 mac_bpfdesc_create_mbuf(d, m);
1078 mac_bpfdesc_create_mbuf(d, mc);
1082 error = (*ifp->if_output)(ifp, m, &dst, NULL);
1088 (*ifp->if_input)(ifp, mc);
1098 * Reset a descriptor by flushing its packet buffer and clearing the receive
1099 * and drop counts. This is doable for kernel-only buffers, but with
1100 * zero-copy buffers, we can't write to (or rotate) buffers that are
1101 * currently owned by userspace. It would be nice if we could encapsulate
1102 * this logic in the buffer code rather than here.
1105 reset_d(struct bpf_d *d)
1108 BPFD_LOCK_ASSERT(d);
1110 while (d->bd_hbuf_in_use)
1111 mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock, PRINET,
1113 if ((d->bd_hbuf != NULL) &&
1114 (d->bd_bufmode != BPF_BUFMODE_ZBUF || bpf_canfreebuf(d))) {
1115 /* Free the hold buffer. */
1116 d->bd_fbuf = d->bd_hbuf;
1119 bpf_buf_reclaimed(d);
1121 if (bpf_canwritebuf(d))
1133 * FIONREAD Check for read packet available.
1134 * SIOCGIFADDR Get interface address - convenient hook to driver.
1135 * BIOCGBLEN Get buffer len [for read()].
1136 * BIOCSETF Set read filter.
1137 * BIOCSETFNR Set read filter without resetting descriptor.
1138 * BIOCSETWF Set write filter.
1139 * BIOCFLUSH Flush read packet buffer.
1140 * BIOCPROMISC Put interface into promiscuous mode.
1141 * BIOCGDLT Get link layer type.
1142 * BIOCGETIF Get interface name.
1143 * BIOCSETIF Set interface.
1144 * BIOCSRTIMEOUT Set read timeout.
1145 * BIOCGRTIMEOUT Get read timeout.
1146 * BIOCGSTATS Get packet stats.
1147 * BIOCIMMEDIATE Set immediate mode.
1148 * BIOCVERSION Get filter language version.
1149 * BIOCGHDRCMPLT Get "header already complete" flag.
1150 * BIOCSHDRCMPLT Set "header already complete" flag.
1151 * BIOCGDIRECTION Get packet direction flag.
1152 * BIOCSDIRECTION Set packet direction flag.
1153 * BIOCGTSTAMP Get time stamp format and resolution.
1154 * BIOCSTSTAMP Set time stamp format and resolution.
1155 * BIOCLOCK Set "locked" flag.
1156 * BIOCFEEDBACK Set packet feedback mode.
1157 * BIOCSETZBUF Set current zero-copy buffer locations.
1158 * BIOCGETZMAX Get maximum zero-copy buffer size.
1159 * BIOCROTZBUF Force rotation of zero-copy buffer.
1160 * BIOCSETBUFMODE Set buffer mode.
1161 * BIOCGETBUFMODE Get current buffer mode.
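/*
 * Hedged userland sketch of a common ioctl sequence (an illustrative
 * addition; "em0" is an example name and error checks are omitted):
 *
 *	int fd = open("/dev/bpf", O_RDWR);
 *	struct ifreq ifr;
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);	// bind to an interface
 *	u_int on = 1, blen;
 *	ioctl(fd, BIOCIMMEDIATE, &on);	// deliver packets as they arrive
 *	ioctl(fd, BIOCGBLEN, &blen);	// read() must use exactly this size
 *	char *buf = malloc(blen);
 *	read(fd, buf, blen);		// returns bpf_hdr-framed packets
 */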
1165 bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
1171 error = devfs_get_cdevpriv((void **)&d);
1176 * Refresh PID associated with this descriptor.
1179 BPF_PID_REFRESH(d, td);
1180 if (d->bd_state == BPF_WAITING)
1181 callout_stop(&d->bd_callout);
1182 d->bd_state = BPF_IDLE;
1185 if (d->bd_locked == 1) {
1191 #ifdef COMPAT_FREEBSD32
1192 case BIOCGDLTLIST32:
1196 #if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1197 case BIOCGRTIMEOUT32:
1208 #if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1209 case BIOCSRTIMEOUT32:
1219 #ifdef COMPAT_FREEBSD32
1221 * If we see a 32-bit compat ioctl, mark the stream as 32-bit so
1222 * that it will get 32-bit packet headers.
1228 case BIOCGDLTLIST32:
1229 case BIOCGRTIMEOUT32:
1230 case BIOCSRTIMEOUT32:
1237 CURVNET_SET(TD_TO_VNET(td));
1245 * Check for read packet available.
1253 while (d->bd_hbuf_in_use)
1254 mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
1255 PRINET, "bd_hbuf", 0);
1268 if (d->bd_bif == NULL)
1271 ifp = d->bd_bif->bif_ifp;
1272 error = (*ifp->if_ioctl)(ifp, cmd, addr);
1278 * Get buffer len [for read()].
1282 *(u_int *)addr = d->bd_bufsize;
1287 * Set buffer length.
1290 error = bpf_ioctl_sblen(d, (u_int *)addr);
1294 * Set link layer read filter.
1299 #ifdef COMPAT_FREEBSD32
1304 error = bpf_setf(d, (struct bpf_program *)addr, cmd);
1308 * Flush read packet buffer.
1317 * Put interface into promiscuous mode.
1320 if (d->bd_bif == NULL) {
1322 * No interface attached yet.
1327 if (d->bd_promisc == 0) {
1328 error = ifpromisc(d->bd_bif->bif_ifp, 1);
1335 * Get current data link type.
1339 if (d->bd_bif == NULL)
1342 *(u_int *)addr = d->bd_bif->bif_dlt;
1347 * Get a list of supported data link types.
1349 #ifdef COMPAT_FREEBSD32
1350 case BIOCGDLTLIST32:
1352 struct bpf_dltlist32 *list32;
1353 struct bpf_dltlist dltlist;
1355 list32 = (struct bpf_dltlist32 *)addr;
1356 dltlist.bfl_len = list32->bfl_len;
1357 dltlist.bfl_list = PTRIN(list32->bfl_list);
1359 if (d->bd_bif == NULL)
1362 error = bpf_getdltlist(d, &dltlist);
1364 list32->bfl_len = dltlist.bfl_len;
1373 if (d->bd_bif == NULL)
1376 error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
1381 * Set data link type.
1385 if (d->bd_bif == NULL)
1388 error = bpf_setdlt(d, *(u_int *)addr);
1393 * Get interface name.
1397 if (d->bd_bif == NULL)
1400 struct ifnet *const ifp = d->bd_bif->bif_ifp;
1401 struct ifreq *const ifr = (struct ifreq *)addr;
1403 strlcpy(ifr->ifr_name, ifp->if_xname,
1404 sizeof(ifr->ifr_name));
1414 error = bpf_setif(d, (struct ifreq *)addr);
1422 #if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1423 case BIOCSRTIMEOUT32:
1426 struct timeval *tv = (struct timeval *)addr;
1427 #if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1428 struct timeval32 *tv32;
1429 struct timeval tv64;
1431 if (cmd == BIOCSRTIMEOUT32) {
1432 tv32 = (struct timeval32 *)addr;
1434 tv->tv_sec = tv32->tv_sec;
1435 tv->tv_usec = tv32->tv_usec;
1438 tv = (struct timeval *)addr;
1441 * Subtract 1 tick from tvtohz() since this isn't
1444 if ((error = itimerfix(tv)) == 0)
1445 d->bd_rtout = tvtohz(tv) - 1;
1453 #if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1454 case BIOCGRTIMEOUT32:
1458 #if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1459 struct timeval32 *tv32;
1460 struct timeval tv64;
1462 if (cmd == BIOCGRTIMEOUT32)
1466 tv = (struct timeval *)addr;
1468 tv->tv_sec = d->bd_rtout / hz;
1469 tv->tv_usec = (d->bd_rtout % hz) * tick;
1470 #if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1471 if (cmd == BIOCGRTIMEOUT32) {
1472 tv32 = (struct timeval32 *)addr;
1473 tv32->tv_sec = tv->tv_sec;
1474 tv32->tv_usec = tv->tv_usec;
1486 struct bpf_stat *bs = (struct bpf_stat *)addr;
1488 /* XXXCSJP overflow */
1489 bs->bs_recv = d->bd_rcount;
1490 bs->bs_drop = d->bd_dcount;
1495 * Set immediate mode.
1499 d->bd_immediate = *(u_int *)addr;
1505 struct bpf_version *bv = (struct bpf_version *)addr;
1507 bv->bv_major = BPF_MAJOR_VERSION;
1508 bv->bv_minor = BPF_MINOR_VERSION;
1513 * Get "header already complete" flag
1517 *(u_int *)addr = d->bd_hdrcmplt;
1522 * Set "header already complete" flag
1526 d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
1531 * Get packet direction flag
1533 case BIOCGDIRECTION:
1535 *(u_int *)addr = d->bd_direction;
1540 * Set packet direction flag
1542 case BIOCSDIRECTION:
1546 direction = *(u_int *)addr;
1547 switch (direction) {
1552 d->bd_direction = direction;
1562 * Get packet timestamp format and resolution.
1566 *(u_int *)addr = d->bd_tstamp;
1571 * Set packet timestamp format and resolution.
1577 func = *(u_int *)addr;
1578 if (BPF_T_VALID(func))
1579 d->bd_tstamp = func;
1587 d->bd_feedback = *(u_int *)addr;
1597 case FIONBIO: /* Non-blocking I/O */
1600 case FIOASYNC: /* Send signal on receive packets */
1602 d->bd_async = *(int *)addr;
1608 * XXX: Add some sort of locking here?
1609 * fsetown() can sleep.
1611 error = fsetown(*(int *)addr, &d->bd_sigio);
1616 *(int *)addr = fgetown(&d->bd_sigio);
1620 /* This is deprecated, FIOSETOWN should be used instead. */
1622 error = fsetown(-(*(int *)addr), &d->bd_sigio);
1625 /* This is deprecated, FIOGETOWN should be used instead. */
1627 *(int *)addr = -fgetown(&d->bd_sigio);
1630 case BIOCSRSIG: /* Set receive signal */
1634 sig = *(u_int *)addr;
1647 *(u_int *)addr = d->bd_sig;
1651 case BIOCGETBUFMODE:
1653 *(u_int *)addr = d->bd_bufmode;
1657 case BIOCSETBUFMODE:
1659 * Allow the buffering mode to be changed as long as we
1660 * haven't yet committed to a particular mode. Our
1661 * definition of commitment, for now, is whether or not a
1662 * buffer has been allocated or an interface attached, since
1663 * that's the point where things get tricky.
1665 switch (*(u_int *)addr) {
1666 case BPF_BUFMODE_BUFFER:
1669 case BPF_BUFMODE_ZBUF:
1670 if (bpf_zerocopy_enable)
1680 if (d->bd_sbuf != NULL || d->bd_hbuf != NULL ||
1681 d->bd_fbuf != NULL || d->bd_bif != NULL) {
1686 d->bd_bufmode = *(u_int *)addr;
1691 error = bpf_ioctl_getzmax(td, d, (size_t *)addr);
1695 error = bpf_ioctl_setzbuf(td, d, (struct bpf_zbuf *)addr);
1699 error = bpf_ioctl_rotzbuf(td, d, (struct bpf_zbuf *)addr);
1707 * Set d's packet filter program to fp. If this file already has a filter,
1708 * free it and replace it. Returns EINVAL for bogus requests.
1710 * Note we need global lock here to serialize bpf_setf() and bpf_setif() calls
1711 * since reading d->bd_bif can't be protected by d or interface lock due to
1714 * Additionally, we have to acquire the interface write lock, since bpf_mtap() uses the
1715 * interface read lock to read all filters.
1719 bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
1721 #ifdef COMPAT_FREEBSD32
1722 struct bpf_program fp_swab;
1723 struct bpf_program32 *fp32;
1725 struct bpf_insn *fcode, *old;
1727 bpf_jit_filter *jfunc, *ofunc;
1733 #ifdef COMPAT_FREEBSD32
1738 fp32 = (struct bpf_program32 *)fp;
1739 fp_swab.bf_len = fp32->bf_len;
1740 fp_swab.bf_insns = (struct bpf_insn *)(uintptr_t)fp32->bf_insns;
1756 jfunc = ofunc = NULL;
1761 * Check the new filter's validity before acquiring any locks.
1762 * Allocate memory for new filter, if needed.
1765 if (flen > bpf_maxinsns || (fp->bf_insns == NULL && flen != 0))
1767 size = flen * sizeof(*fp->bf_insns);
1769 /* We're setting up a new filter. Copy and check the actual data. */
1770 fcode = malloc(size, M_BPF, M_WAITOK);
1771 if (copyin(fp->bf_insns, fcode, size) != 0 ||
1772 !bpf_validate(fcode, flen)) {
1777 /* Filter is copied inside fcode and is perfectly valid. */
1778 jfunc = bpf_jitter(fcode, flen);
1785 * Set up the new filter.
1786 * Protect the filter change with the interface lock.
1787 * Additionally, we are protected by the global lock here.
1789 if (d->bd_bif != NULL)
1790 BPFIF_WLOCK(d->bd_bif);
1792 if (cmd == BIOCSETWF) {
1793 old = d->bd_wfilter;
1794 d->bd_wfilter = fcode;
1796 old = d->bd_rfilter;
1797 d->bd_rfilter = fcode;
1799 ofunc = d->bd_bfilter;
1800 d->bd_bfilter = jfunc;
1802 if (cmd == BIOCSETF)
1805 if (fcode != NULL) {
1807 * Do not require an upgrade on the first BIOCSETF
1808 * (used by pcap_open_live() to set the snaplen).
1810 if (d->bd_writer != 0 && --d->bd_writer == 0)
1812 CTR4(KTR_NET, "%s: filter function set by pid %d, "
1813 "bd_writer counter %d, need_upgrade %d",
1814 __func__, d->bd_pid, d->bd_writer, need_upgrade);
1818 if (d->bd_bif != NULL)
1819 BPFIF_WUNLOCK(d->bd_bif);
1824 bpf_destroy_jit_filter(ofunc);
1827 /* Move d to active readers list. */
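/*
 * Hedged userland sketch of installing a filter with BIOCSETF (an
 * illustrative addition; this trivial program accepts every packet):
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_RET + BPF_K, (u_int)-1),	// accept whole packet
 *	};
 *	struct bpf_program prog = { 1, insns };
 *	ioctl(fd, BIOCSETF, &prog);
 *
 * Per the writer-upgrade logic above, a descriptor on the writer-only
 * list moves to the active reader list once such a filter is set.
 */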
1836 * Detach a file from its current interface (if attached at all) and attach
1837 * to the interface indicated by the name stored in ifr.
1838 * Return an errno or 0.
1841 bpf_setif(struct bpf_d *d, struct ifreq *ifr)
1844 struct ifnet *theywant;
1848 theywant = ifunit(ifr->ifr_name);
1849 if (theywant == NULL || theywant->if_bpf == NULL)
1852 bp = theywant->if_bpf;
1854 /* Check if interface is not being detached from BPF */
1856 if (bp->flags & BPFIF_FLAG_DYING) {
1863 * Behavior here depends on the buffering model. If we're using
1864 * kernel memory buffers, then we can allocate them here. If we're
1865 * using zero-copy, then the user process must have registered
1866 * buffers by the time we get here. If not, return an error.
1868 switch (d->bd_bufmode) {
1869 case BPF_BUFMODE_BUFFER:
1870 case BPF_BUFMODE_ZBUF:
1871 if (d->bd_sbuf == NULL)
1876 panic("bpf_setif: bufmode %d", d->bd_bufmode);
1878 if (bp != d->bd_bif)
1887 * Support for select() and poll() system calls
1889 * Return true iff the specific operation will not block indefinitely.
1890 * Otherwise, return false but make a note that a selwakeup() must be done.
1893 bpfpoll(struct cdev *dev, int events, struct thread *td)
1898 if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL)
1900 (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));
1903 * Refresh PID associated with this descriptor.
1905 revents = events & (POLLOUT | POLLWRNORM);
1907 BPF_PID_REFRESH(d, td);
1908 if (events & (POLLIN | POLLRDNORM)) {
1910 revents |= events & (POLLIN | POLLRDNORM);
1912 selrecord(td, &d->bd_sel);
1913 /* Start the read timeout if necessary. */
1914 if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
1915 callout_reset(&d->bd_callout, d->bd_rtout,
1917 d->bd_state = BPF_WAITING;
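/*
 * Hedged userland sketch (illustrative addition): waiting for captured
 * data with select(), after setting the read timeout that drives the
 * BPF_WAITING/BPF_TIMED_OUT transitions above:
 *
 *	struct timeval tv = { 1, 0 };	// 1 second read timeout
 *	ioctl(fd, BIOCSRTIMEOUT, &tv);
 *	fd_set rset;
 *	FD_ZERO(&rset);
 *	FD_SET(fd, &rset);
 *	select(fd + 1, &rset, NULL, NULL, NULL);
 */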
1926 * Support for kevent() system call. Register EVFILT_READ filters and
1927 * reject all others.
1930 bpfkqfilter(struct cdev *dev, struct knote *kn)
1934 if (devfs_get_cdevpriv((void **)&d) != 0 ||
1935 kn->kn_filter != EVFILT_READ)
1939 * Refresh PID associated with this descriptor.
1942 BPF_PID_REFRESH_CUR(d);
1943 kn->kn_fop = &bpfread_filtops;
1945 knlist_add(&d->bd_sel.si_note, kn, 1);
1952 filt_bpfdetach(struct knote *kn)
1954 struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
1956 knlist_remove(&d->bd_sel.si_note, kn, 0);
1960 filt_bpfread(struct knote *kn, long hint)
1962 struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
1965 BPFD_LOCK_ASSERT(d);
1966 ready = bpf_ready(d);
1968 kn->kn_data = d->bd_slen;
1969 while (d->bd_hbuf_in_use)
1970 mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
1971 PRINET, "bd_hbuf", 0);
1973 kn->kn_data += d->bd_hlen;
1974 } else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
1975 callout_reset(&d->bd_callout, d->bd_rtout,
1977 d->bd_state = BPF_WAITING;
1983 #define BPF_TSTAMP_NONE 0
1984 #define BPF_TSTAMP_FAST 1
1985 #define BPF_TSTAMP_NORMAL 2
1986 #define BPF_TSTAMP_EXTERN 3
1989 bpf_ts_quality(int tstype)
1992 if (tstype == BPF_T_NONE)
1993 return (BPF_TSTAMP_NONE);
1994 if ((tstype & BPF_T_FAST) != 0)
1995 return (BPF_TSTAMP_FAST);
1997 return (BPF_TSTAMP_NORMAL);
2001 bpf_gettime(struct bintime *bt, int tstype, struct mbuf *m)
2006 quality = bpf_ts_quality(tstype);
2007 if (quality == BPF_TSTAMP_NONE)
2011 tag = m_tag_locate(m, MTAG_BPF, MTAG_BPF_TIMESTAMP, NULL);
2013 *bt = *(struct bintime *)(tag + 1);
2014 return (BPF_TSTAMP_EXTERN);
2017 if (quality == BPF_TSTAMP_NORMAL)
2026 * Incoming linkage from device drivers. Process the packet pkt, of length
2027 * pktlen, which is stored in a contiguous buffer. The packet is parsed
2028 * by each process' filter, and if accepted, stashed into the corresponding
2032 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
2042 gottime = BPF_TSTAMP_NONE;
2046 LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2048 * We are not using any locks for d here because:
2049 * 1) any filter change is protected by interface
2051 * 2) destroying/detaching d is protected by interface
2055 /* XXX: Do not protect counter for the sake of performance. */
2058 * NB: We don't call BPF_CHECK_DIRECTION() here since there is no
2059 * way for the caller to indicate to us whether this packet
2060 * is inbound or outbound. In the bpf_mtap() routines, we use
2061 * the interface pointers on the mbuf to figure it out.
2064 bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
2066 slen = (*(bf->func))(pkt, pktlen, pktlen);
2069 slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
2072 * Filter matches. Let's acquire the write lock.
2077 if (gottime < bpf_ts_quality(d->bd_tstamp))
2078 gottime = bpf_gettime(&bt, d->bd_tstamp, NULL);
2080 if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2082 catchpacket(d, pkt, pktlen, slen,
2083 bpf_append_bytes, &bt);
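/*
 * Illustrative driver-side usage (a sketch, not code from this file):
 * a driver with a contiguous receive buffer would tap it roughly as
 *
 *	if (bpf_peers_present(ifp->if_bpf))
 *		bpf_tap(ifp->if_bpf, pkt, pktlen);
 *
 * mbuf-based paths use bpf_mtap()/bpf_mtap2() below instead.
 */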
2090 #define BPF_CHECK_DIRECTION(d, r, i) \
2091 (((d)->bd_direction == BPF_D_IN && (r) != (i)) || \
2092 ((d)->bd_direction == BPF_D_OUT && (r) == (i)))
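/*
 * Illustrative reading of the macro above: in bpf_mtap() below, (r) is
 * m->m_pkthdr.rcvif and (i) is the tapping interface.  A BPF_D_IN
 * descriptor therefore skips packets whose rcvif differs from the
 * interface (outbound traffic), while a BPF_D_OUT descriptor skips
 * packets whose rcvif matches it (inbound traffic).
 */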
2095 * Incoming linkage from device drivers, when packet is in an mbuf chain.
2096 * Locking model is explained in bpf_tap().
2099 bpf_mtap(struct bpf_if *bp, struct mbuf *m)
2109 /* Skip outgoing duplicate packets. */
2110 if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
2111 m->m_flags &= ~M_PROMISC;
2115 pktlen = m_length(m, NULL);
2116 gottime = BPF_TSTAMP_NONE;
2120 LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2121 if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
2125 bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
2126 /* XXX We cannot handle multiple mbufs. */
2127 if (bf != NULL && m->m_next == NULL)
2128 slen = (*(bf->func))(mtod(m, u_char *), pktlen, pktlen);
2131 slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
2136 if (gottime < bpf_ts_quality(d->bd_tstamp))
2137 gottime = bpf_gettime(&bt, d->bd_tstamp, m);
2139 if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2141 catchpacket(d, (u_char *)m, pktlen, slen,
2142 bpf_append_mbuf, &bt);
2150 * Incoming linkage from device drivers, when packet is in
2151 * an mbuf chain and to be prepended by a contiguous header.
2154 bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
2162 /* Skip outgoing duplicate packets. */
2163 if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
2164 m->m_flags &= ~M_PROMISC;
2168 pktlen = m_length(m, NULL);
2170 * Craft on-stack mbuf suitable for passing to bpf_filter.
2171 * Note that we cut corners here; we only set up what's
2172 * absolutely needed--this mbuf should never go anywhere else.
2179 gottime = BPF_TSTAMP_NONE;
2183 LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2184 if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
2187 slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
2192 if (gottime < bpf_ts_quality(d->bd_tstamp))
2193 gottime = bpf_gettime(&bt, d->bd_tstamp, m);
2195 if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2197 catchpacket(d, (u_char *)&mb, pktlen, slen,
2198 bpf_append_mbuf, &bt);
2205 #undef BPF_CHECK_DIRECTION
2207 #undef BPF_TSTAMP_NONE
2208 #undef BPF_TSTAMP_FAST
2209 #undef BPF_TSTAMP_NORMAL
2210 #undef BPF_TSTAMP_EXTERN
2213 bpf_hdrlen(struct bpf_d *d)
2217 hdrlen = d->bd_bif->bif_hdrlen;
2218 #ifndef BURN_BRIDGES
2219 if (d->bd_tstamp == BPF_T_NONE ||
2220 BPF_T_FORMAT(d->bd_tstamp) == BPF_T_MICROTIME)
2221 #ifdef COMPAT_FREEBSD32
2223 hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr32);
2226 hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr);
2229 hdrlen += SIZEOF_BPF_HDR(struct bpf_xhdr);
2230 #ifdef COMPAT_FREEBSD32
2232 hdrlen = BPF_WORDALIGN32(hdrlen);
2235 hdrlen = BPF_WORDALIGN(hdrlen);
2237 return (hdrlen - d->bd_bif->bif_hdrlen);
2241 bpf_bintime2ts(struct bintime *bt, struct bpf_ts *ts, int tstype)
2245 struct timespec tsn;
2247 if ((tstype & BPF_T_MONOTONIC) == 0) {
2249 bintime_add(&bt2, &boottimebin);
2252 switch (BPF_T_FORMAT(tstype)) {
2253 case BPF_T_MICROTIME:
2254 bintime2timeval(bt, &tsm);
2255 ts->bt_sec = tsm.tv_sec;
2256 ts->bt_frac = tsm.tv_usec;
2258 case BPF_T_NANOTIME:
2259 bintime2timespec(bt, &tsn);
2260 ts->bt_sec = tsn.tv_sec;
2261 ts->bt_frac = tsn.tv_nsec;
2264 ts->bt_sec = bt->sec;
2265 ts->bt_frac = bt->frac;
2271 * Move the packet data from interface memory (pkt) into the
2272 * store buffer. "cpfn" is the routine called to do the actual data
2273 * transfer. bcopy is passed in to copy contiguous chunks, while
2274 * bpf_append_mbuf is passed in to copy mbuf chains. In the latter case,
2275 * pkt is really an mbuf.
2278 catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
2279 void (*cpfn)(struct bpf_d *, caddr_t, u_int, void *, u_int),
2282 struct bpf_xhdr hdr;
2283 #ifndef BURN_BRIDGES
2284 struct bpf_hdr hdr_old;
2285 #ifdef COMPAT_FREEBSD32
2286 struct bpf_hdr32 hdr32_old;
2289 int caplen, curlen, hdrlen, totlen;
2294 BPFD_LOCK_ASSERT(d);
2297 * Detect whether user space has released a buffer back to us, and if
2298 * so, move it from being a hold buffer to a free buffer. This may
2299 * not be the best place to do it (for example, we might only want to
2300 * run this check if we need the space), but for now it's a reliable
2303 if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) {
2304 while (d->bd_hbuf_in_use)
2305 mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
2306 PRINET, "bd_hbuf", 0);
2307 d->bd_fbuf = d->bd_hbuf;
2310 bpf_buf_reclaimed(d);
2314 * Figure out how many bytes to move. If the packet is
2315 * greater or equal to the snapshot length, transfer that
2316 * much. Otherwise, transfer the whole packet (unless
2317 * we hit the buffer size limit).
2319 hdrlen = bpf_hdrlen(d);
2320 totlen = hdrlen + min(snaplen, pktlen);
2321 if (totlen > d->bd_bufsize)
2322 totlen = d->bd_bufsize;
2325 * Round up the end of the previous packet to the next longword.
2327 * Drop the packet if there's no room and no hope of room.
2328 * If the packet would overflow the storage buffer or the storage
2329 * buffer is considered immutable by the buffer model, try to rotate
2330 * the buffer and wake up pending processes.
2332 #ifdef COMPAT_FREEBSD32
2334 curlen = BPF_WORDALIGN32(d->bd_slen);
2337 curlen = BPF_WORDALIGN(d->bd_slen);
2338 if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) {
2339 if (d->bd_fbuf == NULL) {
2341 * There's no room in the store buffer, and no
2342 * prospect of room, so drop the packet. Notify the
2349 while (d->bd_hbuf_in_use)
2350 mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
2351 PRINET, "bd_hbuf", 0);
2355 } else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
2357 * Immediate mode is set, or the read timeout has already
2358 * expired during a select call. A packet arrived, so the
2359 * reader should be woken up.
2362 caplen = totlen - hdrlen;
2363 tstype = d->bd_tstamp;
2364 do_timestamp = tstype != BPF_T_NONE;
2365 #ifndef BURN_BRIDGES
2366 if (tstype == BPF_T_NONE || BPF_T_FORMAT(tstype) == BPF_T_MICROTIME) {
2369 bpf_bintime2ts(bt, &ts, tstype);
2370 #ifdef COMPAT_FREEBSD32
2371 if (d->bd_compat32) {
2372 bzero(&hdr32_old, sizeof(hdr32_old));
2374 hdr32_old.bh_tstamp.tv_sec = ts.bt_sec;
2375 hdr32_old.bh_tstamp.tv_usec = ts.bt_frac;
2377 hdr32_old.bh_datalen = pktlen;
2378 hdr32_old.bh_hdrlen = hdrlen;
2379 hdr32_old.bh_caplen = caplen;
2380 bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr32_old,
2385 bzero(&hdr_old, sizeof(hdr_old));
2387 hdr_old.bh_tstamp.tv_sec = ts.bt_sec;
2388 hdr_old.bh_tstamp.tv_usec = ts.bt_frac;
2390 hdr_old.bh_datalen = pktlen;
2391 hdr_old.bh_hdrlen = hdrlen;
2392 hdr_old.bh_caplen = caplen;
2393 bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr_old,
2400 * Append the bpf header. Note we append the actual header size, but
2401 * move forward the length of the header plus padding.
2403 bzero(&hdr, sizeof(hdr));
2405 bpf_bintime2ts(bt, &hdr.bh_tstamp, tstype);
2406 hdr.bh_datalen = pktlen;
2407 hdr.bh_hdrlen = hdrlen;
2408 hdr.bh_caplen = caplen;
2409 bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr));
2412 * Copy the packet data into the store buffer and update its length.
2414 #ifndef BURN_BRIDGES
2417 (*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, caplen);
2418 d->bd_slen = curlen + totlen;
2425 * Free buffers currently in use by a descriptor.
2429 bpf_freed(struct bpf_d *d)
2433 * We don't need to lock out interrupts since this descriptor has
2434 * been detached from its interface and it hasn't yet been marked
2438 if (d->bd_rfilter != NULL) {
2439 free((caddr_t)d->bd_rfilter, M_BPF);
2441 if (d->bd_bfilter != NULL)
2442 bpf_destroy_jit_filter(d->bd_bfilter);
2445 if (d->bd_wfilter != NULL)
2446 free((caddr_t)d->bd_wfilter, M_BPF);
2447 mtx_destroy(&d->bd_lock);
2451 * Attach an interface to bpf. dlt is the link layer type; hdrlen is the
2452 * fixed size of the link header (variable length headers not yet supported).
2455 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
2458 bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
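/*
 * Illustrative usage (a sketch): Ethernet drivers attach through
 * ether_ifattach(), which effectively does
 *
 *	bpfattach(ifp, DLT_EN10MB, ETHER_HDR_LEN);
 */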
2462 * Attach an interface to bpf. ifp is a pointer to the structure
2463 * defining the interface to be attached, dlt is the link layer type,
2464 * and hdrlen is the fixed size of the link header (variable length
2465 * headers are not yet supported).
2468 bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
2472 bp = malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
2476 LIST_INIT(&bp->bif_dlist);
2477 LIST_INIT(&bp->bif_wlist);
2480 rw_init(&bp->bif_lock, "bpf interface lock");
2481 KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized"));
2485 LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
2488 bp->bif_hdrlen = hdrlen;
2491 if_printf(ifp, "bpf attached\n");
2495 * Detach bpf from an interface. This involves detaching each descriptor
2496 * associated with the interface. Notify each descriptor as it's detached
2497 * so that any sleepers wake up and get ENXIO.
2500 bpfdetach(struct ifnet *ifp)
2502 struct bpf_if *bp, *bp_temp;
2509 /* Find all bpf_if struct's which reference ifp and detach them. */
2510 LIST_FOREACH_SAFE(bp, &bpf_iflist, bif_next, bp_temp) {
2511 if (ifp != bp->bif_ifp)
2514 LIST_REMOVE(bp, bif_next);
2515 /* Add to to-be-freed list */
2516 LIST_INSERT_HEAD(&bpf_freelist, bp, bif_next);
2520 * Delay freeing bp till interface is detached
2521 * and all routes through this interface are removed.
2522 * Mark bp as detached to restrict new consumers.
2525 bp->flags |= BPFIF_FLAG_DYING;
2528 CTR4(KTR_NET, "%s: scheduling free for encap %d (%p) for if %p",
2529 __func__, bp->bif_dlt, bp, ifp);
2531 /* Free common descriptors */
2532 while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
2533 bpf_detachd_locked(d);
2539 /* Free writer-only descriptors */
2540 while ((d = LIST_FIRST(&bp->bif_wlist)) != NULL) {
2541 bpf_detachd_locked(d);
2551 printf("bpfdetach: %s was not attached\n", ifp->if_xname);
2556 * Interface departure handler.
2557 * Note that the departure event does not guarantee the interface is going down;
2558 * interface renaming is currently done via a departure/arrival event pair.
2560 * The departure handler is called after all routes pointing to the
2561 * given interface are removed and the interface is in the down state,
2562 * preventing any packets from being sent or received. We assume it is now safe
2563 * to free data allocated by BPF.
2566 bpf_ifdetach(void *arg __unused, struct ifnet *ifp)
2568 struct bpf_if *bp, *bp_temp;
2573 * Find matching entries in free list.
2574 * Nothing should be found if bpfdetach() was not called.
2576 LIST_FOREACH_SAFE(bp, &bpf_freelist, bif_next, bp_temp) {
2577 if (ifp != bp->bif_ifp)
2580 CTR3(KTR_NET, "%s: freeing BPF instance %p for interface %p",
2583 LIST_REMOVE(bp, bif_next);
2585 rw_destroy(&bp->bif_lock);
2593 * Note that we cannot zero other pointers to
2594 * custom DLTs possibly used by the given interface.
2601 * Get the list of available data link types for the interface.
2604 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
2612 ifp = d->bd_bif->bif_ifp;
2615 LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2616 if (bp->bif_ifp != ifp)
2618 if (bfl->bfl_list != NULL) {
2619 if (n >= bfl->bfl_len)
2621 error = copyout(&bp->bif_dlt,
2622 bfl->bfl_list + n, sizeof(u_int));
2631 * Set the data link type of a BPF instance.
2634 bpf_setdlt(struct bpf_d *d, u_int dlt)
2636 int error, opromisc;
2642 if (d->bd_bif->bif_dlt == dlt)
2644 ifp = d->bd_bif->bif_ifp;
2646 LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2647 if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
2652 opromisc = d->bd_promisc;
2658 error = ifpromisc(bp->bif_ifp, 1);
2660 if_printf(bp->bif_ifp,
2661 "bpf_setdlt: ifpromisc failed (%d)\n",
2667 return (bp == NULL ? EINVAL : 0);
2671 bpf_drvinit(void *unused)
2675 mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
2676 LIST_INIT(&bpf_iflist);
2677 LIST_INIT(&bpf_freelist);
2679 dev = make_dev(&bpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bpf");
2680 /* For compatibility */
2681 make_dev_alias(dev, "bpf0");
2683 /* Register interface departure handler */
2684 bpf_ifdetach_cookie = EVENTHANDLER_REGISTER(
2685 ifnet_departure_event, bpf_ifdetach, NULL,
2686 EVENTHANDLER_PRI_ANY);
2690 * Zero out the various packet counters associated with all of the bpf
2691 * descriptors. At some point, we will probably want to get a bit more
2692 * granular and allow the user to specify descriptors to be zeroed.
2695 bpf_zero_counters(void)
2701 LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2703 LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
2719 * Fill filter statistics
2722 bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
2725 bzero(d, sizeof(*d));
2726 BPFD_LOCK_ASSERT(bd);
2727 d->bd_structsize = sizeof(*d);
2728 /* XXX: reading should be protected by global lock */
2729 d->bd_immediate = bd->bd_immediate;
2730 d->bd_promisc = bd->bd_promisc;
2731 d->bd_hdrcmplt = bd->bd_hdrcmplt;
2732 d->bd_direction = bd->bd_direction;
2733 d->bd_feedback = bd->bd_feedback;
2734 d->bd_async = bd->bd_async;
2735 d->bd_rcount = bd->bd_rcount;
2736 d->bd_dcount = bd->bd_dcount;
2737 d->bd_fcount = bd->bd_fcount;
2738 d->bd_sig = bd->bd_sig;
2739 d->bd_slen = bd->bd_slen;
2740 d->bd_hlen = bd->bd_hlen;
2741 d->bd_bufsize = bd->bd_bufsize;
2742 d->bd_pid = bd->bd_pid;
2743 strlcpy(d->bd_ifname,
2744 bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
2745 d->bd_locked = bd->bd_locked;
2746 d->bd_wcount = bd->bd_wcount;
2747 d->bd_wdcount = bd->bd_wdcount;
2748 d->bd_wfcount = bd->bd_wfcount;
2749 d->bd_zcopy = bd->bd_zcopy;
2750 d->bd_bufmode = bd->bd_bufmode;
2754 * Handle `netstat -B' stats request
2757 bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
2759 struct xbpf_d *xbdbuf, *xbd, zerostats;
2765 * XXX This is not technically correct. It is possible for non-
2766 * privileged users to open bpf devices. It would make sense
2767 * if the users who opened the devices were able to retrieve
2768 * the statistics for them, too.
2770 error = priv_check(req->td, PRIV_NET_BPF);
2774 * Check to see if the user is requesting that the counters be
2775 * zeroed out. Explicitly check that the supplied data is zeroed,
2776 * as we aren't allowing the user to set the counters currently.
2778 if (req->newptr != NULL) {
2779 if (req->newlen != sizeof(zerostats))
2781 bzero(&zerostats, sizeof(zerostats));
2783 if (bcmp(xbd, &zerostats, sizeof(*xbd)) != 0)
2785 bpf_zero_counters();
2788 if (req->oldptr == NULL)
2789 return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
2790 if (bpf_bpfd_cnt == 0)
2791 return (SYSCTL_OUT(req, 0, 0));
2792 xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
2794 if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
2796 free(xbdbuf, M_BPF);
2800 LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2802 /* Send writers-only first */
2803 LIST_FOREACH(bd, &bp->bif_wlist, bd_next) {
2804 xbd = &xbdbuf[index++];
2806 bpfstats_fill_xbpf(xbd, bd);
2809 LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
2810 xbd = &xbdbuf[index++];
2812 bpfstats_fill_xbpf(xbd, bd);
2818 error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
2819 free(xbdbuf, M_BPF);
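/*
 * Hedged userland sketch (illustrative addition): `netstat -B' reads
 * this node roughly as follows (the privilege check above still applies):
 *
 *	size_t len;
 *	sysctlbyname("net.bpf.stats", NULL, &len, NULL, 0);
 *	struct xbpf_d *buf = malloc(len);
 *	sysctlbyname("net.bpf.stats", buf, &len, NULL, 0);
 */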
2823 SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, bpf_drvinit, NULL);
2825 #else /* !DEV_BPF && !NETGRAPH_BPF */
2827 * NOP stubs to allow bpf-using drivers to load and function.
2829 * A 'better' implementation would allow the core bpf functionality
2830 * to be loaded at runtime.
2832 static struct bpf_if bp_null;
2835 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
2840 bpf_mtap(struct bpf_if *bp, struct mbuf *m)
2845 bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m)
2850 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
2853 bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
2857 bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
2860 *driverp = &bp_null;
2864 bpfdetach(struct ifnet *ifp)
2869 bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
2871 return -1; /* "no filter" behaviour */
2875 bpf_validate(const struct bpf_insn *f, int len)
2877 return 0; /* false */
2880 #endif /* !DEV_BPF && !NETGRAPH_BPF */