/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2007 Seccuris Inc.
 * All rights reserved.
 *
 * This software was developed by Robert N. M. Watson under contract to
 * Seccuris Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bpf.h"

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sf_buf.h>
#include <sys/socket.h>
#include <sys/uio.h>

#include <machine/atomic.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpf_zerocopy.h>
#include <net/bpfdesc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

/*
 * Zero-copy buffer scheme for BPF: user space "donates" two buffers, which
 * are mapped into the kernel address space using sf_bufs and used directly
 * by BPF.  Memory is wired since page faults cannot be tolerated in the
 * contexts where the buffers are copied to (locks held, interrupt context,
 * etc.).  Access to shared memory buffers is synchronized using a header on
 * each buffer, allowing the number of system calls to go to zero as BPF
 * reaches saturation (buffers filled as fast as they can be drained by the
 * user process).  Full details of the protocol for communicating between the
 * user process and BPF may be found in bpf(4).
 */

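/*
 * Illustrative sketch (not part of the original file): one way a user
 * process might consume a completed buffer under this protocol.  The bzh_*
 * fields come from struct bpf_zbuf_header; process_packets() is a
 * hypothetical consumer, and the exact memory-barrier primitives a user
 * program should use are described in bpf(4), not here.
 *
 *	struct bpf_zbuf_header *bzh = (struct bpf_zbuf_header *)buf;
 *	unsigned int gen;
 *
 *	gen = bzh->bzh_kernel_gen;		(read with acquire semantics)
 *	if (gen != bzh->bzh_user_gen) {
 *		(new data: the kernel bumped its generation number)
 *		process_packets((char *)buf + sizeof(*bzh),
 *		    bzh->bzh_kernel_len);
 *		(acknowledge, allowing the kernel to reclaim the buffer)
 *		bzh->bzh_user_gen = gen;	(write with release semantics)
 *	}
 */
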
/*
 * Maximum number of pages per buffer.  Since all BPF devices use two, the
 * maximum per device is 2*BPF_MAX_PAGES.  Resource limits on the number of
 * sf_bufs may be an issue, so do not set this too high.  On older systems,
 * kernel address space limits may also be an issue.
 */
#define	BPF_MAX_PAGES	512

/*
 * struct zbuf describes a memory buffer loaned by a user process to the
 * kernel.  We represent this as a series of pages managed using an array of
 * sf_bufs.  Even though the memory is contiguous in user space, it may not
 * be mapped contiguously in the kernel (i.e., a set of physically
 * non-contiguous pages in the direct map region), so we must implement
 * scatter-gather copying.  One significant mitigating factor is that on
 * systems with a direct memory map, we can avoid TLB misses.
 *
 * At the front of the shared memory region is a bpf_zbuf_header, which
 * contains shared control data to allow user space and the kernel to
 * synchronize; this is included in zb_size, but not bpf_bufsize, so that
 * BPF knows that the space is not available.
 */
struct zbuf {
	vm_offset_t	 zb_uaddr;	/* User address at time of setup. */
	size_t		 zb_size;	/* Size of buffer, incl. header. */
	u_int		 zb_numpages;	/* Number of pages. */
	int		 zb_flags;	/* Flags on zbuf. */
	struct sf_buf	**zb_pages;	/* Pages themselves. */
	struct bpf_zbuf_header	*zb_header;	/* Shared header. */
};

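/*
 * Layout sketch (illustrative, not from the original file): an N-page
 * buffer looks like
 *
 *	[ bpf_zbuf_header | packet data ............................ ]
 *	  page 0                                  ...        page N-1
 *
 * For example, with 4 KiB pages an 8-page buffer has zb_size = 32768, of
 * which sizeof(struct bpf_zbuf_header) bytes at the front are control data
 * and the remainder is what bpf_bufsize reports as usable packet space.
 */
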
/*
 * When a buffer has been assigned to userspace, flag it as such, as the
 * buffer may remain in the store position as a result of the user process
 * not yet having acknowledged the buffer in the hold position.
 */
#define	ZBUF_FLAG_ASSIGNED	0x00000001	/* Set when owned by user. */

/*
 * Release a page we've previously wired.
 */
static void
zbuf_page_free(vm_page_t pp)
{

	vm_page_lock(pp);
	if (vm_page_unwire(pp, PQ_INACTIVE) && pp->object == NULL)
		vm_page_free(pp);
	vm_page_unlock(pp);
}

/*
 * Free an sf_buf with attached page.
 */
static void
zbuf_sfbuf_free(struct sf_buf *sf)
{
	vm_page_t pp;

	pp = sf_buf_page(sf);
	sf_buf_free(sf);
	zbuf_page_free(pp);
}

/*
 * Free a zbuf, including its page array, sf_bufs, and pages.  Allow
 * partially allocated zbufs to be freed so that this may be used even
 * during a zbuf setup failure.
 */
static void
zbuf_free(struct zbuf *zb)
{
	int i;

	for (i = 0; i < zb->zb_numpages; i++) {
		if (zb->zb_pages[i] != NULL)
			zbuf_sfbuf_free(zb->zb_pages[i]);
	}
	free(zb->zb_pages, M_BPF);
	free(zb, M_BPF);
}

/*
 * Given a user pointer to a page of user memory, return an sf_buf for the
 * page.  Because we may be requesting quite a few sf_bufs, prefer failure to
 * deadlock and use SFB_NOWAIT.
 */
static struct sf_buf *
zbuf_sfbuf_get(struct vm_map *map, vm_offset_t uaddr)
{
	struct sf_buf *sf;
	vm_page_t pp;

	if (vm_fault_quick_hold_pages(map, uaddr, PAGE_SIZE, VM_PROT_READ |
	    VM_PROT_WRITE, &pp, 1) < 0)
		return (NULL);
	sf = sf_buf_alloc(pp, SFB_NOWAIT);
	if (sf == NULL) {
		zbuf_page_free(pp);
		return (NULL);
	}
	return (sf);
}

/*
 * Create a zbuf describing a range of user address space memory.  Validate
 * page alignment, size requirements, etc.
 */
static int
zbuf_setup(struct thread *td, vm_offset_t uaddr, size_t len,
    struct zbuf **zbp)
{
	struct zbuf *zb;
	struct vm_map *map;
	int error, i;

	*zbp = NULL;

	/* User address must be page-aligned. */
	if (uaddr & PAGE_MASK)
		return (EINVAL);

	/* Length must be an integer number of full pages. */
	if (len & PAGE_MASK)
		return (EINVAL);

	/* Length must not exceed per-buffer resource limit. */
	if ((len / PAGE_SIZE) > BPF_MAX_PAGES)
		return (EINVAL);

	/* Allocate the buffer and set up each page with its own sf_buf. */
	error = 0;
	zb = malloc(sizeof(*zb), M_BPF, M_ZERO | M_WAITOK);
	zb->zb_uaddr = uaddr;
	zb->zb_size = len;
	zb->zb_numpages = len / PAGE_SIZE;
	zb->zb_pages = malloc(sizeof(struct sf_buf *) *
	    zb->zb_numpages, M_BPF, M_ZERO | M_WAITOK);
	map = &td->td_proc->p_vmspace->vm_map;
	for (i = 0; i < zb->zb_numpages; i++) {
		zb->zb_pages[i] = zbuf_sfbuf_get(map,
		    uaddr + (i * PAGE_SIZE));
		if (zb->zb_pages[i] == NULL) {
			error = EFAULT;
			goto error;
		}
	}
	zb->zb_header =
	    (struct bpf_zbuf_header *)sf_buf_kva(zb->zb_pages[0]);
	bzero(zb->zb_header, sizeof(*zb->zb_header));
	*zbp = zb;
	return (0);

error:
	zbuf_free(zb);
	return (error);
}

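/*
 * Worked example (illustrative, assuming 4 KiB pages): a user passing a
 * page-aligned uaddr with len = 65536 gets zb_numpages = 16; len = 65537
 * or an unaligned uaddr fails with EINVAL, and anything over BPF_MAX_PAGES
 * pages (2 MiB at this page size) is likewise rejected.
 */
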
/*
 * Copy bytes from a source into the specified zbuf.  The caller is
 * responsible for performing bounds checking, etc.
 */
void
bpf_zerocopy_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset,
    void *src, u_int len)
{
	u_int count, page, poffset;
	u_char *src_bytes;
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_append_bytes: not in zbuf mode"));
	KASSERT(buf != NULL, ("bpf_zerocopy_append_bytes: NULL buf"));

	src_bytes = (u_char *)src;
	zb = (struct zbuf *)buf;

	KASSERT((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0,
	    ("bpf_zerocopy_append_bytes: ZBUF_FLAG_ASSIGNED"));

	/*
	 * Scatter-gather copy to user pages mapped into kernel address space
	 * using sf_bufs: copy up to a page at a time.
	 */
	offset += sizeof(struct bpf_zbuf_header);
	page = offset / PAGE_SIZE;
	poffset = offset % PAGE_SIZE;
	while (len > 0) {
		KASSERT(page < zb->zb_numpages, ("bpf_zerocopy_append_bytes:"
		    " page overflow (%d p %d np)\n", page, zb->zb_numpages));

		count = min(len, PAGE_SIZE - poffset);
		bcopy(src_bytes, ((u_char *)sf_buf_kva(zb->zb_pages[page])) +
		    poffset, count);
		poffset += count;
		if (poffset == PAGE_SIZE) {
			poffset = 0;
			page++;
		}
		KASSERT(poffset < PAGE_SIZE,
		    ("bpf_zerocopy_append_bytes: page offset overflow (%d)",
		    poffset));
		len -= count;
		src_bytes += count;
	}
}

/*
 * Copy bytes from an mbuf chain to the specified zbuf: copying will be
 * scatter-gather both from mbufs, which may be fragmented over memory, and
 * to pages, which may not be contiguously mapped in kernel address space.
 * As with bpf_zerocopy_append_bytes(), the caller is responsible for
 * checking that this will not exceed the buffer limit.
 */
void
bpf_zerocopy_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset,
    void *src, u_int len)
{
	u_int count, moffset, page, poffset;
	const struct mbuf *m;
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_append_mbuf: not in zbuf mode"));
	KASSERT(buf != NULL, ("bpf_zerocopy_append_mbuf: NULL buf"));

	m = (struct mbuf *)src;
	zb = (struct zbuf *)buf;

	KASSERT((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0,
	    ("bpf_zerocopy_append_mbuf: ZBUF_FLAG_ASSIGNED"));

	/*
	 * Scatter-gather both from an mbuf chain and to a user page set
	 * mapped into kernel address space using sf_bufs.  If we're lucky,
	 * each mbuf requires one copy operation, but if page alignment and
	 * mbuf alignment work out less well, we'll be doing two copies per
	 * mbuf.
	 */
	offset += sizeof(struct bpf_zbuf_header);
	page = offset / PAGE_SIZE;
	poffset = offset % PAGE_SIZE;
	moffset = 0;
	while (len > 0) {
		KASSERT(page < zb->zb_numpages,
		    ("bpf_zerocopy_append_mbuf: page overflow (%d p %d "
		    "np)\n", page, zb->zb_numpages));
		KASSERT(m != NULL,
		    ("bpf_zerocopy_append_mbuf: end of mbuf chain"));

		count = min(m->m_len - moffset, len);
		count = min(count, PAGE_SIZE - poffset);
		bcopy(mtod(m, u_char *) + moffset,
		    ((u_char *)sf_buf_kva(zb->zb_pages[page])) + poffset,
		    count);
		poffset += count;
		if (poffset == PAGE_SIZE) {
			poffset = 0;
			page++;
		}
		KASSERT(poffset < PAGE_SIZE,
		    ("bpf_zerocopy_append_mbuf: page offset overflow (%d)",
		    poffset));
		moffset += count;
		if (moffset == m->m_len) {
			m = m->m_next;
			moffset = 0;
		}
		len -= count;
	}
}

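/*
 * Illustrative note: a single 1500-byte mbuf whose destination begins 100
 * bytes before a page boundary is copied in two pieces (100 bytes to the
 * end of one page, 1400 to the start of the next); this is the "two copies
 * per mbuf" case described above.
 */
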
/*
 * Notification from the BPF framework that a buffer in the store position is
 * rejecting packets and may be considered full.  We mark the buffer as
 * immutable and assign it to userspace so that it is immediately available
 * for the user process to access.
 */
void
bpf_zerocopy_buffull(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_buffull: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_sbuf;
	KASSERT(zb != NULL, ("bpf_zerocopy_buffull: zb == NULL"));

	if ((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0) {
		zb->zb_flags |= ZBUF_FLAG_ASSIGNED;
		zb->zb_header->bzh_kernel_len = d->bd_slen;
		atomic_add_rel_int(&zb->zb_header->bzh_kernel_gen, 1);
	}
}

/*
 * Notification from the BPF framework that a buffer has moved into the held
 * slot on a descriptor.  Zero-copy BPF will update the shared page to let
 * the user process know, and flag the buffer as assigned if it hasn't
 * already been marked assigned due to filling while it was in the store
 * position.
 *
 * Note: identical logic as in bpf_zerocopy_buffull(), except that we operate
 * on bd_hbuf and bd_hlen.
 */
void
bpf_zerocopy_bufheld(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_bufheld: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_hbuf;
	KASSERT(zb != NULL, ("bpf_zerocopy_bufheld: zb == NULL"));

	if ((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0) {
		zb->zb_flags |= ZBUF_FLAG_ASSIGNED;
		zb->zb_header->bzh_kernel_len = d->bd_hlen;
		atomic_add_rel_int(&zb->zb_header->bzh_kernel_gen, 1);
	}
}

/*
 * Notification from the BPF framework that a buffer has been rotated out of
 * the held position into the free position.  This happens when the user
 * acknowledges the held buffer.
 */
void
bpf_zerocopy_buf_reclaimed(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_buf_reclaimed: not in zbuf mode"));

	KASSERT(d->bd_fbuf != NULL,
	    ("bpf_zerocopy_buf_reclaimed: NULL free buf"));
	zb = (struct zbuf *)d->bd_fbuf;
	zb->zb_flags &= ~ZBUF_FLAG_ASSIGNED;
}

/*
 * Query from the BPF framework regarding whether the buffer currently in
 * the held position can be moved to the free position, which is indicated
 * by the user process making its generation number equal to the kernel
 * generation number.
 */
int
bpf_zerocopy_canfreebuf(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_canfreebuf: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_hbuf;
	if (zb == NULL)
		return (0);
	if (zb->zb_header->bzh_kernel_gen ==
	    atomic_load_acq_int(&zb->zb_header->bzh_user_gen))
		return (1);
	return (0);
}

/*
 * Query from the BPF framework as to whether or not the buffer currently in
 * the store position can actually be written to.  This may return false if
 * the store buffer is assigned to userspace before the hold buffer is
 * acknowledged.
 */
int
bpf_zerocopy_canwritebuf(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_canwritebuf: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_sbuf;
	KASSERT(zb != NULL, ("bpf_zerocopy_canwritebuf: bd_sbuf NULL"));

	if (zb->zb_flags & ZBUF_FLAG_ASSIGNED)
		return (0);
	return (1);
}

/*
 * Free zero-copy buffers at request of descriptor.
 */
void
bpf_zerocopy_free(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_free: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_sbuf;
	if (zb != NULL)
		zbuf_free(zb);
	zb = (struct zbuf *)d->bd_hbuf;
	if (zb != NULL)
		zbuf_free(zb);
	zb = (struct zbuf *)d->bd_fbuf;
	if (zb != NULL)
		zbuf_free(zb);
}

/*
 * Ioctl to return the maximum buffer size.
 */
int
bpf_zerocopy_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
{

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_ioctl_getzmax: not in zbuf mode"));

	*i = BPF_MAX_PAGES * PAGE_SIZE;
	return (0);
}

/*
 * Ioctl to force rotation of the two buffers, if there's any data available.
 * This can be used by user space to implement timeouts when waiting for a
 * buffer to fill.
 */
int
bpf_zerocopy_ioctl_rotzbuf(struct thread *td, struct bpf_d *d,
    struct bpf_zbuf *bz)
{
	struct zbuf *bzh;

	bzero(bz, sizeof(*bz));
	BPFD_LOCK(d);
	if (d->bd_hbuf == NULL && d->bd_slen != 0) {
		ROTATE_BUFFERS(d);
		bzh = (struct zbuf *)d->bd_hbuf;
		bz->bz_bufa = (void *)bzh->zb_uaddr;
		bz->bz_buflen = d->bd_hlen;
	}
	BPFD_UNLOCK(d);
	return (0);
}

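/*
 * Illustrative user-side sketch (an assumption based on bpf(4), not code
 * from this file): after a poll/select timeout, a process can force any
 * partial store-buffer contents into the held position:
 *
 *	struct bpf_zbuf bz;
 *
 *	if (ioctl(fd, BIOCROTZBUF, &bz) == 0 && bz.bz_bufa != NULL)
 *		(bz.bz_buflen bytes are now readable at bz.bz_bufa)
 */
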
/*
 * Ioctl to configure zero-copy buffers -- may be done only once.
 */
int
bpf_zerocopy_ioctl_setzbuf(struct thread *td, struct bpf_d *d,
    struct bpf_zbuf *bz)
{
	struct zbuf *zba, *zbb;
	int error;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_ioctl_setzbuf: not in zbuf mode"));

	/* Must set both buffers.  Cannot clear them. */
	if (bz->bz_bufa == NULL || bz->bz_bufb == NULL)
		return (EINVAL);

	/*
	 * Buffers must have a size greater than 0.  Alignment and other size
	 * validity checking is done in zbuf_setup().
	 */
	if (bz->bz_buflen == 0)
		return (EINVAL);

	/* Allocate new buffers. */
	error = zbuf_setup(td, (vm_offset_t)bz->bz_bufa, bz->bz_buflen,
	    &zba);
	if (error)
		return (error);
	error = zbuf_setup(td, (vm_offset_t)bz->bz_bufb, bz->bz_buflen,
	    &zbb);
	if (error) {
		zbuf_free(zba);
		return (error);
	}

	/*
	 * We only allow buffers to be installed once, so atomically check
	 * that no buffers are currently installed and install new buffers.
	 */
	BPFD_LOCK(d);
	if (d->bd_hbuf != NULL || d->bd_sbuf != NULL || d->bd_fbuf != NULL ||
	    d->bd_bif != NULL) {
		BPFD_UNLOCK(d);
		zbuf_free(zba);
		zbuf_free(zbb);
		return (EINVAL);
	}

	/*
	 * Point the BPF descriptor at the buffers; initialize sbuf as zba so
	 * that it is always filled first in the sequence, per bpf(4).
	 */
	d->bd_fbuf = (caddr_t)zbb;
	d->bd_sbuf = (caddr_t)zba;
	d->bd_slen = 0;
	d->bd_hlen = 0;

	/*
	 * We expose only the space left in the buffer after the size of the
	 * shared management region.
	 */
	d->bd_bufsize = bz->bz_buflen - sizeof(struct bpf_zbuf_header);
	BPFD_UNLOCK(d);
	return (0);
}

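/*
 * Illustrative user-side setup sketch (an assumption based on bpf(4), not
 * code from this file): how a process might enter zero-copy mode and
 * install two page-aligned buffers.  preferred_len is a hypothetical
 * stand-in for the application's desired buffer size.
 *
 *	struct bpf_zbuf bz;
 *	u_int mode = BPF_BUFMODE_ZBUF;
 *	size_t zmax;
 *	int fd;
 *
 *	fd = open("/dev/bpf", O_RDWR);
 *	ioctl(fd, BIOCSETBUFMODE, &mode);
 *	ioctl(fd, BIOCGETZMAX, &zmax);		(upper bound on bz_buflen)
 *	bz.bz_buflen = MIN(preferred_len, zmax);  (multiple of page size)
 *	bz.bz_bufa = mmap(NULL, bz.bz_buflen, PROT_READ | PROT_WRITE,
 *	    MAP_ANON, -1, 0);			(mmap returns page-aligned
 *	bz.bz_bufb = mmap(NULL, bz.bz_buflen, PROT_READ | PROT_WRITE,
 *	    MAP_ANON, -1, 0);			 memory, as zbuf_setup needs)
 *	ioctl(fd, BIOCSETZBUF, &bz);		(install; allowed only once)
 */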