/*-
 * Copyright (c) 2013-2015 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 1998, David Greenman. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kern_tls.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/protosw.h>
#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vnode.h>

#include <netinet/in.h>
#include <netinet/tcp.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm_object.h>
#include <vm/vm_pager.h>
static MALLOC_DEFINE(M_SENDFILE, "sendfile", "sendfile dynamic memory");

#define EXT_FLAG_SYNC		EXT_FLAG_VENDOR1
#define EXT_FLAG_NOCACHE	EXT_FLAG_VENDOR2
#define EXT_FLAG_CACHE_LAST	EXT_FLAG_VENDOR3
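
/*
 * sendfile() borrows the three vendor bits of m_ext.ext_flags:
 * EXT_FLAG_SYNC marks mbufs accounted for in a struct sendfile_sync
 * (SF_SYNC), EXT_FLAG_NOCACHE requests VPR_TRYFREE page release when
 * the mbuf is freed (SF_NOCACHE), and EXT_FLAG_CACHE_LAST exempts the
 * last page of a run from that SF_NOCACHE treatment.
 */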

/*
 * Structure describing a single sendfile(2) I/O, which may consist of
 * several underlying pager I/Os.
 *
 * The syscall context allocates the structure and initializes 'nios'
 * to 1.  As sendfile_swapin() runs through pages and starts asynchronous
 * paging operations, it increments 'nios'.
 *
 * Every I/O completion calls sendfile_iodone(), which decrements 'nios',
 * and the syscall also calls sendfile_iodone() after allocating all mbufs,
 * linking them and sending them to the socket.  Whoever reaches zero 'nios'
 * is responsible for calling pru_ready on the socket, to notify it of
 * readiness of the data.
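 *
 * For example, a request needing two pager I/Os leaves sendfile_swapin()
 * with 'nios' at 3; the two I/O completions and the syscall's own
 * sendfile_iodone() call each drop it by one, and whichever of the three
 * reaches zero notifies the socket.
 */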
        struct ktls_session *tls;

/*
 * Structure used to track requests with SF_SYNC flag.
 */
struct sendfile_sync {
static void
sendfile_sync_destroy(struct sendfile_sync *sfs)
{
        KASSERT(sfs->count == 0, ("sendfile sync %p still busy", sfs));

        cv_destroy(&sfs->cv);
        mtx_destroy(&sfs->mtx);
        free(sfs, M_SENDFILE);
}
static void
sendfile_sync_signal(struct sendfile_sync *sfs)
{
        mtx_lock(&sfs->mtx);
        KASSERT(sfs->count > 0, ("sendfile sync %p not busy", sfs));
        if (--sfs->count == 0) {
                if (!sfs->waiting) {
                        /* The sendfile() waiter was interrupted by a signal. */
                        sendfile_sync_destroy(sfs);
                        return;
                }
                cv_signal(&sfs->cv);
        }
        mtx_unlock(&sfs->mtx);
}

counter_u64_t sfstat[sizeof(struct sfstat) / sizeof(uint64_t)];

static void
sfstat_init(const void *unused)
{
        COUNTER_ARRAY_ALLOC(sfstat, sizeof(struct sfstat) / sizeof(uint64_t),
            M_WAITOK);
}
SYSINIT(sfstat, SI_SUB_MBUF, SI_ORDER_FIRST, sfstat_init, NULL);
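
/*
 * Export the counters through the kern.ipc.sfstat sysctl node: a read
 * copies out a struct sfstat snapshot of the per-CPU counters, and any
 * write simply zeroes them.
 */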
static int
sfstat_sysctl(SYSCTL_HANDLER_ARGS)
{
        struct sfstat s;

        COUNTER_ARRAY_COPY(sfstat, &s, sizeof(s) / sizeof(uint64_t));
        if (req->newptr)
                COUNTER_ARRAY_ZERO(sfstat, sizeof(s) / sizeof(uint64_t));
        return (SYSCTL_OUT(req, &s, sizeof(s)));
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, sfstat,
    CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0,
    sfstat_sysctl, "I",
    "sendfile statistics");

static void
sendfile_free_mext(struct mbuf *m)
{
        struct sf_buf *sf;
        vm_page_t pg;
        int flags;

        KASSERT(m->m_flags & M_EXT && m->m_ext.ext_type == EXT_SFBUF,
            ("%s: m %p !M_EXT or !EXT_SFBUF", __func__, m));

        sf = m->m_ext.ext_arg1;
        pg = sf_buf_page(sf);
        flags = (m->m_ext.ext_flags & EXT_FLAG_NOCACHE) != 0 ? VPR_TRYFREE : 0;
        sf_buf_free(sf);
        vm_page_release(pg, flags);

        if (m->m_ext.ext_flags & EXT_FLAG_SYNC) {
                struct sendfile_sync *sfs = m->m_ext.ext_arg2;

                sendfile_sync_signal(sfs);
        }
}

static void
sendfile_free_mext_pg(struct mbuf *m)
{
        struct mbuf_ext_pgs *ext_pgs;
        vm_page_t pg;
        int flags, i;
        bool cache_last;

        KASSERT(m->m_flags & M_EXT && m->m_ext.ext_type == EXT_PGS,
            ("%s: m %p !M_EXT or !EXT_PGS", __func__, m));

        cache_last = m->m_ext.ext_flags & EXT_FLAG_CACHE_LAST;
        ext_pgs = &m->m_ext_pgs;
        flags = (m->m_ext.ext_flags & EXT_FLAG_NOCACHE) != 0 ? VPR_TRYFREE : 0;

        for (i = 0; i < ext_pgs->npgs; i++) {
                if (cache_last && i == ext_pgs->npgs - 1)
                        flags = 0;
                pg = PHYS_TO_VM_PAGE(ext_pgs->m_epg_pa[i]);
                vm_page_release(pg, flags);
        }

        if (m->m_ext.ext_flags & EXT_FLAG_SYNC) {
                struct sendfile_sync *sfs = m->m_ext.ext_arg1;

                sendfile_sync_signal(sfs);
        }
}

/*
 * Helper function to calculate how much data to put into page i of n.
 * Only the first and last pages are special.
 */
static inline off_t
xfsize(int i, int n, off_t off, off_t len)
{
        if (i == 0)
                return (omin(PAGE_SIZE - (off & PAGE_MASK), len));
        if (i == n - 1 && ((off + len) & PAGE_MASK) > 0)
                return ((off + len) & PAGE_MASK);
        return (PAGE_SIZE);
}
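
/*
 * Example, assuming PAGE_SIZE == 4096: a transfer with off == 100 and
 * len == 10000 spans n == 3 pages; xfsize() yields 3996 for page 0,
 * 4096 for page 1 and 1908 for page 2, which sum to 10000.
 */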

/*
 * Helper function to get the offset within an object for page i.
 */
static inline vm_ooffset_t
vmoff(int i, off_t off)
{
        if (i == 0)
                return ((vm_ooffset_t)off);
        return (trunc_page(off + i * PAGE_SIZE));
}
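
/*
 * Note that vmoff(0, off) is the exact byte offset while later pages
 * are page aligned; e.g. for off == 100 and PAGE_SIZE == 4096:
 * vmoff(0, off) == 100, vmoff(1, off) == 4096, vmoff(2, off) == 8192.
 */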

/*
 * Helper function used when allocation of a page or sf_buf failed.
 * Pretend as if we don't have enough space: subtract xfsize() of
 * all pages that failed.
 */
static void
fixspace(int old, int new, off_t off, int *space)
{
        KASSERT(old > new, ("%s: old %d new %d", __func__, old, new));

        /* Subtract last one. */
        *space -= xfsize(old - 1, old, off, *space);
        old--;
        if (new == old)
                /* There was only one page. */
                return;

        /* Subtract first one. */
        if (new == 0) {
                *space -= xfsize(0, old, off, *space);
                new++;
        }

        /* Rest of pages are full sized. */
        *space -= (old - new) * PAGE_SIZE;

        KASSERT(*space >= 0, ("%s: space went backwards", __func__));
}
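
/*
 * E.g. fixspace(2, 1, off, &space) means only the last of two pages
 * failed, so *space shrinks by xfsize(1, 2, off, *space) alone.
 */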

/*
 * Wait for all in-flight I/Os to complete; we must not unwire pages
 * under them.
 */
static void
sendfile_iowait(struct sf_io *sfio, const char *wmesg)
{
        while (atomic_load_int(&sfio->nios) != 1)
                pause(wmesg, 1);
}

/*
 * I/O completion callback.
 */
static void
sendfile_iodone(void *arg, vm_page_t *pa, int count, int error)
{
        struct sf_io *sfio = arg;
        struct socket *so;
        int i;

        if (error != 0) {
                sfio->error = error;
                /*
                 * Restoration of the pg[] elements is done by
                 * sendfile_swapin().
                 */
        } else {
                /*
                 * Restore the valid page pointers.  They are already
                 * unbusied, but still wired.  For the error != 0 case,
                 * sendfile_swapin() handles the unbusying.
                 *
                 * XXXKIB since pages are only wired, and we do not
                 * own the object lock, other users might have
                 * invalidated them in the meantime.  Similarly, after we
                 * unbusied the swapped-in pages, they can become
                 * invalid again, due to vnode truncation.
                 */
                MPASS(count == 0 || pa[0] != bogus_page);
                for (i = 0; i < count; i++) {
                        if (pa[i] == bogus_page) {
                                sfio->pa[(pa[0]->pindex - sfio->pindex0) + i] =
                                    pa[i] = vm_page_relookup(sfio->obj,
                                    pa[0]->pindex + i);
                                KASSERT(pa[i] != NULL,
                                    ("%s: page %p[%d] disappeared",
                                    __func__, pa, i));
                        } else
                                vm_page_xunbusy_unchecked(pa[i]);
                }
        }

        if (!refcount_release(&sfio->nios))
                return;

        for (i = 1; i < sfio->npages; i++) {
                if (sfio->pa[i] == NULL)
                        continue;
                KASSERT(vm_page_wired(sfio->pa[i]),
                    ("sfio %p page %d %p not wired", sfio, i, sfio->pa[i]));
                KASSERT(sfio->pa[0]->object == sfio->pa[i]->object,
                    ("sfio %p page %d %p wrong owner %p %p", sfio, i,
                    sfio->pa[i], sfio->pa[0]->object, sfio->pa[i]->object));
                KASSERT(sfio->pa[0]->pindex + i == sfio->pa[i]->pindex,
                    ("sfio %p page %d %p wrong index %jx %jx", sfio, i,
                    sfio->pa[i], (uintmax_t)sfio->pa[0]->pindex,
                    (uintmax_t)sfio->pa[i]->pindex));
        }

        vm_object_pip_wakeup(sfio->obj);

        if (sfio->m == NULL) {
                /*
                 * Either the I/O operation failed, or we failed to allocate
                 * buffers, or we bailed out on the first busy page, or we
                 * succeeded in filling the request without any I/Os.  Either
                 * way, pru_send hadn't been executed - nothing had been sent
                 * to the socket yet.
                 */
                MPASS((curthread->td_pflags & TDP_KTHREAD) == 0);
                free(sfio, M_SENDFILE);
                return;
        }

#if defined(KERN_TLS) && defined(INVARIANTS)
        if ((sfio->m->m_flags & M_EXT) != 0 &&
            sfio->m->m_ext.ext_type == EXT_PGS)
                KASSERT(sfio->tls == sfio->m->m_ext_pgs.tls,
                    ("TLS session mismatch"));
        else
                KASSERT(sfio->tls == NULL,
                    ("non-ext_pgs mbuf with TLS session"));
#endif

        so = sfio->so;
        CURVNET_SET(so->so_vnet);
        if (__predict_false(sfio->error)) {
                /*
                 * The I/O operation failed.  The state of data in the socket
                 * is now inconsistent, and all we can do is tear it down.
                 * The protocol abort method tears down the protocol state,
                 * frees all ready mbufs and detaches the not-ready ones.
                 * We will free the mbufs corresponding to this I/O manually.
                 *
                 * The socket is marked with EIO and made available for
                 * read, so that the application receives EIO on the next
                 * syscall and eventually closes the socket.
                 */
                so->so_proto->pr_usrreqs->pru_abort(so);
                so->so_error = EIO;

                mb_free_notready(sfio->m, sfio->npages);
#ifdef KERN_TLS
        } else if (sfio->tls != NULL && sfio->tls->mode == TCP_TLS_MODE_SW) {
                /*
                 * The I/O operation is complete, but we still need to
                 * encrypt.  We cannot do this in the interrupt thread
                 * of the disk controller, so forward the mbufs to a
                 * different thread.
                 *
                 * Donate the socket reference from sfio rather than
                 * explicitly invoking soref().
                 */
                ktls_enqueue(sfio->m, so, sfio->npages);
#endif
        } else
                (void)(so->so_proto->pr_usrreqs->pru_ready)(so, sfio->m,
                    sfio->npages);

        free(sfio, M_SENDFILE);
}

/*
 * Iterate through the pages vector and request paging for invalid pages.
 */
static int
sendfile_swapin(vm_object_t obj, struct sf_io *sfio, int *nios, off_t off,
    off_t len, int npages, int rhpages, int flags)
{
        vm_page_t *pa;
        int a, count, count1, grabbed, i, j, rv;

        pa = sfio->pa;
        *nios = 0;
        flags = (flags & SF_NODISKIO) ? VM_ALLOC_NOWAIT : 0;
        sfio->pindex0 = OFF_TO_IDX(off);

        /*
         * First grab all the pages and wire them.  Note that we grab
         * only required pages.  Readahead pages are dealt with later.
         */
        grabbed = vm_page_grab_pages_unlocked(obj, OFF_TO_IDX(off),
            VM_ALLOC_NORMAL | VM_ALLOC_WIRED | flags, pa, npages);
        if (grabbed < npages) {
                for (int i = grabbed; i < npages; i++)
                        pa[i] = NULL;
                npages = grabbed;
                rhpages = 0;
        }

        for (i = 0; i < npages;) {
                /* Skip valid pages. */
                if (vm_page_is_valid(pa[i], vmoff(i, off) & PAGE_MASK,
                    xfsize(i, npages, off, len))) {
                        vm_page_xunbusy(pa[i]);
                        SFSTAT_INC(sf_pages_valid);
                        i++;
                        continue;
                }

                /*
                 * Next page is invalid.  Check if it belongs to the pager.
                 * It may not be there, which is a regular situation for the
                 * shmem pager.  For the vnode pager this happens only in
                 * case of a sparse file.
                 *
                 * An important feature of vm_pager_has_page() is the hint
                 * stored in 'a', about how many pages we can page in after
                 * this page in a single I/O.
                 */
                VM_OBJECT_RLOCK(obj);
                if (!vm_pager_has_page(obj, OFF_TO_IDX(vmoff(i, off)), NULL,
                    &a)) {
                        VM_OBJECT_RUNLOCK(obj);
                        pmap_zero_page(pa[i]);
                        vm_page_valid(pa[i]);
                        MPASS(pa[i]->dirty == 0);
                        vm_page_xunbusy(pa[i]);
                        i++;
                        continue;
                }
                VM_OBJECT_RUNLOCK(obj);

                /*
                 * We want to page in as many pages as possible, limited only
                 * by the 'a' hint and the actual request.
                 */
                count = min(a + 1, npages - i);
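
                /*
                 * E.g. a hint of a == 7 allows up to eight pages in one
                 * I/O; with only five pages left in the request, count
                 * becomes five.
                 */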

                /*
                 * We should not pagein into a valid page because
                 * there might still be an unfinished write tracked by,
                 * e.g., a buffer; thus we substitute any valid pages
                 * with the bogus one.
                 *
                 * We must not leave around xbusy pages which are not
                 * part of the run passed to vm_pager_getpages(),
                 * otherwise the pager might deadlock waiting for the busy
                 * status of the page, e.g. if it constitutes the
                 * buffer needed to validate another page.
                 *
                 * First trim the end of the run consisting of the
                 * valid pages, then replace the rest of the valid
                 * pages with the bogus page.
                 */
                count1 = count;
                for (j = i + count - 1; j > i; j--) {
                        if (vm_page_is_valid(pa[j], vmoff(j, off) & PAGE_MASK,
                            xfsize(j, npages, off, len))) {
                                vm_page_xunbusy(pa[j]);
                                SFSTAT_INC(sf_pages_valid);
                                count--;
                        } else
                                break;
                }

                /*
                 * The last page in the run pa[i + count - 1] is
                 * guaranteed to be invalid by the trim above, so it
                 * is not replaced with the bogus page, hence the -1
                 * in the loop end condition.
                 */
                MPASS(pa[i + count - 1]->valid != VM_PAGE_BITS_ALL);
                for (j = i + 1; j < i + count - 1; j++) {
                        if (vm_page_is_valid(pa[j], vmoff(j, off) & PAGE_MASK,
                            xfsize(j, npages, off, len))) {
                                vm_page_xunbusy(pa[j]);
                                SFSTAT_INC(sf_pages_valid);
                                SFSTAT_INC(sf_pages_bogus);
                                pa[j] = bogus_page;
                        }
                }

                refcount_acquire(&sfio->nios);
                rv = vm_pager_get_pages_async(obj, pa + i, count, NULL,
                    i + count == npages ? &rhpages : NULL,
                    &sendfile_iodone, sfio);
                if (__predict_false(rv != VM_PAGER_OK)) {
                        sendfile_iowait(sfio, "sferrio");

                        /*
                         * Perform full page recovery before returning EIO.
                         * Pages from 0 to npages are wired.
                         * Pages from (i + 1) to (i + count - 1) may be
                         * substituted with the bogus page, and are not
                         * busied.
                         * Pages from (i + count) to (i + count1 - 1) are
                         * not busied.
                         * The rest of the pages from i to npages are busied.
                         */
                        for (j = 0; j < npages; j++) {
                                if (j >= i + count && j < i + count1)
                                        ;
                                else if (j > i && j < i + count - 1 &&
                                    pa[j] == bogus_page)
                                        pa[j] = vm_page_relookup(obj,
                                            OFF_TO_IDX(vmoff(j, off)));
                                else if (j >= i)
                                        vm_page_xunbusy(pa[j]);
                                KASSERT(pa[j] != NULL && pa[j] != bogus_page,
                                    ("%s: page %p[%d] I/O recovery failure",
                                    __func__, pa, j));
                                vm_page_unwire(pa[j], PQ_INACTIVE);
                        }
                        return (EIO);
                }

                SFSTAT_INC(sf_iocnt);
                SFSTAT_ADD(sf_pages_read, count);
                if (i + count == npages)
                        SFSTAT_ADD(sf_rhpages_read, rhpages);

                i += count;
                (*nios)++;
        }

        if (*nios == 0 && npages != 0)
                SFSTAT_INC(sf_noiocnt);

        return (0);
}

static int
sendfile_getobj(struct thread *td, struct file *fp, vm_object_t *obj_res,
    struct vnode **vp_res, struct shmfd **shmfd_res, off_t *obj_size,
    int *bsize)
{
        shmfd = *shmfd_res = NULL;

        /*
         * The file descriptor must be a regular file and have a
         * backing VM object.
         */
        if (fp->f_type == DTYPE_VNODE) {
                vp = fp->f_vnode;
                vn_lock(vp, LK_SHARED | LK_RETRY);
                if (vp->v_type != VREG) {
                        error = EINVAL;
                        goto out;
                }
                *bsize = vp->v_mount->mnt_stat.f_iosize;
                error = VOP_GETATTR(vp, &va, td->td_ucred);
                if (error != 0)
                        goto out;
                *obj_size = va.va_size;
                obj = vp->v_object;
        } else if (fp->f_type == DTYPE_SHM) {
                shmfd = fp->f_data;
                obj = shmfd->shm_object;
                *obj_size = shmfd->shm_size;
        }

        VM_OBJECT_WLOCK(obj);
        if ((obj->flags & OBJ_DEAD) != 0) {
                VM_OBJECT_WUNLOCK(obj);
                error = EBADF;
                goto out;
        }

        /*
         * Temporarily increase the backing VM object's reference
         * count so that a forced reclamation of its vnode does not
         * immediately destroy it.
         */
        vm_object_reference_locked(obj);
        VM_OBJECT_WUNLOCK(obj);

static int
sendfile_getsock(struct thread *td, int s, struct file **sock_fp,
    struct socket **so)
{
        /*
         * The socket must be a stream socket and connected.
         */
        error = getsock_cap(td, s, &cap_send_rights,
            sock_fp, NULL, NULL);
        if (error != 0)
                return (error);
        *so = (*sock_fp)->f_data;
        if ((*so)->so_type != SOCK_STREAM)
                return (EINVAL);
        /*
         * SCTP one-to-one style sockets currently don't work with
         * sendfile(), so indicate EINVAL for now.
         */
        if ((*so)->so_proto->pr_protocol == IPPROTO_SCTP)
                return (EINVAL);
        if (SOLISTENING(*so))
                return (ENOTCONN);
        return (0);
}

int
vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
    struct uio *trl_uio, off_t offset, size_t nbytes, off_t *sent, int flags,
    struct thread *td)
{
        struct file *sock_fp;
        struct vnode *vp;
        struct vm_object *obj;
        vm_page_t pga;
        struct socket *so;
        struct ktls_session *tls;
        struct mbuf_ext_pgs *ext_pgs;
        struct mbuf *m, *mh, *mhtail;
        struct sf_buf *sf;
        struct shmfd *shmfd;
        struct sendfile_sync *sfs;
        struct vattr va;
        off_t off, sbytes, rem, obj_size;
        int bsize, error, ext_pgs_idx, hdrlen, max_pgs, softerr;
        int tls_enq_cnt;
        bool use_ext_pgs = false;

        error = sendfile_getobj(td, fp, &obj, &vp, &shmfd, &obj_size, &bsize);
        if (error != 0)
                return (error);

        error = sendfile_getsock(td, sockfd, &sock_fp, &so);
        if (error != 0)
                goto out;

#ifdef MAC
        error = mac_socket_check_send(td->td_ucred, so);
        if (error != 0)
                goto out;
#endif

        SFSTAT_INC(sf_syscalls);
        SFSTAT_ADD(sf_rhpages_requested, SF_READAHEAD(flags));

        if (flags & SF_SYNC) {
                sfs = malloc(sizeof(*sfs), M_SENDFILE, M_WAITOK | M_ZERO);
                mtx_init(&sfs->mtx, "sendfile", NULL, MTX_DEF);
                cv_init(&sfs->cv, "sendfile");
                sfs->waiting = true;
        }

        rem = nbytes ? omin(nbytes, obj_size - offset) : obj_size - offset;
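        /*
         * A zero 'nbytes' means "send until EOF", so in that case 'rem'
         * covers everything from 'offset' to the end of the object.
         */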

        /*
         * Protect against multiple writers to the socket.
         *
         * XXXRW: Historically this has assumed non-interruptibility, so now
         * we implement that, but possibly shouldn't.
         */
        (void)sblock(&so->so_snd, SBL_WAIT | SBL_NOINTR);
#ifdef KERN_TLS
        tls = ktls_hold(so->so_snd.sb_tls_info);
#endif

        /*
         * Loop through the pages of the file, starting with the requested
         * offset.  Get a file page (do I/O if necessary), map the file page
         * into an sf_buf, attach an mbuf header to the sf_buf, and queue
         * it onto the socket send buffer.
         *
         * This is done in two loops.  The inner loop turns as many pages
         * as it can into mbufs, up to the available socket buffer space,
         * without blocking, so that they can be bulk-delivered into the
         * socket send buffer.  The outer loop checks the state and the
         * available space of the socket and takes care of the overall
         * progress.
         */
        for (off = offset; rem > 0; ) {
                struct sf_io *sfio;
                struct vm_page **pa;
                struct mbuf *m0, *mtail;
                int nios, space, npages, rhpages;

                /*
                 * Check the socket state for an ongoing connection,
                 * no errors, and space in the socket buffer.
                 * If space is low, allow for the remainder of the
                 * file to be processed if it fits the socket buffer.
                 * Otherwise block in waiting for sufficient space
                 * to proceed, or if the socket is nonblocking, return
                 * to userland with EAGAIN while reporting how far
                 * we've come.
                 *
                 * We wait until the socket buffer has significant free
                 * space to do bulk sends.  This makes good use of file
                 * system read-ahead and allows packet segmentation
                 * offloading hardware to take over lots of work.  If
                 * we were not careful here we would send off only one
                 * sbsize chunk at a time.
                 */
                SOCKBUF_LOCK(&so->so_snd);
                if (so->so_snd.sb_lowat < so->so_snd.sb_hiwat / 2)
                        so->so_snd.sb_lowat = so->so_snd.sb_hiwat / 2;
retry_space:
                if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
                        error = EPIPE;
                        SOCKBUF_UNLOCK(&so->so_snd);
                        goto done;
                } else if (so->so_error) {
                        error = so->so_error;
                        so->so_error = 0;
                        SOCKBUF_UNLOCK(&so->so_snd);
                        goto done;
                }
                if ((so->so_state & SS_ISCONNECTED) == 0) {
                        SOCKBUF_UNLOCK(&so->so_snd);
                        error = ENOTCONN;
                        goto done;
                }

                space = sbspace(&so->so_snd);
                if (space < rem &&
                    (space <= 0 ||
                    space < so->so_snd.sb_lowat)) {
                        if (so->so_state & SS_NBIO) {
                                SOCKBUF_UNLOCK(&so->so_snd);
                                error = EAGAIN;
                                goto done;
                        }
                        /*
                         * sbwait drops the lock while sleeping.
                         * When we loop back to retry_space the
                         * state may have changed and we retest
                         * for it.
                         */
                        error = sbwait(&so->so_snd);
                        /*
                         * An error from sbwait usually indicates that we've
                         * been interrupted by a signal.  If we've sent
                         * anything then return bytes sent, otherwise return
                         * the error.
                         */
                        if (error != 0) {
                                SOCKBUF_UNLOCK(&so->so_snd);
                                goto done;
                        }
                        goto retry_space;
                }
                SOCKBUF_UNLOCK(&so->so_snd);

                /*
                 * At the beginning of the first loop check if any headers
                 * are specified and copy them into mbufs.  Reduce space in
                 * the socket buffer by the size of the header mbuf chain.
                 * Clear hdr_uio here and hdrlen at the end of the first loop.
                 */
                if (hdr_uio != NULL && hdr_uio->uio_resid > 0) {
                        hdr_uio->uio_td = td;
                        hdr_uio->uio_rw = UIO_WRITE;
#ifdef KERN_TLS
                        if (tls != NULL)
                                mh = m_uiotombuf(hdr_uio, M_WAITOK, space,
                                    tls->params.max_frame_len, M_NOMAP);
                        else
#endif
                                mh = m_uiotombuf(hdr_uio, M_WAITOK,
                                    space, 0, 0);
                        hdrlen = m_length(mh, &mhtail);
                        space -= hdrlen;
                        /*
                         * If the header consumed all the socket buffer
                         * space, don't waste CPU cycles and jump to the
                         * end.
                         */
                if (vp != NULL) {
                        error = vn_lock(vp, LK_SHARED);
                        if (error != 0)
                                goto done;
                        error = VOP_GETATTR(vp, &va, td->td_ucred);
                        if (error != 0 || off >= va.va_size) {
                                VOP_UNLOCK(vp);
                                goto done;
                        }
                        if (va.va_size != obj_size) {
                                obj_size = va.va_size;
                                rem = nbytes ?
                                    omin(nbytes + offset, obj_size) : obj_size;
                                rem -= off;
                        }
                }

                if (space > rem)
                        space = rem;
                else if (space > PAGE_SIZE) {
                        /*
                         * Use page boundaries when possible for large
                         * requests.
                         */
                        if (off & PAGE_MASK)
                                space -= (PAGE_SIZE - (off & PAGE_MASK));
                        space = trunc_page(space);
                        if (off & PAGE_MASK)
                                space += (PAGE_SIZE - (off & PAGE_MASK));
                }

                npages = howmany(space + (off & PAGE_MASK), PAGE_SIZE);

                /*
                 * Calculate the maximum allowed number of pages for
                 * readahead at this iteration.  If SF_USER_READAHEAD was
                 * set, we don't do any heuristics and use exactly the value
                 * supplied by the application.  Otherwise, we allow
                 * readahead up to "rem".  If the application wants more,
                 * let it be, but there is no reason to go above MAXPHYS.
                 * Also check against "obj_size", since vm_pager_has_page()
                 * can hint beyond EOF.
                 */
                if (flags & SF_USER_READAHEAD) {
                        rhpages = SF_READAHEAD(flags);
                } else {
                        rhpages = howmany(rem + (off & PAGE_MASK),
                            PAGE_SIZE) - npages;
                        rhpages += SF_READAHEAD(flags);
                }
                rhpages = min(howmany(MAXPHYS, PAGE_SIZE), rhpages);
                rhpages = min(howmany(obj_size - trunc_page(off),
                    PAGE_SIZE) - npages, rhpages);

                sfio = malloc(sizeof(struct sf_io) +
                    npages * sizeof(vm_page_t), M_SENDFILE, M_WAITOK);
                refcount_init(&sfio->nios, 1);
                sfio->obj = obj;
                sfio->error = 0;
                sfio->m = NULL;
#ifdef KERN_TLS
                /*
                 * This doesn't use ktls_hold() because sfio->m will
                 * also have a reference on 'tls' that will be valid
                 * for all of sfio's lifetime.
                 */
                sfio->tls = tls;
#endif
                vm_object_pip_add(obj, 1);
                error = sendfile_swapin(obj, sfio, &nios, off, space, npages,
                    rhpages, flags);
                if (error != 0) {
                        if (vp != NULL)
                                VOP_UNLOCK(vp);
                        sendfile_iodone(sfio, NULL, 0, error);
                        goto done;
                }

                /*
                 * Loop and construct the maximum sized mbuf chain to be
                 * bulk dumped into the socket buffer.
                 */
                pa = sfio->pa;

                /*
                 * Use unmapped mbufs if enabled for TCP.  Unmapped mbufs
                 * are restricted to TCP as that is what has been tested.
                 * In particular, unmapped mbufs have not been tested with
                 * UNIX-domain sockets.
                 *
                 * TLS frames always require unmapped mbufs.
                 */
                if ((mb_use_ext_pgs &&
                    so->so_proto->pr_protocol == IPPROTO_TCP)
#ifdef KERN_TLS
                    || tls != NULL
#endif
                    ) {
                        use_ext_pgs = true;
#ifdef KERN_TLS
                        if (tls != NULL)
                                max_pgs = num_pages(tls->params.max_frame_len);
                        else
#endif
                                max_pgs = MBUF_PEXT_MAX_PGS;

                        /* Start at last index, to wrap on first use. */
                        ext_pgs_idx = max_pgs - 1;
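
                        /*
                         * The first pass through the page loop below thus
                         * wraps ext_pgs_idx to max_pgs and allocates the
                         * initial unmapped mbuf before storing any page.
                         */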
                }

                for (int i = 0; i < npages; i++) {
                        /*
                         * If a page wasn't grabbed successfully, then
                         * trim the array.  This can happen only with
                         * SF_NODISKIO.
                         */
                        if (pa[i] == NULL) {
                                SFSTAT_INC(sf_busy);
                                fixspace(npages, i, off, &space);
                                npages = i;
                                softerr = EBUSY;
                                break;
                        }
                        pga = pa[i];
                        if (pga == bogus_page)
                                pga = vm_page_relookup(obj, sfio->pindex0 + i);

                        if (use_ext_pgs) {
                                off_t xfs;

                                ext_pgs_idx++;
                                if (ext_pgs_idx == max_pgs) {
                                        m0 = mb_alloc_ext_pgs(M_WAITOK,
                                            sendfile_free_mext_pg);

                                        if (flags & SF_NOCACHE) {
                                                m0->m_ext.ext_flags |=
                                                    EXT_FLAG_NOCACHE;

                                                /*
                                                 * See comment below
                                                 * regarding ignoring
                                                 * SF_NOCACHE for the
                                                 * last page.
                                                 */
                                                if ((npages - i <= max_pgs) &&
                                                    ((off + space) & PAGE_MASK) &&
                                                    (rem > space || rhpages > 0))
                                                        m0->m_ext.ext_flags |=
                                                            EXT_FLAG_CACHE_LAST;
                                        }
                                        if (sfs != NULL) {
                                                m0->m_ext.ext_flags |=
                                                    EXT_FLAG_SYNC;
                                                if (m0->m_ext.ext_type ==
                                                    EXT_PGS)
                                                        m0->m_ext.ext_arg1 =
                                                            sfs;
                                                else
                                                        m0->m_ext.ext_arg2 =
                                                            sfs;
                                                mtx_lock(&sfs->mtx);
                                                sfs->count++;
                                                mtx_unlock(&sfs->mtx);
                                        }
                                        ext_pgs = &m0->m_ext_pgs;

                                        /* Append to mbuf chain. */
                                        if (mtail != NULL)
                                                mtail->m_next = m0;
                                        else
                                                m = m0;
                                        mtail = m0;

                                        ext_pgs->first_pg_off =
                                            vmoff(i, off) & PAGE_MASK;
                                        ext_pgs_idx = 0;

                                        mtail->m_flags |= M_NOTREADY;
                                }

                                ext_pgs->m_epg_pa[ext_pgs_idx] = VM_PAGE_TO_PHYS(pga);
                                ext_pgs->npgs++;
                                xfs = xfsize(i, npages, off, space);
                                ext_pgs->last_pg_len = xfs;
                                MBUF_EXT_PGS_ASSERT_SANITY(ext_pgs);
                                mtail->m_len += xfs;
                                mtail->m_ext.ext_size += PAGE_SIZE;
                                continue;
                        }

                        /*
                         * Get a sendfile buf.  When allocating the
                         * first buffer for mbuf chain, we usually
                         * wait as long as necessary, but this wait
                         * can be interrupted.  For subsequent
                         * buffers, do not sleep, since several
                         * threads might exhaust the buffers and then
                         * deadlock.
                         */
                        sf = sf_buf_alloc(pga,
                            m != NULL ? SFB_NOWAIT : SFB_CATCH);
                        if (sf == NULL) {
                                SFSTAT_INC(sf_allocfail);
                                sendfile_iowait(sfio, "sfnosf");
                                for (int j = i; j < npages; j++)
                                        vm_page_unwire(pa[j], PQ_INACTIVE);
                                if (m == NULL)
                                        softerr = ENOBUFS;
                                fixspace(npages, i, off, &space);
                                npages = i;
                                break;
                        }

                        m0 = m_get(M_WAITOK, MT_DATA);
                        m0->m_ext.ext_buf = (char *)sf_buf_kva(sf);
                        m0->m_ext.ext_size = PAGE_SIZE;
                        m0->m_ext.ext_arg1 = sf;
                        m0->m_ext.ext_type = EXT_SFBUF;
                        m0->m_ext.ext_flags = EXT_FLAG_EMBREF;
                        m0->m_ext.ext_free = sendfile_free_mext;
                        /*
                         * SF_NOCACHE sets the page as being freed upon send.
                         * However, we ignore it for the last page in
                         * 'space', if the page is truncated, and we got
                         * more data to send (rem > space), or if we have
                         * readahead configured (rhpages > 0).
                         */
                        if ((flags & SF_NOCACHE) &&
                            (i != npages - 1 ||
                            !((off + space) & PAGE_MASK) ||
                            !(rem > space || rhpages > 0)))
                                m0->m_ext.ext_flags |= EXT_FLAG_NOCACHE;
                        if (sfs != NULL) {
                                m0->m_ext.ext_flags |= EXT_FLAG_SYNC;
                                if (m0->m_ext.ext_type == EXT_PGS)
                                        m0->m_ext.ext_arg1 = sfs;
                                else
                                        m0->m_ext.ext_arg2 = sfs;
                                mtx_lock(&sfs->mtx);
                                sfs->count++;
                                mtx_unlock(&sfs->mtx);
                        }
                        m0->m_ext.ext_count = 1;
                        m0->m_flags |= (M_EXT | M_RDONLY);
                        if (nios)
                                m0->m_flags |= M_NOTREADY;
                        m0->m_data = (char *)sf_buf_kva(sf) +
                            (vmoff(i, off) & PAGE_MASK);
                        m0->m_len = xfsize(i, npages, off, space);

                        /* Append to mbuf chain. */
                        if (mtail != NULL)
                                mtail->m_next = m0;
                        else
                                m = m0;
                        mtail = m0;
                }

                if (vp != NULL)
                        VOP_UNLOCK(vp);

                /* Keep track of bytes processed. */
                off += space;
                rem -= space;

                /*
                 * Prepend header, if any.  Save pointer to first mbuf
                 * with a data payload.
                 */
                if (hdrlen) {
                        m0 = mhtail->m_next = m;
                        m = mh;
                        mh = NULL;
                }

                if (m == NULL) {
                        KASSERT(softerr, ("%s: m NULL, no error", __func__));
                        error = softerr;
                        sendfile_iodone(sfio, NULL, 0, 0);
                        goto done;
                }

                /* Add the buffer chain to the socket buffer. */
                KASSERT(m_length(m, NULL) == space + hdrlen,
                    ("%s: mlen %u space %d hdrlen %d",
                    __func__, m_length(m, NULL), space, hdrlen));

                CURVNET_SET(so->so_vnet);
#ifdef KERN_TLS
                if (tls != NULL)
                        ktls_frame(m, tls, &tls_enq_cnt, TLS_RLTYPE_APP);
#endif
                if (nios == 0) {
                        /*
                         * If sendfile_swapin() didn't initiate any I/Os,
                         * which happens if all data is cached in VM, or if
                         * the header consumed all socket buffer space and
                         * sfio is NULL, then we can send data right now
                         * without the PRUS_NOTREADY flag.
                         */
                        if (sfio != NULL)
                                sendfile_iodone(sfio, NULL, 0, 0);
#ifdef KERN_TLS
                        if (tls != NULL && tls->mode == TCP_TLS_MODE_SW) {
                                error = (*so->so_proto->pr_usrreqs->pru_send)
                                    (so, PRUS_NOTREADY, m, NULL, NULL, td);
                                soref(so);
                                ktls_enqueue(m, so, tls_enq_cnt);
                        } else
#endif
                                error = (*so->so_proto->pr_usrreqs->pru_send)
                                    (so, 0, m, NULL, NULL, td);
                } else {
                        sfio->so = so;
                        sfio->m = m;
                        sfio->npages = npages;
                        soref(so);
                        error = (*so->so_proto->pr_usrreqs->pru_send)
                            (so, PRUS_NOTREADY, m, NULL, NULL, td);
                        sendfile_iodone(sfio, NULL, 0, 0);
                }
                CURVNET_RESTORE();

                m = NULL;       /* pru_send always consumes */
                if (error)
                        goto done;
                sbytes += space + hdrlen;
                hdrlen = 0;
        }

        /*
         * Send trailers.  Wimp out and use writev(2).
         */
        if (trl_uio != NULL) {
                sbunlock(&so->so_snd);
                error = kern_writev(td, sockfd, trl_uio);
                if (error == 0)
                        sbytes += td->td_retval[0];
                goto out;
        }

done:
        sbunlock(&so->so_snd);
out:
        /*
         * If there was no error we have to clear td->td_retval[0]
         * because it may have been set by writev.
         */
        if (error == 0)
                td->td_retval[0] = 0;
        if (obj != NULL)
                vm_object_deallocate(obj);
        if (sfs != NULL) {
                mtx_lock(&sfs->mtx);
                if (sfs->count != 0)
                        error = cv_wait_sig(&sfs->cv, &sfs->mtx);
                if (sfs->count == 0) {
                        sendfile_sync_destroy(sfs);
                } else {
                        sfs->waiting = false;
                        mtx_unlock(&sfs->mtx);
                }
        }

        if (error == ERESTART)
                error = EINTR;

        return (error);
}

static int
sendfile(struct thread *td, struct sendfile_args *uap, int compat)
{
        struct sf_hdtr hdtr;
        struct uio *hdr_uio, *trl_uio;
        struct file *fp;
        off_t sbytes;
        int error;

        /*
         * File offset must be positive.  If it goes beyond EOF
         * we send only the header/trailer and no payload data.
         */
        if (uap->offset < 0)
                return (EINVAL);

        hdr_uio = trl_uio = NULL;

        if (uap->hdtr != NULL) {
                error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
                if (error != 0)
                        goto out;
                if (hdtr.headers != NULL) {
                        error = copyinuio(hdtr.headers, hdtr.hdr_cnt,
                            &hdr_uio);
                        if (error != 0)
                                goto out;
#ifdef COMPAT_FREEBSD4
                        /*
                         * In FreeBSD < 5.0 the nbytes to send also included
                         * the header.  If compat is specified, subtract the
                         * header size from nbytes.
                         */
                        if (compat) {
                                if (uap->nbytes > hdr_uio->uio_resid)
                                        uap->nbytes -= hdr_uio->uio_resid;
                                else
                                        uap->nbytes = 0;
                        }
#endif
                }
                if (hdtr.trailers != NULL) {
                        error = copyinuio(hdtr.trailers, hdtr.trl_cnt,
                            &trl_uio);
                        if (error != 0)
                                goto out;
                }
        }

        AUDIT_ARG_FD(uap->fd);

        /*
         * sendfile(2) can start at any offset within a file, so we require
         * CAP_READ+CAP_SEEK = CAP_PREAD.
         */
        if ((error = fget_read(td, uap->fd, &cap_pread_rights, &fp)) != 0)
                goto out;

        error = fo_sendfile(fp, uap->s, hdr_uio, trl_uio, uap->offset,
            uap->nbytes, &sbytes, uap->flags, td);
        fdrop(fp, td);

        if (uap->sbytes != NULL)
                copyout(&sbytes, uap->sbytes, sizeof(off_t));

out:
        free(hdr_uio, M_IOV);
        free(trl_uio, M_IOV);
        return (error);
}

/*
 * sendfile(2)
 *
 * int sendfile(int fd, int s, off_t offset, size_t nbytes,
 *     struct sf_hdtr *hdtr, off_t *sbytes, int flags)
 *
 * Send a file specified by 'fd' and starting at 'offset' to a socket
 * specified by 's'.  Send only 'nbytes' of the file or until EOF if
 * nbytes == 0.  Optionally add a header and/or trailer to the socket
 * output.  If specified, write the total number of bytes sent into *sbytes.
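 *
 * A minimal userland sketch (illustrative only; 'filefd' is an open
 * regular file and 'sock' a connected stream socket).  On failure
 * *sbytes still reports how much was sent:
 *
 *         off_t sbytes;
 *
 *         if (sendfile(filefd, sock, 0, 0, NULL, &sbytes, 0) == -1)
 *                 err(1, "sendfile");
 */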
int
sys_sendfile(struct thread *td, struct sendfile_args *uap)
{

        return (sendfile(td, uap, 0));
}

#ifdef COMPAT_FREEBSD4
int
freebsd4_sendfile(struct thread *td, struct freebsd4_sendfile_args *uap)
{
        struct sendfile_args args;

        args.fd = uap->fd;
        args.s = uap->s;
        args.offset = uap->offset;
        args.nbytes = uap->nbytes;
        args.hdtr = uap->hdtr;
        args.sbytes = uap->sbytes;
        args.flags = uap->flags;

        return (sendfile(td, &args, 1));
}
#endif /* COMPAT_FREEBSD4 */