2 * Copyright (c) 1982, 1986, 1989, 1990, 1993
3 * The Regents of the University of California. All rights reserved.
5 * sendfile(2) and related extensions:
6 * Copyright (c) 1998, David Greenman. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * @(#)uipc_syscalls.c 8.4 (Berkeley) 2/21/94
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include "opt_capsicum.h"
40 #include "opt_inet6.h"
41 #include "opt_compat.h"
42 #include "opt_ktrace.h"
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/capsicum.h>
47 #include <sys/condvar.h>
48 #include <sys/kernel.h>
50 #include <sys/mutex.h>
51 #include <sys/sysproto.h>
52 #include <sys/malloc.h>
53 #include <sys/filedesc.h>
54 #include <sys/event.h>
56 #include <sys/fcntl.h>
58 #include <sys/filio.h>
61 #include <sys/mount.h>
63 #include <sys/protosw.h>
64 #include <sys/rwlock.h>
65 #include <sys/sf_buf.h>
66 #include <sys/sysent.h>
67 #include <sys/socket.h>
68 #include <sys/socketvar.h>
69 #include <sys/signalvar.h>
70 #include <sys/syscallsubr.h>
71 #include <sys/sysctl.h>
73 #include <sys/vnode.h>
75 #include <sys/ktrace.h>
77 #ifdef COMPAT_FREEBSD32
78 #include <compat/freebsd32/freebsd32_util.h>
83 #include <security/audit/audit.h>
84 #include <security/mac/mac_framework.h>
87 #include <vm/vm_param.h>
88 #include <vm/vm_object.h>
89 #include <vm/vm_page.h>
90 #include <vm/vm_pager.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_extern.h>
96 * Flags for accept1() and kern_accept4(), in addition to SOCK_CLOEXEC
99 #define ACCEPT4_INHERIT 0x1
100 #define ACCEPT4_COMPAT 0x2
102 static int sendit(struct thread *td, int s, struct msghdr *mp, int flags);
103 static int recvit(struct thread *td, int s, struct msghdr *mp, void *namelenp);
105 static int accept1(struct thread *td, int s, struct sockaddr *uname,
106 socklen_t *anamelen, int flags);
107 static int do_sendfile(struct thread *td, struct sendfile_args *uap,
109 static int getsockname1(struct thread *td, struct getsockname_args *uap,
111 static int getpeername1(struct thread *td, struct getpeername_args *uap,
114 counter_u64_t sfstat[sizeof(struct sfstat) / sizeof(uint64_t)];
117 * sendfile(2)-related variables and associated sysctls
119 static SYSCTL_NODE(_kern_ipc, OID_AUTO, sendfile, CTLFLAG_RW, 0,
120 "sendfile(2) tunables");
121 static int sfreadahead = 1;
122 SYSCTL_INT(_kern_ipc_sendfile, OID_AUTO, readahead, CTLFLAG_RW,
123 &sfreadahead, 0, "Number of sendfile(2) read-ahead MAXBSIZE blocks");
126 sfstat_init(const void *unused)
129 COUNTER_ARRAY_ALLOC(sfstat, sizeof(struct sfstat) / sizeof(uint64_t),
132 SYSINIT(sfstat, SI_SUB_MBUF, SI_ORDER_FIRST, sfstat_init, NULL);
135 sfstat_sysctl(SYSCTL_HANDLER_ARGS)
139 COUNTER_ARRAY_COPY(sfstat, &s, sizeof(s) / sizeof(uint64_t));
141 COUNTER_ARRAY_ZERO(sfstat, sizeof(s) / sizeof(uint64_t));
142 return (SYSCTL_OUT(req, &s, sizeof(s)));
144 SYSCTL_PROC(_kern_ipc, OID_AUTO, sfstat, CTLTYPE_OPAQUE | CTLFLAG_RW,
145 NULL, 0, sfstat_sysctl, "I", "sendfile statistics");
148 * Convert a user file descriptor to a kernel file entry and check if required
149 * capability rights are present.
150 * A reference on the file entry is held upon returning.
/*
 * getsock_cap - convert a user fd into a held socket file entry after
 * verifying the requested capability rights.
 * NOTE(review): this extract is incomplete; several original body lines
 * (error checks, returns) are missing from this view.
 */
153 getsock_cap(struct thread *td, int fd, cap_rights_t *rightsp,
154 struct file **fpp, u_int *fflagp)
/* Lock-free descriptor lookup; rightsp is checked by fget_unlocked(). */
159 error = fget_unlocked(td->td_proc->p_fd, fd, rightsp, &fp, NULL);
/* Reject non-socket descriptors (presumably ENOTSOCK -- confirm). */
162 if (fp->f_type != DTYPE_SOCKET) {
/* Report the open-file flags to the caller when requested. */
167 *fflagp = fp->f_flag;
173 * System call interface to the socket abstraction.
175 #if defined(COMPAT_43)
176 #define COMPAT_OLDSOCK
182 struct socket_args /* {
190 int fd, error, type, oflag, fflag;
192 AUDIT_ARG_SOCKET(uap->domain, uap->type, uap->protocol);
197 if ((type & SOCK_CLOEXEC) != 0) {
198 type &= ~SOCK_CLOEXEC;
201 if ((type & SOCK_NONBLOCK) != 0) {
202 type &= ~SOCK_NONBLOCK;
207 error = mac_socket_check_create(td->td_ucred, uap->domain, type,
212 error = falloc(td, &fp, &fd, oflag);
215 /* An extra reference on `fp' has been held for us by falloc(). */
216 error = socreate(uap->domain, &so, type, uap->protocol,
221 finit(fp, FREAD | FWRITE | fflag, DTYPE_SOCKET, so, &socketops);
222 if ((fflag & FNONBLOCK) != 0)
223 (void) fo_ioctl(fp, FIONBIO, &fflag, td->td_ucred, td);
224 td->td_retval[0] = fd;
234 struct bind_args /* {
243 error = getsockaddr(&sa, uap->name, uap->namelen);
245 error = kern_bindat(td, AT_FDCWD, uap->s, sa);
/*
 * kern_bindat - kernel entry point shared by bind(2) and bindat(2).
 * Resolves fd with CAP_BIND rights, runs audit/ktrace/MAC hooks, then
 * dispatches to sobind() or sobindat() depending on dirfd.
 * NOTE(review): extraction gaps -- error unwinding not visible here.
 */
252 kern_bindat(struct thread *td, int dirfd, int fd, struct sockaddr *sa)
/* Record the sockaddr for auditing first. */
260 AUDIT_ARG_SOCKADDR(td, dirfd, sa);
/* CAP_BIND is required on the descriptor. */
261 error = getsock_cap(td, fd, cap_rights_init(&rights, CAP_BIND),
/* ktrace the sockaddr when struct tracing is enabled. */
267 if (KTRPOINT(td, KTR_STRUCT))
/* MAC framework may veto the bind. */
271 error = mac_socket_check_bind(td->td_ucred, so, sa);
/* AT_FDCWD means plain bind(2); otherwise bind relative to dirfd. */
274 if (dirfd == AT_FDCWD)
275 error = sobind(so, sa, td);
277 error = sobindat(dirfd, so, sa, td);
289 struct bindat_args /* {
299 error = getsockaddr(&sa, uap->name, uap->namelen);
301 error = kern_bindat(td, uap->fd, uap->s, sa);
311 struct listen_args /* {
321 AUDIT_ARG_FD(uap->s);
322 error = getsock_cap(td, uap->s, cap_rights_init(&rights, CAP_LISTEN),
327 error = mac_socket_check_listen(td->td_ucred, so);
330 error = solisten(so, uap->backlog, td);
340 accept1(td, s, uname, anamelen, flags)
343 struct sockaddr *uname;
347 struct sockaddr *name;
353 return (kern_accept4(td, s, NULL, NULL, flags, NULL));
355 error = copyin(anamelen, &namelen, sizeof (namelen));
359 error = kern_accept4(td, s, &name, &namelen, flags, &fp);
364 if (error == 0 && uname != NULL) {
365 #ifdef COMPAT_OLDSOCK
366 if (flags & ACCEPT4_COMPAT)
367 ((struct osockaddr *)name)->sa_family =
370 error = copyout(name, uname, namelen);
373 error = copyout(&namelen, anamelen,
376 fdclose(td, fp, td->td_retval[0]);
378 free(name, M_SONAME);
383 kern_accept(struct thread *td, int s, struct sockaddr **name,
384 socklen_t *namelen, struct file **fp)
386 return (kern_accept4(td, s, name, namelen, ACCEPT4_INHERIT, fp));
/*
 * kern_accept4 - core of accept(2)/accept4(2): wait for a completed
 * connection on listening socket `s', allocate a new descriptor for it,
 * and optionally return the peer address and file pointer to the caller.
 * NOTE(review): many original lines (locking, error paths, labels) are
 * elided from this extract; comments below describe only visible code.
 */
390 kern_accept4(struct thread *td, int s, struct sockaddr **name,
391 socklen_t *namelen, int flags, struct file **fp)
393 struct file *headfp, *nfp = NULL;
394 struct sockaddr *sa = NULL;
395 struct socket *head, *so;
/* CAP_ACCEPT is required on the listening descriptor. */
405 error = getsock_cap(td, s, cap_rights_init(&rights, CAP_ACCEPT),
409 head = headfp->f_data;
/* The socket must actually be listening. */
410 if ((head->so_options & SO_ACCEPTCONN) == 0) {
415 error = mac_socket_check_accept(td->td_ucred, head);
/* Allocate the new descriptor up front; honor SOCK_CLOEXEC. */
419 error = falloc(td, &nfp, &fd, (flags & SOCK_CLOEXEC) ? O_CLOEXEC : 0);
/* Non-blocking listener with an empty completion queue: bail early. */
423 if ((head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->so_comp)) {
/* Sleep until a connection completes or an error is posted. */
428 while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0) {
429 if (head->so_rcv.sb_state & SBS_CANTRCVMORE) {
430 head->so_error = ECONNABORTED;
433 error = msleep(&head->so_timeo, &accept_mtx, PSOCK | PCATCH,
/* Propagate a pending socket error, consuming it. */
440 if (head->so_error) {
441 error = head->so_error;
/* Dequeue the first completed connection. */
446 so = TAILQ_FIRST(&head->so_comp);
447 KASSERT(!(so->so_qstate & SQ_INCOMP), ("accept1: so SQ_INCOMP"));
448 KASSERT(so->so_qstate & SQ_COMP, ("accept1: so not SQ_COMP"));
451 * Before changing the flags on the socket, we have to bump the
452 * reference count. Otherwise, if the protocol calls sofree(),
453 * the socket will be released due to a zero refcount.
455 SOCK_LOCK(so); /* soref() and so_state update */
456 soref(so); /* file descriptor reference */
458 TAILQ_REMOVE(&head->so_comp, so, so_list);
/* ACCEPT4_INHERIT (classic accept) copies O_NONBLOCK from the listener;
 * otherwise accept4() semantics use the caller-supplied flags. */
460 if (flags & ACCEPT4_INHERIT)
461 so->so_state |= (head->so_state & SS_NBIO);
463 so->so_state |= (flags & SOCK_NONBLOCK) ? SS_NBIO : 0;
464 so->so_qstate &= ~SQ_COMP;
470 /* An extra reference on `nfp' has been held for us by falloc(). */
471 td->td_retval[0] = fd;
473 /* connection has been removed from the listen queue */
474 KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0);
/* Inherit SIGIO ownership from the listener in compat mode. */
476 if (flags & ACCEPT4_INHERIT) {
477 pgid = fgetown(&head->so_sigio);
479 fsetown(pgid, &so->so_sigio);
481 fflag &= ~(FNONBLOCK | FASYNC);
482 if (flags & SOCK_NONBLOCK)
486 finit(nfp, fflag, DTYPE_SOCKET, so, &socketops);
487 /* Sync socket nonblocking/async state with file flags */
488 tmp = fflag & FNONBLOCK;
489 (void) fo_ioctl(nfp, FIONBIO, &tmp, td->td_ucred, td);
490 tmp = fflag & FASYNC;
491 (void) fo_ioctl(nfp, FIOASYNC, &tmp, td->td_ucred, td);
/* Let the protocol finish the accept and produce the peer address. */
493 error = soaccept(so, &sa);
501 AUDIT_ARG_SOCKADDR(td, AT_FDCWD, sa);
503 /* check sa_len before it is destroyed */
/* Clamp the returned namelen to the actual address length. */
504 if (*namelen > sa->sa_len)
505 *namelen = sa->sa_len;
507 if (KTRPOINT(td, KTR_STRUCT))
517 * close the new descriptor, assuming someone hasn't ripped it
/* Error unwinding: drop the freshly allocated descriptor. */
521 fdclose(td, nfp, fd);
524 * Release explicitly held references before returning. We return
525 * a reference on nfp to the caller on success if they request it.
544 struct accept_args *uap;
547 return (accept1(td, uap->s, uap->name, uap->anamelen, ACCEPT4_INHERIT));
553 struct accept4_args *uap;
556 if (uap->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
559 return (accept1(td, uap->s, uap->name, uap->anamelen, uap->flags));
562 #ifdef COMPAT_OLDSOCK
566 struct accept_args *uap;
569 return (accept1(td, uap->s, uap->name, uap->anamelen,
570 ACCEPT4_INHERIT | ACCEPT4_COMPAT));
572 #endif /* COMPAT_OLDSOCK */
578 struct connect_args /* {
587 error = getsockaddr(&sa, uap->name, uap->namelen);
589 error = kern_connectat(td, AT_FDCWD, uap->s, sa);
/*
 * kern_connectat - kernel entry point shared by connect(2) and
 * connectat(2).  Initiates the connection and, for blocking sockets,
 * sleeps until it completes, fails, or is interrupted.
 * NOTE(review): extraction gaps -- locking and cleanup lines missing.
 */
596 kern_connectat(struct thread *td, int dirfd, int fd, struct sockaddr *sa)
601 int error, interrupted = 0;
604 AUDIT_ARG_SOCKADDR(td, dirfd, sa);
/* CAP_CONNECT is required on the descriptor. */
605 error = getsock_cap(td, fd, cap_rights_init(&rights, CAP_CONNECT),
/* A connect is already in flight (presumably EALREADY -- confirm). */
610 if (so->so_state & SS_ISCONNECTING) {
615 if (KTRPOINT(td, KTR_STRUCT))
619 error = mac_socket_check_connect(td->td_ucred, so, sa);
/* AT_FDCWD means plain connect(2); otherwise relative to dirfd. */
623 if (dirfd == AT_FDCWD)
624 error = soconnect(so, sa, td);
626 error = soconnectat(dirfd, so, sa, td);
/* Non-blocking socket still connecting: return (likely EINPROGRESS). */
629 if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
/* Blocking case: sleep until connected or an error is posted. */
634 while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
635 error = msleep(&so->so_timeo, SOCK_MTX(so), PSOCK | PCATCH,
/* Signal interrupted the wait; remember it for cleanup below. */
638 if (error == EINTR || error == ERESTART)
644 error = so->so_error;
/* On interruption the connecting state is cleared by hand. */
650 so->so_state &= ~SS_ISCONNECTING;
/* connect(2) is historically not restartable. */
651 if (error == ERESTART)
660 sys_connectat(td, uap)
662 struct connectat_args /* {
672 error = getsockaddr(&sa, uap->name, uap->namelen);
674 error = kern_connectat(td, uap->fd, uap->s, sa);
/*
 * kern_socketpair - create two connected sockets and install them as a
 * pair of descriptors (socketpair(2) backend).  Handles SOCK_CLOEXEC /
 * SOCK_NONBLOCK flags embedded in the type argument.
 * NOTE(review): extraction gaps -- goto cleanup labels and returns are
 * not visible in this view.
 */
681 kern_socketpair(struct thread *td, int domain, int type, int protocol,
684 struct file *fp1, *fp2;
685 struct socket *so1, *so2;
686 int fd, error, oflag, fflag;
688 AUDIT_ARG_SOCKET(domain, type, protocol);
/* Strip the creation flags out of `type' before socreate(). */
692 if ((type & SOCK_CLOEXEC) != 0) {
693 type &= ~SOCK_CLOEXEC;
696 if ((type & SOCK_NONBLOCK) != 0) {
697 type &= ~SOCK_NONBLOCK;
701 /* We might want to have a separate check for socket pairs. */
702 error = mac_socket_check_create(td->td_ucred, domain, type,
/* Create both endpoints before wiring up descriptors. */
707 error = socreate(domain, &so1, type, protocol, td->td_ucred, td);
710 error = socreate(domain, &so2, type, protocol, td->td_ucred, td);
713 /* On success extra reference to `fp1' and 'fp2' is set by falloc. */
714 error = falloc(td, &fp1, &fd, oflag);
718 fp1->f_data = so1; /* so1 already has ref count */
719 error = falloc(td, &fp2, &fd, oflag);
722 fp2->f_data = so2; /* so2 already has ref count */
/* Connect the two endpoints to each other. */
724 error = soconnect2(so1, so2);
727 if (type == SOCK_DGRAM) {
729 * Datagram socket connection is asymmetric.
731 error = soconnect2(so2, so1);
735 finit(fp1, FREAD | FWRITE | fflag, DTYPE_SOCKET, fp1->f_data,
737 finit(fp2, FREAD | FWRITE | fflag, DTYPE_SOCKET, fp2->f_data,
/* SOCK_NONBLOCK: push FNONBLOCK down into both sockets. */
739 if ((fflag & FNONBLOCK) != 0) {
740 (void) fo_ioctl(fp1, FIONBIO, &fflag, td->td_ucred, td);
741 (void) fo_ioctl(fp2, FIONBIO, &fflag, td->td_ucred, td);
/* Error unwinding: close descriptors in reverse order of creation. */
747 fdclose(td, fp2, rsv[1]);
750 fdclose(td, fp1, rsv[0]);
762 sys_socketpair(struct thread *td, struct socketpair_args *uap)
766 error = kern_socketpair(td, uap->domain, uap->type,
770 error = copyout(sv, uap->rsv, 2 * sizeof(int));
772 (void)kern_close(td, sv[0]);
773 (void)kern_close(td, sv[1]);
779 sendit(td, s, mp, flags)
785 struct mbuf *control;
789 #ifdef CAPABILITY_MODE
790 if (IN_CAPABILITY_MODE(td) && (mp->msg_name != NULL))
794 if (mp->msg_name != NULL) {
795 error = getsockaddr(&to, mp->msg_name, mp->msg_namelen);
805 if (mp->msg_control) {
806 if (mp->msg_controllen < sizeof(struct cmsghdr)
807 #ifdef COMPAT_OLDSOCK
808 && mp->msg_flags != MSG_COMPAT
814 error = sockargs(&control, mp->msg_control,
815 mp->msg_controllen, MT_CONTROL);
818 #ifdef COMPAT_OLDSOCK
819 if (mp->msg_flags == MSG_COMPAT) {
822 M_PREPEND(control, sizeof(*cm), M_WAITOK);
823 cm = mtod(control, struct cmsghdr *);
824 cm->cmsg_len = control->m_len;
825 cm->cmsg_level = SOL_SOCKET;
826 cm->cmsg_type = SCM_RIGHTS;
833 error = kern_sendit(td, s, mp, flags, control, UIO_USERSPACE);
/*
 * kern_sendit - common backend for the send*(2) family.  Builds a uio
 * from the msghdr, runs capability/MAC/audit checks, calls sosend(),
 * and handles SIGPIPE and ktrace I/O tracing.
 * NOTE(review): K&R-style definition; several original lines (declares,
 * returns, cleanup) are elided from this extract.
 */
841 kern_sendit(td, s, mp, flags, control, segflg)
846 struct mbuf *control;
855 struct uio *ktruio = NULL;
/* CAP_SEND always; sending to an explicit address also needs
 * CAP_CONNECT (sendto-style implicit connect). */
861 cap_rights_init(&rights, CAP_SEND);
862 if (mp->msg_name != NULL) {
863 AUDIT_ARG_SOCKADDR(td, AT_FDCWD, mp->msg_name);
864 cap_rights_set(&rights, CAP_CONNECT);
866 error = getsock_cap(td, s, &rights, &fp, NULL);
869 so = (struct socket *)fp->f_data;
872 if (mp->msg_name != NULL && KTRPOINT(td, KTR_STRUCT))
873 ktrsockaddr(mp->msg_name);
/* MAC: addressed sends are checked as connects, others as sends. */
876 if (mp->msg_name != NULL) {
877 error = mac_socket_check_connect(td->td_ucred, so,
882 error = mac_socket_check_send(td->td_ucred, so);
/* Assemble the uio describing the caller's iovec array. */
887 auio.uio_iov = mp->msg_iov;
888 auio.uio_iovcnt = mp->msg_iovlen;
889 auio.uio_segflg = segflg;
890 auio.uio_rw = UIO_WRITE;
892 auio.uio_offset = 0; /* XXX */
/* Sum iov lengths, rejecting overflow into a negative resid. */
895 for (i = 0; i < mp->msg_iovlen; i++, iov++) {
896 if ((auio.uio_resid += iov->iov_len) < 0) {
902 if (KTRPOINT(td, KTR_GENIO))
903 ktruio = cloneuio(&auio);
905 len = auio.uio_resid;
906 error = sosend(so, mp->msg_name, &auio, 0, control, flags, td);
/* A partial transfer before interruption still counts as success. */
908 if (auio.uio_resid != len && (error == ERESTART ||
909 error == EINTR || error == EWOULDBLOCK))
911 /* Generation of SIGPIPE can be controlled per socket */
912 if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
913 !(flags & MSG_NOSIGNAL)) {
914 PROC_LOCK(td->td_proc);
915 tdsignal(td, SIGPIPE);
916 PROC_UNLOCK(td->td_proc);
/* Return the number of bytes actually sent. */
920 td->td_retval[0] = len - auio.uio_resid;
922 if (ktruio != NULL) {
923 ktruio->uio_resid = td->td_retval[0];
924 ktrgenio(s, UIO_WRITE, ktruio, error);
935 struct sendto_args /* {
947 msg.msg_name = uap->to;
948 msg.msg_namelen = uap->tolen;
952 #ifdef COMPAT_OLDSOCK
955 aiov.iov_base = uap->buf;
956 aiov.iov_len = uap->len;
957 return (sendit(td, uap->s, &msg, uap->flags));
960 #ifdef COMPAT_OLDSOCK
964 struct osend_args /* {
978 aiov.iov_base = uap->buf;
979 aiov.iov_len = uap->len;
982 return (sendit(td, uap->s, &msg, uap->flags));
988 struct osendmsg_args /* {
998 error = copyin(uap->msg, &msg, sizeof (struct omsghdr));
1001 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1005 msg.msg_flags = MSG_COMPAT;
1006 error = sendit(td, uap->s, &msg, uap->flags);
1013 sys_sendmsg(td, uap)
1015 struct sendmsg_args /* {
1025 error = copyin(uap->msg, &msg, sizeof (msg));
1028 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1032 #ifdef COMPAT_OLDSOCK
1035 error = sendit(td, uap->s, &msg, uap->flags);
/*
 * kern_recvit - common backend for the recv*(2) family.  Receives data
 * and optional control messages via soreceive(), then copies the source
 * address and control data back to the caller (user or kernel space).
 * NOTE(review): K&R-style definition; many original lines are elided
 * from this extract, so control flow below is only partially visible.
 */
1041 kern_recvit(td, s, mp, fromseg, controlp)
1045 enum uio_seg fromseg;
1046 struct mbuf **controlp;
1050 struct mbuf *m, *control = NULL;
1054 struct sockaddr *fromsa = NULL;
1055 cap_rights_t rights;
1057 struct uio *ktruio = NULL;
1062 if (controlp != NULL)
/* CAP_RECV is required on the descriptor. */
1066 error = getsock_cap(td, s, cap_rights_init(&rights, CAP_RECV),
1073 error = mac_socket_check_receive(td->td_ucred, so);
/* Build the uio describing the caller's iovec array. */
1080 auio.uio_iov = mp->msg_iov;
1081 auio.uio_iovcnt = mp->msg_iovlen;
1082 auio.uio_segflg = UIO_USERSPACE;
1083 auio.uio_rw = UIO_READ;
1085 auio.uio_offset = 0; /* XXX */
/* Sum iov lengths, rejecting overflow into a negative resid. */
1088 for (i = 0; i < mp->msg_iovlen; i++, iov++) {
1089 if ((auio.uio_resid += iov->iov_len) < 0) {
1095 if (KTRPOINT(td, KTR_GENIO))
1096 ktruio = cloneuio(&auio);
1098 len = auio.uio_resid;
/* Only ask for control data if the caller wants it. */
1099 error = soreceive(so, &fromsa, &auio, NULL,
1100 (mp->msg_control || controlp) ? &control : NULL,
/* Partial transfer before interruption still counts as success. */
1103 if (auio.uio_resid != len && (error == ERESTART ||
1104 error == EINTR || error == EWOULDBLOCK))
1108 AUDIT_ARG_SOCKADDR(td, AT_FDCWD, fromsa);
1110 if (ktruio != NULL) {
1111 ktruio->uio_resid = len - auio.uio_resid;
1112 ktrgenio(s, UIO_READ, ktruio, error);
/* Bytes received is the syscall return value. */
1117 td->td_retval[0] = len - auio.uio_resid;
/* Copy out the source address, clamped to the caller's buffer. */
1119 len = mp->msg_namelen;
1120 if (len <= 0 || fromsa == NULL)
1123 /* save sa_len before it is destroyed by MSG_COMPAT */
1124 len = MIN(len, fromsa->sa_len);
1125 #ifdef COMPAT_OLDSOCK
1126 if (mp->msg_flags & MSG_COMPAT)
1127 ((struct osockaddr *)fromsa)->sa_family =
/* fromseg selects copyout() vs in-kernel bcopy() for the address. */
1130 if (fromseg == UIO_USERSPACE) {
1131 error = copyout(fromsa, mp->msg_name,
1136 bcopy(fromsa, mp->msg_name, len);
1138 mp->msg_namelen = len;
/* Userland wants control data copied out (controlp == NULL case). */
1140 if (mp->msg_control && controlp == NULL) {
1141 #ifdef COMPAT_OLDSOCK
1143 * We assume that old recvmsg calls won't receive access
1144 * rights and other control info, esp. as control info
1145 * is always optional and those options didn't exist in 4.3.
1146 * If we receive rights, trim the cmsghdr; anything else
1149 if (control && mp->msg_flags & MSG_COMPAT) {
1150 if (mtod(control, struct cmsghdr *)->cmsg_level !=
1152 mtod(control, struct cmsghdr *)->cmsg_type !=
1154 mp->msg_controllen = 0;
/* Old-recvmsg compat: strip the cmsghdr, return raw rights. */
1157 control->m_len -= sizeof (struct cmsghdr);
1158 control->m_data += sizeof (struct cmsghdr);
1161 len = mp->msg_controllen;
1163 mp->msg_controllen = 0;
1164 ctlbuf = mp->msg_control;
/* Walk the control mbuf chain, copying out until space runs out. */
1166 while (m && len > 0) {
1167 unsigned int tocopy;
1169 if (len >= m->m_len)
/* Buffer too small: flag truncation. */
1172 mp->msg_flags |= MSG_CTRUNC;
1176 if ((error = copyout(mtod(m, caddr_t),
1177 ctlbuf, tocopy)) != 0)
1184 mp->msg_controllen = ctlbuf - (caddr_t)mp->msg_control;
1189 if (fromsa && KTRPOINT(td, KTR_STRUCT))
1190 ktrsockaddr(fromsa);
1192 free(fromsa, M_SONAME);
/* In-kernel caller takes ownership of the control chain on success. */
1194 if (error == 0 && controlp != NULL)
1195 *controlp = control;
1203 recvit(td, s, mp, namelenp)
1211 error = kern_recvit(td, s, mp, UIO_USERSPACE, NULL);
1214 if (namelenp != NULL) {
1215 error = copyout(&mp->msg_namelen, namelenp, sizeof (socklen_t));
1216 #ifdef COMPAT_OLDSOCK
1217 if (mp->msg_flags & MSG_COMPAT)
1218 error = 0; /* old recvfrom didn't check */
1225 sys_recvfrom(td, uap)
1227 struct recvfrom_args /* {
1232 struct sockaddr * __restrict from;
1233 socklen_t * __restrict fromlenaddr;
1240 if (uap->fromlenaddr) {
1241 error = copyin(uap->fromlenaddr,
1242 &msg.msg_namelen, sizeof (msg.msg_namelen));
1246 msg.msg_namelen = 0;
1248 msg.msg_name = uap->from;
1249 msg.msg_iov = &aiov;
1251 aiov.iov_base = uap->buf;
1252 aiov.iov_len = uap->len;
1253 msg.msg_control = 0;
1254 msg.msg_flags = uap->flags;
1255 error = recvit(td, uap->s, &msg, uap->fromlenaddr);
1260 #ifdef COMPAT_OLDSOCK
1264 struct recvfrom_args *uap;
1267 uap->flags |= MSG_COMPAT;
1268 return (sys_recvfrom(td, uap));
1272 #ifdef COMPAT_OLDSOCK
1276 struct orecv_args /* {
1287 msg.msg_namelen = 0;
1288 msg.msg_iov = &aiov;
1290 aiov.iov_base = uap->buf;
1291 aiov.iov_len = uap->len;
1292 msg.msg_control = 0;
1293 msg.msg_flags = uap->flags;
1294 return (recvit(td, uap->s, &msg, NULL));
1298 * Old recvmsg. This code takes advantage of the fact that the old msghdr
1299 * overlays the new one, missing only the flags, and with the (old) access
1300 * rights where the control fields are now.
1305 struct orecvmsg_args /* {
1307 struct omsghdr *msg;
1315 error = copyin(uap->msg, &msg, sizeof (struct omsghdr));
1318 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1321 msg.msg_flags = uap->flags | MSG_COMPAT;
1323 error = recvit(td, uap->s, &msg, &uap->msg->msg_namelen);
1324 if (msg.msg_controllen && error == 0)
1325 error = copyout(&msg.msg_controllen,
1326 &uap->msg->msg_accrightslen, sizeof (int));
1333 sys_recvmsg(td, uap)
1335 struct recvmsg_args /* {
1342 struct iovec *uiov, *iov;
1345 error = copyin(uap->msg, &msg, sizeof (msg));
1348 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1351 msg.msg_flags = uap->flags;
1352 #ifdef COMPAT_OLDSOCK
1353 msg.msg_flags &= ~MSG_COMPAT;
1357 error = recvit(td, uap->s, &msg, NULL);
1360 error = copyout(&msg, uap->msg, sizeof(msg));
1368 sys_shutdown(td, uap)
1370 struct shutdown_args /* {
1377 cap_rights_t rights;
1380 AUDIT_ARG_FD(uap->s);
1381 error = getsock_cap(td, uap->s, cap_rights_init(&rights, CAP_SHUTDOWN),
1385 error = soshutdown(so, uap->how);
1387 * Previous versions did not return ENOTCONN, but 0 in
1388 * case the socket was not connected. Some important
1389 * programs like syslogd up to r279016, 2015-02-19,
1390 * still depend on this behavior.
1392 if (error == ENOTCONN &&
1393 td->td_proc->p_osrel < P_OSREL_SHUTDOWN_ENOTCONN)
1402 sys_setsockopt(td, uap)
1404 struct setsockopt_args /* {
1413 return (kern_setsockopt(td, uap->s, uap->level, uap->name,
1414 uap->val, UIO_USERSPACE, uap->valsize));
/*
 * kern_setsockopt - kernel backend of setsockopt(2); `val' may live in
 * user or kernel space as indicated by valseg.
 * NOTE(review): K&R-style definition; switch arms for UIO_USERSPACE /
 * UIO_SYSSPACE are partially elided from this extract.
 */
1418 kern_setsockopt(td, s, level, name, val, valseg, valsize)
1424 enum uio_seg valseg;
1429 struct sockopt sopt;
1430 cap_rights_t rights;
/* A non-zero length with no buffer is invalid. */
1433 if (val == NULL && valsize != 0)
/* Reject lengths that would be negative as an int. */
1435 if ((int)valsize < 0)
/* Fill in the sockopt descriptor consumed by sosetopt(). */
1438 sopt.sopt_dir = SOPT_SET;
1439 sopt.sopt_level = level;
1440 sopt.sopt_name = name;
1441 sopt.sopt_val = val;
1442 sopt.sopt_valsize = valsize;
/* Kernel-space value: no thread needed for copyin. */
1448 sopt.sopt_td = NULL;
1451 panic("kern_setsockopt called with bad valseg");
/* CAP_SETSOCKOPT is required on the descriptor. */
1455 error = getsock_cap(td, s, cap_rights_init(&rights, CAP_SETSOCKOPT),
1459 error = sosetopt(so, &sopt);
1467 sys_getsockopt(td, uap)
1469 struct getsockopt_args /* {
1473 void * __restrict val;
1474 socklen_t * __restrict avalsize;
1481 error = copyin(uap->avalsize, &valsize, sizeof (valsize));
1486 error = kern_getsockopt(td, uap->s, uap->level, uap->name,
1487 uap->val, UIO_USERSPACE, &valsize);
1490 error = copyout(&valsize, uap->avalsize, sizeof (valsize));
1495 * Kernel version of getsockopt.
1496 * optval can be a userland or userspace. optlen is always a kernel pointer.
/*
 * kern_getsockopt - kernel backend of getsockopt(2).  `val' may be a
 * user or kernel pointer per valseg; *valsize is in/out: buffer size on
 * entry, actual option length on return.
 * NOTE(review): K&R-style definition; parts of the body are elided.
 */
1499 kern_getsockopt(td, s, level, name, val, valseg, valsize)
1505 enum uio_seg valseg;
1510 struct sockopt sopt;
1511 cap_rights_t rights;
/* Reject lengths that would be negative as an int. */
1516 if ((int)*valsize < 0)
/* Fill in the sockopt descriptor consumed by sogetopt(). */
1519 sopt.sopt_dir = SOPT_GET;
1520 sopt.sopt_level = level;
1521 sopt.sopt_name = name;
1522 sopt.sopt_val = val;
1523 sopt.sopt_valsize = (size_t)*valsize; /* checked non-negative above */
/* Kernel-space value: no thread needed for copyout. */
1529 sopt.sopt_td = NULL;
1532 panic("kern_getsockopt called with bad valseg");
/* CAP_GETSOCKOPT is required on the descriptor. */
1536 error = getsock_cap(td, s, cap_rights_init(&rights, CAP_GETSOCKOPT),
1540 error = sogetopt(so, &sopt);
/* Report the actual option length back to the caller. */
1541 *valsize = sopt.sopt_valsize;
1548 * getsockname1() - Get socket name.
1552 getsockname1(td, uap, compat)
1554 struct getsockname_args /* {
1556 struct sockaddr * __restrict asa;
1557 socklen_t * __restrict alen;
1561 struct sockaddr *sa;
1565 error = copyin(uap->alen, &len, sizeof(len));
1569 error = kern_getsockname(td, uap->fdes, &sa, &len);
1574 #ifdef COMPAT_OLDSOCK
1576 ((struct osockaddr *)sa)->sa_family = sa->sa_family;
1578 error = copyout(sa, uap->asa, (u_int)len);
1582 error = copyout(&len, uap->alen, sizeof(len));
1587 kern_getsockname(struct thread *td, int fd, struct sockaddr **sa,
1592 cap_rights_t rights;
1597 error = getsock_cap(td, fd, cap_rights_init(&rights, CAP_GETSOCKNAME),
1603 CURVNET_SET(so->so_vnet);
1604 error = (*so->so_proto->pr_usrreqs->pru_sockaddr)(so, sa);
1611 len = MIN(*alen, (*sa)->sa_len);
1614 if (KTRPOINT(td, KTR_STRUCT))
1619 if (error != 0 && *sa != NULL) {
1620 free(*sa, M_SONAME);
1627 sys_getsockname(td, uap)
1629 struct getsockname_args *uap;
1632 return (getsockname1(td, uap, 0));
1635 #ifdef COMPAT_OLDSOCK
1637 ogetsockname(td, uap)
1639 struct getsockname_args *uap;
1642 return (getsockname1(td, uap, 1));
1644 #endif /* COMPAT_OLDSOCK */
1647 * getpeername1() - Get name of peer for connected socket.
1651 getpeername1(td, uap, compat)
1653 struct getpeername_args /* {
1655 struct sockaddr * __restrict asa;
1656 socklen_t * __restrict alen;
1660 struct sockaddr *sa;
1664 error = copyin(uap->alen, &len, sizeof (len));
1668 error = kern_getpeername(td, uap->fdes, &sa, &len);
1673 #ifdef COMPAT_OLDSOCK
1675 ((struct osockaddr *)sa)->sa_family = sa->sa_family;
1677 error = copyout(sa, uap->asa, (u_int)len);
1681 error = copyout(&len, uap->alen, sizeof(len));
1686 kern_getpeername(struct thread *td, int fd, struct sockaddr **sa,
1691 cap_rights_t rights;
1696 error = getsock_cap(td, fd, cap_rights_init(&rights, CAP_GETPEERNAME),
1701 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
1706 CURVNET_SET(so->so_vnet);
1707 error = (*so->so_proto->pr_usrreqs->pru_peeraddr)(so, sa);
1714 len = MIN(*alen, (*sa)->sa_len);
1717 if (KTRPOINT(td, KTR_STRUCT))
1721 if (error != 0 && *sa != NULL) {
1722 free(*sa, M_SONAME);
1731 sys_getpeername(td, uap)
1733 struct getpeername_args *uap;
1736 return (getpeername1(td, uap, 0));
1739 #ifdef COMPAT_OLDSOCK
1741 ogetpeername(td, uap)
1743 struct ogetpeername_args *uap;
1746 /* XXX uap should have type `getpeername_args *' to begin with. */
1747 return (getpeername1(td, (struct getpeername_args *)uap, 1));
1749 #endif /* COMPAT_OLDSOCK */
/*
 * sockargs - copy a user buffer into a freshly allocated mbuf of the
 * given type, with 4.3BSD sockaddr compatibility fixups.
 * NOTE(review): K&R-style definition; error returns are elided here.
 */
1752 sockargs(mp, buf, buflen, type)
1757 struct sockaddr *sa;
1761 if (buflen > MLEN) {
1762 #ifdef COMPAT_OLDSOCK
/* Historic 4.3BSD apps passed oversized unix-domain sockaddrs. */
1763 if (type == MT_SONAME && buflen <= 112)
1764 buflen = MLEN; /* unix domain compat. hack */
/* Cap at one mbuf cluster. */
1767 if (buflen > MCLBYTES)
1770 m = m_get2(buflen, M_WAITOK, type, 0);
1772 error = copyin(buf, mtod(m, caddr_t), (u_int)buflen);
/* Sockaddr normalization applies only to MT_SONAME buffers. */
1777 if (type == MT_SONAME) {
1778 sa = mtod(m, struct sockaddr *);
1780 #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
/* Old sockaddrs stored the family in what is now sa_len. */
1781 if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1782 sa->sa_family = sa->sa_len;
/* sa_len is authoritative: force it to the copied-in length. */
1784 sa->sa_len = buflen;
/*
 * getsockaddr - copy a sockaddr of `len' bytes in from user space into
 * a malloc'd M_SONAME buffer; caller frees.  Applies the same old-style
 * sa_family/sa_len compatibility fixup as sockargs().
 * NOTE(review): K&R-style definition; some body lines are elided.
 */
1791 getsockaddr(namp, uaddr, len)
1792 struct sockaddr **namp;
1796 struct sockaddr *sa;
/* Bound the length on both sides before allocating. */
1799 if (len > SOCK_MAXADDRLEN)
1800 return (ENAMETOOLONG);
1801 if (len < offsetof(struct sockaddr, sa_data[0]))
1803 sa = malloc(len, M_SONAME, M_WAITOK);
1804 error = copyin(uaddr, sa, len);
1808 #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
/* Old sockaddrs stored the family in what is now sa_len. */
1809 if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1810 sa->sa_family = sa->sa_len;
1818 struct sendfile_sync {
1825 * Add more references to a vm_page + sf_buf + sendfile_sync.
/*
 * sf_ext_ref - take additional references on a sendfile external-page
 * mbuf: the vm_page, the sf_buf, and the sendfile_sync counter.
 * NOTE(review): the page/sf_buf ref lines are elided from this extract;
 * only the sendfile_sync accounting is visible.
 */
1828 sf_ext_ref(void *arg1, void *arg2)
1830 struct sf_buf *sf = arg1;
1831 struct sendfile_sync *sfs = arg2;
1832 vm_page_t pg = sf_buf_page(sf);
/* Bump the in-flight count under the sync mutex. */
1841 mtx_lock(&sfs->mtx);
1842 KASSERT(sfs->count > 0, ("Sendfile sync botchup count == 0"));
1844 mtx_unlock(&sfs->mtx);
1849 * Detach mapped page and release resources back to the system.
/*
 * sf_ext_free - release a sendfile external page: unwire it, free it if
 * orphaned, and wake any SF_SYNC waiter when the last buffer completes.
 * NOTE(review): sf_buf release and locking lines are elided here.
 */
1852 sf_ext_free(void *arg1, void *arg2)
1854 struct sf_buf *sf = arg1;
1855 struct sendfile_sync *sfs = arg2;
1856 vm_page_t pg = sf_buf_page(sf);
1861 vm_page_unwire(pg, PQ_INACTIVE);
1863 * Check for the object going away on us. This can
1864 * happen since we don't hold a reference to it.
1865 * If so, we're responsible for freeing the page.
1867 if (pg->wire_count == 0 && pg->object == NULL)
/* Drop our in-flight count; signal the sleeper on the last one. */
1872 mtx_lock(&sfs->mtx);
1873 KASSERT(sfs->count > 0, ("Sendfile sync botchup count == 0"));
1874 if (--sfs->count == 0)
1875 cv_signal(&sfs->cv);
1876 mtx_unlock(&sfs->mtx);
1883 * int sendfile(int fd, int s, off_t offset, size_t nbytes,
1884 * struct sf_hdtr *hdtr, off_t *sbytes, int flags)
1886 * Send a file specified by 'fd' and starting at 'offset' to a socket
1887 * specified by 's'. Send only 'nbytes' of the file or until EOF if nbytes ==
1888 * 0. Optionally add a header and/or trailer to the socket output. If
1889 * specified, write the total number of bytes sent into *sbytes.
1892 sys_sendfile(struct thread *td, struct sendfile_args *uap)
1895 return (do_sendfile(td, uap, 0));
/*
 * do_sendfile - copy in sendfile(2) arguments (header/trailer iovecs),
 * resolve the file with CAP_PREAD, and dispatch to fo_sendfile().
 * `compat' selects FreeBSD 4 return-value semantics (SFK_COMPAT).
 * NOTE(review): cleanup/return lines are elided from this extract.
 */
1899 do_sendfile(struct thread *td, struct sendfile_args *uap, int compat)
1901 struct sf_hdtr hdtr;
1902 struct uio *hdr_uio, *trl_uio;
1904 cap_rights_t rights;
1909 * File offset must be positive. If it goes beyond EOF
1910 * we send only the header/trailer and no payload data.
1912 if (uap->offset < 0)
1915 hdr_uio = trl_uio = NULL;
/* Optional header/trailer descriptor: copy it and its iovecs in. */
1917 if (uap->hdtr != NULL) {
1918 error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
1921 if (hdtr.headers != NULL) {
1922 error = copyinuio(hdtr.headers, hdtr.hdr_cnt,
1927 if (hdtr.trailers != NULL) {
1928 error = copyinuio(hdtr.trailers, hdtr.trl_cnt,
1935 AUDIT_ARG_FD(uap->fd);
1938 * sendfile(2) can start at any offset within a file so we require
1939 * CAP_READ+CAP_SEEK = CAP_PREAD.
1941 if ((error = fget_read(td, uap->fd,
1942 cap_rights_init(&rights, CAP_PREAD), &fp)) != 0) {
/* Hand off to the fileops sendfile method (vn_sendfile for vnodes). */
1946 error = fo_sendfile(fp, uap->s, hdr_uio, trl_uio, uap->offset,
1947 uap->nbytes, &sbytes, uap->flags, compat ? SFK_COMPAT : 0, td);
/* Report bytes sent; copyout failure is deliberately ignored here. */
1950 if (uap->sbytes != NULL)
1951 copyout(&sbytes, uap->sbytes, sizeof(off_t));
1954 free(hdr_uio, M_IOV);
1955 free(trl_uio, M_IOV);
1959 #ifdef COMPAT_FREEBSD4
1961 freebsd4_sendfile(struct thread *td, struct freebsd4_sendfile_args *uap)
1963 struct sendfile_args args;
1967 args.offset = uap->offset;
1968 args.nbytes = uap->nbytes;
1969 args.hdtr = uap->hdtr;
1970 args.sbytes = uap->sbytes;
1971 args.flags = uap->flags;
1973 return (do_sendfile(td, &args, 1));
1975 #endif /* COMPAT_FREEBSD4 */
/*
 * sendfile_readpage - grab (and wire) the page of `obj' covering file
 * offset `off', paging it in from backing store if it is not valid.
 * `nd' non-zero suppresses disk I/O and is returned instead (used by
 * the non-blocking main loop).  On success *res holds the wired page.
 * NOTE(review): several lines (error assignments, m_ref handling) are
 * elided from this extract.
 */
1978 sendfile_readpage(vm_object_t obj, struct vnode *vp, int nd,
1979 off_t off, int xfsize, int bsize, struct thread *td, vm_page_t *res)
1984 int error, readahead, rv;
1986 pindex = OFF_TO_IDX(off);
1987 VM_OBJECT_WLOCK(obj);
/* Grab the page wired; vnode-backed pages are grabbed unbusied. */
1988 m = vm_page_grab(obj, pindex, (vp != NULL ? VM_ALLOC_NOBUSY |
1989 VM_ALLOC_IGN_SBUSY : 0) | VM_ALLOC_WIRED | VM_ALLOC_NORMAL);
1992 * Check if page is valid for what we need, otherwise initiate I/O.
1994 * The non-zero nd argument prevents disk I/O, instead we
1995 * return the caller what he specified in nd. In particular,
1996 * if we already turned some pages into mbufs, nd == EAGAIN
1997 * and the main function send them the pages before we come
1998 * here again and block.
2000 if (m->valid != 0 && vm_page_is_valid(m, off & PAGE_MASK, xfsize)) {
2003 VM_OBJECT_WUNLOCK(obj);
2006 } else if (nd != 0) {
2014 * Get the page from backing store.
2018 VM_OBJECT_WUNLOCK(obj);
/* Read-ahead window is tuned by the kern.ipc.sendfile.readahead knob. */
2019 readahead = sfreadahead * MAXBSIZE;
2022 * Use vn_rdwr() instead of the pager interface for
2023 * the vnode, to allow the read-ahead.
2025 * XXXMAC: Because we don't have fp->f_cred here, we
2026 * pass in NOCRED. This is probably wrong, but is
2027 * consistent with our original implementation.
2029 error = vn_rdwr(UIO_READ, vp, NULL, readahead, trunc_page(off),
2030 UIO_NOCOPY, IO_NODELOCKED | IO_VMIO | ((readahead /
2031 bsize) << IO_SEQSHIFT), td->td_ucred, NOCRED, &resid, td);
2032 SFSTAT_INC(sf_iocnt);
2033 VM_OBJECT_WLOCK(obj);
/* Non-vnode (shm) path: go through the pager directly. */
2035 if (vm_pager_has_page(obj, pindex, NULL, NULL)) {
2036 rv = vm_pager_get_pages(obj, &m, 1, NULL, NULL);
2037 SFSTAT_INC(sf_iocnt);
2038 if (rv != VM_PAGER_OK) {
/* Pager has no backing copy: treat the page as zero-filled/valid. */
2047 m->valid = VM_PAGE_BITS_ALL;
/* Error unwinding: drop our wiring and maybe free an orphan page. */
2055 } else if (m != NULL) {
2058 vm_page_unwire(m, PQ_INACTIVE);
2061 * See if anyone else might know about this page. If
2062 * not and it is not valid, then free it.
2064 if (m->wire_count == 0 && m->valid == 0 && !vm_page_busied(m))
2068 KASSERT(error != 0 || (m->wire_count > 0 &&
2069 vm_page_is_valid(m, off & PAGE_MASK, xfsize)),
2070 ("wrong page state m %p off %#jx xfsize %d", m, (uintmax_t)off,
2072 VM_OBJECT_WUNLOCK(obj);
/*
 * NOTE(review): elided extract -- fused line numbers, missing
 * statements (returns/error assignments not visible).
 *
 * sendfile_getobj: resolve the source file descriptor "fp" into a
 * backing VM object.  Two cases are visible: a regular-file vnode
 * (DTYPE_VNODE, must be VREG) and a POSIX shm object (DTYPE_SHM).
 * Outputs: *obj_res (referenced VM object), *vp_res / *shmfd_res
 * (whichever backs fp), *obj_size (current size), *bsize (FS I/O size
 * for the vnode case).  Errors for other fp types are presumably
 * returned from the elided branches -- confirm against full source.
 */
2077 sendfile_getobj(struct thread *td, struct file *fp, vm_object_t *obj_res,
2078 struct vnode **vp_res, struct shmfd **shmfd_res, off_t *obj_size,
2084 struct shmfd *shmfd;
/* Pre-clear outputs so error paths leave them NULL. */
2087 vp = *vp_res = NULL;
2089 shmfd = *shmfd_res = NULL;
2093 * The file descriptor must be a regular file and have a
2094 * backing VM object.
2096 if (fp->f_type == DTYPE_VNODE) {
2098 vn_lock(vp, LK_SHARED | LK_RETRY);
2099 if (vp->v_type != VREG) {
/* Size in bytes of an optimal FS transfer; drives the IO_SEQSHIFT hint. */
2103 *bsize = vp->v_mount->mnt_stat.f_iosize;
2104 error = VOP_GETATTR(vp, &va, td->td_ucred);
2107 *obj_size = va.va_size;
2113 } else if (fp->f_type == DTYPE_SHM) {
2116 obj = shmfd->shm_object;
2117 *obj_size = shmfd->shm_size;
/* Refuse an object already being destroyed. */
2123 VM_OBJECT_WLOCK(obj);
2124 if ((obj->flags & OBJ_DEAD) != 0) {
2125 VM_OBJECT_WUNLOCK(obj);
2131 * Temporarily increase the backing VM object's reference
2132 * count so that a forced reclamation of its vnode does not
2133 * immediately destroy it.
2135 vm_object_reference_locked(obj);
2136 VM_OBJECT_WUNLOCK(obj);
/*
 * NOTE(review): elided extract -- the error returns for the two
 * rejection checks below are not visible.
 *
 * kern_sendfile_getsock: resolve descriptor "s" into its struct file
 * (*sock_fp, held) and socket (*so), enforcing CAP_SEND capability
 * rights.  The socket must be a connected stream socket; the two
 * visible checks reject other types/states.
 */
2148 kern_sendfile_getsock(struct thread *td, int s, struct file **sock_fp,
2151 cap_rights_t rights;
2158 * The socket must be a stream socket and connected.
2160 error = getsock_cap(td, s, cap_rights_init(&rights, CAP_SEND),
2164 *so = (*sock_fp)->f_data;
2165 if ((*so)->so_type != SOCK_STREAM)
2167 if (((*so)->so_state & SS_ISCONNECTED) == 0)
/*
 * NOTE(review): elided extract -- fused original line numbers, many
 * missing statements, and the function continues past the end of this
 * chunk.  Comments describe only what the visible code establishes.
 *
 * vn_sendfile: core of sendfile(2).  Streams "nbytes" bytes (0 == to
 * EOF) of the file "fp" starting at "offset" into stream socket
 * "sockfd" without copying through userland: file pages are wired,
 * wrapped in sf_bufs, attached to mbufs as EXT_SFBUF external storage
 * and handed to pru_send.  Optional header/trailer iovecs are sent
 * before/after the file data.  *sent presumably reports total bytes on
 * return (assignment elided -- confirm against full source).
 */
2173 vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
2174 struct uio *trl_uio, off_t offset, size_t nbytes, off_t *sent, int flags,
2175 int kflags, struct thread *td)
2177 struct file *sock_fp;
2179 struct vm_object *obj;
2184 struct shmfd *shmfd;
2185 struct sendfile_sync *sfs;
2187 off_t off, xfsize, fsbytes, sbytes, rem, obj_size;
2188 int error, bsize, nd, hdrlen, mnw;
/* fsbytes: file bytes sent; sbytes: total bytes sent incl. headers. */
2195 fsbytes = sbytes = 0;
/* Resolve source object and destination socket (helpers above). */
2200 error = sendfile_getobj(td, fp, &obj, &vp, &shmfd, &obj_size, &bsize);
2206 error = kern_sendfile_getsock(td, sockfd, &sock_fp, &so);
2211 * Do not wait on memory allocations but return ENOMEM for
2212 * caller to retry later.
2213 * XXX: Experimental.
2215 if (flags & SF_MNOWAIT)
/* SF_SYNC: allocate a sync object to wait for all mbufs to drain. */
2218 if (flags & SF_SYNC) {
2219 sfs = malloc(sizeof *sfs, M_TEMP, M_WAITOK | M_ZERO);
2220 mtx_init(&sfs->mtx, "sendfile", NULL, MTX_DEF);
2221 cv_init(&sfs->cv, "sendfile");
2225 error = mac_socket_check_send(td->td_ucred, so);
2230 /* If headers are specified copy them into mbufs. */
2231 if (hdr_uio != NULL) {
2232 hdr_uio->uio_td = td;
2233 hdr_uio->uio_rw = UIO_WRITE;
2234 if (hdr_uio->uio_resid > 0) {
2236 * In FBSD < 5.0 the nbytes to send also included
2237 * the header. If compat is specified subtract the
2238 * header size from nbytes.
2240 if (kflags & SFK_COMPAT) {
2241 if (nbytes > hdr_uio->uio_resid)
2242 nbytes -= hdr_uio->uio_resid;
2246 m = m_uiotombuf(hdr_uio, (mnw ? M_NOWAIT : M_WAITOK),
2249 error = mnw ? EAGAIN : ENOBUFS;
2252 hdrlen = m_length(m, NULL);
2257 * Protect against multiple writers to the socket.
2259 * XXXRW: Historically this has assumed non-interruptibility, so now
2260 * we implement that, but possibly shouldn't.
2262 (void)sblock(&so->so_snd, SBL_WAIT | SBL_NOINTR);
2265 * Loop through the pages of the file, starting with the requested
2266 * offset. Get a file page (do I/O if necessary), map the file page
2267 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
2269 * This is done in two loops. The inner loop turns as many pages
2270 * as it can, up to available socket buffer space, without blocking
2271 * into mbufs to have it bulk delivered into the socket send buffer.
2272 * The outer loop checks the state and available space of the socket
2273 * and takes care of the overall progress.
2275 for (off = offset; ; ) {
/* Done when the requested byte count (or whole object) has been sent. */
2281 if ((nbytes != 0 && nbytes == fsbytes) ||
2282 (nbytes == 0 && obj_size == fsbytes))
2291 * Check the socket state for ongoing connection,
2292 * no errors and space in socket buffer.
2293 * If space is low allow for the remainder of the
2294 * file to be processed if it fits the socket buffer.
2295 * Otherwise block in waiting for sufficient space
2296 * to proceed, or if the socket is nonblocking, return
2297 * to userland with EAGAIN while reporting how far
2299 * We wait until the socket buffer has significant free
2300 * space to do bulk sends. This makes good use of file
2301 * system read ahead and allows packet segmentation
2302 * offloading hardware to take over lots of work. If
2303 * we were not careful here we would send off only one
2306 SOCKBUF_LOCK(&so->so_snd)
/* (review) raise low-water mark to half the buffer for bulk sends */;
2307 if (so->so_snd.sb_lowat < so->so_snd.sb_hiwat / 2)
2308 so->so_snd.sb_lowat = so->so_snd.sb_hiwat / 2;
2310 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
2312 SOCKBUF_UNLOCK(&so->so_snd);
2314 } else if (so->so_error) {
2315 error = so->so_error;
2317 SOCKBUF_UNLOCK(&so->so_snd);
2320 space = sbspace(&so->so_snd);
2323 space < so->so_snd.sb_lowat)) {
/* Non-blocking socket: bail out rather than sleep for space. */
2324 if (so->so_state & SS_NBIO) {
2325 SOCKBUF_UNLOCK(&so->so_snd);
2330 * sbwait drops the lock while sleeping.
2331 * When we loop back to retry_space the
2332 * state may have changed and we retest
2335 error = sbwait(&so->so_snd);
2337 * An error from sbwait usually indicates that we've
2338 * been interrupted by a signal. If we've sent anything
2339 * then return bytes sent, otherwise return the error.
2342 SOCKBUF_UNLOCK(&so->so_snd);
2347 SOCKBUF_UNLOCK(&so->so_snd);
2350 * Reduce space in the socket buffer by the size of
2351 * the header mbuf chain.
2352 * hdrlen is set to 0 after the first loop.
/* Re-check file size under the vnode lock: it may have changed. */
2357 error = vn_lock(vp, LK_SHARED);
2360 error = VOP_GETATTR(vp, &va, td->td_ucred);
2361 if (error != 0 || off >= va.va_size) {
2365 obj_size = va.va_size;
2369 * Loop and construct maximum sized mbuf chain to be bulk
2370 * dumped into socket buffer.
2372 while (space > loopbytes) {
2377 * Calculate the amount to transfer.
2378 * Not to exceed a page, the EOF,
2379 * or the passed in nbytes.
2381 pgoff = (vm_offset_t)(off & PAGE_MASK);
/*
 * rem = bytes still to send: from object size (or nbytes cap)
 * minus what earlier iterations (fsbytes) and this chain
 * (loopbytes) already consumed.
 */
2382 rem = obj_size - offset;
2384 rem = omin(rem, nbytes);
2385 rem -= fsbytes + loopbytes;
2386 xfsize = omin(PAGE_SIZE - pgoff, rem);
2387 xfsize = omin(space - loopbytes, xfsize);
2389 done = 1; /* all data sent */
2394 * Attempt to look up the page. Allocate
2395 * if not found or wait and loop if busy.
2398 nd = EAGAIN; /* send what we already got */
2399 else if ((flags & SF_NODISKIO) != 0)
2403 error = sendfile_readpage(obj, vp, nd, off,
2404 xfsize, bsize, td, &pg);
2406 if (error == EAGAIN)
2407 error = 0; /* not a real error */
2412 * Get a sendfile buf. When allocating the
2413 * first buffer for mbuf chain, we usually
2414 * wait as long as necessary, but this wait
2415 * can be interrupted. For consequent
2416 * buffers, do not sleep, since several
2417 * threads might exhaust the buffers and then
2420 sf = sf_buf_alloc(pg, (mnw || m != NULL) ? SFB_NOWAIT :
/* sf_buf allocation failed: unwire the page and give it back. */
2423 SFSTAT_INC(sf_allocfail);
2425 vm_page_unwire(pg, PQ_INACTIVE);
2426 KASSERT(pg->object != NULL,
2427 ("%s: object disappeared", __func__));
2430 error = (mnw ? EAGAIN : EINTR);
2435 * Get an mbuf and set it up as having
2438 m0 = m_get((mnw ? M_NOWAIT : M_WAITOK), MT_DATA);
2440 error = (mnw ? EAGAIN : ENOBUFS);
2441 sf_ext_free(sf, NULL);
2445 * Attach EXT_SFBUF external storage.
2447 m0->m_ext.ext_buf = (caddr_t )sf_buf_kva(sf);
2448 m0->m_ext.ext_size = PAGE_SIZE;
2449 m0->m_ext.ext_arg1 = sf;
2450 m0->m_ext.ext_arg2 = sfs;
2451 m0->m_ext.ext_type = EXT_SFBUF;
2452 m0->m_ext.ext_flags = 0;
/* Zero-copy: mbuf data points straight into the mapped file page. */
2453 m0->m_flags |= (M_EXT|M_RDONLY);
2454 m0->m_data = (char *)sf_buf_kva(sf) + pgoff;
2457 /* Append to mbuf chain. */
2461 m_last(m)->m_next = m0;
2466 /* Keep track of bits processed. */
2467 loopbytes += xfsize;
/* SF_SYNC bookkeeping: count outstanding external buffers. */
2471 mtx_lock(&sfs->mtx);
2473 mtx_unlock(&sfs->mtx);
2480 /* Add the buffer chain to the socket buffer. */
2484 mlen = m_length(m, NULL);
2485 SOCKBUF_LOCK(&so->so_snd);
2486 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
2488 SOCKBUF_UNLOCK(&so->so_snd);
2491 SOCKBUF_UNLOCK(&so->so_snd);
2492 CURVNET_SET(so->so_vnet);
2493 /* Avoid error aliasing. */
2494 err = (*so->so_proto->pr_usrreqs->pru_send)
2495 (so, 0, m, NULL, NULL, td);
2499 * We need two counters to get the
2500 * file offset and nbytes to send
2502 * - sbytes contains the total amount
2503 * of bytes sent, including headers.
2504 * - fsbytes contains the total amount
2505 * of bytes sent from the file.
2513 } else if (error == 0)
2515 m = NULL; /* pru_send always consumes */
2518 /* Quit outer loop on error or when we're done. */
2526 * Send trailers. Wimp out and use writev(2).
2528 if (trl_uio != NULL) {
2529 sbunlock(&so->so_snd);
2530 error = kern_writev(td, sockfd, trl_uio);
2532 sbytes += td->td_retval[0];
2537 sbunlock(&so->so_snd);
2540 * If there was no error we have to clear td->td_retval[0]
2541 * because it may have been set by writev.
2544 td->td_retval[0] = 0;
/* Drop the object reference taken by sendfile_getobj(). */
2550 vm_object_deallocate(obj);
/*
 * SF_SYNC teardown: wait for every EXT_SFBUF mbuf to be freed
 * before destroying the sync object.
 */
2557 mtx_lock(&sfs->mtx);
2558 if (sfs->count != 0)
2559 cv_wait(&sfs->cv, &sfs->mtx);
2560 KASSERT(sfs->count == 0, ("sendfile sync still busy"));
2561 cv_destroy(&sfs->cv);
2562 mtx_destroy(&sfs->mtx);
/* ERESTART from a kernel sleep must not restart the syscall here. */
2566 if (error == ERESTART)