2 * Copyright (c) 1982, 1986, 1989, 1990, 1993
3 * The Regents of the University of California. All rights reserved.
5 * sendfile(2) and related extensions:
6 * Copyright (c) 1998, David Greenman. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * @(#)uipc_syscalls.c 8.4 (Berkeley) 2/21/94
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include "opt_capsicum.h"
40 #include "opt_inet6.h"
41 #include "opt_compat.h"
42 #include "opt_ktrace.h"
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/capsicum.h>
47 #include <sys/condvar.h>
48 #include <sys/kernel.h>
50 #include <sys/mutex.h>
51 #include <sys/sysproto.h>
52 #include <sys/malloc.h>
53 #include <sys/filedesc.h>
54 #include <sys/event.h>
56 #include <sys/fcntl.h>
58 #include <sys/filio.h>
61 #include <sys/mount.h>
63 #include <sys/protosw.h>
64 #include <sys/rwlock.h>
65 #include <sys/sf_buf.h>
66 #include <sys/sysent.h>
67 #include <sys/socket.h>
68 #include <sys/socketvar.h>
69 #include <sys/signalvar.h>
70 #include <sys/syscallsubr.h>
71 #include <sys/sysctl.h>
73 #include <sys/vnode.h>
75 #include <sys/ktrace.h>
77 #ifdef COMPAT_FREEBSD32
78 #include <compat/freebsd32/freebsd32_util.h>
83 #include <security/audit/audit.h>
84 #include <security/mac/mac_framework.h>
87 #include <vm/vm_param.h>
88 #include <vm/vm_object.h>
89 #include <vm/vm_page.h>
90 #include <vm/vm_pager.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_extern.h>
96 * Flags for accept1() and kern_accept4(), in addition to SOCK_CLOEXEC
99 #define ACCEPT4_INHERIT 0x1
100 #define ACCEPT4_COMPAT 0x2
102 static int sendit(struct thread *td, int s, struct msghdr *mp, int flags);
103 static int recvit(struct thread *td, int s, struct msghdr *mp, void *namelenp);
105 static int accept1(struct thread *td, int s, struct sockaddr *uname,
106 socklen_t *anamelen, int flags);
107 static int do_sendfile(struct thread *td, struct sendfile_args *uap,
109 static int getsockname1(struct thread *td, struct getsockname_args *uap,
111 static int getpeername1(struct thread *td, struct getpeername_args *uap,
114 counter_u64_t sfstat[sizeof(struct sfstat) / sizeof(uint64_t)];
117 * sendfile(2)-related variables and associated sysctls
119 static SYSCTL_NODE(_kern_ipc, OID_AUTO, sendfile, CTLFLAG_RW, 0,
120 "sendfile(2) tunables");
121 static int sfreadahead = 1;
122 SYSCTL_INT(_kern_ipc_sendfile, OID_AUTO, readahead, CTLFLAG_RW,
123 &sfreadahead, 0, "Number of sendfile(2) read-ahead MAXBSIZE blocks");
/*
 * Allocate the per-CPU counter array backing the sendfile(2) statistics
 * (sfstat[], one counter per uint64_t field of struct sfstat).  Hooked in
 * at mbuf-subsystem initialization via the SYSINIT below.
 * NOTE(review): this extract elides interior lines of the function body;
 * verify against the full source before modifying.
 */
126 sfstat_init(const void *unused)
129 COUNTER_ARRAY_ALLOC(sfstat, sizeof(struct sfstat) / sizeof(uint64_t),
132 SYSINIT(sfstat, SI_SUB_MBUF, SI_ORDER_FIRST, sfstat_init, NULL);
/*
 * sysctl handler for kern.ipc.sfstat: snapshot the per-CPU sendfile
 * counters into a local struct and copy it out to userland.  The
 * COUNTER_ARRAY_ZERO call resets the counters — presumably only on a
 * write-style request; the guarding condition is elided in this extract.
 */
135 sfstat_sysctl(SYSCTL_HANDLER_ARGS)
139 COUNTER_ARRAY_COPY(sfstat, &s, sizeof(s) / sizeof(uint64_t));
141 COUNTER_ARRAY_ZERO(sfstat, sizeof(s) / sizeof(uint64_t));
142 return (SYSCTL_OUT(req, &s, sizeof(s)));
144 SYSCTL_PROC(_kern_ipc, OID_AUTO, sfstat, CTLTYPE_OPAQUE | CTLFLAG_RW,
145 NULL, 0, sfstat_sysctl, "I", "sendfile statistics");
148 * Convert a user file descriptor to a kernel file entry and check if required
149 * capability rights are present.
150 * A reference on the file entry is held upon returning.
/*
 * Resolve a user file descriptor to its struct file, checking the
 * requested capability rights, and reject descriptors that are not
 * sockets (f_type != DTYPE_SOCKET).  On success the caller holds the
 * reference obtained by fget_unlocked() and, if fflagp is non-NULL,
 * receives the file's f_flag.  Error-return lines are elided here.
 */
153 getsock_cap(struct thread *td, int fd, cap_rights_t *rightsp,
154 struct file **fpp, u_int *fflagp)
159 error = fget_unlocked(td->td_proc->p_fd, fd, rightsp, &fp, NULL);
162 if (fp->f_type != DTYPE_SOCKET) {
167 *fflagp = fp->f_flag;
173 * System call interface to the socket abstraction.
175 #if defined(COMPAT_43)
176 #define COMPAT_OLDSOCK
182 struct socket_args /* {
190 int fd, error, type, oflag, fflag;
192 AUDIT_ARG_SOCKET(uap->domain, uap->type, uap->protocol);
197 if ((type & SOCK_CLOEXEC) != 0) {
198 type &= ~SOCK_CLOEXEC;
201 if ((type & SOCK_NONBLOCK) != 0) {
202 type &= ~SOCK_NONBLOCK;
207 error = mac_socket_check_create(td->td_ucred, uap->domain, type,
212 error = falloc(td, &fp, &fd, oflag);
215 /* An extra reference on `fp' has been held for us by falloc(). */
216 error = socreate(uap->domain, &so, type, uap->protocol,
221 finit(fp, FREAD | FWRITE | fflag, DTYPE_SOCKET, so, &socketops);
222 if ((fflag & FNONBLOCK) != 0)
223 (void) fo_ioctl(fp, FIONBIO, &fflag, td->td_ucred, td);
224 td->td_retval[0] = fd;
234 struct bind_args /* {
243 error = getsockaddr(&sa, uap->name, uap->namelen);
245 error = kern_bindat(td, AT_FDCWD, uap->s, sa);
/*
 * Kernel backend for bind(2)/bindat(2): audit the sockaddr, look up the
 * socket with CAP_BIND rights, run ktrace/MAC hooks, then dispatch to
 * sobind() for the AT_FDCWD case or sobindat() when a directory fd was
 * supplied.  Cleanup/return lines are elided in this extract.
 */
252 kern_bindat(struct thread *td, int dirfd, int fd, struct sockaddr *sa)
260 AUDIT_ARG_SOCKADDR(td, dirfd, sa);
261 error = getsock_cap(td, fd, cap_rights_init(&rights, CAP_BIND),
267 if (KTRPOINT(td, KTR_STRUCT))
271 error = mac_socket_check_bind(td->td_ucred, so, sa);
274 if (dirfd == AT_FDCWD)
275 error = sobind(so, sa, td);
277 error = sobindat(dirfd, so, sa, td);
289 struct bindat_args /* {
299 error = getsockaddr(&sa, uap->name, uap->namelen);
301 error = kern_bindat(td, uap->fd, uap->s, sa);
311 struct listen_args /* {
321 AUDIT_ARG_FD(uap->s);
322 error = getsock_cap(td, uap->s, cap_rights_init(&rights, CAP_LISTEN),
327 error = mac_socket_check_listen(td->td_ucred, so);
330 error = solisten(so, uap->backlog, td);
340 accept1(td, s, uname, anamelen, flags)
343 struct sockaddr *uname;
347 struct sockaddr *name;
353 return (kern_accept4(td, s, NULL, NULL, flags, NULL));
355 error = copyin(anamelen, &namelen, sizeof (namelen));
359 error = kern_accept4(td, s, &name, &namelen, flags, &fp);
364 if (error == 0 && uname != NULL) {
365 #ifdef COMPAT_OLDSOCK
366 if (flags & ACCEPT4_COMPAT)
367 ((struct osockaddr *)name)->sa_family =
370 error = copyout(name, uname, namelen);
373 error = copyout(&namelen, anamelen,
376 fdclose(td, fp, td->td_retval[0]);
378 free(name, M_SONAME);
/*
 * Legacy accept entry point: identical to kern_accept4() with
 * ACCEPT4_INHERIT semantics (new socket inherits O_NONBLOCK/O_ASYNC
 * state from the listening socket, per the flag's use elsewhere in
 * this file).
 */
383 kern_accept(struct thread *td, int s, struct sockaddr **name,
384 socklen_t *namelen, struct file **fp)
386 return (kern_accept4(td, s, name, namelen, ACCEPT4_INHERIT, fp));
390 kern_accept4(struct thread *td, int s, struct sockaddr **name,
391 socklen_t *namelen, int flags, struct file **fp)
393 struct file *headfp, *nfp = NULL;
394 struct sockaddr *sa = NULL;
395 struct socket *head, *so;
405 error = getsock_cap(td, s, cap_rights_init(&rights, CAP_ACCEPT),
409 head = headfp->f_data;
410 if ((head->so_options & SO_ACCEPTCONN) == 0) {
415 error = mac_socket_check_accept(td->td_ucred, head);
419 error = falloc(td, &nfp, &fd, (flags & SOCK_CLOEXEC) ? O_CLOEXEC : 0);
423 if ((head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->so_comp)) {
428 while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0) {
429 if (head->so_rcv.sb_state & SBS_CANTRCVMORE) {
430 head->so_error = ECONNABORTED;
433 error = msleep(&head->so_timeo, &accept_mtx, PSOCK | PCATCH,
440 if (head->so_error) {
441 error = head->so_error;
446 so = TAILQ_FIRST(&head->so_comp);
447 KASSERT(!(so->so_qstate & SQ_INCOMP), ("accept1: so SQ_INCOMP"));
448 KASSERT(so->so_qstate & SQ_COMP, ("accept1: so not SQ_COMP"));
451 * Before changing the flags on the socket, we have to bump the
452 * reference count. Otherwise, if the protocol calls sofree(),
453 * the socket will be released due to a zero refcount.
455 SOCK_LOCK(so); /* soref() and so_state update */
456 soref(so); /* file descriptor reference */
458 TAILQ_REMOVE(&head->so_comp, so, so_list);
460 if (flags & ACCEPT4_INHERIT)
461 so->so_state |= (head->so_state & SS_NBIO);
463 so->so_state |= (flags & SOCK_NONBLOCK) ? SS_NBIO : 0;
464 so->so_qstate &= ~SQ_COMP;
470 /* An extra reference on `nfp' has been held for us by falloc(). */
471 td->td_retval[0] = fd;
473 /* connection has been removed from the listen queue */
474 KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0);
476 if (flags & ACCEPT4_INHERIT) {
477 pgid = fgetown(&head->so_sigio);
479 fsetown(pgid, &so->so_sigio);
481 fflag &= ~(FNONBLOCK | FASYNC);
482 if (flags & SOCK_NONBLOCK)
486 finit(nfp, fflag, DTYPE_SOCKET, so, &socketops);
487 /* Sync socket nonblocking/async state with file flags */
488 tmp = fflag & FNONBLOCK;
489 (void) fo_ioctl(nfp, FIONBIO, &tmp, td->td_ucred, td);
490 tmp = fflag & FASYNC;
491 (void) fo_ioctl(nfp, FIOASYNC, &tmp, td->td_ucred, td);
493 error = soaccept(so, &sa);
501 AUDIT_ARG_SOCKADDR(td, AT_FDCWD, sa);
503 /* check sa_len before it is destroyed */
504 if (*namelen > sa->sa_len)
505 *namelen = sa->sa_len;
507 if (KTRPOINT(td, KTR_STRUCT))
517 * close the new descriptor, assuming someone hasn't ripped it
521 fdclose(td, nfp, fd);
524 * Release explicitly held references before returning. We return
525 * a reference on nfp to the caller on success if they request it.
544 struct accept_args *uap;
547 return (accept1(td, uap->s, uap->name, uap->anamelen, ACCEPT4_INHERIT));
553 struct accept4_args *uap;
556 if (uap->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
559 return (accept1(td, uap->s, uap->name, uap->anamelen, uap->flags));
562 #ifdef COMPAT_OLDSOCK
566 struct accept_args *uap;
569 return (accept1(td, uap->s, uap->name, uap->anamelen,
570 ACCEPT4_INHERIT | ACCEPT4_COMPAT));
572 #endif /* COMPAT_OLDSOCK */
578 struct connect_args /* {
587 error = getsockaddr(&sa, uap->name, uap->namelen);
589 error = kern_connectat(td, AT_FDCWD, uap->s, sa);
/*
 * Kernel backend for connect(2)/connectat(2).  Flow visible here:
 * audit, CAP_CONNECT lookup, reject a socket already in
 * SS_ISCONNECTING, ktrace/MAC hooks, then soconnect()/soconnectat().
 * A non-blocking socket still connecting returns early (the
 * EINPROGRESS return itself is elided); otherwise sleep on so_timeo
 * until the connection completes, treating EINTR/ERESTART as an
 * interrupted wait ("interrupted" flag above).  ERESTART is mapped to
 * a different errno at the end — presumably EINTR; the replacement
 * line is elided, so confirm against the full source.
 */
596 kern_connectat(struct thread *td, int dirfd, int fd, struct sockaddr *sa)
601 int error, interrupted = 0;
604 AUDIT_ARG_SOCKADDR(td, dirfd, sa);
605 error = getsock_cap(td, fd, cap_rights_init(&rights, CAP_CONNECT),
610 if (so->so_state & SS_ISCONNECTING) {
615 if (KTRPOINT(td, KTR_STRUCT))
619 error = mac_socket_check_connect(td->td_ucred, so, sa);
623 if (dirfd == AT_FDCWD)
624 error = soconnect(so, sa, td);
626 error = soconnectat(dirfd, so, sa, td);
629 if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
634 while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
635 error = msleep(&so->so_timeo, SOCK_MTX(so), PSOCK | PCATCH,
638 if (error == EINTR || error == ERESTART)
644 error = so->so_error;
650 so->so_state &= ~SS_ISCONNECTING;
651 if (error == ERESTART)
660 sys_connectat(td, uap)
662 struct connectat_args /* {
672 error = getsockaddr(&sa, uap->name, uap->namelen);
674 error = kern_connectat(td, uap->fd, uap->s, sa);
681 kern_socketpair(struct thread *td, int domain, int type, int protocol,
684 struct file *fp1, *fp2;
685 struct socket *so1, *so2;
686 int fd, error, oflag, fflag;
688 AUDIT_ARG_SOCKET(domain, type, protocol);
692 if ((type & SOCK_CLOEXEC) != 0) {
693 type &= ~SOCK_CLOEXEC;
696 if ((type & SOCK_NONBLOCK) != 0) {
697 type &= ~SOCK_NONBLOCK;
701 /* We might want to have a separate check for socket pairs. */
702 error = mac_socket_check_create(td->td_ucred, domain, type,
707 error = socreate(domain, &so1, type, protocol, td->td_ucred, td);
710 error = socreate(domain, &so2, type, protocol, td->td_ucred, td);
713 /* On success extra reference to `fp1' and 'fp2' is set by falloc. */
714 error = falloc(td, &fp1, &fd, oflag);
718 fp1->f_data = so1; /* so1 already has ref count */
719 error = falloc(td, &fp2, &fd, oflag);
722 fp2->f_data = so2; /* so2 already has ref count */
724 error = soconnect2(so1, so2);
727 if (type == SOCK_DGRAM) {
729 * Datagram socket connection is asymmetric.
731 error = soconnect2(so2, so1);
735 finit(fp1, FREAD | FWRITE | fflag, DTYPE_SOCKET, fp1->f_data,
737 finit(fp2, FREAD | FWRITE | fflag, DTYPE_SOCKET, fp2->f_data,
739 if ((fflag & FNONBLOCK) != 0) {
740 (void) fo_ioctl(fp1, FIONBIO, &fflag, td->td_ucred, td);
741 (void) fo_ioctl(fp2, FIONBIO, &fflag, td->td_ucred, td);
747 fdclose(td, fp2, rsv[1]);
750 fdclose(td, fp1, rsv[0]);
762 sys_socketpair(struct thread *td, struct socketpair_args *uap)
766 error = kern_socketpair(td, uap->domain, uap->type,
770 error = copyout(sv, uap->rsv, 2 * sizeof(int));
772 (void)kern_close(td, sv[0]);
773 (void)kern_close(td, sv[1]);
779 sendit(td, s, mp, flags)
785 struct mbuf *control;
789 #ifdef CAPABILITY_MODE
790 if (IN_CAPABILITY_MODE(td) && (mp->msg_name != NULL))
794 if (mp->msg_name != NULL) {
795 error = getsockaddr(&to, mp->msg_name, mp->msg_namelen);
805 if (mp->msg_control) {
806 if (mp->msg_controllen < sizeof(struct cmsghdr)
807 #ifdef COMPAT_OLDSOCK
808 && mp->msg_flags != MSG_COMPAT
814 error = sockargs(&control, mp->msg_control,
815 mp->msg_controllen, MT_CONTROL);
818 #ifdef COMPAT_OLDSOCK
819 if (mp->msg_flags == MSG_COMPAT) {
822 M_PREPEND(control, sizeof(*cm), M_WAITOK);
823 cm = mtod(control, struct cmsghdr *);
824 cm->cmsg_len = control->m_len;
825 cm->cmsg_level = SOL_SOCKET;
826 cm->cmsg_type = SCM_RIGHTS;
833 error = kern_sendit(td, s, mp, flags, control, UIO_USERSPACE);
841 kern_sendit(td, s, mp, flags, control, segflg)
846 struct mbuf *control;
855 struct uio *ktruio = NULL;
861 cap_rights_init(&rights, CAP_SEND);
862 if (mp->msg_name != NULL) {
863 AUDIT_ARG_SOCKADDR(td, AT_FDCWD, mp->msg_name);
864 cap_rights_set(&rights, CAP_CONNECT);
866 error = getsock_cap(td, s, &rights, &fp, NULL);
869 so = (struct socket *)fp->f_data;
872 if (mp->msg_name != NULL && KTRPOINT(td, KTR_STRUCT))
873 ktrsockaddr(mp->msg_name);
876 if (mp->msg_name != NULL) {
877 error = mac_socket_check_connect(td->td_ucred, so,
882 error = mac_socket_check_send(td->td_ucred, so);
887 auio.uio_iov = mp->msg_iov;
888 auio.uio_iovcnt = mp->msg_iovlen;
889 auio.uio_segflg = segflg;
890 auio.uio_rw = UIO_WRITE;
892 auio.uio_offset = 0; /* XXX */
895 for (i = 0; i < mp->msg_iovlen; i++, iov++) {
896 if ((auio.uio_resid += iov->iov_len) < 0) {
902 if (KTRPOINT(td, KTR_GENIO))
903 ktruio = cloneuio(&auio);
905 len = auio.uio_resid;
906 error = sosend(so, mp->msg_name, &auio, 0, control, flags, td);
908 if (auio.uio_resid != len && (error == ERESTART ||
909 error == EINTR || error == EWOULDBLOCK))
911 /* Generation of SIGPIPE can be controlled per socket */
912 if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
913 !(flags & MSG_NOSIGNAL)) {
914 PROC_LOCK(td->td_proc);
915 tdsignal(td, SIGPIPE);
916 PROC_UNLOCK(td->td_proc);
920 td->td_retval[0] = len - auio.uio_resid;
922 if (ktruio != NULL) {
923 ktruio->uio_resid = td->td_retval[0];
924 ktrgenio(s, UIO_WRITE, ktruio, error);
935 struct sendto_args /* {
947 msg.msg_name = uap->to;
948 msg.msg_namelen = uap->tolen;
952 #ifdef COMPAT_OLDSOCK
955 aiov.iov_base = uap->buf;
956 aiov.iov_len = uap->len;
957 return (sendit(td, uap->s, &msg, uap->flags));
960 #ifdef COMPAT_OLDSOCK
964 struct osend_args /* {
978 aiov.iov_base = uap->buf;
979 aiov.iov_len = uap->len;
982 return (sendit(td, uap->s, &msg, uap->flags));
988 struct osendmsg_args /* {
998 error = copyin(uap->msg, &msg, sizeof (struct omsghdr));
1001 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1005 msg.msg_flags = MSG_COMPAT;
1006 error = sendit(td, uap->s, &msg, uap->flags);
1013 sys_sendmsg(td, uap)
1015 struct sendmsg_args /* {
1025 error = copyin(uap->msg, &msg, sizeof (msg));
1028 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1032 #ifdef COMPAT_OLDSOCK
1035 error = sendit(td, uap->s, &msg, uap->flags);
1041 kern_recvit(td, s, mp, fromseg, controlp)
1045 enum uio_seg fromseg;
1046 struct mbuf **controlp;
1050 struct mbuf *m, *control = NULL;
1054 struct sockaddr *fromsa = NULL;
1055 cap_rights_t rights;
1057 struct uio *ktruio = NULL;
1062 if (controlp != NULL)
1066 error = getsock_cap(td, s, cap_rights_init(&rights, CAP_RECV),
1073 error = mac_socket_check_receive(td->td_ucred, so);
1080 auio.uio_iov = mp->msg_iov;
1081 auio.uio_iovcnt = mp->msg_iovlen;
1082 auio.uio_segflg = UIO_USERSPACE;
1083 auio.uio_rw = UIO_READ;
1085 auio.uio_offset = 0; /* XXX */
1088 for (i = 0; i < mp->msg_iovlen; i++, iov++) {
1089 if ((auio.uio_resid += iov->iov_len) < 0) {
1095 if (KTRPOINT(td, KTR_GENIO))
1096 ktruio = cloneuio(&auio);
1098 len = auio.uio_resid;
1099 error = soreceive(so, &fromsa, &auio, NULL,
1100 (mp->msg_control || controlp) ? &control : NULL,
1103 if (auio.uio_resid != len && (error == ERESTART ||
1104 error == EINTR || error == EWOULDBLOCK))
1108 AUDIT_ARG_SOCKADDR(td, AT_FDCWD, fromsa);
1110 if (ktruio != NULL) {
1111 ktruio->uio_resid = len - auio.uio_resid;
1112 ktrgenio(s, UIO_READ, ktruio, error);
1117 td->td_retval[0] = len - auio.uio_resid;
1119 len = mp->msg_namelen;
1120 if (len <= 0 || fromsa == NULL)
1123 /* save sa_len before it is destroyed by MSG_COMPAT */
1124 len = MIN(len, fromsa->sa_len);
1125 #ifdef COMPAT_OLDSOCK
1126 if (mp->msg_flags & MSG_COMPAT)
1127 ((struct osockaddr *)fromsa)->sa_family =
1130 if (fromseg == UIO_USERSPACE) {
1131 error = copyout(fromsa, mp->msg_name,
1136 bcopy(fromsa, mp->msg_name, len);
1138 mp->msg_namelen = len;
1140 if (mp->msg_control && controlp == NULL) {
1141 #ifdef COMPAT_OLDSOCK
1143 * We assume that old recvmsg calls won't receive access
1144 * rights and other control info, esp. as control info
1145 * is always optional and those options didn't exist in 4.3.
1146 * If we receive rights, trim the cmsghdr; anything else
1149 if (control && mp->msg_flags & MSG_COMPAT) {
1150 if (mtod(control, struct cmsghdr *)->cmsg_level !=
1152 mtod(control, struct cmsghdr *)->cmsg_type !=
1154 mp->msg_controllen = 0;
1157 control->m_len -= sizeof (struct cmsghdr);
1158 control->m_data += sizeof (struct cmsghdr);
1161 len = mp->msg_controllen;
1163 mp->msg_controllen = 0;
1164 ctlbuf = mp->msg_control;
1166 while (m && len > 0) {
1167 unsigned int tocopy;
1169 if (len >= m->m_len)
1172 mp->msg_flags |= MSG_CTRUNC;
1176 if ((error = copyout(mtod(m, caddr_t),
1177 ctlbuf, tocopy)) != 0)
1184 mp->msg_controllen = ctlbuf - (caddr_t)mp->msg_control;
1189 if (fromsa && KTRPOINT(td, KTR_STRUCT))
1190 ktrsockaddr(fromsa);
1192 free(fromsa, M_SONAME);
1194 if (error == 0 && controlp != NULL)
1195 *controlp = control;
1203 recvit(td, s, mp, namelenp)
1211 error = kern_recvit(td, s, mp, UIO_USERSPACE, NULL);
1214 if (namelenp != NULL) {
1215 error = copyout(&mp->msg_namelen, namelenp, sizeof (socklen_t));
1216 #ifdef COMPAT_OLDSOCK
1217 if (mp->msg_flags & MSG_COMPAT)
1218 error = 0; /* old recvfrom didn't check */
1225 sys_recvfrom(td, uap)
1227 struct recvfrom_args /* {
1232 struct sockaddr * __restrict from;
1233 socklen_t * __restrict fromlenaddr;
1240 if (uap->fromlenaddr) {
1241 error = copyin(uap->fromlenaddr,
1242 &msg.msg_namelen, sizeof (msg.msg_namelen));
1246 msg.msg_namelen = 0;
1248 msg.msg_name = uap->from;
1249 msg.msg_iov = &aiov;
1251 aiov.iov_base = uap->buf;
1252 aiov.iov_len = uap->len;
1253 msg.msg_control = 0;
1254 msg.msg_flags = uap->flags;
1255 error = recvit(td, uap->s, &msg, uap->fromlenaddr);
1260 #ifdef COMPAT_OLDSOCK
1264 struct recvfrom_args *uap;
1267 uap->flags |= MSG_COMPAT;
1268 return (sys_recvfrom(td, uap));
1272 #ifdef COMPAT_OLDSOCK
1276 struct orecv_args /* {
1287 msg.msg_namelen = 0;
1288 msg.msg_iov = &aiov;
1290 aiov.iov_base = uap->buf;
1291 aiov.iov_len = uap->len;
1292 msg.msg_control = 0;
1293 msg.msg_flags = uap->flags;
1294 return (recvit(td, uap->s, &msg, NULL));
1298 * Old recvmsg. This code takes advantage of the fact that the old msghdr
1299 * overlays the new one, missing only the flags, and with the (old) access
1300 * rights where the control fields are now.
1305 struct orecvmsg_args /* {
1307 struct omsghdr *msg;
1315 error = copyin(uap->msg, &msg, sizeof (struct omsghdr));
1318 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1321 msg.msg_flags = uap->flags | MSG_COMPAT;
1323 error = recvit(td, uap->s, &msg, &uap->msg->msg_namelen);
1324 if (msg.msg_controllen && error == 0)
1325 error = copyout(&msg.msg_controllen,
1326 &uap->msg->msg_accrightslen, sizeof (int));
1333 sys_recvmsg(td, uap)
1335 struct recvmsg_args /* {
1342 struct iovec *uiov, *iov;
1345 error = copyin(uap->msg, &msg, sizeof (msg));
1348 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1351 msg.msg_flags = uap->flags;
1352 #ifdef COMPAT_OLDSOCK
1353 msg.msg_flags &= ~MSG_COMPAT;
1357 error = recvit(td, uap->s, &msg, NULL);
1360 error = copyout(&msg, uap->msg, sizeof(msg));
1368 sys_shutdown(td, uap)
1370 struct shutdown_args /* {
1377 cap_rights_t rights;
1380 AUDIT_ARG_FD(uap->s);
1381 error = getsock_cap(td, uap->s, cap_rights_init(&rights, CAP_SHUTDOWN),
1385 error = soshutdown(so, uap->how);
1393 sys_setsockopt(td, uap)
1395 struct setsockopt_args /* {
1404 return (kern_setsockopt(td, uap->s, uap->level, uap->name,
1405 uap->val, UIO_USERSPACE, uap->valsize));
1409 kern_setsockopt(td, s, level, name, val, valseg, valsize)
1415 enum uio_seg valseg;
1420 struct sockopt sopt;
1421 cap_rights_t rights;
1424 if (val == NULL && valsize != 0)
1426 if ((int)valsize < 0)
1429 sopt.sopt_dir = SOPT_SET;
1430 sopt.sopt_level = level;
1431 sopt.sopt_name = name;
1432 sopt.sopt_val = val;
1433 sopt.sopt_valsize = valsize;
1439 sopt.sopt_td = NULL;
1442 panic("kern_setsockopt called with bad valseg");
1446 error = getsock_cap(td, s, cap_rights_init(&rights, CAP_SETSOCKOPT),
1450 error = sosetopt(so, &sopt);
1458 sys_getsockopt(td, uap)
1460 struct getsockopt_args /* {
1464 void * __restrict val;
1465 socklen_t * __restrict avalsize;
1472 error = copyin(uap->avalsize, &valsize, sizeof (valsize));
1477 error = kern_getsockopt(td, uap->s, uap->level, uap->name,
1478 uap->val, UIO_USERSPACE, &valsize);
1481 error = copyout(&valsize, uap->avalsize, sizeof (valsize));
1486 * Kernel version of getsockopt.
1487 * optval can be a userland or userspace. optlen is always a kernel pointer.
1490 kern_getsockopt(td, s, level, name, val, valseg, valsize)
1496 enum uio_seg valseg;
1501 struct sockopt sopt;
1502 cap_rights_t rights;
1507 if ((int)*valsize < 0)
1510 sopt.sopt_dir = SOPT_GET;
1511 sopt.sopt_level = level;
1512 sopt.sopt_name = name;
1513 sopt.sopt_val = val;
1514 sopt.sopt_valsize = (size_t)*valsize; /* checked non-negative above */
1520 sopt.sopt_td = NULL;
1523 panic("kern_getsockopt called with bad valseg");
1527 error = getsock_cap(td, s, cap_rights_init(&rights, CAP_GETSOCKOPT),
1531 error = sogetopt(so, &sopt);
1532 *valsize = sopt.sopt_valsize;
1539 * getsockname1() - Get socket name.
1543 getsockname1(td, uap, compat)
1545 struct getsockname_args /* {
1547 struct sockaddr * __restrict asa;
1548 socklen_t * __restrict alen;
1552 struct sockaddr *sa;
1556 error = copyin(uap->alen, &len, sizeof(len));
1560 error = kern_getsockname(td, uap->fdes, &sa, &len);
1565 #ifdef COMPAT_OLDSOCK
1567 ((struct osockaddr *)sa)->sa_family = sa->sa_family;
1569 error = copyout(sa, uap->asa, (u_int)len);
1573 error = copyout(&len, uap->alen, sizeof(len));
1578 kern_getsockname(struct thread *td, int fd, struct sockaddr **sa,
1583 cap_rights_t rights;
1588 error = getsock_cap(td, fd, cap_rights_init(&rights, CAP_GETSOCKNAME),
1594 CURVNET_SET(so->so_vnet);
1595 error = (*so->so_proto->pr_usrreqs->pru_sockaddr)(so, sa);
1602 len = MIN(*alen, (*sa)->sa_len);
1605 if (KTRPOINT(td, KTR_STRUCT))
1610 if (error != 0 && *sa != NULL) {
1611 free(*sa, M_SONAME);
1618 sys_getsockname(td, uap)
1620 struct getsockname_args *uap;
1623 return (getsockname1(td, uap, 0));
1626 #ifdef COMPAT_OLDSOCK
1628 ogetsockname(td, uap)
1630 struct getsockname_args *uap;
1633 return (getsockname1(td, uap, 1));
1635 #endif /* COMPAT_OLDSOCK */
1638 * getpeername1() - Get name of peer for connected socket.
1642 getpeername1(td, uap, compat)
1644 struct getpeername_args /* {
1646 struct sockaddr * __restrict asa;
1647 socklen_t * __restrict alen;
1651 struct sockaddr *sa;
1655 error = copyin(uap->alen, &len, sizeof (len));
1659 error = kern_getpeername(td, uap->fdes, &sa, &len);
1664 #ifdef COMPAT_OLDSOCK
1666 ((struct osockaddr *)sa)->sa_family = sa->sa_family;
1668 error = copyout(sa, uap->asa, (u_int)len);
1672 error = copyout(&len, uap->alen, sizeof(len));
1677 kern_getpeername(struct thread *td, int fd, struct sockaddr **sa,
1682 cap_rights_t rights;
1687 error = getsock_cap(td, fd, cap_rights_init(&rights, CAP_GETPEERNAME),
1692 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
1697 CURVNET_SET(so->so_vnet);
1698 error = (*so->so_proto->pr_usrreqs->pru_peeraddr)(so, sa);
1705 len = MIN(*alen, (*sa)->sa_len);
1708 if (KTRPOINT(td, KTR_STRUCT))
1712 if (error != 0 && *sa != NULL) {
1713 free(*sa, M_SONAME);
1722 sys_getpeername(td, uap)
1724 struct getpeername_args *uap;
1727 return (getpeername1(td, uap, 0));
1730 #ifdef COMPAT_OLDSOCK
1732 ogetpeername(td, uap)
1734 struct ogetpeername_args *uap;
1737 /* XXX uap should have type `getpeername_args *' to begin with. */
1738 return (getpeername1(td, (struct getpeername_args *)uap, 1));
1740 #endif /* COMPAT_OLDSOCK */
/*
 * Copy a variable-length user buffer (sockaddr or control data) into a
 * freshly allocated mbuf of the given type.  Buffers larger than MLEN
 * get an appropriately sized mbuf from m_get2(); anything over MCLBYTES
 * is rejected (return line elided).  For MT_SONAME the copied sa_len is
 * forced to the actual buffer length, and the COMPAT_OLDSOCK hack
 * accepts old 4.3BSD-style addresses (<= 112 bytes, zero family byte).
 */
1743 sockargs(mp, buf, buflen, type)
1748 struct sockaddr *sa;
1752 if (buflen > MLEN) {
1753 #ifdef COMPAT_OLDSOCK
1754 if (type == MT_SONAME && buflen <= 112)
1755 buflen = MLEN; /* unix domain compat. hack */
1758 if (buflen > MCLBYTES)
1761 m = m_get2(buflen, M_WAITOK, type, 0);
1763 error = copyin(buf, mtod(m, caddr_t), (u_int)buflen);
1768 if (type == MT_SONAME) {
1769 sa = mtod(m, struct sockaddr *);
1771 #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
1772 if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1773 sa->sa_family = sa->sa_len;
1775 sa->sa_len = buflen;
/*
 * Copy a sockaddr from userland into a malloc(M_SONAME) buffer.
 * Rejects lengths above SOCK_MAXADDRLEN (ENAMETOOLONG) and below the
 * minimum sockaddr header size (error return elided — historically
 * EINVAL; confirm against full source).  On little-endian systems with
 * COMPAT_OLDSOCK, a zero sa_family with a plausible sa_len is treated
 * as an old-format address whose family lives in the length byte.
 * Ownership of the allocation passes to the caller via *namp.
 */
1782 getsockaddr(namp, uaddr, len)
1783 struct sockaddr **namp;
1787 struct sockaddr *sa;
1790 if (len > SOCK_MAXADDRLEN)
1791 return (ENAMETOOLONG);
1792 if (len < offsetof(struct sockaddr, sa_data[0]))
1794 sa = malloc(len, M_SONAME, M_WAITOK);
1795 error = copyin(uaddr, sa, len);
1799 #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
1800 if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1801 sa->sa_family = sa->sa_len;
1809 struct sendfile_sync {
1816 * Add more references to a vm_page + sf_buf + sendfile_sync.
/*
 * External-mbuf reference callback: take additional references on the
 * vm_page / sf_buf backing a sendfile chunk and bump the associated
 * sendfile_sync count under its mutex.  The actual page/sf_buf
 * reference lines are elided in this extract; the KASSERT documents the
 * invariant that the sync count is never incremented from zero here.
 */
1819 sf_ext_ref(void *arg1, void *arg2)
1821 struct sf_buf *sf = arg1;
1822 struct sendfile_sync *sfs = arg2;
1823 vm_page_t pg = sf_buf_page(sf);
1832 mtx_lock(&sfs->mtx);
1833 KASSERT(sfs->count > 0, ("Sendfile sync botchup count == 0"));
1835 mtx_unlock(&sfs->mtx);
1840 * Detach mapped page and release resources back to the system.
/*
 * External-mbuf free callback: unwire the page (moving it to the
 * inactive queue) and, because no object reference is held, free the
 * page ourselves if it turned out to be both unwired and orphaned
 * (pg->object == NULL).  Finally drop one sendfile_sync reference,
 * waking any SF_SYNC waiter via cv_signal() when the count hits zero.
 * The vm_page_free() call and object locking lines are elided here.
 */
1843 sf_ext_free(void *arg1, void *arg2)
1845 struct sf_buf *sf = arg1;
1846 struct sendfile_sync *sfs = arg2;
1847 vm_page_t pg = sf_buf_page(sf);
1852 vm_page_unwire(pg, PQ_INACTIVE);
1854 * Check for the object going away on us. This can
1855 * happen since we don't hold a reference to it.
1856 * If so, we're responsible for freeing the page.
1858 if (pg->wire_count == 0 && pg->object == NULL)
1863 mtx_lock(&sfs->mtx);
1864 KASSERT(sfs->count > 0, ("Sendfile sync botchup count == 0"));
1865 if (--sfs->count == 0)
1866 cv_signal(&sfs->cv);
1867 mtx_unlock(&sfs->mtx);
1874 * int sendfile(int fd, int s, off_t offset, size_t nbytes,
1875 * struct sf_hdtr *hdtr, off_t *sbytes, int flags)
1877 * Send a file specified by 'fd' and starting at 'offset' to a socket
1878 * specified by 's'. Send only 'nbytes' of the file or until EOF if nbytes ==
1879 * 0. Optionally add a header and/or trailer to the socket output. If
1880 * specified, write the total number of bytes sent into *sbytes.
/*
 * sendfile(2) syscall entry point: thin wrapper that invokes
 * do_sendfile() in non-compat mode (compat == 0; the FreeBSD 4
 * compatibility path passes 1 instead).
 */
1883 sys_sendfile(struct thread *td, struct sendfile_args *uap)
1886 return (do_sendfile(td, uap, 0));
/*
 * Common sendfile(2) implementation.  Validates that the file offset is
 * non-negative, copies in the optional sf_hdtr header/trailer iovecs
 * (building hdr_uio/trl_uio via copyinuio — caller-freed with
 * free(..., M_IOV) below), acquires the source fd for reading with
 * CAP_PREAD (read + seek, since sendfile may start anywhere in the
 * file), and delegates the transfer to fo_sendfile().  On return the
 * byte count is copied out to uap->sbytes when requested; note the
 * copyout's own error is deliberately ignored here.  Several
 * error-goto/cleanup lines are elided in this extract.
 */
1890 do_sendfile(struct thread *td, struct sendfile_args *uap, int compat)
1892 struct sf_hdtr hdtr;
1893 struct uio *hdr_uio, *trl_uio;
1895 cap_rights_t rights;
1900 * File offset must be positive. If it goes beyond EOF
1901 * we send only the header/trailer and no payload data.
1903 if (uap->offset < 0)
1906 hdr_uio = trl_uio = NULL;
1908 if (uap->hdtr != NULL) {
1909 error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
1912 if (hdtr.headers != NULL) {
1913 error = copyinuio(hdtr.headers, hdtr.hdr_cnt,
1918 if (hdtr.trailers != NULL) {
1919 error = copyinuio(hdtr.trailers, hdtr.trl_cnt,
1926 AUDIT_ARG_FD(uap->fd);
1929 * sendfile(2) can start at any offset within a file so we require
1930 * CAP_READ+CAP_SEEK = CAP_PREAD.
1932 if ((error = fget_read(td, uap->fd,
1933 cap_rights_init(&rights, CAP_PREAD), &fp)) != 0) {
1937 error = fo_sendfile(fp, uap->s, hdr_uio, trl_uio, uap->offset,
1938 uap->nbytes, &sbytes, uap->flags, compat ? SFK_COMPAT : 0, td);
1941 if (uap->sbytes != NULL)
1942 copyout(&sbytes, uap->sbytes, sizeof(off_t));
1945 free(hdr_uio, M_IOV);
1946 free(trl_uio, M_IOV);
1950 #ifdef COMPAT_FREEBSD4
1952 freebsd4_sendfile(struct thread *td, struct freebsd4_sendfile_args *uap)
1954 struct sendfile_args args;
1958 args.offset = uap->offset;
1959 args.nbytes = uap->nbytes;
1960 args.hdtr = uap->hdtr;
1961 args.sbytes = uap->sbytes;
1962 args.flags = uap->flags;
1964 return (do_sendfile(td, &args, 1));
1966 #endif /* COMPAT_FREEBSD4 */
1969 sendfile_readpage(vm_object_t obj, struct vnode *vp, int nd,
1970 off_t off, int xfsize, int bsize, struct thread *td, vm_page_t *res)
1975 int error, readahead, rv;
1977 pindex = OFF_TO_IDX(off);
1978 VM_OBJECT_WLOCK(obj);
1979 m = vm_page_grab(obj, pindex, (vp != NULL ? VM_ALLOC_NOBUSY |
1980 VM_ALLOC_IGN_SBUSY : 0) | VM_ALLOC_WIRED | VM_ALLOC_NORMAL);
1983 * Check if page is valid for what we need, otherwise initiate I/O.
1985 * The non-zero nd argument prevents disk I/O, instead we
1986 * return the caller what he specified in nd. In particular,
1987 * if we already turned some pages into mbufs, nd == EAGAIN
1988 * and the main function send them the pages before we come
1989 * here again and block.
1991 if (m->valid != 0 && vm_page_is_valid(m, off & PAGE_MASK, xfsize)) {
1994 VM_OBJECT_WUNLOCK(obj);
1997 } else if (nd != 0) {
2005 * Get the page from backing store.
2009 VM_OBJECT_WUNLOCK(obj);
2010 readahead = sfreadahead * MAXBSIZE;
2013 * Use vn_rdwr() instead of the pager interface for
2014 * the vnode, to allow the read-ahead.
2016 * XXXMAC: Because we don't have fp->f_cred here, we
2017 * pass in NOCRED. This is probably wrong, but is
2018 * consistent with our original implementation.
2020 error = vn_rdwr(UIO_READ, vp, NULL, readahead, trunc_page(off),
2021 UIO_NOCOPY, IO_NODELOCKED | IO_VMIO | ((readahead /
2022 bsize) << IO_SEQSHIFT), td->td_ucred, NOCRED, &resid, td);
2023 SFSTAT_INC(sf_iocnt);
2024 VM_OBJECT_WLOCK(obj);
2026 if (vm_pager_has_page(obj, pindex, NULL, NULL)) {
2027 rv = vm_pager_get_pages(obj, &m, 1, 0);
2028 SFSTAT_INC(sf_iocnt);
2029 if (rv != VM_PAGER_OK) {
2038 m->valid = VM_PAGE_BITS_ALL;
2046 } else if (m != NULL) {
2049 vm_page_unwire(m, PQ_INACTIVE);
2052 * See if anyone else might know about this page. If
2053 * not and it is not valid, then free it.
2055 if (m->wire_count == 0 && m->valid == 0 && !vm_page_busied(m))
2059 KASSERT(error != 0 || (m->wire_count > 0 &&
2060 vm_page_is_valid(m, off & PAGE_MASK, xfsize)),
2061 ("wrong page state m %p off %#jx xfsize %d", m, (uintmax_t)off,
2063 VM_OBJECT_WUNLOCK(obj);
/*
 * Resolve the VM object backing a sendfile(2) source descriptor.
 *
 * Accepts either a regular-file vnode (DTYPE_VNODE) or a POSIX shared
 * memory object (DTYPE_SHM) and fills in, for the caller:
 *   *obj_res   - referenced backing VM object
 *   *vp_res    - vnode (vnode case only, NULL otherwise)
 *   *shmfd_res - shmfd (shm case only, NULL otherwise)
 *   *obj_size  - current object size (va_size or shm_size)
 *   *bsize     - filesystem I/O block size (vnode case)
 *
 * NOTE(review): this chunk is a sampled view of the file — several
 * original lines (local declarations, error-path returns, the closing
 * brace) are elided between the numbered lines below.  Do not assume
 * the visible lines are contiguous.
 */
2068 sendfile_getobj(struct thread *td, struct file *fp, vm_object_t *obj_res,
2069 struct vnode **vp_res, struct shmfd **shmfd_res, off_t *obj_size,
2075 struct shmfd *shmfd;
/* Both out-pointers start NULL; exactly one is set per descriptor type. */
2078 vp = *vp_res = NULL;
2080 shmfd = *shmfd_res = NULL;
2084 * The file descriptor must be a regular file and have a
2085 * backing VM object.
/*
 * Vnode-backed file: lock shared, insist on VREG, then obtain the
 * current size via VOP_GETATTR (presumably also the VM object from
 * the vnode — that line is elided in this view; verify against the
 * full source).
 */
2087 if (fp->f_type == DTYPE_VNODE) {
2089 vn_lock(vp, LK_SHARED | LK_RETRY);
2090 if (vp->v_type != VREG) {
2094 *bsize = vp->v_mount->mnt_stat.f_iosize;
2095 error = VOP_GETATTR(vp, &va, td->td_ucred);
2098 *obj_size = va.va_size;
/* POSIX shm object: its VM object and size are available directly. */
2104 } else if (fp->f_type == DTYPE_SHM) {
2107 obj = shmfd->shm_object;
2108 *obj_size = shmfd->shm_size;
/* Reject an object already being torn down (OBJ_DEAD). */
2114 VM_OBJECT_WLOCK(obj);
2115 if ((obj->flags & OBJ_DEAD) != 0) {
2116 VM_OBJECT_WUNLOCK(obj);
2122 * Temporarily increase the backing VM object's reference
2123 * count so that a forced reclamation of its vnode does not
2124 * immediately destroy it.
2126 vm_object_reference_locked(obj);
2127 VM_OBJECT_WUNLOCK(obj);
/*
 * Look up and validate the destination socket for sendfile(2).
 *
 * Fetches the socket's file via getsock_cap() with a CAP_SEND
 * capability-rights check, then requires a connected SOCK_STREAM
 * socket.  On success *sock_fp holds the file reference (caller
 * drops it) and *so points at the socket.
 *
 * NOTE(review): sampled view — declarations and the error-return
 * statements between the numbered lines are elided here.
 */
2139 kern_sendfile_getsock(struct thread *td, int s, struct file **sock_fp,
2142 cap_rights_t rights;
2149 * The socket must be a stream socket and connected.
2151 error = getsock_cap(td, s, cap_rights_init(&rights, CAP_SEND),
2155 *so = (*sock_fp)->f_data;
/* Only stream sockets are supported; the error returns are elided. */
2156 if ((*so)->so_type != SOCK_STREAM)
2158 if (((*so)->so_state & SS_ISCONNECTED) == 0)
2164 vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
2165 struct uio *trl_uio, off_t offset, size_t nbytes, off_t *sent, int flags,
2166 int kflags, struct thread *td)
2168 struct file *sock_fp;
2170 struct vm_object *obj;
2175 struct shmfd *shmfd;
2176 struct sendfile_sync *sfs;
2178 off_t off, xfsize, fsbytes, sbytes, rem, obj_size;
2179 int error, bsize, nd, hdrlen, mnw;
2186 fsbytes = sbytes = 0;
2191 error = sendfile_getobj(td, fp, &obj, &vp, &shmfd, &obj_size, &bsize);
2197 error = kern_sendfile_getsock(td, sockfd, &sock_fp, &so);
2202 * Do not wait on memory allocations but return ENOMEM for
2203 * caller to retry later.
2204 * XXX: Experimental.
2206 if (flags & SF_MNOWAIT)
2209 if (flags & SF_SYNC) {
2210 sfs = malloc(sizeof *sfs, M_TEMP, M_WAITOK | M_ZERO);
2211 mtx_init(&sfs->mtx, "sendfile", NULL, MTX_DEF);
2212 cv_init(&sfs->cv, "sendfile");
2216 error = mac_socket_check_send(td->td_ucred, so);
2221 /* If headers are specified copy them into mbufs. */
2222 if (hdr_uio != NULL) {
2223 hdr_uio->uio_td = td;
2224 hdr_uio->uio_rw = UIO_WRITE;
2225 if (hdr_uio->uio_resid > 0) {
2227 * In FBSD < 5.0 the nbytes to send also included
2228 * the header. If compat is specified subtract the
2229 * header size from nbytes.
2231 if (kflags & SFK_COMPAT) {
2232 if (nbytes > hdr_uio->uio_resid)
2233 nbytes -= hdr_uio->uio_resid;
2237 m = m_uiotombuf(hdr_uio, (mnw ? M_NOWAIT : M_WAITOK),
2240 error = mnw ? EAGAIN : ENOBUFS;
2243 hdrlen = m_length(m, NULL);
2248 * Protect against multiple writers to the socket.
2250 * XXXRW: Historically this has assumed non-interruptibility, so now
2251 * we implement that, but possibly shouldn't.
2253 (void)sblock(&so->so_snd, SBL_WAIT | SBL_NOINTR);
2256 * Loop through the pages of the file, starting with the requested
2257 * offset. Get a file page (do I/O if necessary), map the file page
2258 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
2260 * This is done in two loops. The inner loop turns as many pages
2261 * as it can, up to available socket buffer space, without blocking
2262 * into mbufs to have it bulk delivered into the socket send buffer.
2263 * The outer loop checks the state and available space of the socket
2264 * and takes care of the overall progress.
2266 for (off = offset; ; ) {
2272 if ((nbytes != 0 && nbytes == fsbytes) ||
2273 (nbytes == 0 && obj_size == fsbytes))
2282 * Check the socket state for ongoing connection,
2283 * no errors and space in socket buffer.
2284 * If space is low allow for the remainder of the
2285 * file to be processed if it fits the socket buffer.
2286 * Otherwise block in waiting for sufficient space
2287 * to proceed, or if the socket is nonblocking, return
2288 * to userland with EAGAIN while reporting how far
2290 * We wait until the socket buffer has significant free
2291 * space to do bulk sends. This makes good use of file
2292 * system read ahead and allows packet segmentation
2293 * offloading hardware to take over lots of work. If
2294 * we were not careful here we would send off only one
2297 SOCKBUF_LOCK(&so->so_snd);
2298 if (so->so_snd.sb_lowat < so->so_snd.sb_hiwat / 2)
2299 so->so_snd.sb_lowat = so->so_snd.sb_hiwat / 2;
2301 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
2303 SOCKBUF_UNLOCK(&so->so_snd);
2305 } else if (so->so_error) {
2306 error = so->so_error;
2308 SOCKBUF_UNLOCK(&so->so_snd);
2311 space = sbspace(&so->so_snd);
2314 space < so->so_snd.sb_lowat)) {
2315 if (so->so_state & SS_NBIO) {
2316 SOCKBUF_UNLOCK(&so->so_snd);
2321 * sbwait drops the lock while sleeping.
2322 * When we loop back to retry_space the
2323 * state may have changed and we retest
2326 error = sbwait(&so->so_snd);
2328 * An error from sbwait usually indicates that we've
2329 * been interrupted by a signal. If we've sent anything
2330 * then return bytes sent, otherwise return the error.
2333 SOCKBUF_UNLOCK(&so->so_snd);
2338 SOCKBUF_UNLOCK(&so->so_snd);
2341 * Reduce space in the socket buffer by the size of
2342 * the header mbuf chain.
2343 * hdrlen is set to 0 after the first loop.
2348 error = vn_lock(vp, LK_SHARED);
2351 error = VOP_GETATTR(vp, &va, td->td_ucred);
2352 if (error != 0 || off >= va.va_size) {
2356 obj_size = va.va_size;
2360 * Loop and construct maximum sized mbuf chain to be bulk
2361 * dumped into socket buffer.
2363 while (space > loopbytes) {
2368 * Calculate the amount to transfer.
2369 * Not to exceed a page, the EOF,
2370 * or the passed in nbytes.
2372 pgoff = (vm_offset_t)(off & PAGE_MASK);
2373 rem = obj_size - offset;
2375 rem = omin(rem, nbytes);
2376 rem -= fsbytes + loopbytes;
2377 xfsize = omin(PAGE_SIZE - pgoff, rem);
2378 xfsize = omin(space - loopbytes, xfsize);
2380 done = 1; /* all data sent */
2385 * Attempt to look up the page. Allocate
2386 * if not found or wait and loop if busy.
2389 nd = EAGAIN; /* send what we already got */
2390 else if ((flags & SF_NODISKIO) != 0)
2394 error = sendfile_readpage(obj, vp, nd, off,
2395 xfsize, bsize, td, &pg);
2397 if (error == EAGAIN)
2398 error = 0; /* not a real error */
2403 * Get a sendfile buf. When allocating the
2404 * first buffer for mbuf chain, we usually
2405 * wait as long as necessary, but this wait
2406 * can be interrupted. For consequent
2407 * buffers, do not sleep, since several
2408 * threads might exhaust the buffers and then
2411 sf = sf_buf_alloc(pg, (mnw || m != NULL) ? SFB_NOWAIT :
2414 SFSTAT_INC(sf_allocfail);
2416 vm_page_unwire(pg, PQ_INACTIVE);
2417 KASSERT(pg->object != NULL,
2418 ("%s: object disappeared", __func__));
2421 error = (mnw ? EAGAIN : EINTR);
2426 * Get an mbuf and set it up as having
2429 m0 = m_get((mnw ? M_NOWAIT : M_WAITOK), MT_DATA);
2431 error = (mnw ? EAGAIN : ENOBUFS);
2432 sf_ext_free(sf, NULL);
2436 * Attach EXT_SFBUF external storage.
2438 m0->m_ext.ext_buf = (caddr_t )sf_buf_kva(sf);
2439 m0->m_ext.ext_size = PAGE_SIZE;
2440 m0->m_ext.ext_arg1 = sf;
2441 m0->m_ext.ext_arg2 = sfs;
2442 m0->m_ext.ext_type = EXT_SFBUF;
2443 m0->m_ext.ext_flags = 0;
2444 m0->m_flags |= (M_EXT|M_RDONLY);
2445 m0->m_data = (char *)sf_buf_kva(sf) + pgoff;
2448 /* Append to mbuf chain. */
2452 m_last(m)->m_next = m0;
2457 /* Keep track of bits processed. */
2458 loopbytes += xfsize;
2462 mtx_lock(&sfs->mtx);
2464 mtx_unlock(&sfs->mtx);
2471 /* Add the buffer chain to the socket buffer. */
2475 mlen = m_length(m, NULL);
2476 SOCKBUF_LOCK(&so->so_snd);
2477 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
2479 SOCKBUF_UNLOCK(&so->so_snd);
2482 SOCKBUF_UNLOCK(&so->so_snd);
2483 CURVNET_SET(so->so_vnet);
2484 /* Avoid error aliasing. */
2485 err = (*so->so_proto->pr_usrreqs->pru_send)
2486 (so, 0, m, NULL, NULL, td);
2490 * We need two counters to get the
2491 * file offset and nbytes to send
2493 * - sbytes contains the total amount
2494 * of bytes sent, including headers.
2495 * - fsbytes contains the total amount
2496 * of bytes sent from the file.
2504 } else if (error == 0)
2506 m = NULL; /* pru_send always consumes */
2509 /* Quit outer loop on error or when we're done. */
2517 * Send trailers. Wimp out and use writev(2).
2519 if (trl_uio != NULL) {
2520 sbunlock(&so->so_snd);
2521 error = kern_writev(td, sockfd, trl_uio);
2523 sbytes += td->td_retval[0];
2528 sbunlock(&so->so_snd);
2531 * If there was no error we have to clear td->td_retval[0]
2532 * because it may have been set by writev.
2535 td->td_retval[0] = 0;
2541 vm_object_deallocate(obj);
2548 mtx_lock(&sfs->mtx);
2549 if (sfs->count != 0)
2550 cv_wait(&sfs->cv, &sfs->mtx);
2551 KASSERT(sfs->count == 0, ("sendfile sync still busy"));
2552 cv_destroy(&sfs->cv);
2553 mtx_destroy(&sfs->mtx);
2557 if (error == ERESTART)