2 * Copyright (c) 1982, 1986, 1989, 1990, 1993
3 * The Regents of the University of California. All rights reserved.
5 * sendfile(2) and related extensions:
6 * Copyright (c) 1998, David Greenman. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * @(#)uipc_syscalls.c 8.4 (Berkeley) 2/21/94
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include "opt_capsicum.h"
40 #include "opt_inet6.h"
42 #include "opt_compat.h"
43 #include "opt_ktrace.h"
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/capability.h>
48 #include <sys/condvar.h>
49 #include <sys/kernel.h>
51 #include <sys/mutex.h>
52 #include <sys/sysproto.h>
53 #include <sys/malloc.h>
54 #include <sys/filedesc.h>
55 #include <sys/event.h>
57 #include <sys/fcntl.h>
59 #include <sys/filio.h>
62 #include <sys/mount.h>
64 #include <sys/protosw.h>
65 #include <sys/rwlock.h>
66 #include <sys/sf_buf.h>
67 #include <sys/sf_sync.h>
68 #include <sys/sf_base.h>
69 #include <sys/sysent.h>
70 #include <sys/socket.h>
71 #include <sys/socketvar.h>
72 #include <sys/signalvar.h>
73 #include <sys/syscallsubr.h>
74 #include <sys/sysctl.h>
76 #include <sys/vnode.h>
78 #include <sys/ktrace.h>
80 #ifdef COMPAT_FREEBSD32
81 #include <compat/freebsd32/freebsd32_util.h>
86 #include <security/audit/audit.h>
87 #include <security/mac/mac_framework.h>
90 #include <vm/vm_param.h>
91 #include <vm/vm_object.h>
92 #include <vm/vm_page.h>
93 #include <vm/vm_pager.h>
94 #include <vm/vm_kern.h>
95 #include <vm/vm_extern.h>
98 #if defined(INET) || defined(INET6)
100 #include <netinet/sctp.h>
101 #include <netinet/sctp_peeloff.h>
103 #endif /* INET || INET6 */
106 * Flags for accept1() and kern_accept4(), in addition to SOCK_CLOEXEC
109 #define ACCEPT4_INHERIT 0x1
110 #define ACCEPT4_COMPAT 0x2
112 static int sendit(struct thread *td, int s, struct msghdr *mp, int flags);
113 static int recvit(struct thread *td, int s, struct msghdr *mp, void *namelenp);
115 static int accept1(struct thread *td, int s, struct sockaddr *uname,
116 socklen_t *anamelen, int flags);
117 static int do_sendfile(struct thread *td, struct sendfile_args *uap,
119 static int getsockname1(struct thread *td, struct getsockname_args *uap,
121 static int getpeername1(struct thread *td, struct getpeername_args *uap,
124 counter_u64_t sfstat[sizeof(struct sfstat) / sizeof(uint64_t)];
127 * sendfile(2)-related variables and associated sysctls
129 static SYSCTL_NODE(_kern_ipc, OID_AUTO, sendfile, CTLFLAG_RW, 0,
130 "sendfile(2) tunables");
131 static int sfreadahead = 1;
132 SYSCTL_INT(_kern_ipc_sendfile, OID_AUTO, readahead, CTLFLAG_RW,
133 &sfreadahead, 0, "Number of sendfile(2) read-ahead MAXBSIZE blocks");
135 static uma_zone_t zone_sfsync;
138 sfstat_init(const void *unused)
141 COUNTER_ARRAY_ALLOC(sfstat, sizeof(struct sfstat) / sizeof(uint64_t),
144 SYSINIT(sfstat, SI_SUB_MBUF, SI_ORDER_FIRST, sfstat_init, NULL);
147 sf_sync_init(const void *unused)
150 zone_sfsync = uma_zcreate("sendfile_sync", sizeof(struct sendfile_sync),
156 SYSINIT(sf_sync, SI_SUB_MBUF, SI_ORDER_FIRST, sf_sync_init, NULL);
159 sfstat_sysctl(SYSCTL_HANDLER_ARGS)
163 COUNTER_ARRAY_COPY(sfstat, &s, sizeof(s) / sizeof(uint64_t));
165 COUNTER_ARRAY_ZERO(sfstat, sizeof(s) / sizeof(uint64_t));
166 return (SYSCTL_OUT(req, &s, sizeof(s)));
168 SYSCTL_PROC(_kern_ipc, OID_AUTO, sfstat, CTLTYPE_OPAQUE | CTLFLAG_RW,
169 NULL, 0, sfstat_sysctl, "I", "sendfile statistics");
172 * Convert a user file descriptor to a kernel file entry and check if required
173 * capability rights are present.
174 * A reference on the file entry is held upon returning.
177 getsock_cap(struct filedesc *fdp, int fd, cap_rights_t *rightsp,
178 struct file **fpp, u_int *fflagp)
183 error = fget_unlocked(fdp, fd, rightsp, 0, &fp, NULL);
186 if (fp->f_type != DTYPE_SOCKET) {
187 fdrop(fp, curthread);
191 *fflagp = fp->f_flag;
197 * System call interface to the socket abstraction.
199 #if defined(COMPAT_43)
200 #define COMPAT_OLDSOCK
206 struct socket_args /* {
214 int fd, error, type, oflag, fflag;
216 AUDIT_ARG_SOCKET(uap->domain, uap->type, uap->protocol);
221 if ((type & SOCK_CLOEXEC) != 0) {
222 type &= ~SOCK_CLOEXEC;
225 if ((type & SOCK_NONBLOCK) != 0) {
226 type &= ~SOCK_NONBLOCK;
231 error = mac_socket_check_create(td->td_ucred, uap->domain, type,
236 error = falloc(td, &fp, &fd, oflag);
239 /* An extra reference on `fp' has been held for us by falloc(). */
240 error = socreate(uap->domain, &so, type, uap->protocol,
243 fdclose(td->td_proc->p_fd, fp, fd, td);
245 finit(fp, FREAD | FWRITE | fflag, DTYPE_SOCKET, so, &socketops);
246 if ((fflag & FNONBLOCK) != 0)
247 (void) fo_ioctl(fp, FIONBIO, &fflag, td->td_ucred, td);
248 td->td_retval[0] = fd;
258 struct bind_args /* {
267 error = getsockaddr(&sa, uap->name, uap->namelen);
269 error = kern_bind(td, uap->s, sa);
276 kern_bindat(struct thread *td, int dirfd, int fd, struct sockaddr *sa)
284 AUDIT_ARG_SOCKADDR(td, dirfd, sa);
285 error = getsock_cap(td->td_proc->p_fd, fd,
286 cap_rights_init(&rights, CAP_BIND), &fp, NULL);
291 if (KTRPOINT(td, KTR_STRUCT))
295 error = mac_socket_check_bind(td->td_ucred, so, sa);
298 if (dirfd == AT_FDCWD)
299 error = sobind(so, sa, td);
301 error = sobindat(dirfd, so, sa, td);
310 kern_bind(struct thread *td, int fd, struct sockaddr *sa)
313 return (kern_bindat(td, AT_FDCWD, fd, sa));
320 struct bindat_args /* {
330 error = getsockaddr(&sa, uap->name, uap->namelen);
332 error = kern_bindat(td, uap->fd, uap->s, sa);
342 struct listen_args /* {
352 AUDIT_ARG_FD(uap->s);
353 error = getsock_cap(td->td_proc->p_fd, uap->s,
354 cap_rights_init(&rights, CAP_LISTEN), &fp, NULL);
358 error = mac_socket_check_listen(td->td_ucred, so);
361 error = solisten(so, uap->backlog, td);
371 accept1(td, s, uname, anamelen, flags)
374 struct sockaddr *uname;
378 struct sockaddr *name;
384 return (kern_accept4(td, s, NULL, NULL, flags, NULL));
386 error = copyin(anamelen, &namelen, sizeof (namelen));
390 error = kern_accept4(td, s, &name, &namelen, flags, &fp);
393 * return a namelen of zero for older code which might
394 * ignore the return value from accept.
397 (void) copyout(&namelen, anamelen, sizeof(*anamelen));
401 if (error == 0 && uname != NULL) {
402 #ifdef COMPAT_OLDSOCK
403 if (flags & ACCEPT4_COMPAT)
404 ((struct osockaddr *)name)->sa_family =
407 error = copyout(name, uname, namelen);
410 error = copyout(&namelen, anamelen,
413 fdclose(td->td_proc->p_fd, fp, td->td_retval[0], td);
415 free(name, M_SONAME);
420 kern_accept(struct thread *td, int s, struct sockaddr **name,
421 socklen_t *namelen, struct file **fp)
423 return (kern_accept4(td, s, name, namelen, ACCEPT4_INHERIT, fp));
427 kern_accept4(struct thread *td, int s, struct sockaddr **name,
428 socklen_t *namelen, int flags, struct file **fp)
430 struct filedesc *fdp;
431 struct file *headfp, *nfp = NULL;
432 struct sockaddr *sa = NULL;
433 struct socket *head, *so;
443 fdp = td->td_proc->p_fd;
444 error = getsock_cap(fdp, s, cap_rights_init(&rights, CAP_ACCEPT),
448 head = headfp->f_data;
449 if ((head->so_options & SO_ACCEPTCONN) == 0) {
454 error = mac_socket_check_accept(td->td_ucred, head);
458 error = falloc(td, &nfp, &fd, (flags & SOCK_CLOEXEC) ? O_CLOEXEC : 0);
462 if ((head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->so_comp)) {
467 while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0) {
468 if (head->so_rcv.sb_state & SBS_CANTRCVMORE) {
469 head->so_error = ECONNABORTED;
472 error = msleep(&head->so_timeo, &accept_mtx, PSOCK | PCATCH,
479 if (head->so_error) {
480 error = head->so_error;
485 so = TAILQ_FIRST(&head->so_comp);
486 KASSERT(!(so->so_qstate & SQ_INCOMP), ("accept1: so SQ_INCOMP"));
487 KASSERT(so->so_qstate & SQ_COMP, ("accept1: so not SQ_COMP"));
490 * Before changing the flags on the socket, we have to bump the
491 * reference count. Otherwise, if the protocol calls sofree(),
492 * the socket will be released due to a zero refcount.
494 SOCK_LOCK(so); /* soref() and so_state update */
495 soref(so); /* file descriptor reference */
497 TAILQ_REMOVE(&head->so_comp, so, so_list);
499 if (flags & ACCEPT4_INHERIT)
500 so->so_state |= (head->so_state & SS_NBIO);
502 so->so_state |= (flags & SOCK_NONBLOCK) ? SS_NBIO : 0;
503 so->so_qstate &= ~SQ_COMP;
509 /* An extra reference on `nfp' has been held for us by falloc(). */
510 td->td_retval[0] = fd;
512 /* connection has been removed from the listen queue */
513 KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0);
515 if (flags & ACCEPT4_INHERIT) {
516 pgid = fgetown(&head->so_sigio);
518 fsetown(pgid, &so->so_sigio);
520 fflag &= ~(FNONBLOCK | FASYNC);
521 if (flags & SOCK_NONBLOCK)
525 finit(nfp, fflag, DTYPE_SOCKET, so, &socketops);
526 /* Sync socket nonblocking/async state with file flags */
527 tmp = fflag & FNONBLOCK;
528 (void) fo_ioctl(nfp, FIONBIO, &tmp, td->td_ucred, td);
529 tmp = fflag & FASYNC;
530 (void) fo_ioctl(nfp, FIOASYNC, &tmp, td->td_ucred, td);
532 error = soaccept(so, &sa);
535 * return a namelen of zero for older code which might
536 * ignore the return value from accept.
547 AUDIT_ARG_SOCKADDR(td, AT_FDCWD, sa);
549 /* check sa_len before it is destroyed */
550 if (*namelen > sa->sa_len)
551 *namelen = sa->sa_len;
553 if (KTRPOINT(td, KTR_STRUCT))
563 * close the new descriptor, assuming someone hasn't ripped it
567 fdclose(fdp, nfp, fd, td);
570 * Release explicitly held references before returning. We return
571 * a reference on nfp to the caller on success if they request it.
590 struct accept_args *uap;
593 return (accept1(td, uap->s, uap->name, uap->anamelen, ACCEPT4_INHERIT));
599 struct accept4_args *uap;
602 if (uap->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
605 return (accept1(td, uap->s, uap->name, uap->anamelen, uap->flags));
608 #ifdef COMPAT_OLDSOCK
612 struct accept_args *uap;
615 return (accept1(td, uap->s, uap->name, uap->anamelen,
616 ACCEPT4_INHERIT | ACCEPT4_COMPAT));
618 #endif /* COMPAT_OLDSOCK */
624 struct connect_args /* {
633 error = getsockaddr(&sa, uap->name, uap->namelen);
635 error = kern_connect(td, uap->s, sa);
642 kern_connectat(struct thread *td, int dirfd, int fd, struct sockaddr *sa)
647 int error, interrupted = 0;
650 AUDIT_ARG_SOCKADDR(td, dirfd, sa);
651 error = getsock_cap(td->td_proc->p_fd, fd,
652 cap_rights_init(&rights, CAP_CONNECT), &fp, NULL);
656 if (so->so_state & SS_ISCONNECTING) {
661 if (KTRPOINT(td, KTR_STRUCT))
665 error = mac_socket_check_connect(td->td_ucred, so, sa);
669 if (dirfd == AT_FDCWD)
670 error = soconnect(so, sa, td);
672 error = soconnectat(dirfd, so, sa, td);
675 if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
680 while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
681 error = msleep(&so->so_timeo, SOCK_MTX(so), PSOCK | PCATCH,
684 if (error == EINTR || error == ERESTART)
690 error = so->so_error;
696 so->so_state &= ~SS_ISCONNECTING;
697 if (error == ERESTART)
705 kern_connect(struct thread *td, int fd, struct sockaddr *sa)
708 return (kern_connectat(td, AT_FDCWD, fd, sa));
713 sys_connectat(td, uap)
715 struct connectat_args /* {
725 error = getsockaddr(&sa, uap->name, uap->namelen);
727 error = kern_connectat(td, uap->fd, uap->s, sa);
734 kern_socketpair(struct thread *td, int domain, int type, int protocol,
737 struct filedesc *fdp = td->td_proc->p_fd;
738 struct file *fp1, *fp2;
739 struct socket *so1, *so2;
740 int fd, error, oflag, fflag;
742 AUDIT_ARG_SOCKET(domain, type, protocol);
746 if ((type & SOCK_CLOEXEC) != 0) {
747 type &= ~SOCK_CLOEXEC;
750 if ((type & SOCK_NONBLOCK) != 0) {
751 type &= ~SOCK_NONBLOCK;
755 /* We might want to have a separate check for socket pairs. */
756 error = mac_socket_check_create(td->td_ucred, domain, type,
761 error = socreate(domain, &so1, type, protocol, td->td_ucred, td);
764 error = socreate(domain, &so2, type, protocol, td->td_ucred, td);
767 /* On success extra reference to `fp1' and 'fp2' is set by falloc. */
768 error = falloc(td, &fp1, &fd, oflag);
772 fp1->f_data = so1; /* so1 already has ref count */
773 error = falloc(td, &fp2, &fd, oflag);
776 fp2->f_data = so2; /* so2 already has ref count */
778 error = soconnect2(so1, so2);
781 if (type == SOCK_DGRAM) {
783 * Datagram socket connection is asymmetric.
785 error = soconnect2(so2, so1);
789 finit(fp1, FREAD | FWRITE | fflag, DTYPE_SOCKET, fp1->f_data,
791 finit(fp2, FREAD | FWRITE | fflag, DTYPE_SOCKET, fp2->f_data,
793 if ((fflag & FNONBLOCK) != 0) {
794 (void) fo_ioctl(fp1, FIONBIO, &fflag, td->td_ucred, td);
795 (void) fo_ioctl(fp2, FIONBIO, &fflag, td->td_ucred, td);
801 fdclose(fdp, fp2, rsv[1], td);
804 fdclose(fdp, fp1, rsv[0], td);
816 sys_socketpair(struct thread *td, struct socketpair_args *uap)
820 error = kern_socketpair(td, uap->domain, uap->type,
824 error = copyout(sv, uap->rsv, 2 * sizeof(int));
826 (void)kern_close(td, sv[0]);
827 (void)kern_close(td, sv[1]);
833 sendit(td, s, mp, flags)
839 struct mbuf *control;
843 #ifdef CAPABILITY_MODE
844 if (IN_CAPABILITY_MODE(td) && (mp->msg_name != NULL))
848 if (mp->msg_name != NULL) {
849 error = getsockaddr(&to, mp->msg_name, mp->msg_namelen);
859 if (mp->msg_control) {
860 if (mp->msg_controllen < sizeof(struct cmsghdr)
861 #ifdef COMPAT_OLDSOCK
862 && mp->msg_flags != MSG_COMPAT
868 error = sockargs(&control, mp->msg_control,
869 mp->msg_controllen, MT_CONTROL);
872 #ifdef COMPAT_OLDSOCK
873 if (mp->msg_flags == MSG_COMPAT) {
876 M_PREPEND(control, sizeof(*cm), M_WAITOK);
877 cm = mtod(control, struct cmsghdr *);
878 cm->cmsg_len = control->m_len;
879 cm->cmsg_level = SOL_SOCKET;
880 cm->cmsg_type = SCM_RIGHTS;
887 error = kern_sendit(td, s, mp, flags, control, UIO_USERSPACE);
895 kern_sendit(td, s, mp, flags, control, segflg)
900 struct mbuf *control;
909 struct uio *ktruio = NULL;
915 cap_rights_init(&rights, CAP_SEND);
916 if (mp->msg_name != NULL) {
917 AUDIT_ARG_SOCKADDR(td, AT_FDCWD, mp->msg_name);
918 cap_rights_set(&rights, CAP_CONNECT);
920 error = getsock_cap(td->td_proc->p_fd, s, &rights, &fp, NULL);
923 so = (struct socket *)fp->f_data;
926 if (mp->msg_name != NULL && KTRPOINT(td, KTR_STRUCT))
927 ktrsockaddr(mp->msg_name);
930 if (mp->msg_name != NULL) {
931 error = mac_socket_check_connect(td->td_ucred, so,
936 error = mac_socket_check_send(td->td_ucred, so);
941 auio.uio_iov = mp->msg_iov;
942 auio.uio_iovcnt = mp->msg_iovlen;
943 auio.uio_segflg = segflg;
944 auio.uio_rw = UIO_WRITE;
946 auio.uio_offset = 0; /* XXX */
949 for (i = 0; i < mp->msg_iovlen; i++, iov++) {
950 if ((auio.uio_resid += iov->iov_len) < 0) {
956 if (KTRPOINT(td, KTR_GENIO))
957 ktruio = cloneuio(&auio);
959 len = auio.uio_resid;
960 error = sosend(so, mp->msg_name, &auio, 0, control, flags, td);
962 if (auio.uio_resid != len && (error == ERESTART ||
963 error == EINTR || error == EWOULDBLOCK))
965 /* Generation of SIGPIPE can be controlled per socket */
966 if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
967 !(flags & MSG_NOSIGNAL)) {
968 PROC_LOCK(td->td_proc);
969 tdsignal(td, SIGPIPE);
970 PROC_UNLOCK(td->td_proc);
974 td->td_retval[0] = len - auio.uio_resid;
976 if (ktruio != NULL) {
977 ktruio->uio_resid = td->td_retval[0];
978 ktrgenio(s, UIO_WRITE, ktruio, error);
989 struct sendto_args /* {
1001 msg.msg_name = uap->to;
1002 msg.msg_namelen = uap->tolen;
1003 msg.msg_iov = &aiov;
1005 msg.msg_control = 0;
1006 #ifdef COMPAT_OLDSOCK
1009 aiov.iov_base = uap->buf;
1010 aiov.iov_len = uap->len;
1011 return (sendit(td, uap->s, &msg, uap->flags));
1014 #ifdef COMPAT_OLDSOCK
1018 struct osend_args /* {
1029 msg.msg_namelen = 0;
1030 msg.msg_iov = &aiov;
1032 aiov.iov_base = uap->buf;
1033 aiov.iov_len = uap->len;
1034 msg.msg_control = 0;
1036 return (sendit(td, uap->s, &msg, uap->flags));
1042 struct osendmsg_args /* {
1052 error = copyin(uap->msg, &msg, sizeof (struct omsghdr));
1055 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1059 msg.msg_flags = MSG_COMPAT;
1060 error = sendit(td, uap->s, &msg, uap->flags);
1067 sys_sendmsg(td, uap)
1069 struct sendmsg_args /* {
1079 error = copyin(uap->msg, &msg, sizeof (msg));
1082 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1086 #ifdef COMPAT_OLDSOCK
1089 error = sendit(td, uap->s, &msg, uap->flags);
1095 kern_recvit(td, s, mp, fromseg, controlp)
1099 enum uio_seg fromseg;
1100 struct mbuf **controlp;
1104 struct mbuf *m, *control = NULL;
1108 struct sockaddr *fromsa = NULL;
1109 cap_rights_t rights;
1111 struct uio *ktruio = NULL;
1116 if (controlp != NULL)
1120 error = getsock_cap(td->td_proc->p_fd, s,
1121 cap_rights_init(&rights, CAP_RECV), &fp, NULL);
1127 error = mac_socket_check_receive(td->td_ucred, so);
1134 auio.uio_iov = mp->msg_iov;
1135 auio.uio_iovcnt = mp->msg_iovlen;
1136 auio.uio_segflg = UIO_USERSPACE;
1137 auio.uio_rw = UIO_READ;
1139 auio.uio_offset = 0; /* XXX */
1142 for (i = 0; i < mp->msg_iovlen; i++, iov++) {
1143 if ((auio.uio_resid += iov->iov_len) < 0) {
1149 if (KTRPOINT(td, KTR_GENIO))
1150 ktruio = cloneuio(&auio);
1152 len = auio.uio_resid;
1153 error = soreceive(so, &fromsa, &auio, NULL,
1154 (mp->msg_control || controlp) ? &control : NULL,
1157 if (auio.uio_resid != len && (error == ERESTART ||
1158 error == EINTR || error == EWOULDBLOCK))
1162 AUDIT_ARG_SOCKADDR(td, AT_FDCWD, fromsa);
1164 if (ktruio != NULL) {
1165 ktruio->uio_resid = len - auio.uio_resid;
1166 ktrgenio(s, UIO_READ, ktruio, error);
1171 td->td_retval[0] = len - auio.uio_resid;
1173 len = mp->msg_namelen;
1174 if (len <= 0 || fromsa == NULL)
1177 /* save sa_len before it is destroyed by MSG_COMPAT */
1178 len = MIN(len, fromsa->sa_len);
1179 #ifdef COMPAT_OLDSOCK
1180 if (mp->msg_flags & MSG_COMPAT)
1181 ((struct osockaddr *)fromsa)->sa_family =
1184 if (fromseg == UIO_USERSPACE) {
1185 error = copyout(fromsa, mp->msg_name,
1190 bcopy(fromsa, mp->msg_name, len);
1192 mp->msg_namelen = len;
1194 if (mp->msg_control && controlp == NULL) {
1195 #ifdef COMPAT_OLDSOCK
1197 * We assume that old recvmsg calls won't receive access
1198 * rights and other control info, esp. as control info
1199 * is always optional and those options didn't exist in 4.3.
1200 * If we receive rights, trim the cmsghdr; anything else
1203 if (control && mp->msg_flags & MSG_COMPAT) {
1204 if (mtod(control, struct cmsghdr *)->cmsg_level !=
1206 mtod(control, struct cmsghdr *)->cmsg_type !=
1208 mp->msg_controllen = 0;
1211 control->m_len -= sizeof (struct cmsghdr);
1212 control->m_data += sizeof (struct cmsghdr);
1215 len = mp->msg_controllen;
1217 mp->msg_controllen = 0;
1218 ctlbuf = mp->msg_control;
1220 while (m && len > 0) {
1221 unsigned int tocopy;
1223 if (len >= m->m_len)
1226 mp->msg_flags |= MSG_CTRUNC;
1230 if ((error = copyout(mtod(m, caddr_t),
1231 ctlbuf, tocopy)) != 0)
1238 mp->msg_controllen = ctlbuf - (caddr_t)mp->msg_control;
1243 if (fromsa && KTRPOINT(td, KTR_STRUCT))
1244 ktrsockaddr(fromsa);
1246 free(fromsa, M_SONAME);
1248 if (error == 0 && controlp != NULL)
1249 *controlp = control;
1257 recvit(td, s, mp, namelenp)
1265 error = kern_recvit(td, s, mp, UIO_USERSPACE, NULL);
1268 if (namelenp != NULL) {
1269 error = copyout(&mp->msg_namelen, namelenp, sizeof (socklen_t));
1270 #ifdef COMPAT_OLDSOCK
1271 if (mp->msg_flags & MSG_COMPAT)
1272 error = 0; /* old recvfrom didn't check */
1279 sys_recvfrom(td, uap)
1281 struct recvfrom_args /* {
1286 struct sockaddr * __restrict from;
1287 socklen_t * __restrict fromlenaddr;
1294 if (uap->fromlenaddr) {
1295 error = copyin(uap->fromlenaddr,
1296 &msg.msg_namelen, sizeof (msg.msg_namelen));
1300 msg.msg_namelen = 0;
1302 msg.msg_name = uap->from;
1303 msg.msg_iov = &aiov;
1305 aiov.iov_base = uap->buf;
1306 aiov.iov_len = uap->len;
1307 msg.msg_control = 0;
1308 msg.msg_flags = uap->flags;
1309 error = recvit(td, uap->s, &msg, uap->fromlenaddr);
1314 #ifdef COMPAT_OLDSOCK
1318 struct recvfrom_args *uap;
1321 uap->flags |= MSG_COMPAT;
1322 return (sys_recvfrom(td, uap));
1326 #ifdef COMPAT_OLDSOCK
1330 struct orecv_args /* {
1341 msg.msg_namelen = 0;
1342 msg.msg_iov = &aiov;
1344 aiov.iov_base = uap->buf;
1345 aiov.iov_len = uap->len;
1346 msg.msg_control = 0;
1347 msg.msg_flags = uap->flags;
1348 return (recvit(td, uap->s, &msg, NULL));
1352 * Old recvmsg. This code takes advantage of the fact that the old msghdr
1353 * overlays the new one, missing only the flags, and with the (old) access
1354 * rights where the control fields are now.
1359 struct orecvmsg_args /* {
1361 struct omsghdr *msg;
1369 error = copyin(uap->msg, &msg, sizeof (struct omsghdr));
1372 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1375 msg.msg_flags = uap->flags | MSG_COMPAT;
1377 error = recvit(td, uap->s, &msg, &uap->msg->msg_namelen);
1378 if (msg.msg_controllen && error == 0)
1379 error = copyout(&msg.msg_controllen,
1380 &uap->msg->msg_accrightslen, sizeof (int));
1387 sys_recvmsg(td, uap)
1389 struct recvmsg_args /* {
1396 struct iovec *uiov, *iov;
1399 error = copyin(uap->msg, &msg, sizeof (msg));
1402 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1405 msg.msg_flags = uap->flags;
1406 #ifdef COMPAT_OLDSOCK
1407 msg.msg_flags &= ~MSG_COMPAT;
1411 error = recvit(td, uap->s, &msg, NULL);
1414 error = copyout(&msg, uap->msg, sizeof(msg));
1422 sys_shutdown(td, uap)
1424 struct shutdown_args /* {
1431 cap_rights_t rights;
1434 AUDIT_ARG_FD(uap->s);
1435 error = getsock_cap(td->td_proc->p_fd, uap->s,
1436 cap_rights_init(&rights, CAP_SHUTDOWN), &fp, NULL);
1439 error = soshutdown(so, uap->how);
1447 sys_setsockopt(td, uap)
1449 struct setsockopt_args /* {
1458 return (kern_setsockopt(td, uap->s, uap->level, uap->name,
1459 uap->val, UIO_USERSPACE, uap->valsize));
1463 kern_setsockopt(td, s, level, name, val, valseg, valsize)
1469 enum uio_seg valseg;
1474 struct sockopt sopt;
1475 cap_rights_t rights;
1478 if (val == NULL && valsize != 0)
1480 if ((int)valsize < 0)
1483 sopt.sopt_dir = SOPT_SET;
1484 sopt.sopt_level = level;
1485 sopt.sopt_name = name;
1486 sopt.sopt_val = val;
1487 sopt.sopt_valsize = valsize;
1493 sopt.sopt_td = NULL;
1496 panic("kern_setsockopt called with bad valseg");
1500 error = getsock_cap(td->td_proc->p_fd, s,
1501 cap_rights_init(&rights, CAP_SETSOCKOPT), &fp, NULL);
1504 error = sosetopt(so, &sopt);
1512 sys_getsockopt(td, uap)
1514 struct getsockopt_args /* {
1518 void * __restrict val;
1519 socklen_t * __restrict avalsize;
1526 error = copyin(uap->avalsize, &valsize, sizeof (valsize));
1531 error = kern_getsockopt(td, uap->s, uap->level, uap->name,
1532 uap->val, UIO_USERSPACE, &valsize);
1535 error = copyout(&valsize, uap->avalsize, sizeof (valsize));
1540 * Kernel version of getsockopt.
1541 * optval can be a userland or userspace. optlen is always a kernel pointer.
1544 kern_getsockopt(td, s, level, name, val, valseg, valsize)
1550 enum uio_seg valseg;
1555 struct sockopt sopt;
1556 cap_rights_t rights;
1561 if ((int)*valsize < 0)
1564 sopt.sopt_dir = SOPT_GET;
1565 sopt.sopt_level = level;
1566 sopt.sopt_name = name;
1567 sopt.sopt_val = val;
1568 sopt.sopt_valsize = (size_t)*valsize; /* checked non-negative above */
1574 sopt.sopt_td = NULL;
1577 panic("kern_getsockopt called with bad valseg");
1581 error = getsock_cap(td->td_proc->p_fd, s,
1582 cap_rights_init(&rights, CAP_GETSOCKOPT), &fp, NULL);
1585 error = sogetopt(so, &sopt);
1586 *valsize = sopt.sopt_valsize;
1593 * getsockname1() - Get socket name.
1597 getsockname1(td, uap, compat)
1599 struct getsockname_args /* {
1601 struct sockaddr * __restrict asa;
1602 socklen_t * __restrict alen;
1606 struct sockaddr *sa;
1610 error = copyin(uap->alen, &len, sizeof(len));
1614 error = kern_getsockname(td, uap->fdes, &sa, &len);
1619 #ifdef COMPAT_OLDSOCK
1621 ((struct osockaddr *)sa)->sa_family = sa->sa_family;
1623 error = copyout(sa, uap->asa, (u_int)len);
1627 error = copyout(&len, uap->alen, sizeof(len));
1632 kern_getsockname(struct thread *td, int fd, struct sockaddr **sa,
1637 cap_rights_t rights;
1642 error = getsock_cap(td->td_proc->p_fd, fd,
1643 cap_rights_init(&rights, CAP_GETSOCKNAME), &fp, NULL);
1648 CURVNET_SET(so->so_vnet);
1649 error = (*so->so_proto->pr_usrreqs->pru_sockaddr)(so, sa);
1656 len = MIN(*alen, (*sa)->sa_len);
1659 if (KTRPOINT(td, KTR_STRUCT))
1664 if (error != 0 && *sa != NULL) {
1665 free(*sa, M_SONAME);
1672 sys_getsockname(td, uap)
1674 struct getsockname_args *uap;
1677 return (getsockname1(td, uap, 0));
1680 #ifdef COMPAT_OLDSOCK
1682 ogetsockname(td, uap)
1684 struct getsockname_args *uap;
1687 return (getsockname1(td, uap, 1));
1689 #endif /* COMPAT_OLDSOCK */
1692 * getpeername1() - Get name of peer for connected socket.
1696 getpeername1(td, uap, compat)
1698 struct getpeername_args /* {
1700 struct sockaddr * __restrict asa;
1701 socklen_t * __restrict alen;
1705 struct sockaddr *sa;
1709 error = copyin(uap->alen, &len, sizeof (len));
1713 error = kern_getpeername(td, uap->fdes, &sa, &len);
1718 #ifdef COMPAT_OLDSOCK
1720 ((struct osockaddr *)sa)->sa_family = sa->sa_family;
1722 error = copyout(sa, uap->asa, (u_int)len);
1726 error = copyout(&len, uap->alen, sizeof(len));
1731 kern_getpeername(struct thread *td, int fd, struct sockaddr **sa,
1736 cap_rights_t rights;
1741 error = getsock_cap(td->td_proc->p_fd, fd,
1742 cap_rights_init(&rights, CAP_GETPEERNAME), &fp, NULL);
1746 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
1751 CURVNET_SET(so->so_vnet);
1752 error = (*so->so_proto->pr_usrreqs->pru_peeraddr)(so, sa);
1759 len = MIN(*alen, (*sa)->sa_len);
1762 if (KTRPOINT(td, KTR_STRUCT))
1766 if (error != 0 && *sa != NULL) {
1767 free(*sa, M_SONAME);
1776 sys_getpeername(td, uap)
1778 struct getpeername_args *uap;
1781 return (getpeername1(td, uap, 0));
1784 #ifdef COMPAT_OLDSOCK
1786 ogetpeername(td, uap)
1788 struct ogetpeername_args *uap;
1791 /* XXX uap should have type `getpeername_args *' to begin with. */
1792 return (getpeername1(td, (struct getpeername_args *)uap, 1));
1794 #endif /* COMPAT_OLDSOCK */
1797 sockargs(mp, buf, buflen, type)
1802 struct sockaddr *sa;
1806 if (buflen > MLEN) {
1807 #ifdef COMPAT_OLDSOCK
1808 if (type == MT_SONAME && buflen <= 112)
1809 buflen = MLEN; /* unix domain compat. hack */
1812 if (buflen > MCLBYTES)
1815 m = m_get2(buflen, M_WAITOK, type, 0);
1817 error = copyin(buf, mtod(m, caddr_t), (u_int)buflen);
1822 if (type == MT_SONAME) {
1823 sa = mtod(m, struct sockaddr *);
1825 #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
1826 if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1827 sa->sa_family = sa->sa_len;
1829 sa->sa_len = buflen;
1836 getsockaddr(namp, uaddr, len)
1837 struct sockaddr **namp;
1841 struct sockaddr *sa;
1844 if (len > SOCK_MAXADDRLEN)
1845 return (ENAMETOOLONG);
1846 if (len < offsetof(struct sockaddr, sa_data[0]))
1848 sa = malloc(len, M_SONAME, M_WAITOK);
1849 error = copyin(uaddr, sa, len);
1853 #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
1854 if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1855 sa->sa_family = sa->sa_len;
1864 * Detach mapped page and release resources back to the system.
1867 sf_buf_mext(struct mbuf *mb, void *addr, void *args)
1870 struct sendfile_sync *sfs;
1872 m = sf_buf_page(args);
1875 vm_page_unwire(m, 0);
1877 * Check for the object going away on us. This can
1878 * happen since we don't hold a reference to it.
1879 * If so, we're responsible for freeing the page.
1881 if (m->wire_count == 0 && m->object == NULL)
1888 return (EXT_FREE_OK);
1892 sf_sync_deref(struct sendfile_sync *sfs)
1898 mtx_lock(&sfs->mtx);
1899 KASSERT(sfs->count> 0, ("Sendfile sync botchup count == 0"));
1900 if (--sfs->count == 0)
1901 cv_signal(&sfs->cv);
1902 mtx_unlock(&sfs->mtx);
1906 * Allocate a sendfile_sync state structure.
1908 * For now this only knows about the "sleep" sync, but later it will
1909 * grow various other personalities.
1911 struct sendfile_sync *
1912 sf_sync_alloc(uint32_t flags)
1914 struct sendfile_sync *sfs;
1916 sfs = uma_zalloc(zone_sfsync, M_WAITOK | M_ZERO);
1917 mtx_init(&sfs->mtx, "sendfile", NULL, MTX_DEF);
1918 cv_init(&sfs->cv, "sendfile");
1925 * Take a reference to a sfsync instance.
1927 * This has to map 1:1 to free calls coming in via sf_buf_mext(),
1928 * so typically this will be referenced once for each mbuf allocated.
1931 sf_sync_ref(struct sendfile_sync *sfs)
1937 mtx_lock(&sfs->mtx);
1939 mtx_unlock(&sfs->mtx);
1943 sf_sync_syscall_wait(struct sendfile_sync *sfs)
1949 mtx_lock(&sfs->mtx);
1950 if (sfs->count != 0)
1951 cv_wait(&sfs->cv, &sfs->mtx);
1952 KASSERT(sfs->count == 0, ("sendfile sync still busy"));
1953 mtx_unlock(&sfs->mtx);
1957 sf_sync_free(struct sendfile_sync *sfs)
1964 * XXX we should ensure that nothing else has this
1965 * locked before freeing.
1967 mtx_lock(&sfs->mtx);
1968 KASSERT(sfs->count == 0, ("sendfile sync still busy"));
1969 cv_destroy(&sfs->cv);
1970 mtx_destroy(&sfs->mtx);
1971 uma_zfree(zone_sfsync, sfs);
1977 * int sendfile(int fd, int s, off_t offset, size_t nbytes,
1978 * struct sf_hdtr *hdtr, off_t *sbytes, int flags)
1980 * Send a file specified by 'fd' and starting at 'offset' to a socket
1981 * specified by 's'. Send only 'nbytes' of the file or until EOF if nbytes ==
1982 * 0. Optionally add a header and/or trailer to the socket output. If
1983 * specified, write the total number of bytes sent into *sbytes.
1986 sys_sendfile(struct thread *td, struct sendfile_args *uap)
1989 return (do_sendfile(td, uap, 0));
1993 _do_sendfile(struct thread *td, int src_fd, int sock_fd, int flags,
1994 int compat, off_t offset, size_t nbytes, off_t *sbytes,
1995 struct uio *hdr_uio, struct uio *trl_uio)
1997 cap_rights_t rights;
1998 struct sendfile_sync *sfs = NULL;
2002 AUDIT_ARG_FD(src_fd);
2005 * sendfile(2) can start at any offset within a file so we require
2006 * CAP_READ+CAP_SEEK = CAP_PREAD.
2008 if ((error = fget_read(td, src_fd,
2009 cap_rights_init(&rights, CAP_PREAD), &fp)) != 0) {
2014 * If we need to wait for completion, initialise the sfsync
2017 if (flags & SF_SYNC)
2018 sfs = sf_sync_alloc(flags & SF_SYNC);
2020 error = fo_sendfile(fp, sock_fd, hdr_uio, trl_uio, offset,
2021 nbytes, sbytes, flags, compat ? SFK_COMPAT : 0, sfs, td);
2024 * If appropriate, do the wait and free here.
2027 sf_sync_syscall_wait(sfs);
2032 * XXX Should we wait until the send has completed before freeing the source
2033 * file handle? It's the previous behaviour, sure, but is it required?
2034 * We've wired down the page references after all.
/*
 * Copy in the user-supplied sf_hdtr header/trailer description (if
 * any), build kernel uios for the header and trailer iovecs, then run
 * the transfer via _do_sendfile().  On return, write the byte count
 * back to userland and free the temporary uios.
 *
 * NOTE(review): the copyout() of sbytes below appears to have its
 * return value ignored — presumably an EFAULT there should become the
 * syscall's error when error == 0.  Lines are elided in this view, so
 * the existing error handling cannot be confirmed; verify against the
 * full source before changing.
 */
2043 do_sendfile(struct thread *td, struct sendfile_args *uap, int compat)
2045 struct sf_hdtr hdtr;
2046 struct uio *hdr_uio, *trl_uio;
2051 * File offset must be positive. If it goes beyond EOF
2052 * we send only the header/trailer and no payload data.
2054 if (uap->offset < 0)
2057 hdr_uio = trl_uio = NULL;
2059 if (uap->hdtr != NULL) {
2060 error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
2063 if (hdtr.headers != NULL) {
2064 error = copyinuio(hdtr.headers, hdtr.hdr_cnt, &hdr_uio);
2068 if (hdtr.trailers != NULL) {
2069 error = copyinuio(hdtr.trailers, hdtr.trl_cnt, &trl_uio);
2075 error = _do_sendfile(td, uap->fd, uap->s, uap->flags, compat,
2076 uap->offset, uap->nbytes, &sbytes, hdr_uio, trl_uio);
2078 if (uap->sbytes != NULL) {
2079 copyout(&sbytes, uap->sbytes, sizeof(off_t));
/* copyinuio() allocates from M_IOV; release on all paths. */
2082 free(hdr_uio, M_IOV);
2083 free(trl_uio, M_IOV);
2087 #ifdef COMPAT_FREEBSD4
/*
 * FreeBSD 4.x compatibility shim: repack the old syscall arguments
 * into a modern sendfile_args and call do_sendfile() with compat == 1
 * (header bytes count against nbytes, pre-5.0 behaviour).
 * NOTE(review): assignments of args.fd and args.s are not visible in
 * this extracted view (lines elided between the declaration and
 * args.offset) — confirm they exist in the full source.
 */
2089 freebsd4_sendfile(struct thread *td, struct freebsd4_sendfile_args *uap)
2091 struct sendfile_args args;
2095 args.offset = uap->offset;
2096 args.nbytes = uap->nbytes;
2097 args.hdtr = uap->hdtr;
2098 args.sbytes = uap->sbytes;
2099 args.flags = uap->flags;
2101 return (do_sendfile(td, &args, 1));
2103 #endif /* COMPAT_FREEBSD4 */
/*
 * Fetch (and wire) the VM page backing file offset 'off' for sendfile.
 *
 * obj    - backing VM object of the file / shm
 * vp     - vnode if the object is vnode-backed, else NULL (shm path)
 * nd     - nonzero means "no disk I/O allowed": return nd instead of
 *          blocking (EAGAIN lets the caller flush already-built mbufs,
 *          EBUSY is used for SF_NODISKIO)
 * xfsize - number of bytes within the page that must be valid
 * res    - out: the wired, valid page on success
 *
 * On success the page is returned wired with the needed range valid;
 * on failure any wiring taken here is undone and the page may be
 * freed if no one else can know about it.
 */
2106 sendfile_readpage(vm_object_t obj, struct vnode *vp, int nd,
2107 off_t off, int xfsize, int bsize, struct thread *td, vm_page_t *res)
2112 int error, readahead, rv;
2114 pindex = OFF_TO_IDX(off);
2115 VM_OBJECT_WLOCK(obj);
/* For vnodes, grab without busying so vn_rdwr() below can proceed. */
2116 m = vm_page_grab(obj, pindex, (vp != NULL ? VM_ALLOC_NOBUSY |
2117 VM_ALLOC_IGN_SBUSY : 0) | VM_ALLOC_WIRED | VM_ALLOC_NORMAL);
2120 * Check if page is valid for what we need, otherwise initiate I/O.
2122 * The non-zero nd argument prevents disk I/O, instead we
2123 * return the caller what he specified in nd. In particular,
2124 * if we already turned some pages into mbufs, nd == EAGAIN
2125 * and the main function send them the pages before we come
2126 * here again and block.
2128 if (m->valid != 0 && vm_page_is_valid(m, off & PAGE_MASK, xfsize)) {
2131 VM_OBJECT_WUNLOCK(obj);
2134 } else if (nd != 0) {
2142 * Get the page from backing store.
2146 VM_OBJECT_WUNLOCK(obj);
2147 readahead = sfreadahead * MAXBSIZE;
2150 * Use vn_rdwr() instead of the pager interface for
2151 * the vnode, to allow the read-ahead.
2153 * XXXMAC: Because we don't have fp->f_cred here, we
2154 * pass in NOCRED. This is probably wrong, but is
2155 * consistent with our original implementation.
2157 error = vn_rdwr(UIO_READ, vp, NULL, readahead, trunc_page(off),
2158 UIO_NOCOPY, IO_NODELOCKED | IO_VMIO | ((readahead /
2159 bsize) << IO_SEQSHIFT), td->td_ucred, NOCRED, &resid, td);
2160 SFSTAT_INC(sf_iocnt);
2161 VM_OBJECT_WLOCK(obj);
/* Non-vnode (shm) path: pull the page in through the pager. */
2163 if (vm_pager_has_page(obj, pindex, NULL, NULL)) {
2164 rv = vm_pager_get_pages(obj, &m, 1, 0);
2165 SFSTAT_INC(sf_iocnt);
/* Pager may have replaced the page; re-lookup by index. */
2166 m = vm_page_lookup(obj, pindex);
2169 else if (rv != VM_PAGER_OK) {
/* Pager has no backing: treat the page as zero-filled and valid. */
2178 m->valid = VM_PAGE_BITS_ALL;
2186 } else if (m != NULL) {
/* Error path: drop our wiring... */
2189 vm_page_unwire(m, 0);
2192 * See if anyone else might know about this page. If
2193 * not and it is not valid, then free it.
2195 if (m->wire_count == 0 && m->valid == 0 && !vm_page_busied(m))
2199 KASSERT(error != 0 || (m->wire_count > 0 &&
2200 vm_page_is_valid(m, off & PAGE_MASK, xfsize)),
2201 ("wrong page state m %p off %#jx xfsize %d", m, (uintmax_t)off,
2203 VM_OBJECT_WUNLOCK(obj);
/*
 * Resolve the source file descriptor into a VM object sendfile can
 * page from.  Supports two descriptor types:
 *   DTYPE_VNODE - must be VREG; returns the vnode, its fs iosize as
 *                 *bsize, and the file size from VOP_GETATTR()
 *   DTYPE_SHM   - POSIX shared memory; returns the shmfd and its size
 * The object's reference count is bumped before return so a forced
 * vnode reclaim cannot destroy it while the transfer is in flight;
 * the caller must vm_object_deallocate() it.
 */
2208 sendfile_getobj(struct thread *td, struct file *fp, vm_object_t *obj_res,
2209 struct vnode **vp_res, struct shmfd **shmfd_res, off_t *obj_size,
2215 struct shmfd *shmfd;
2218 vp = *vp_res = NULL;
2220 shmfd = *shmfd_res = NULL;
2224 * The file descriptor must be a regular file and have a
2225 * backing VM object.
2227 if (fp->f_type == DTYPE_VNODE) {
2229 vn_lock(vp, LK_SHARED | LK_RETRY);
2230 if (vp->v_type != VREG) {
2234 *bsize = vp->v_mount->mnt_stat.f_iosize;
2235 error = VOP_GETATTR(vp, &va, td->td_ucred);
2238 *obj_size = va.va_size;
2244 } else if (fp->f_type == DTYPE_SHM) {
2246 obj = shmfd->shm_object;
2247 *obj_size = shmfd->shm_size;
/* Refuse objects already being torn down. */
2253 VM_OBJECT_WLOCK(obj);
2254 if ((obj->flags & OBJ_DEAD) != 0) {
2255 VM_OBJECT_WUNLOCK(obj);
2261 * Temporarily increase the backing VM object's reference
2262 * count so that a forced reclamation of its vnode does not
2263 * immediately destroy it.
2265 vm_object_reference_locked(obj);
2266 VM_OBJECT_WUNLOCK(obj);
/*
 * Resolve socket descriptor 's' for sendfile: requires CAP_SEND, and
 * the socket must be a connected SOCK_STREAM.  On success *sock_fp
 * holds a referenced file and *so its socket; caller drops the ref.
 */
2278 kern_sendfile_getsock(struct thread *td, int s, struct file **sock_fp,
2281 cap_rights_t rights;
2288 * The socket must be a stream socket and connected.
2290 error = getsock_cap(td->td_proc->p_fd, s, cap_rights_init(&rights,
2291 CAP_SEND), sock_fp, NULL);
2294 *so = (*sock_fp)->f_data;
2295 if ((*so)->so_type != SOCK_STREAM)
2297 if (((*so)->so_state & SS_ISCONNECTED) == 0)
/*
 * The sendfile engine (fo_sendfile backend for vnodes and shm).
 *
 * Resolves the source object and destination socket, optionally sends
 * a header mbuf chain, then loops: wait for socket-buffer space, wire
 * file pages (doing read-ahead I/O if needed), wrap them zero-copy in
 * sf_buf-backed mbufs, and push each chain to the protocol with
 * pru_send.  A trailer, if given, is written with kern_writev().
 *
 * nbytes == 0 means "until EOF".  *sent receives total bytes sent
 * (including headers).  mnw (SF_MNOWAIT) makes all mbuf allocations
 * non-blocking, converting failures to EAGAIN.
 *
 * NOTE(review): many lines (error checks, gotos, cleanup labels) are
 * elided in this extracted view; the code below is left byte-identical
 * and only annotated.
 */
2303 vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
2304 struct uio *trl_uio, off_t offset, size_t nbytes, off_t *sent, int flags,
2305 int kflags, struct sendfile_sync *sfs, struct thread *td)
2307 struct file *sock_fp;
2309 struct vm_object *obj;
2314 struct shmfd *shmfd;
2316 off_t off, xfsize, fsbytes, sbytes, rem, obj_size;
2317 int error, bsize, nd, hdrlen, mnw;
2323 fsbytes = sbytes = 0;
2328 error = sendfile_getobj(td, fp, &obj, &vp, &shmfd, &obj_size, &bsize);
2334 error = kern_sendfile_getsock(td, sockfd, &sock_fp, &so);
2339 * Do not wait on memory allocations but return ENOMEM for
2340 * caller to retry later.
2341 * XXX: Experimental.
2343 if (flags & SF_MNOWAIT)
/* MAC hook: may veto sending on this socket entirely. */
2347 error = mac_socket_check_send(td->td_ucred, so);
2352 /* If headers are specified copy them into mbufs. */
2353 if (hdr_uio != NULL) {
2354 hdr_uio->uio_td = td;
2355 hdr_uio->uio_rw = UIO_WRITE;
2356 if (hdr_uio->uio_resid > 0) {
2358 * In FBSD < 5.0 the nbytes to send also included
2359 * the header. If compat is specified subtract the
2360 * header size from nbytes.
2362 if (kflags & SFK_COMPAT) {
2363 if (nbytes > hdr_uio->uio_resid)
2364 nbytes -= hdr_uio->uio_resid;
2368 m = m_uiotombuf(hdr_uio, (mnw ? M_NOWAIT : M_WAITOK),
2371 error = mnw ? EAGAIN : ENOBUFS;
2374 hdrlen = m_length(m, NULL);
2379 * Protect against multiple writers to the socket.
2381 * XXXRW: Historically this has assumed non-interruptibility, so now
2382 * we implement that, but possibly shouldn't.
2384 (void)sblock(&so->so_snd, SBL_WAIT | SBL_NOINTR);
2387 * Loop through the pages of the file, starting with the requested
2388 * offset. Get a file page (do I/O if necessary), map the file page
2389 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
2391 * This is done in two loops. The inner loop turns as many pages
2392 * as it can, up to available socket buffer space, without blocking
2393 * into mbufs to have it bulk delivered into the socket send buffer.
2394 * The outer loop checks the state and available space of the socket
2395 * and takes care of the overall progress.
2397 for (off = offset; ; ) {
/* Done when the requested byte count (or EOF if nbytes==0) is hit. */
2403 if ((nbytes != 0 && nbytes == fsbytes) ||
2404 (nbytes == 0 && obj_size == fsbytes))
2413 * Check the socket state for ongoing connection,
2414 * no errors and space in socket buffer.
2415 * If space is low allow for the remainder of the
2416 * file to be processed if it fits the socket buffer.
2417 * Otherwise block in waiting for sufficient space
2418 * to proceed, or if the socket is nonblocking, return
2419 * to userland with EAGAIN while reporting how far
2421 * We wait until the socket buffer has significant free
2422 * space to do bulk sends. This makes good use of file
2423 * system read ahead and allows packet segmentation
2424 * offloading hardware to take over lots of work. If
2425 * we were not careful here we would send off only one
2428 SOCKBUF_LOCK(&so->so_snd);
2429 if (so->so_snd.sb_lowat < so->so_snd.sb_hiwat / 2)
2430 so->so_snd.sb_lowat = so->so_snd.sb_hiwat / 2;
2432 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
2434 SOCKBUF_UNLOCK(&so->so_snd);
2436 } else if (so->so_error) {
2437 error = so->so_error;
2439 SOCKBUF_UNLOCK(&so->so_snd);
2442 space = sbspace(&so->so_snd);
2445 space < so->so_snd.sb_lowat)) {
2446 if (so->so_state & SS_NBIO) {
2447 SOCKBUF_UNLOCK(&so->so_snd);
2452 * sbwait drops the lock while sleeping.
2453 * When we loop back to retry_space the
2454 * state may have changed and we retest
2457 error = sbwait(&so->so_snd);
2459 * An error from sbwait usually indicates that we've
2460 * been interrupted by a signal. If we've sent anything
2461 * then return bytes sent, otherwise return the error.
2464 SOCKBUF_UNLOCK(&so->so_snd);
2469 SOCKBUF_UNLOCK(&so->so_snd);
2472 * Reduce space in the socket buffer by the size of
2473 * the header mbuf chain.
2474 * hdrlen is set to 0 after the first loop.
/* Re-validate the vnode and refresh the file size each pass. */
2479 error = vn_lock(vp, LK_SHARED);
2482 error = VOP_GETATTR(vp, &va, td->td_ucred);
2483 if (error != 0 || off >= va.va_size) {
2487 obj_size = va.va_size;
2491 * Loop and construct maximum sized mbuf chain to be bulk
2492 * dumped into socket buffer.
2494 while (space > loopbytes) {
2499 * Calculate the amount to transfer.
2500 * Not to exceed a page, the EOF,
2501 * or the passed in nbytes.
2503 pgoff = (vm_offset_t)(off & PAGE_MASK);
/*
 * NOTE(review): 'offset' here is the original starting offset, not
 * the running 'off'; presumably intentional since fsbytes+loopbytes
 * is subtracted next — verify against the full source.
 */
2504 rem = obj_size - offset;
2506 rem = omin(rem, nbytes);
2507 rem -= fsbytes + loopbytes;
2508 xfsize = omin(PAGE_SIZE - pgoff, rem);
2509 xfsize = omin(space - loopbytes, xfsize);
2511 done = 1; /* all data sent */
2516 * Attempt to look up the page. Allocate
2517 * if not found or wait and loop if busy.
2520 nd = EAGAIN; /* send what we already got */
2521 else if ((flags & SF_NODISKIO) != 0)
2525 error = sendfile_readpage(obj, vp, nd, off,
2526 xfsize, bsize, td, &pg);
2528 if (error == EAGAIN)
2529 error = 0; /* not a real error */
2534 * Get a sendfile buf. When allocating the
2535 * first buffer for mbuf chain, we usually
2536 * wait as long as necessary, but this wait
2537 * can be interrupted. For consequent
2538 * buffers, do not sleep, since several
2539 * threads might exhaust the buffers and then
2542 sf = sf_buf_alloc(pg, (mnw || m != NULL) ? SFB_NOWAIT :
2545 SFSTAT_INC(sf_allocfail);
2547 vm_page_unwire(pg, 0);
2548 KASSERT(pg->object != NULL,
2549 ("%s: object disappeared", __func__));
2552 error = (mnw ? EAGAIN : EINTR);
2557 * Get an mbuf and set it up as having
/* external storage (the sf_buf page) to avoid a copy. */
2560 m0 = m_get((mnw ? M_NOWAIT : M_WAITOK), MT_DATA);
2562 error = (mnw ? EAGAIN : ENOBUFS);
2563 (void)sf_buf_mext(NULL, NULL, sf);
2566 if (m_extadd(m0, (caddr_t )sf_buf_kva(sf), PAGE_SIZE,
2567 sf_buf_mext, sfs, sf, M_RDONLY, EXT_SFBUF,
2568 (mnw ? M_NOWAIT : M_WAITOK)) != 0) {
2569 error = (mnw ? EAGAIN : ENOBUFS);
2570 (void)sf_buf_mext(NULL, NULL, sf);
2574 m0->m_data = (char *)sf_buf_kva(sf) + pgoff;
2577 /* Append to mbuf chain. */
2581 m_last(m)->m_next = m0;
2586 /* Keep track of bits processed. */
2587 loopbytes += xfsize;
2591 * XXX eventually this should be a sfsync
/* method call or the like, rather than a flag count. */
2601 /* Add the buffer chain to the socket buffer. */
2605 mlen = m_length(m, NULL);
2606 SOCKBUF_LOCK(&so->so_snd);
2607 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
2609 SOCKBUF_UNLOCK(&so->so_snd);
2612 SOCKBUF_UNLOCK(&so->so_snd);
2613 CURVNET_SET(so->so_vnet);
2614 /* Avoid error aliasing. */
2615 err = (*so->so_proto->pr_usrreqs->pru_send)
2616 (so, 0, m, NULL, NULL, td);
2620 * We need two counters to get the
2621 * file offset and nbytes to send
2623 * - sbytes contains the total amount
2624 * of bytes sent, including headers.
2625 * - fsbytes contains the total amount
2626 * of bytes sent from the file.
2634 } else if (error == 0)
2636 m = NULL; /* pru_send always consumes */
2639 /* Quit outer loop on error or when we're done. */
2647 * Send trailers. Wimp out and use writev(2).
2649 if (trl_uio != NULL) {
2650 sbunlock(&so->so_snd);
2651 error = kern_writev(td, sockfd, trl_uio);
2653 sbytes += td->td_retval[0];
2658 sbunlock(&so->so_snd);
2661 * If there was no error we have to clear td->td_retval[0]
2662 * because it may have been set by writev.
2665 td->td_retval[0] = 0;
/* Drop the object reference taken in sendfile_getobj(). */
2671 vm_object_deallocate(obj);
/* Hide ERESTART from userland: report progress as EINTR instead. */
2677 if (error == ERESTART)
/*
 * Functionality only compiled in if SCTP is defined in the kernel Makefile,
 * otherwise all return EOPNOTSUPP.
 * XXX: We should make this loadable one day.
 */
/*
 * sctp_peeloff(2): detach ("peel off") one association from a
 * one-to-many SCTP socket into a brand new one-to-one socket and
 * return a fresh file descriptor for it.  Requires CAP_PEELOFF on the
 * parent descriptor.  On any failure after falloc() the new
 * descriptor is closed again via fdclose().
 * NOTE(review): K&R-style definition; error-path lines are elided in
 * this extracted view.
 */
2690 sys_sctp_peeloff(td, uap)
2692 struct sctp_peeloff_args /* {
2697 #if (defined(INET) || defined(INET6)) && defined(SCTP)
2698 struct file *nfp = NULL;
2699 struct socket *head, *so;
2700 cap_rights_t rights;
2704 AUDIT_ARG_FD(uap->sd);
2705 error = fgetsock(td, uap->sd, cap_rights_init(&rights, CAP_PEELOFF),
2709 if (head->so_proto->pr_protocol != IPPROTO_SCTP) {
2713 error = sctp_can_peel_off(head, (sctp_assoc_t)uap->name);
2717 * At this point we know we do have a assoc to pull
2718 * we proceed to get the fd setup. This may block
2722 error = falloc(td, &nfp, &fd, 0);
2725 td->td_retval[0] = fd;
2727 CURVNET_SET(head->so_vnet);
2728 so = sonewconn(head, SS_ISCONNECTED);
2734 * Before changing the flags on the socket, we have to bump the
2735 * reference count. Otherwise, if the protocol calls sofree(),
2736 * the socket will be released due to a zero refcount.
2739 soref(so); /* file descriptor reference */
/* Detach from the parent's completion queue; inherit non-blocking mode. */
2744 TAILQ_REMOVE(&head->so_comp, so, so_list);
2746 so->so_state |= (head->so_state & SS_NBIO);
2747 so->so_state &= ~SS_NOFDREF;
2748 so->so_qstate &= ~SQ_COMP;
2751 finit(nfp, fflag, DTYPE_SOCKET, so, &socketops);
2752 error = sctp_do_peeloff(head, so, (sctp_assoc_t)uap->name);
/* Propagate async-I/O ownership (SIGIO recipient) to the new socket. */
2755 if (head->so_sigio != NULL)
2756 fsetown(fgetown(&head->so_sigio), &so->so_sigio);
2760 * close the new descriptor, assuming someone hasn't ripped it
2761 * out from under us.
2764 fdclose(td->td_proc->p_fd, nfp, fd, td);
2767 * Release explicitly held references before returning.
/* Kernel built without SCTP support. */
2777 return (EOPNOTSUPP);
/*
 * sctp_generic_sendmsg(2): send a single buffer on an SCTP socket,
 * optionally with a destination address (uap->to/tolen) and an
 * sctp_sndrcvinfo.  Builds a one-element uio and hands it to
 * sctp_lower_sosend().  Requires CAP_SEND, plus CAP_CONNECT when an
 * address is supplied.  NOTE(review): K&R-style definition;
 * several error-check lines are elided in this extracted view.
 */
2782 sys_sctp_generic_sendmsg (td, uap)
2784 struct sctp_generic_sendmsg_args /* {
2790 struct sctp_sndrcvinfo *sinfo,
2794 #if (defined(INET) || defined(INET6)) && defined(SCTP)
2795 struct sctp_sndrcvinfo sinfo, *u_sinfo = NULL;
2797 struct file *fp = NULL;
2798 struct sockaddr *to = NULL;
2800 struct uio *ktruio = NULL;
2803 struct iovec iov[1];
2804 cap_rights_t rights;
2807 if (uap->sinfo != NULL) {
2808 error = copyin(uap->sinfo, &sinfo, sizeof (sinfo));
2814 cap_rights_init(&rights, CAP_SEND);
2815 if (uap->tolen != 0) {
2816 error = getsockaddr(&to, uap->to, uap->tolen);
/* Sending to an explicit address also needs CAP_CONNECT. */
2821 cap_rights_set(&rights, CAP_CONNECT);
2824 AUDIT_ARG_FD(uap->sd);
2825 error = getsock_cap(td->td_proc->p_fd, uap->sd, &rights, &fp, NULL);
2829 if (to && (KTRPOINT(td, KTR_STRUCT)))
/* Single-buffer message: wrap it in a one-element iovec. */
2833 iov[0].iov_base = uap->msg;
2834 iov[0].iov_len = uap->mlen;
2836 so = (struct socket *)fp->f_data;
2837 if (so->so_proto->pr_protocol != IPPROTO_SCTP) {
2842 error = mac_socket_check_send(td->td_ucred, so);
2848 auio.uio_iovcnt = 1;
2849 auio.uio_segflg = UIO_USERSPACE;
2850 auio.uio_rw = UIO_WRITE;
2852 auio.uio_offset = 0; /* XXX */
2854 len = auio.uio_resid = uap->mlen;
2855 CURVNET_SET(so->so_vnet);
2856 error = sctp_lower_sosend(so, to, &auio, (struct mbuf *)NULL,
2857 (struct mbuf *)NULL, uap->flags, u_sinfo, td);
/* Partial progress beats a restartable error: report bytes sent. */
2860 if (auio.uio_resid != len && (error == ERESTART ||
2861 error == EINTR || error == EWOULDBLOCK))
2863 /* Generation of SIGPIPE can be controlled per socket. */
2864 if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
2865 !(uap->flags & MSG_NOSIGNAL)) {
2866 PROC_LOCK(td->td_proc);
2867 tdsignal(td, SIGPIPE);
2868 PROC_UNLOCK(td->td_proc);
2872 td->td_retval[0] = len - auio.uio_resid;
/* ktrace I/O record, if enabled earlier (allocation lines elided). */
2874 if (ktruio != NULL) {
2875 ktruio->uio_resid = td->td_retval[0];
2876 ktrgenio(uap->sd, UIO_WRITE, ktruio, error);
/* Kernel built without SCTP support. */
2886 return (EOPNOTSUPP);
/*
 * sctp_generic_sendmsg_iov(2): scatter/gather variant of the above —
 * copies in a user iovec array (with 32-bit compat translation under
 * COMPAT_FREEBSD32), sums its lengths with overflow detection, and
 * sends via sctp_lower_sosend().  Same capability and SIGPIPE
 * semantics as sys_sctp_generic_sendmsg().  NOTE(review): several
 * error-check/cleanup lines are elided in this extracted view.
 */
2891 sys_sctp_generic_sendmsg_iov(td, uap)
2893 struct sctp_generic_sendmsg_iov_args /* {
2899 struct sctp_sndrcvinfo *sinfo,
2903 #if (defined(INET) || defined(INET6)) && defined(SCTP)
2904 struct sctp_sndrcvinfo sinfo, *u_sinfo = NULL;
2906 struct file *fp = NULL;
2907 struct sockaddr *to = NULL;
2909 struct uio *ktruio = NULL;
2912 struct iovec *iov, *tiov;
2913 cap_rights_t rights;
2917 if (uap->sinfo != NULL) {
2918 error = copyin(uap->sinfo, &sinfo, sizeof (sinfo));
2923 cap_rights_init(&rights, CAP_SEND);
2924 if (uap->tolen != 0) {
2925 error = getsockaddr(&to, uap->to, uap->tolen);
/* Sending to an explicit address also needs CAP_CONNECT. */
2930 cap_rights_set(&rights, CAP_CONNECT);
2933 AUDIT_ARG_FD(uap->sd);
2934 error = getsock_cap(td->td_proc->p_fd, uap->sd, &rights, &fp, NULL);
2938 #ifdef COMPAT_FREEBSD32
2939 if (SV_CURPROC_FLAG(SV_ILP32))
2940 error = freebsd32_copyiniov((struct iovec32 *)uap->iov,
2941 uap->iovlen, &iov, EMSGSIZE);
2944 error = copyiniov(uap->iov, uap->iovlen, &iov, EMSGSIZE);
2948 if (to && (KTRPOINT(td, KTR_STRUCT)))
2952 so = (struct socket *)fp->f_data;
2953 if (so->so_proto->pr_protocol != IPPROTO_SCTP) {
2958 error = mac_socket_check_send(td->td_ucred, so);
2964 auio.uio_iovcnt = uap->iovlen;
2965 auio.uio_segflg = UIO_USERSPACE;
2966 auio.uio_rw = UIO_WRITE;
2968 auio.uio_offset = 0; /* XXX */
/* Sum iovec lengths; a negative running total signals overflow (EINVAL). */
2971 for (i = 0; i <uap->iovlen; i++, tiov++) {
2972 if ((auio.uio_resid += tiov->iov_len) < 0) {
2977 len = auio.uio_resid;
2978 CURVNET_SET(so->so_vnet);
2979 error = sctp_lower_sosend(so, to, &auio,
2980 (struct mbuf *)NULL, (struct mbuf *)NULL,
2981 uap->flags, u_sinfo, td);
/* Partial progress beats a restartable error: report bytes sent. */
2984 if (auio.uio_resid != len && (error == ERESTART ||
2985 error == EINTR || error == EWOULDBLOCK))
2987 /* Generation of SIGPIPE can be controlled per socket */
2988 if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
2989 !(uap->flags & MSG_NOSIGNAL)) {
2990 PROC_LOCK(td->td_proc);
2991 tdsignal(td, SIGPIPE);
2992 PROC_UNLOCK(td->td_proc);
2996 td->td_retval[0] = len - auio.uio_resid;
/* ktrace I/O record, if enabled earlier (allocation lines elided). */
2998 if (ktruio != NULL) {
2999 ktruio->uio_resid = td->td_retval[0];
3000 ktrgenio(uap->sd, UIO_WRITE, ktruio, error);
/* Kernel built without SCTP support. */
3012 return (EOPNOTSUPP);
3017 sys_sctp_generic_recvmsg(td, uap)
3019 struct sctp_generic_recvmsg_args /* {
3023 struct sockaddr *from,
3024 __socklen_t *fromlenaddr,
3025 struct sctp_sndrcvinfo *sinfo,
3029 #if (defined(INET) || defined(INET6)) && defined(SCTP)
3030 uint8_t sockbufstore[256];
3032 struct iovec *iov, *tiov;
3033 struct sctp_sndrcvinfo sinfo;
3035 struct file *fp = NULL;
3036 struct sockaddr *fromsa;
3037 cap_rights_t rights;
3039 struct uio *ktruio = NULL;
3042 int error, fromlen, i, msg_flags;
3044 AUDIT_ARG_FD(uap->sd);
3045 error = getsock_cap(td->td_proc->p_fd, uap->sd,
3046 cap_rights_init(&rights, CAP_RECV), &fp, NULL);
3049 #ifdef COMPAT_FREEBSD32
3050 if (SV_CURPROC_FLAG(SV_ILP32))
3051 error = freebsd32_copyiniov((struct iovec32 *)uap->iov,
3052 uap->iovlen, &iov, EMSGSIZE);
3055 error = copyiniov(uap->iov, uap->iovlen, &iov, EMSGSIZE);
3060 if (so->so_proto->pr_protocol != IPPROTO_SCTP) {
3065 error = mac_socket_check_receive(td->td_ucred, so);
3070 if (uap->fromlenaddr != NULL) {
3071 error = copyin(uap->fromlenaddr, &fromlen, sizeof (fromlen));
3077 if (uap->msg_flags) {
3078 error = copyin(uap->msg_flags, &msg_flags, sizeof (int));
3085 auio.uio_iovcnt = uap->iovlen;
3086 auio.uio_segflg = UIO_USERSPACE;
3087 auio.uio_rw = UIO_READ;
3089 auio.uio_offset = 0; /* XXX */
3092 for (i = 0; i <uap->iovlen; i++, tiov++) {
3093 if ((auio.uio_resid += tiov->iov_len) < 0) {
3098 len = auio.uio_resid;
3099 fromsa = (struct sockaddr *)sockbufstore;
3102 if (KTRPOINT(td, KTR_GENIO))
3103 ktruio = cloneuio(&auio);
3105 memset(&sinfo, 0, sizeof(struct sctp_sndrcvinfo));
3106 CURVNET_SET(so->so_vnet);
3107 error = sctp_sorecvmsg(so, &auio, (struct mbuf **)NULL,
3108 fromsa, fromlen, &msg_flags,
3109 (struct sctp_sndrcvinfo *)&sinfo, 1);
3112 if (auio.uio_resid != len && (error == ERESTART ||
3113 error == EINTR || error == EWOULDBLOCK))
3117 error = copyout(&sinfo, uap->sinfo, sizeof (sinfo));
3120 if (ktruio != NULL) {
3121 ktruio->uio_resid = len - auio.uio_resid;
3122 ktrgenio(uap->sd, UIO_READ, ktruio, error);
3127 td->td_retval[0] = len - auio.uio_resid;
3129 if (fromlen && uap->from) {
3131 if (len <= 0 || fromsa == 0)
3134 len = MIN(len, fromsa->sa_len);
3135 error = copyout(fromsa, uap->from, (size_t)len);
3139 error = copyout(&len, uap->fromlenaddr, sizeof (socklen_t));
3144 if (KTRPOINT(td, KTR_STRUCT))
3145 ktrsockaddr(fromsa);
3147 if (uap->msg_flags) {
3148 error = copyout(&msg_flags, uap->msg_flags, sizeof (int));
3160 return (EOPNOTSUPP);