2 * Copyright (c) 1982, 1986, 1989, 1990, 1993
3 * The Regents of the University of California. All rights reserved.
5 * sendfile(2) and related extensions:
6 * Copyright (c) 1998, David Greenman. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * @(#)uipc_syscalls.c 8.4 (Berkeley) 2/21/94
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include "opt_capsicum.h"
40 #include "opt_inet6.h"
42 #include "opt_compat.h"
43 #include "opt_ktrace.h"
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/capability.h>
48 #include <sys/condvar.h>
49 #include <sys/kernel.h>
51 #include <sys/mutex.h>
52 #include <sys/sysproto.h>
53 #include <sys/malloc.h>
54 #include <sys/filedesc.h>
55 #include <sys/event.h>
57 #include <sys/fcntl.h>
59 #include <sys/filio.h>
62 #include <sys/mount.h>
64 #include <sys/protosw.h>
65 #include <sys/rwlock.h>
66 #include <sys/sf_buf.h>
67 #include <sys/sf_sync.h>
68 #include <sys/sysent.h>
69 #include <sys/socket.h>
70 #include <sys/socketvar.h>
71 #include <sys/signalvar.h>
72 #include <sys/syscallsubr.h>
73 #include <sys/sysctl.h>
75 #include <sys/vnode.h>
77 #include <sys/ktrace.h>
79 #ifdef COMPAT_FREEBSD32
80 #include <compat/freebsd32/freebsd32_util.h>
85 #include <security/audit/audit.h>
86 #include <security/mac/mac_framework.h>
89 #include <vm/vm_param.h>
90 #include <vm/vm_object.h>
91 #include <vm/vm_page.h>
92 #include <vm/vm_pager.h>
93 #include <vm/vm_kern.h>
94 #include <vm/vm_extern.h>
96 #if defined(INET) || defined(INET6)
98 #include <netinet/sctp.h>
99 #include <netinet/sctp_peeloff.h>
101 #endif /* INET || INET6 */
/*
 * NOTE(review): this file is a damaged excerpt — stale original line numbers
 * are fused into each line and many intermediate lines are missing.  Code is
 * left byte-identical; only reviewer comments are added.
 */
/* Flag bits passed through accept1()/kern_accept4() alongside SOCK_CLOEXEC. */
104 * Flags for accept1() and kern_accept4(), in addition to SOCK_CLOEXEC
107 #define ACCEPT4_INHERIT 0x1
108 #define ACCEPT4_COMPAT 0x2
/* Forward declarations for the static helpers defined later in this file. */
110 static int sendit(struct thread *td, int s, struct msghdr *mp, int flags);
111 static int recvit(struct thread *td, int s, struct msghdr *mp, void *namelenp);
113 static int accept1(struct thread *td, int s, struct sockaddr *uname,
114 socklen_t *anamelen, int flags);
115 static int do_sendfile(struct thread *td, struct sendfile_args *uap,
117 static int getsockname1(struct thread *td, struct getsockname_args *uap,
119 static int getpeername1(struct thread *td, struct getpeername_args *uap,
/* Per-CPU counters backing the sendfile statistics (struct sfstat). */
122 counter_u64_t sfstat[sizeof(struct sfstat) / sizeof(uint64_t)];
125 * sendfile(2)-related variables and associated sysctls
127 static SYSCTL_NODE(_kern_ipc, OID_AUTO, sendfile, CTLFLAG_RW, 0,
128 "sendfile(2) tunables");
/* Tunable: number of MAXBSIZE blocks read ahead by sendfile_readpage(). */
129 static int sfreadahead = 1;
130 SYSCTL_INT(_kern_ipc_sendfile, OID_AUTO, readahead, CTLFLAG_RW,
131 &sfreadahead, 0, "Number of sendfile(2) read-ahead MAXBSIZE blocks");
/* Allocate the counter array early in boot (SI_SUB_MBUF). */
135 sfstat_init(const void *unused)
138 COUNTER_ARRAY_ALLOC(sfstat, sizeof(struct sfstat) / sizeof(uint64_t),
141 SYSINIT(sfstat, SI_SUB_MBUF, SI_ORDER_FIRST, sfstat_init, NULL);
/*
 * sysctl handler: snapshot the counters into a local struct and copy it out;
 * a write request (not visible here — lines missing) appears to zero them.
 */
144 sfstat_sysctl(SYSCTL_HANDLER_ARGS)
148 COUNTER_ARRAY_COPY(sfstat, &s, sizeof(s) / sizeof(uint64_t));
150 COUNTER_ARRAY_ZERO(sfstat, sizeof(s) / sizeof(uint64_t));
151 return (SYSCTL_OUT(req, &s, sizeof(s)));
153 SYSCTL_PROC(_kern_ipc, OID_AUTO, sfstat, CTLTYPE_OPAQUE | CTLFLAG_RW,
154 NULL, 0, sfstat_sysctl, "I", "sendfile statistics");
157 * Convert a user file descriptor to a kernel file entry and check if required
158 * capability rights are present.
159 * A reference on the file entry is held upon returning.
/*
 * getsock_cap: fd -> struct file lookup with Capsicum rights check.
 * On success the caller owns a reference (drop with fdrop()); optionally
 * returns the file flags through fflagp.  Non-socket fds are rejected
 * (the released-reference error path's return is among the missing lines).
 */
162 getsock_cap(struct filedesc *fdp, int fd, cap_rights_t *rightsp,
163 struct file **fpp, u_int *fflagp)
168 error = fget_unlocked(fdp, fd, rightsp, 0, &fp, NULL);
171 if (fp->f_type != DTYPE_SOCKET) {
172 fdrop(fp, curthread);
/* fflagp is optional; guard presumably exists in the missing lines. */
176 *fflagp = fp->f_flag;
182 * System call interface to the socket abstraction.
184 #if defined(COMPAT_43)
185 #define COMPAT_OLDSOCK
/*
 * sys_socket(): create a socket, allocate a descriptor, and translate the
 * SOCK_CLOEXEC/SOCK_NONBLOCK type bits into fd/file flags.
 */
191 struct socket_args /* {
199 int fd, error, type, oflag, fflag;
201 AUDIT_ARG_SOCKET(uap->domain, uap->type, uap->protocol);
/* Strip the flag bits out of `type' before handing it to socreate(). */
206 if ((type & SOCK_CLOEXEC) != 0) {
207 type &= ~SOCK_CLOEXEC;
210 if ((type & SOCK_NONBLOCK) != 0) {
211 type &= ~SOCK_NONBLOCK;
/* MAC hook: may veto socket creation (MAC framework only). */
216 error = mac_socket_check_create(td->td_ucred, uap->domain, type,
221 error = falloc(td, &fp, &fd, oflag);
224 /* An extra reference on `fp' has been held for us by falloc(). */
225 error = socreate(uap->domain, &so, type, uap->protocol,
/* On socreate() failure the fresh descriptor is torn down again. */
228 fdclose(td->td_proc->p_fd, fp, fd, td);
230 finit(fp, FREAD | FWRITE | fflag, DTYPE_SOCKET, so, &socketops);
231 if ((fflag & FNONBLOCK) != 0)
232 (void) fo_ioctl(fp, FIONBIO, &fflag, td->td_ucred, td);
/* Return the new descriptor number to userland. */
233 td->td_retval[0] = fd;
/* sys_bind(): copy in the sockaddr, then delegate to kern_bind(). */
243 struct bind_args /* {
252 error = getsockaddr(&sa, uap->name, uap->namelen);
254 error = kern_bind(td, uap->s, sa);
/*
 * kern_bindat(): bind a socket, optionally relative to a directory fd
 * (bindat(2) semantics for AF_UNIX paths); AT_FDCWD selects plain sobind().
 */
261 kern_bindat(struct thread *td, int dirfd, int fd, struct sockaddr *sa)
269 AUDIT_ARG_SOCKADDR(td, dirfd, sa);
270 error = getsock_cap(td->td_proc->p_fd, fd,
271 cap_rights_init(&rights, CAP_BIND), &fp, NULL);
/* ktrace the sockaddr structure if the trace point is active. */
276 if (KTRPOINT(td, KTR_STRUCT))
280 error = mac_socket_check_bind(td->td_ucred, so, sa);
283 if (dirfd == AT_FDCWD)
284 error = sobind(so, sa, td);
286 error = sobindat(dirfd, so, sa, td);
/* kern_bind(): thin wrapper — bind relative to the current directory. */
295 kern_bind(struct thread *td, int fd, struct sockaddr *sa)
298 return (kern_bindat(td, AT_FDCWD, fd, sa));
/* sys_bindat(): bindat(2) entry point; mirrors sys_bind() with a dirfd. */
305 struct bindat_args /* {
315 error = getsockaddr(&sa, uap->name, uap->namelen);
317 error = kern_bindat(td, uap->fd, uap->s, sa);
/* sys_listen(): mark a socket as accepting connections (listen(2)). */
327 struct listen_args /* {
337 AUDIT_ARG_FD(uap->s);
338 error = getsock_cap(td->td_proc->p_fd, uap->s,
339 cap_rights_init(&rights, CAP_LISTEN), &fp, NULL);
343 error = mac_socket_check_listen(td->td_ucred, so);
346 error = solisten(so, uap->backlog, td);
/*
 * accept1(): common userland-facing accept path.  Copies the namelen in,
 * calls kern_accept4(), and copies the peer address/length back out.
 * Old-style (K&R) parameter declaration.
 */
356 accept1(td, s, uname, anamelen, flags)
359 struct sockaddr *uname;
363 struct sockaddr *name;
/* No address buffer requested: skip the copyin/copyout entirely. */
369 return (kern_accept4(td, s, NULL, NULL, flags, NULL));
371 error = copyin(anamelen, &namelen, sizeof (namelen));
375 error = kern_accept4(td, s, &name, &namelen, flags, &fp);
378 * return a namelen of zero for older code which might
379 * ignore the return value from accept.
382 (void) copyout(&namelen, anamelen, sizeof(*anamelen));
386 if (error == 0 && uname != NULL) {
387 #ifdef COMPAT_OLDSOCK
/* 4.3BSD compat: smash sa_len/sa_family into the old osockaddr layout. */
388 if (flags & ACCEPT4_COMPAT)
389 ((struct osockaddr *)name)->sa_family =
392 error = copyout(name, uname, namelen);
395 error = copyout(&namelen, anamelen,
/* Copyout failed: close the descriptor we just installed. */
398 fdclose(td->td_proc->p_fd, fp, td->td_retval[0], td);
400 free(name, M_SONAME);
/* kern_accept(): legacy-semantics wrapper (inherit fd flags from listener). */
405 kern_accept(struct thread *td, int s, struct sockaddr **name,
406 socklen_t *namelen, struct file **fp)
408 return (kern_accept4(td, s, name, namelen, ACCEPT4_INHERIT, fp));
/*
 * kern_accept4(): core of accept(2)/accept4(2).  Waits for a completed
 * connection on the listen queue, detaches it, wires it to a new file
 * descriptor and returns the peer address.
 */
412 kern_accept4(struct thread *td, int s, struct sockaddr **name,
413 socklen_t *namelen, int flags, struct file **fp)
415 struct filedesc *fdp;
416 struct file *headfp, *nfp = NULL;
417 struct sockaddr *sa = NULL;
418 struct socket *head, *so;
428 fdp = td->td_proc->p_fd;
429 error = getsock_cap(fdp, s, cap_rights_init(&rights, CAP_ACCEPT),
433 head = headfp->f_data;
/* accept() on a socket that was never listen()ed is an error. */
434 if ((head->so_options & SO_ACCEPTCONN) == 0) {
439 error = mac_socket_check_accept(td->td_ucred, head);
443 error = falloc(td, &nfp, &fd, (flags & SOCK_CLOEXEC) ? O_CLOEXEC : 0);
/* Non-blocking listener with an empty completed queue: fail immediately. */
447 if ((head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->so_comp)) {
/* Sleep until a connection completes, the socket errors, or a signal. */
452 while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0) {
453 if (head->so_rcv.sb_state & SBS_CANTRCVMORE) {
454 head->so_error = ECONNABORTED;
457 error = msleep(&head->so_timeo, &accept_mtx, PSOCK | PCATCH,
464 if (head->so_error) {
465 error = head->so_error;
470 so = TAILQ_FIRST(&head->so_comp);
471 KASSERT(!(so->so_qstate & SQ_INCOMP), ("accept1: so SQ_INCOMP"));
472 KASSERT(so->so_qstate & SQ_COMP, ("accept1: so not SQ_COMP"));
475 * Before changing the flags on the socket, we have to bump the
476 * reference count. Otherwise, if the protocol calls sofree(),
477 * the socket will be released due to a zero refcount.
479 SOCK_LOCK(so); /* soref() and so_state update */
480 soref(so); /* file descriptor reference */
482 TAILQ_REMOVE(&head->so_comp, so, so_list);
/* ACCEPT4_INHERIT: classic accept() copies NBIO state from the listener. */
484 if (flags & ACCEPT4_INHERIT)
485 so->so_state |= (head->so_state & SS_NBIO);
487 so->so_state |= (flags & SOCK_NONBLOCK) ? SS_NBIO : 0;
488 so->so_qstate &= ~SQ_COMP;
494 /* An extra reference on `nfp' has been held for us by falloc(). */
495 td->td_retval[0] = fd;
497 /* connection has been removed from the listen queue */
498 KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0);
/* Inherit signal ownership and FNONBLOCK/FASYNC from the listener. */
500 if (flags & ACCEPT4_INHERIT) {
501 pgid = fgetown(&head->so_sigio);
503 fsetown(pgid, &so->so_sigio);
505 fflag &= ~(FNONBLOCK | FASYNC);
506 if (flags & SOCK_NONBLOCK)
510 finit(nfp, fflag, DTYPE_SOCKET, so, &socketops);
511 /* Sync socket nonblocking/async state with file flags */
512 tmp = fflag & FNONBLOCK;
513 (void) fo_ioctl(nfp, FIONBIO, &tmp, td->td_ucred, td);
514 tmp = fflag & FASYNC;
515 (void) fo_ioctl(nfp, FIOASYNC, &tmp, td->td_ucred, td);
517 error = soaccept(so, &sa);
520 * return a namelen of zero for older code which might
521 * ignore the return value from accept.
532 AUDIT_ARG_SOCKADDR(td, AT_FDCWD, sa);
534 /* check sa_len before it is destroyed */
535 if (*namelen > sa->sa_len)
536 *namelen = sa->sa_len;
538 if (KTRPOINT(td, KTR_STRUCT))
548 * close the new descriptor, assuming someone hasn't ripped it
552 fdclose(fdp, nfp, fd, td);
555 * Release explicitly held references before returning. We return
556 * a reference on nfp to the caller on success if they request it.
/* sys_accept(): classic accept(2) — inherit listener fd semantics. */
575 struct accept_args *uap;
578 return (accept1(td, uap->s, uap->name, uap->anamelen, ACCEPT4_INHERIT));
/* sys_accept4(): accept4(2) — validate flags, no inheritance by default. */
584 struct accept4_args *uap;
587 if (uap->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
590 return (accept1(td, uap->s, uap->name, uap->anamelen, uap->flags));
593 #ifdef COMPAT_OLDSOCK
/* oaccept(): 4.3BSD accept with the osockaddr compat conversion. */
597 struct accept_args *uap;
600 return (accept1(td, uap->s, uap->name, uap->anamelen,
601 ACCEPT4_INHERIT | ACCEPT4_COMPAT));
603 #endif /* COMPAT_OLDSOCK */
/* sys_connect(): copy in the sockaddr, delegate to kern_connect(). */
609 struct connect_args /* {
618 error = getsockaddr(&sa, uap->name, uap->namelen);
620 error = kern_connect(td, uap->s, sa);
/*
 * kern_connectat(): initiate a connection, optionally relative to a
 * directory fd (connectat(2)); sleeps until the connection completes
 * unless the socket is non-blocking.
 */
627 kern_connectat(struct thread *td, int dirfd, int fd, struct sockaddr *sa)
632 int error, interrupted = 0;
635 AUDIT_ARG_SOCKADDR(td, dirfd, sa);
636 error = getsock_cap(td->td_proc->p_fd, fd,
637 cap_rights_init(&rights, CAP_CONNECT), &fp, NULL);
/* A connect already in progress yields EALREADY (error path lines missing). */
641 if (so->so_state & SS_ISCONNECTING) {
646 if (KTRPOINT(td, KTR_STRUCT))
650 error = mac_socket_check_connect(td->td_ucred, so, sa);
654 if (dirfd == AT_FDCWD)
655 error = soconnect(so, sa, td);
657 error = soconnectat(dirfd, so, sa, td);
/* Non-blocking socket still connecting: return EINPROGRESS (lines missing). */
660 if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
/* Blocking case: sleep until connected, an error, or a signal. */
665 while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
666 error = msleep(&so->so_timeo, SOCK_MTX(so), PSOCK | PCATCH,
669 if (error == EINTR || error == ERESTART)
675 error = so->so_error;
/* On interruption, clear the connecting state; ERESTART maps to EINTR. */
681 so->so_state &= ~SS_ISCONNECTING;
682 if (error == ERESTART)
/* kern_connect(): thin wrapper — connect relative to current directory. */
690 kern_connect(struct thread *td, int fd, struct sockaddr *sa)
693 return (kern_connectat(td, AT_FDCWD, fd, sa));
/* sys_connectat(): connectat(2) entry point (old-style declaration). */
698 sys_connectat(td, uap)
700 struct connectat_args /* {
710 error = getsockaddr(&sa, uap->name, uap->namelen);
712 error = kern_connectat(td, uap->fd, uap->s, sa);
/*
 * kern_socketpair(): create two connected sockets and install them as two
 * new descriptors (socketpair(2)).  Handles SOCK_CLOEXEC/SOCK_NONBLOCK
 * type bits like sys_socket() does.
 */
719 kern_socketpair(struct thread *td, int domain, int type, int protocol,
722 struct filedesc *fdp = td->td_proc->p_fd;
723 struct file *fp1, *fp2;
724 struct socket *so1, *so2;
725 int fd, error, oflag, fflag;
727 AUDIT_ARG_SOCKET(domain, type, protocol);
731 if ((type & SOCK_CLOEXEC) != 0) {
732 type &= ~SOCK_CLOEXEC;
735 if ((type & SOCK_NONBLOCK) != 0) {
736 type &= ~SOCK_NONBLOCK;
740 /* We might want to have a separate check for socket pairs. */
741 error = mac_socket_check_create(td->td_ucred, domain, type,
746 error = socreate(domain, &so1, type, protocol, td->td_ucred, td);
749 error = socreate(domain, &so2, type, protocol, td->td_ucred, td);
752 /* On success extra reference to `fp1' and 'fp2' is set by falloc. */
753 error = falloc(td, &fp1, &fd, oflag);
757 fp1->f_data = so1; /* so1 already has ref count */
758 error = falloc(td, &fp2, &fd, oflag);
761 fp2->f_data = so2; /* so2 already has ref count */
763 error = soconnect2(so1, so2);
766 if (type == SOCK_DGRAM) {
768 * Datagram socket connection is asymmetric.
770 error = soconnect2(so2, so1);
774 finit(fp1, FREAD | FWRITE | fflag, DTYPE_SOCKET, fp1->f_data,
776 finit(fp2, FREAD | FWRITE | fflag, DTYPE_SOCKET, fp2->f_data,
778 if ((fflag & FNONBLOCK) != 0) {
779 (void) fo_ioctl(fp1, FIONBIO, &fflag, td->td_ucred, td);
780 (void) fo_ioctl(fp2, FIONBIO, &fflag, td->td_ucred, td);
/* Error unwind: close both descriptors in reverse order. */
786 fdclose(fdp, fp2, rsv[1], td);
789 fdclose(fdp, fp1, rsv[0], td);
/* sys_socketpair(): copy the two fds out; on failure close both. */
801 sys_socketpair(struct thread *td, struct socketpair_args *uap)
805 error = kern_socketpair(td, uap->domain, uap->type,
809 error = copyout(sv, uap->rsv, 2 * sizeof(int));
811 (void)kern_close(td, sv[0]);
812 (void)kern_close(td, sv[1]);
/*
 * sendit(): userland front end for sendmsg-style sends.  Copies in the
 * destination address and control data, then calls kern_sendit().
 * Old-style (K&R) parameter declaration.
 */
818 sendit(td, s, mp, flags)
824 struct mbuf *control;
828 #ifdef CAPABILITY_MODE
/* In capability mode an explicit destination address is forbidden. */
829 if (IN_CAPABILITY_MODE(td) && (mp->msg_name != NULL))
833 if (mp->msg_name != NULL) {
834 error = getsockaddr(&to, mp->msg_name, mp->msg_namelen);
844 if (mp->msg_control) {
/* Control data must hold at least a cmsghdr, except old MSG_COMPAT sends. */
845 if (mp->msg_controllen < sizeof(struct cmsghdr)
846 #ifdef COMPAT_OLDSOCK
847 && mp->msg_flags != MSG_COMPAT
853 error = sockargs(&control, mp->msg_control,
854 mp->msg_controllen, MT_CONTROL);
857 #ifdef COMPAT_OLDSOCK
/* 4.3BSD compat: wrap raw access rights in a synthesized SCM_RIGHTS cmsg. */
858 if (mp->msg_flags == MSG_COMPAT) {
861 M_PREPEND(control, sizeof(*cm), M_WAITOK);
862 cm = mtod(control, struct cmsghdr *);
863 cm->cmsg_len = control->m_len;
864 cm->cmsg_level = SOL_SOCKET;
865 cm->cmsg_type = SCM_RIGHTS;
872 error = kern_sendit(td, s, mp, flags, control, UIO_USERSPACE);
/*
 * kern_sendit(): build a uio from the iovec, perform the send via
 * sosend(), and handle SIGPIPE/ktrace/audit bookkeeping.
 */
880 kern_sendit(td, s, mp, flags, control, segflg)
885 struct mbuf *control;
894 struct uio *ktruio = NULL;
900 cap_rights_init(&rights, CAP_SEND);
/* Sending to an explicit address additionally requires CAP_CONNECT. */
901 if (mp->msg_name != NULL) {
902 AUDIT_ARG_SOCKADDR(td, AT_FDCWD, mp->msg_name);
903 cap_rights_set(&rights, CAP_CONNECT);
905 error = getsock_cap(td->td_proc->p_fd, s, &rights, &fp, NULL);
908 so = (struct socket *)fp->f_data;
911 if (mp->msg_name != NULL && KTRPOINT(td, KTR_STRUCT))
912 ktrsockaddr(mp->msg_name);
915 if (mp->msg_name != NULL) {
916 error = mac_socket_check_connect(td->td_ucred, so,
921 error = mac_socket_check_send(td->td_ucred, so);
926 auio.uio_iov = mp->msg_iov;
927 auio.uio_iovcnt = mp->msg_iovlen;
928 auio.uio_segflg = segflg;
929 auio.uio_rw = UIO_WRITE;
931 auio.uio_offset = 0; /* XXX */
/* Sum the iovec lengths, rejecting total overflow past INT_MAX. */
934 for (i = 0; i < mp->msg_iovlen; i++, iov++) {
935 if ((auio.uio_resid += iov->iov_len) < 0) {
941 if (KTRPOINT(td, KTR_GENIO))
942 ktruio = cloneuio(&auio);
944 len = auio.uio_resid;
945 error = sosend(so, mp->msg_name, &auio, 0, control, flags, td);
/* A partial transfer before interruption is reported as success. */
947 if (auio.uio_resid != len && (error == ERESTART ||
948 error == EINTR || error == EWOULDBLOCK))
950 /* Generation of SIGPIPE can be controlled per socket */
951 if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
952 !(flags & MSG_NOSIGNAL)) {
953 PROC_LOCK(td->td_proc);
954 tdsignal(td, SIGPIPE);
955 PROC_UNLOCK(td->td_proc);
/* Bytes actually sent = requested minus residual. */
959 td->td_retval[0] = len - auio.uio_resid;
961 if (ktruio != NULL) {
962 ktruio->uio_resid = td->td_retval[0];
963 ktrgenio(s, UIO_WRITE, ktruio, error);
/* sys_sendto(): build a one-iovec msghdr and funnel into sendit(). */
974 struct sendto_args /* {
986 msg.msg_name = uap->to;
987 msg.msg_namelen = uap->tolen;
991 #ifdef COMPAT_OLDSOCK
994 aiov.iov_base = uap->buf;
995 aiov.iov_len = uap->len;
996 return (sendit(td, uap->s, &msg, uap->flags));
999 #ifdef COMPAT_OLDSOCK
/* osend(): 4.3BSD send() — no destination address. */
1003 struct osend_args /* {
1014 msg.msg_namelen = 0;
1015 msg.msg_iov = &aiov;
1017 aiov.iov_base = uap->buf;
1018 aiov.iov_len = uap->len;
1019 msg.msg_control = 0;
1021 return (sendit(td, uap->s, &msg, uap->flags));
/* osendmsg(): 4.3BSD sendmsg() — copies in the old omsghdr layout. */
1027 struct osendmsg_args /* {
1037 error = copyin(uap->msg, &msg, sizeof (struct omsghdr));
1040 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1044 msg.msg_flags = MSG_COMPAT;
1045 error = sendit(td, uap->s, &msg, uap->flags);
/* sys_sendmsg(): modern sendmsg(2) entry (old-style declaration). */
1052 sys_sendmsg(td, uap)
1054 struct sendmsg_args /* {
1064 error = copyin(uap->msg, &msg, sizeof (msg));
1067 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1071 #ifdef COMPAT_OLDSOCK
1074 error = sendit(td, uap->s, &msg, uap->flags);
/*
 * kern_recvit(): core receive path shared by all recv* syscalls.  Builds a
 * uio from the iovec, calls soreceive(), then copies out the source address
 * and any control messages.  Old-style (K&R) parameter declaration.
 */
1080 kern_recvit(td, s, mp, fromseg, controlp)
1084 enum uio_seg fromseg;
1085 struct mbuf **controlp;
1089 struct mbuf *m, *control = NULL;
1093 struct sockaddr *fromsa = NULL;
1094 cap_rights_t rights;
1096 struct uio *ktruio = NULL;
1101 if (controlp != NULL)
1105 error = getsock_cap(td->td_proc->p_fd, s,
1106 cap_rights_init(&rights, CAP_RECV), &fp, NULL);
1112 error = mac_socket_check_receive(td->td_ucred, so);
1119 auio.uio_iov = mp->msg_iov;
1120 auio.uio_iovcnt = mp->msg_iovlen;
1121 auio.uio_segflg = UIO_USERSPACE;
1122 auio.uio_rw = UIO_READ;
1124 auio.uio_offset = 0; /* XXX */
/* Sum iovec lengths, rejecting total overflow (same check as the send side). */
1127 for (i = 0; i < mp->msg_iovlen; i++, iov++) {
1128 if ((auio.uio_resid += iov->iov_len) < 0) {
1134 if (KTRPOINT(td, KTR_GENIO))
1135 ktruio = cloneuio(&auio);
1137 len = auio.uio_resid;
/* Request control data only if the caller asked for it. */
1138 error = soreceive(so, &fromsa, &auio, NULL,
1139 (mp->msg_control || controlp) ? &control : NULL,
1142 if (auio.uio_resid != len && (error == ERESTART ||
1143 error == EINTR || error == EWOULDBLOCK))
1147 AUDIT_ARG_SOCKADDR(td, AT_FDCWD, fromsa);
1149 if (ktruio != NULL) {
1150 ktruio->uio_resid = len - auio.uio_resid;
1151 ktrgenio(s, UIO_READ, ktruio, error);
1156 td->td_retval[0] = len - auio.uio_resid;
/* Copy the source address back to the caller, truncated to msg_namelen. */
1158 len = mp->msg_namelen;
1159 if (len <= 0 || fromsa == NULL)
1162 /* save sa_len before it is destroyed by MSG_COMPAT */
1163 len = MIN(len, fromsa->sa_len);
1164 #ifdef COMPAT_OLDSOCK
1165 if (mp->msg_flags & MSG_COMPAT)
1166 ((struct osockaddr *)fromsa)->sa_family =
1169 if (fromseg == UIO_USERSPACE) {
1170 error = copyout(fromsa, mp->msg_name,
1175 bcopy(fromsa, mp->msg_name, len);
1177 mp->msg_namelen = len;
1179 if (mp->msg_control && controlp == NULL) {
1180 #ifdef COMPAT_OLDSOCK
1182 * We assume that old recvmsg calls won't receive access
1183 * rights and other control info, esp. as control info
1184 * is always optional and those options didn't exist in 4.3.
1185 * If we receive rights, trim the cmsghdr; anything else
1188 if (control && mp->msg_flags & MSG_COMPAT) {
1189 if (mtod(control, struct cmsghdr *)->cmsg_level !=
1191 mtod(control, struct cmsghdr *)->cmsg_type !=
1193 mp->msg_controllen = 0;
1196 control->m_len -= sizeof (struct cmsghdr);
1197 control->m_data += sizeof (struct cmsghdr);
/* Walk the control mbuf chain, copying out up to msg_controllen bytes. */
1200 len = mp->msg_controllen;
1202 mp->msg_controllen = 0;
1203 ctlbuf = mp->msg_control;
1205 while (m && len > 0) {
1206 unsigned int tocopy;
1208 if (len >= m->m_len)
/* Buffer too small: flag the truncation and stop. */
1211 mp->msg_flags |= MSG_CTRUNC;
1215 if ((error = copyout(mtod(m, caddr_t),
1216 ctlbuf, tocopy)) != 0)
/* Report the number of control bytes actually delivered. */
1223 mp->msg_controllen = ctlbuf - (caddr_t)mp->msg_control;
1228 if (fromsa && KTRPOINT(td, KTR_STRUCT))
1229 ktrsockaddr(fromsa);
1231 free(fromsa, M_SONAME);
/* Kernel callers may take ownership of the control chain instead. */
1233 if (error == 0 && controlp != NULL)
1234 *controlp = control;
/*
 * recvit(): userland wrapper — run kern_recvit() then copy the updated
 * namelen back out (old recvfrom ignored copyout errors under MSG_COMPAT).
 */
1242 recvit(td, s, mp, namelenp)
1250 error = kern_recvit(td, s, mp, UIO_USERSPACE, NULL);
1253 if (namelenp != NULL) {
1254 error = copyout(&mp->msg_namelen, namelenp, sizeof (socklen_t));
1255 #ifdef COMPAT_OLDSOCK
1256 if (mp->msg_flags & MSG_COMPAT)
1257 error = 0; /* old recvfrom didn't check */
/* sys_recvfrom(): build a one-iovec msghdr and delegate to recvit(). */
1264 sys_recvfrom(td, uap)
1266 struct recvfrom_args /* {
1271 struct sockaddr * __restrict from;
1272 socklen_t * __restrict fromlenaddr;
1279 if (uap->fromlenaddr) {
1280 error = copyin(uap->fromlenaddr,
1281 &msg.msg_namelen, sizeof (msg.msg_namelen));
1285 msg.msg_namelen = 0;
1287 msg.msg_name = uap->from;
1288 msg.msg_iov = &aiov;
1290 aiov.iov_base = uap->buf;
1291 aiov.iov_len = uap->len;
1292 msg.msg_control = 0;
1293 msg.msg_flags = uap->flags;
1294 error = recvit(td, uap->s, &msg, uap->fromlenaddr);
1299 #ifdef COMPAT_OLDSOCK
/* orecvfrom(): 4.3BSD recvfrom — same as the modern one plus MSG_COMPAT. */
1303 struct recvfrom_args *uap;
1306 uap->flags |= MSG_COMPAT;
1307 return (sys_recvfrom(td, uap));
1311 #ifdef COMPAT_OLDSOCK
/* orecv(): 4.3BSD recv() — no source address wanted. */
1315 struct orecv_args /* {
1326 msg.msg_namelen = 0;
1327 msg.msg_iov = &aiov;
1329 aiov.iov_base = uap->buf;
1330 aiov.iov_len = uap->len;
1331 msg.msg_control = 0;
1332 msg.msg_flags = uap->flags;
1333 return (recvit(td, uap->s, &msg, NULL));
1337 * Old recvmsg. This code takes advantage of the fact that the old msghdr
1338 * overlays the new one, missing only the flags, and with the (old) access
1339 * rights where the control fields are now.
1344 struct orecvmsg_args /* {
1346 struct omsghdr *msg;
1354 error = copyin(uap->msg, &msg, sizeof (struct omsghdr));
1357 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1360 msg.msg_flags = uap->flags | MSG_COMPAT;
1362 error = recvit(td, uap->s, &msg, &uap->msg->msg_namelen);
/* Old ABI: report the received access-rights length separately. */
1363 if (msg.msg_controllen && error == 0)
1364 error = copyout(&msg.msg_controllen,
1365 &uap->msg->msg_accrightslen, sizeof (int));
/* sys_recvmsg(): modern recvmsg(2) — copy the msghdr in and back out. */
1372 sys_recvmsg(td, uap)
1374 struct recvmsg_args /* {
1381 struct iovec *uiov, *iov;
1384 error = copyin(uap->msg, &msg, sizeof (msg));
1387 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1390 msg.msg_flags = uap->flags;
1391 #ifdef COMPAT_OLDSOCK
/* MSG_COMPAT is kernel-internal; strip it from userland-supplied flags. */
1392 msg.msg_flags &= ~MSG_COMPAT;
1396 error = recvit(td, uap->s, &msg, NULL);
1399 error = copyout(&msg, uap->msg, sizeof(msg));
/* sys_shutdown(): shutdown(2) — disable sends/receives on a socket. */
1407 sys_shutdown(td, uap)
1409 struct shutdown_args /* {
1416 cap_rights_t rights;
1419 AUDIT_ARG_FD(uap->s);
1420 error = getsock_cap(td->td_proc->p_fd, uap->s,
1421 cap_rights_init(&rights, CAP_SHUTDOWN), &fp, NULL);
1424 error = soshutdown(so, uap->how);
/* sys_setsockopt(): thin userland wrapper around kern_setsockopt(). */
1432 sys_setsockopt(td, uap)
1434 struct setsockopt_args /* {
1443 return (kern_setsockopt(td, uap->s, uap->level, uap->name,
1444 uap->val, UIO_USERSPACE, uap->valsize));
/*
 * kern_setsockopt(): build a struct sockopt and call sosetopt().
 * valseg selects whether `val' is a user or kernel pointer.
 */
1448 kern_setsockopt(td, s, level, name, val, valseg, valsize)
1454 enum uio_seg valseg;
1459 struct sockopt sopt;
1460 cap_rights_t rights;
/* NULL value with a non-zero size is invalid; negative sizes rejected. */
1463 if (val == NULL && valsize != 0)
1465 if ((int)valsize < 0)
1468 sopt.sopt_dir = SOPT_SET;
1469 sopt.sopt_level = level;
1470 sopt.sopt_name = name;
1471 sopt.sopt_val = val;
1472 sopt.sopt_valsize = valsize;
/* Kernel-space values carry no thread pointer (no copyin needed). */
1478 sopt.sopt_td = NULL;
1481 panic("kern_setsockopt called with bad valseg");
1485 error = getsock_cap(td->td_proc->p_fd, s,
1486 cap_rights_init(&rights, CAP_SETSOCKOPT), &fp, NULL);
1489 error = sosetopt(so, &sopt);
/* sys_getsockopt(): copy the length in/out around kern_getsockopt(). */
1497 sys_getsockopt(td, uap)
1499 struct getsockopt_args /* {
1503 void * __restrict val;
1504 socklen_t * __restrict avalsize;
1511 error = copyin(uap->avalsize, &valsize, sizeof (valsize));
1516 error = kern_getsockopt(td, uap->s, uap->level, uap->name,
1517 uap->val, UIO_USERSPACE, &valsize);
1520 error = copyout(&valsize, uap->avalsize, sizeof (valsize));
1525 * Kernel version of getsockopt.
1526 * optval can be a userland or userspace. optlen is always a kernel pointer.
1529 kern_getsockopt(td, s, level, name, val, valseg, valsize)
1535 enum uio_seg valseg;
1540 struct sockopt sopt;
1541 cap_rights_t rights;
1546 if ((int)*valsize < 0)
1549 sopt.sopt_dir = SOPT_GET;
1550 sopt.sopt_level = level;
1551 sopt.sopt_name = name;
1552 sopt.sopt_val = val;
1553 sopt.sopt_valsize = (size_t)*valsize; /* checked non-negative above */
1559 sopt.sopt_td = NULL;
1562 panic("kern_getsockopt called with bad valseg");
1566 error = getsock_cap(td->td_proc->p_fd, s,
1567 cap_rights_init(&rights, CAP_GETSOCKOPT), &fp, NULL);
1570 error = sogetopt(so, &sopt);
/* Report the (possibly shortened) option length back to the caller. */
1571 *valsize = sopt.sopt_valsize;
1578 * getsockname1() - Get socket name.
/*
 * getsockname1(): shared body of getsockname(2) and the 4.3BSD variant;
 * `compat' selects the osockaddr output conversion.
 */
1582 getsockname1(td, uap, compat)
1584 struct getsockname_args /* {
1586 struct sockaddr * __restrict asa;
1587 socklen_t * __restrict alen;
1591 struct sockaddr *sa;
1595 error = copyin(uap->alen, &len, sizeof(len));
1599 error = kern_getsockname(td, uap->fdes, &sa, &len);
1604 #ifdef COMPAT_OLDSOCK
1606 ((struct osockaddr *)sa)->sa_family = sa->sa_family;
1608 error = copyout(sa, uap->asa, (u_int)len);
1612 error = copyout(&len, uap->alen, sizeof(len));
/*
 * kern_getsockname(): fetch the local address via pru_sockaddr; on
 * success *sa is malloc'd (M_SONAME) and owned by the caller.
 */
1617 kern_getsockname(struct thread *td, int fd, struct sockaddr **sa,
1622 cap_rights_t rights;
1627 error = getsock_cap(td->td_proc->p_fd, fd,
1628 cap_rights_init(&rights, CAP_GETSOCKNAME), &fp, NULL);
/* Protocol call runs under the socket's vnet context. */
1633 CURVNET_SET(so->so_vnet);
1634 error = (*so->so_proto->pr_usrreqs->pru_sockaddr)(so, sa);
1641 len = MIN(*alen, (*sa)->sa_len);
1644 if (KTRPOINT(td, KTR_STRUCT))
/* On error, release any address the protocol may have allocated. */
1649 if (error != 0 && *sa != NULL) {
1650 free(*sa, M_SONAME);
/* sys_getsockname(): modern entry — no compat conversion. */
1657 sys_getsockname(td, uap)
1659 struct getsockname_args *uap;
1662 return (getsockname1(td, uap, 0));
1665 #ifdef COMPAT_OLDSOCK
1667 ogetsockname(td, uap)
1669 struct getsockname_args *uap;
1672 return (getsockname1(td, uap, 1));
1674 #endif /* COMPAT_OLDSOCK */
1677 * getpeername1() - Get name of peer for connected socket.
/* getpeername1(): mirrors getsockname1() for the remote address. */
1681 getpeername1(td, uap, compat)
1683 struct getpeername_args /* {
1685 struct sockaddr * __restrict asa;
1686 socklen_t * __restrict alen;
1690 struct sockaddr *sa;
1694 error = copyin(uap->alen, &len, sizeof (len));
1698 error = kern_getpeername(td, uap->fdes, &sa, &len);
1703 #ifdef COMPAT_OLDSOCK
1705 ((struct osockaddr *)sa)->sa_family = sa->sa_family;
1707 error = copyout(sa, uap->asa, (u_int)len);
1711 error = copyout(&len, uap->alen, sizeof(len));
/* kern_getpeername(): requires a connected (or confirming) socket. */
1716 kern_getpeername(struct thread *td, int fd, struct sockaddr **sa,
1721 cap_rights_t rights;
1726 error = getsock_cap(td->td_proc->p_fd, fd,
1727 cap_rights_init(&rights, CAP_GETPEERNAME), &fp, NULL);
1731 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
1736 CURVNET_SET(so->so_vnet);
1737 error = (*so->so_proto->pr_usrreqs->pru_peeraddr)(so, sa);
1744 len = MIN(*alen, (*sa)->sa_len);
1747 if (KTRPOINT(td, KTR_STRUCT))
1751 if (error != 0 && *sa != NULL) {
1752 free(*sa, M_SONAME);
1761 sys_getpeername(td, uap)
1763 struct getpeername_args *uap;
1766 return (getpeername1(td, uap, 0));
1769 #ifdef COMPAT_OLDSOCK
1771 ogetpeername(td, uap)
1773 struct ogetpeername_args *uap;
1776 /* XXX uap should have type `getpeername_args *' to begin with. */
1777 return (getpeername1(td, (struct getpeername_args *)uap, 1));
1779 #endif /* COMPAT_OLDSOCK */
/*
 * sockargs(): copy a userland buffer (sockaddr or control data) into a
 * freshly allocated mbuf; for MT_SONAME the sa_len field is normalized.
 * Old-style (K&R) parameter declaration.
 */
1782 sockargs(mp, buf, buflen, type)
1787 struct sockaddr *sa;
1791 if (buflen > MLEN) {
1792 #ifdef COMPAT_OLDSOCK
1793 if (type == MT_SONAME && buflen <= 112)
1794 buflen = MLEN; /* unix domain compat. hack */
1797 if (buflen > MCLBYTES)
1800 m = m_get2(buflen, M_WAITOK, type, 0);
1802 error = copyin(buf, mtod(m, caddr_t), (u_int)buflen);
1807 if (type == MT_SONAME) {
1808 sa = mtod(m, struct sockaddr *);
1810 #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
/* Old binaries left sa_family in what is now sa_len; swap it back. */
1811 if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1812 sa->sa_family = sa->sa_len;
1814 sa->sa_len = buflen;
/*
 * getsockaddr(): copy a sockaddr in from userland into M_SONAME storage.
 * Rejects lengths above SOCK_MAXADDRLEN and below the fixed header size.
 * On success the caller owns *namp (free with free(.., M_SONAME)).
 */
1821 getsockaddr(namp, uaddr, len)
1822 struct sockaddr **namp;
1826 struct sockaddr *sa;
1829 if (len > SOCK_MAXADDRLEN)
1830 return (ENAMETOOLONG);
1831 if (len < offsetof(struct sockaddr, sa_data[0]))
1833 sa = malloc(len, M_SONAME, M_WAITOK);
1834 error = copyin(uaddr, sa, len);
1838 #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
/* Same osockaddr family/len fixup as in sockargs() above. */
1839 if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1840 sa->sa_family = sa->sa_len;
1849 * Detach mapped page and release resources back to the system.
/*
 * sf_buf_mext(): mbuf external-storage free callback for sendfile pages —
 * unwire the page and free it if its object already went away.
 */
1852 sf_buf_mext(struct mbuf *mb, void *addr, void *args)
1855 struct sendfile_sync *sfs;
1857 m = sf_buf_page(args);
1860 vm_page_unwire(m, 0);
1862 * Check for the object going away on us. This can
1863 * happen since we don't hold a reference to it.
1864 * If so, we're responsible for freeing the page.
1866 if (m->wire_count == 0 && m->object == NULL)
1873 return (EXT_FREE_OK);
/* sf_sync_deref(): drop one reference; wake the waiter at zero. */
1877 sf_sync_deref(struct sendfile_sync *sfs)
1883 mtx_lock(&sfs->mtx);
1884 KASSERT(sfs->count> 0, ("Sendfile sync botchup count == 0"));
1885 if (--sfs->count == 0)
1886 cv_signal(&sfs->cv);
1887 mtx_unlock(&sfs->mtx);
1891 * Allocate a sendfile_sync state structure.
1893 * For now this only knows about the "sleep" sync, but later it will
1894 * grow various other personalities.
1896 struct sendfile_sync *
1897 sf_sync_alloc(uint32_t flags)
1899 struct sendfile_sync *sfs;
1901 sfs = malloc(sizeof *sfs, M_TEMP, M_WAITOK | M_ZERO);
1902 mtx_init(&sfs->mtx, "sendfile", NULL, MTX_DEF);
1903 cv_init(&sfs->cv, "sendfile");
1910 * Take a reference to a sfsync instance.
1912 * This has to map 1:1 to free calls coming in via sf_buf_mext(),
1913 * so typically this will be referenced once for each mbuf allocated.
1916 sf_sync_ref(struct sendfile_sync *sfs)
1922 mtx_lock(&sfs->mtx);
1924 mtx_unlock(&sfs->mtx);
/* sf_sync_syscall_wait(): block until all outstanding refs are released. */
1928 sf_sync_syscall_wait(struct sendfile_sync *sfs)
1934 mtx_lock(&sfs->mtx);
1935 if (sfs->count != 0)
1936 cv_wait(&sfs->cv, &sfs->mtx);
1937 KASSERT(sfs->count == 0, ("sendfile sync still busy"));
1938 mtx_unlock(&sfs->mtx);
/* sf_sync_free(): destroy the cv/mutex; must only run once count is 0. */
1942 sf_sync_free(struct sendfile_sync *sfs)
1949 * XXX we should ensure that nothing else has this
1950 * locked before freeing.
1952 mtx_lock(&sfs->mtx);
1953 KASSERT(sfs->count == 0, ("sendfile sync still busy"));
1954 cv_destroy(&sfs->cv);
1955 mtx_destroy(&sfs->mtx);
1962 * int sendfile(int fd, int s, off_t offset, size_t nbytes,
1963 * struct sf_hdtr *hdtr, off_t *sbytes, int flags)
1965 * Send a file specified by 'fd' and starting at 'offset' to a socket
1966 * specified by 's'. Send only 'nbytes' of the file or until EOF if nbytes ==
1967 * 0. Optionally add a header and/or trailer to the socket output. If
1968 * specified, write the total number of bytes sent into *sbytes.
1971 sys_sendfile(struct thread *td, struct sendfile_args *uap)
1974 return (do_sendfile(td, uap, 0));
/*
 * do_sendfile(): common sendfile(2) body shared with the FreeBSD 4 compat
 * shim; copies in the optional header/trailer iovecs, sets up SF_SYNC
 * waiting if requested, and dispatches to fo_sendfile().
 */
1978 do_sendfile(struct thread *td, struct sendfile_args *uap, int compat)
1980 struct sf_hdtr hdtr;
1981 struct uio *hdr_uio, *trl_uio;
1983 cap_rights_t rights;
1986 struct sendfile_sync *sfs;
1989 * File offset must be positive. If it goes beyond EOF
1990 * we send only the header/trailer and no payload data.
1992 if (uap->offset < 0)
1995 hdr_uio = trl_uio = NULL;
1998 if (uap->hdtr != NULL) {
1999 error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
2002 if (hdtr.headers != NULL) {
2003 error = copyinuio(hdtr.headers, hdtr.hdr_cnt, &hdr_uio);
2007 if (hdtr.trailers != NULL) {
2008 error = copyinuio(hdtr.trailers, hdtr.trl_cnt, &trl_uio);
2015 AUDIT_ARG_FD(uap->fd);
2018 * sendfile(2) can start at any offset within a file so we require
2019 * CAP_READ+CAP_SEEK = CAP_PREAD.
2021 if ((error = fget_read(td, uap->fd,
2022 cap_rights_init(&rights, CAP_PREAD), &fp)) != 0) {
2027 * If we need to wait for completion, initialise the sfsync
2030 if (uap->flags & SF_SYNC)
2031 sfs = sf_sync_alloc(uap->flags & SF_SYNC);
2033 error = fo_sendfile(fp, uap->s, hdr_uio, trl_uio, uap->offset,
2034 uap->nbytes, &sbytes, uap->flags, compat ? SFK_COMPAT : 0, sfs, td);
2037 * If appropriate, do the wait and free here.
2040 sf_sync_syscall_wait(sfs);
2045 * XXX Should we wait until the send has completed before freeing the source
2046 * file handle? It's the previous behaviour, sure, but is it required?
2047 * We've wired down the page references after all.
/* NOTE(review): copyout() result deliberately ignored here in the original. */
2051 if (uap->sbytes != NULL) {
2052 copyout(&sbytes, uap->sbytes, sizeof(off_t));
2055 free(hdr_uio, M_IOV);
2056 free(trl_uio, M_IOV);
2060 #ifdef COMPAT_FREEBSD4
/* freebsd4_sendfile(): repack the old ABI args and run in compat mode. */
2062 freebsd4_sendfile(struct thread *td, struct freebsd4_sendfile_args *uap)
2064 struct sendfile_args args;
2068 args.offset = uap->offset;
2069 args.nbytes = uap->nbytes;
2070 args.hdtr = uap->hdtr;
2071 args.sbytes = uap->sbytes;
2072 args.flags = uap->flags;
2074 return (do_sendfile(td, &args, 1));
2076 #endif /* COMPAT_FREEBSD4 */
/*
 * Look up and wire the page of 'obj' covering file offset 'off',
 * paging it in when it is not already valid. For vnode-backed
 * objects, I/O goes through vn_rdwr() with UIO_NOCOPY so the
 * sequential-read heuristic can drive read-ahead. A non-zero 'nd'
 * forbids disk I/O and is returned to the caller instead (used to
 * flush already-built mbufs before blocking). On success *res holds
 * a wired page valid for [off, off + xfsize).
 */
2079 sendfile_readpage(vm_object_t obj, struct vnode *vp, int nd,
2080 off_t off, int xfsize, int bsize, struct thread *td, vm_page_t *res)
2085 int error, readahead, rv;
2087 pindex = OFF_TO_IDX(off);
2088 VM_OBJECT_WLOCK(obj);
/* Grab wired; for vnodes leave the page unbusied so pageout can scan it. */
2089 m = vm_page_grab(obj, pindex, (vp != NULL ? VM_ALLOC_NOBUSY |
2090 VM_ALLOC_IGN_SBUSY : 0) | VM_ALLOC_WIRED | VM_ALLOC_NORMAL);
2093 * Check if page is valid for what we need, otherwise initiate I/O.
2095 * The non-zero nd argument prevents disk I/O, instead we
2096 * return the caller what he specified in nd. In particular,
2097 * if we already turned some pages into mbufs, nd == EAGAIN
2098 * and the main function send them the pages before we come
2099 * here again and block.
2101 if (m->valid != 0 && vm_page_is_valid(m, off & PAGE_MASK, xfsize)) {
2104 VM_OBJECT_WUNLOCK(obj);
2107 } else if (nd != 0) {
2115 * Get the page from backing store.
2119 VM_OBJECT_WUNLOCK(obj);
2120 readahead = sfreadahead * MAXBSIZE;
2123 * Use vn_rdwr() instead of the pager interface for
2124 * the vnode, to allow the read-ahead.
2126 * XXXMAC: Because we don't have fp->f_cred here, we
2127 * pass in NOCRED. This is probably wrong, but is
2128 * consistent with our original implementation.
2130 error = vn_rdwr(UIO_READ, vp, NULL, readahead, trunc_page(off),
2131 UIO_NOCOPY, IO_NODELOCKED | IO_VMIO | ((readahead /
2132 bsize) << IO_SEQSHIFT), td->td_ucred, NOCRED, &resid, td);
2133 SFSTAT_INC(sf_iocnt);
2134 VM_OBJECT_WLOCK(obj);
/*
 * NOTE(review): in this excerpt sf_iocnt appears to be bumped on both
 * the vn_rdwr() path above and the pager path below; presumably the
 * two paths are mutually exclusive — confirm against the full source.
 */
2136 if (vm_pager_has_page(obj, pindex, NULL, NULL)) {
2137 rv = vm_pager_get_pages(obj, &m, 1, 0);
2138 SFSTAT_INC(sf_iocnt);
/* The pager may have replaced the page; re-find it by index. */
2139 m = vm_page_lookup(obj, pindex);
2142 else if (rv != VM_PAGER_OK) {
2151 m->valid = VM_PAGE_BITS_ALL;
2159 } else if (m != NULL) {
/* Error path: drop our wiring on the page we grabbed. */
2162 vm_page_unwire(m, 0);
2165 * See if anyone else might know about this page. If
2166 * not and it is not valid, then free it.
2168 if (m->wire_count == 0 && m->valid == 0 && !vm_page_busied(m))
2172 KASSERT(error != 0 || (m->wire_count > 0 &&
2173 vm_page_is_valid(m, off & PAGE_MASK, xfsize)),
2174 ("wrong page state m %p off %#jx xfsize %d", m, (uintmax_t)off,
2176 VM_OBJECT_WUNLOCK(obj);
/*
 * Resolve the sendfile source descriptor into a referenced VM object.
 * Regular vnodes (DTYPE_VNODE, VREG only) and POSIX shm descriptors
 * (DTYPE_SHM) are accepted. Returns the object, its current size and
 * the filesystem I/O block size through the out parameters, with an
 * extra object reference held to survive forced vnode reclamation.
 */
2181 sendfile_getobj(struct thread *td, struct file *fp, vm_object_t *obj_res,
2182 struct vnode **vp_res, struct shmfd **shmfd_res, off_t *obj_size,
2188 struct shmfd *shmfd;
2191 vp = *vp_res = NULL;
2193 shmfd = *shmfd_res = NULL;
2197 * The file descriptor must be a regular file and have a
2198 * backing VM object.
2200 if (fp->f_type == DTYPE_VNODE) {
2202 vn_lock(vp, LK_SHARED | LK_RETRY);
2203 if (vp->v_type != VREG) {
2207 *bsize = vp->v_mount->mnt_stat.f_iosize;
/* Size comes from VOP_GETATTR so it reflects the current file length. */
2208 error = VOP_GETATTR(vp, &va, td->td_ucred);
2211 *obj_size = va.va_size;
2217 } else if (fp->f_type == DTYPE_SHM) {
2219 obj = shmfd->shm_object;
2220 *obj_size = shmfd->shm_size;
/* Reject objects already undergoing termination. */
2226 VM_OBJECT_WLOCK(obj);
2227 if ((obj->flags & OBJ_DEAD) != 0) {
2228 VM_OBJECT_WUNLOCK(obj);
2234 * Temporarily increase the backing VM object's reference
2235 * count so that a forced reclamation of its vnode does not
2236 * immediately destroy it.
2238 vm_object_reference_locked(obj);
2239 VM_OBJECT_WUNLOCK(obj);
/*
 * Resolve the sendfile target socket: require CAP_SEND rights on the
 * descriptor, and verify it is a connected SOCK_STREAM socket.
 * Returns both the held file (*sock_fp) and the socket (*so).
 */
2251 kern_sendfile_getsock(struct thread *td, int s, struct file **sock_fp,
2254 cap_rights_t rights;
2261 * The socket must be a stream socket and connected.
2263 error = getsock_cap(td->td_proc->p_fd, s, cap_rights_init(&rights,
2264 CAP_SEND), sock_fp, NULL);
2267 *so = (*sock_fp)->f_data;
2268 if ((*so)->so_type != SOCK_STREAM)
2270 if (((*so)->so_state & SS_ISCONNECTED) == 0)
/*
 * fo_sendfile implementation for vnode/shm descriptors: the core
 * zero-copy sendfile engine. Wires file pages, wraps them into
 * sf_buf-backed external mbufs and pushes them to the socket via
 * pru_send. Honors SF_MNOWAIT (fail with EAGAIN/ENOBUFS rather than
 * sleep on memory), SF_NODISKIO (never block on disk I/O), header and
 * trailer uio chains, and optional sendfile_sync completion tracking.
 * fsbytes counts file payload bytes; sbytes counts everything sent
 * (including headers) and is reported through *sent.
 */
2276 vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
2277 struct uio *trl_uio, off_t offset, size_t nbytes, off_t *sent, int flags,
2278 int kflags, struct sendfile_sync *sfs, struct thread *td)
2280 struct file *sock_fp;
2282 struct vm_object *obj;
2287 struct shmfd *shmfd;
2289 off_t off, xfsize, fsbytes, sbytes, rem, obj_size;
2290 int error, bsize, nd, hdrlen, mnw;
2291 bool inflight_called;
2297 fsbytes = sbytes = 0;
2301 inflight_called = false;
2303 error = sendfile_getobj(td, fp, &obj, &vp, &shmfd, &obj_size, &bsize);
2309 error = kern_sendfile_getsock(td, sockfd, &sock_fp, &so);
2314 * Do not wait on memory allocations but return ENOMEM for
2315 * caller to retry later.
2316 * XXX: Experimental.
2318 if (flags & SF_MNOWAIT)
2322 error = mac_socket_check_send(td->td_ucred, so);
2327 /* If headers are specified copy them into mbufs. */
2328 if (hdr_uio != NULL) {
2329 hdr_uio->uio_td = td;
2330 hdr_uio->uio_rw = UIO_WRITE;
2331 if (hdr_uio->uio_resid > 0) {
2333 * In FBSD < 5.0 the nbytes to send also included
2334 * the header. If compat is specified subtract the
2335 * header size from nbytes.
2337 if (kflags & SFK_COMPAT) {
2338 if (nbytes > hdr_uio->uio_resid)
2339 nbytes -= hdr_uio->uio_resid;
2343 m = m_uiotombuf(hdr_uio, (mnw ? M_NOWAIT : M_WAITOK),
2346 error = mnw ? EAGAIN : ENOBUFS;
2349 hdrlen = m_length(m, NULL);
2354 * Protect against multiple writers to the socket.
2356 * XXXRW: Historically this has assumed non-interruptibility, so now
2357 * we implement that, but possibly shouldn't.
2359 (void)sblock(&so->so_snd, SBL_WAIT | SBL_NOINTR);
2362 * Loop through the pages of the file, starting with the requested
2363 * offset. Get a file page (do I/O if necessary), map the file page
2364 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
2366 * This is done in two loops. The inner loop turns as many pages
2367 * as it can, up to available socket buffer space, without blocking
2368 * into mbufs to have it bulk delivered into the socket send buffer.
2369 * The outer loop checks the state and available space of the socket
2370 * and takes care of the overall progress.
2372 for (off = offset; ; ) {
/* Done when nbytes (or, for nbytes == 0, the whole object) is sent. */
2378 if ((nbytes != 0 && nbytes == fsbytes) ||
2379 (nbytes == 0 && obj_size == fsbytes))
2388 * Check the socket state for ongoing connection,
2389 * no errors and space in socket buffer.
2390 * If space is low allow for the remainder of the
2391 * file to be processed if it fits the socket buffer.
2392 * Otherwise block in waiting for sufficient space
2393 * to proceed, or if the socket is nonblocking, return
2394 * to userland with EAGAIN while reporting how far
2396 * We wait until the socket buffer has significant free
2397 * space to do bulk sends. This makes good use of file
2398 * system read ahead and allows packet segmentation
2399 * offloading hardware to take over lots of work. If
2400 * we were not careful here we would send off only one
2403 SOCKBUF_LOCK(&so->so_snd);
2404 if (so->so_snd.sb_lowat < so->so_snd.sb_hiwat / 2)
2405 so->so_snd.sb_lowat = so->so_snd.sb_hiwat / 2;
2407 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
2409 SOCKBUF_UNLOCK(&so->so_snd);
2411 } else if (so->so_error) {
2412 error = so->so_error;
2414 SOCKBUF_UNLOCK(&so->so_snd);
2417 space = sbspace(&so->so_snd);
2420 space < so->so_snd.sb_lowat)) {
2421 if (so->so_state & SS_NBIO) {
2422 SOCKBUF_UNLOCK(&so->so_snd);
2427 * sbwait drops the lock while sleeping.
2428 * When we loop back to retry_space the
2429 * state may have changed and we retest
2432 error = sbwait(&so->so_snd);
2434 * An error from sbwait usually indicates that we've
2435 * been interrupted by a signal. If we've sent anything
2436 * then return bytes sent, otherwise return the error.
2439 SOCKBUF_UNLOCK(&so->so_snd);
2444 SOCKBUF_UNLOCK(&so->so_snd);
2447 * Reduce space in the socket buffer by the size of
2448 * the header mbuf chain.
2449 * hdrlen is set to 0 after the first loop.
/* Re-validate the file size each pass; it may change under us. */
2454 error = vn_lock(vp, LK_SHARED);
2457 error = VOP_GETATTR(vp, &va, td->td_ucred);
2458 if (error != 0 || off >= va.va_size) {
2462 obj_size = va.va_size;
2466 * Loop and construct maximum sized mbuf chain to be bulk
2467 * dumped into socket buffer.
2469 while (space > loopbytes) {
2474 * Calculate the amount to transfer.
2475 * Not to exceed a page, the EOF,
2476 * or the passed in nbytes.
2478 pgoff = (vm_offset_t)(off & PAGE_MASK);
2479 rem = obj_size - offset;
2481 rem = omin(rem, nbytes);
2482 rem -= fsbytes + loopbytes;
2483 xfsize = omin(PAGE_SIZE - pgoff, rem);
2484 xfsize = omin(space - loopbytes, xfsize);
2486 done = 1; /* all data sent */
2491 * Attempt to look up the page. Allocate
2492 * if not found or wait and loop if busy.
2495 nd = EAGAIN; /* send what we already got */
2496 else if ((flags & SF_NODISKIO) != 0)
2500 error = sendfile_readpage(obj, vp, nd, off,
2501 xfsize, bsize, td, &pg);
2503 if (error == EAGAIN)
2504 error = 0; /* not a real error */
2509 * Get a sendfile buf. When allocating the
2510 * first buffer for mbuf chain, we usually
2511 * wait as long as necessary, but this wait
2512 * can be interrupted. For consequent
2513 * buffers, do not sleep, since several
2514 * threads might exhaust the buffers and then
2517 sf = sf_buf_alloc(pg, (mnw || m != NULL) ? SFB_NOWAIT :
2520 SFSTAT_INC(sf_allocfail);
2522 vm_page_unwire(pg, 0);
2523 KASSERT(pg->object != NULL,
2524 ("%s: object disappeared", __func__));
2527 error = (mnw ? EAGAIN : EINTR);
2532 * Get an mbuf and set it up as having
2535 m0 = m_get((mnw ? M_NOWAIT : M_WAITOK), MT_DATA);
2537 error = (mnw ? EAGAIN : ENOBUFS);
/* Release the sf_buf (and page wiring) via the mext destructor. */
2538 (void)sf_buf_mext(NULL, NULL, sf);
2541 if (m_extadd(m0, (caddr_t )sf_buf_kva(sf), PAGE_SIZE,
2542 sf_buf_mext, sfs, sf, M_RDONLY, EXT_SFBUF,
2543 (mnw ? M_NOWAIT : M_WAITOK)) != 0) {
2544 error = (mnw ? EAGAIN : ENOBUFS);
2545 (void)sf_buf_mext(NULL, NULL, sf);
2549 m0->m_data = (char *)sf_buf_kva(sf) + pgoff;
2552 /* Append to mbuf chain. */
2556 m_last(m)->m_next = m0;
2561 /* Keep track of bits processed. */
2562 loopbytes += xfsize;
2566 * XXX eventually this should be a sfsync
2576 /* Add the buffer chain to the socket buffer. */
2580 mlen = m_length(m, NULL);
2581 SOCKBUF_LOCK(&so->so_snd);
2582 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
2584 SOCKBUF_UNLOCK(&so->so_snd);
2587 SOCKBUF_UNLOCK(&so->so_snd);
2588 CURVNET_SET(so->so_vnet);
2589 /* Avoid error aliasing. */
2590 err = (*so->so_proto->pr_usrreqs->pru_send)
2591 (so, 0, m, NULL, NULL, td);
2595 * We need two counters to get the
2596 * file offset and nbytes to send
2598 * - sbytes contains the total amount
2599 * of bytes sent, including headers.
2600 * - fsbytes contains the total amount
2601 * of bytes sent from the file.
2609 } else if (error == 0)
2611 m = NULL; /* pru_send always consumes */
2614 /* Quit outer loop on error or when we're done. */
2622 * Send trailers. Wimp out and use writev(2).
2624 if (trl_uio != NULL) {
2625 sbunlock(&so->so_snd);
2626 error = kern_writev(td, sockfd, trl_uio);
2628 sbytes += td->td_retval[0];
2633 sbunlock(&so->so_snd);
2636 * If there was no error we have to clear td->td_retval[0]
2637 * because it may have been set by writev.
2640 td->td_retval[0] = 0;
/* Drop the object reference taken by sendfile_getobj(). */
2646 vm_object_deallocate(obj);
2652 if (error == ERESTART)
2660 * Functionality only compiled in if SCTP is defined in the kernel Makefile,
2661 * otherwise all return EOPNOTSUPP.
2662 * XXX: We should make this loadable one day.
/*
 * sctp_peeloff(2): detach ("peel off") the SCTP association named by
 * uap->name from the one-to-many socket uap->sd into a brand-new
 * socket, returning its descriptor in td->td_retval[0]. Compiled to
 * a stub returning EOPNOTSUPP when SCTP is not configured.
 */
2665 sys_sctp_peeloff(td, uap)
2667 struct sctp_peeloff_args /* {
2672 #if (defined(INET) || defined(INET6)) && defined(SCTP)
2673 struct file *nfp = NULL;
2674 struct socket *head, *so;
2675 cap_rights_t rights;
2679 AUDIT_ARG_FD(uap->sd);
/* The parent descriptor needs the dedicated CAP_PEELOFF right. */
2680 error = fgetsock(td, uap->sd, cap_rights_init(&rights, CAP_PEELOFF),
2684 if (head->so_proto->pr_protocol != IPPROTO_SCTP) {
2688 error = sctp_can_peel_off(head, (sctp_assoc_t)uap->name);
2692 * At this point we know we do have a assoc to pull
2693 * we proceed to get the fd setup. This may block
2697 error = falloc(td, &nfp, &fd, 0);
2700 td->td_retval[0] = fd;
2702 CURVNET_SET(head->so_vnet);
2703 so = sonewconn(head, SS_ISCONNECTED);
2709 * Before changing the flags on the socket, we have to bump the
2710 * reference count. Otherwise, if the protocol calls sofree(),
2711 * the socket will be released due to a zero refcount.
2714 soref(so); /* file descriptor reference */
/* Take the new socket off the parent's completion queue. */
2719 TAILQ_REMOVE(&head->so_comp, so, so_list);
2721 so->so_state |= (head->so_state & SS_NBIO);
2722 so->so_state &= ~SS_NOFDREF;
2723 so->so_qstate &= ~SQ_COMP;
2726 finit(nfp, fflag, DTYPE_SOCKET, so, &socketops);
2727 error = sctp_do_peeloff(head, so, (sctp_assoc_t)uap->name);
/* Inherit async-I/O ownership (SIGIO recipient) from the parent. */
2730 if (head->so_sigio != NULL)
2731 fsetown(fgetown(&head->so_sigio), &so->so_sigio);
2735 * close the new descriptor, assuming someone hasn't ripped it
2736 * out from under us.
2739 fdclose(td->td_proc->p_fd, nfp, fd, td);
2742 * Release explicitly held references before returning.
2752 return (EOPNOTSUPP);
/*
 * sctp_generic_sendmsg(2): send a single buffer (uap->msg/uap->mlen)
 * on an SCTP socket with optional sctp_sndrcvinfo ancillary data and
 * optional destination address. EOPNOTSUPP stub without SCTP.
 */
2757 sys_sctp_generic_sendmsg (td, uap)
2759 struct sctp_generic_sendmsg_args /* {
2765 struct sctp_sndrcvinfo *sinfo,
2769 #if (defined(INET) || defined(INET6)) && defined(SCTP)
2770 struct sctp_sndrcvinfo sinfo, *u_sinfo = NULL;
2772 struct file *fp = NULL;
2773 struct sockaddr *to = NULL;
2775 struct uio *ktruio = NULL;
2778 struct iovec iov[1];
2779 cap_rights_t rights;
2782 if (uap->sinfo != NULL) {
2783 error = copyin(uap->sinfo, &sinfo, sizeof (sinfo));
/* A non-zero tolen means an explicit destination: also need CAP_CONNECT. */
2789 cap_rights_init(&rights, CAP_SEND);
2790 if (uap->tolen != 0) {
2791 error = getsockaddr(&to, uap->to, uap->tolen);
2796 cap_rights_set(&rights, CAP_CONNECT);
2799 AUDIT_ARG_FD(uap->sd);
2800 error = getsock_cap(td->td_proc->p_fd, uap->sd, &rights, &fp, NULL);
2804 if (to && (KTRPOINT(td, KTR_STRUCT)))
/* Build a one-element uio over the caller's buffer. */
2808 iov[0].iov_base = uap->msg;
2809 iov[0].iov_len = uap->mlen;
2811 so = (struct socket *)fp->f_data;
2812 if (so->so_proto->pr_protocol != IPPROTO_SCTP) {
2817 error = mac_socket_check_send(td->td_ucred, so);
2823 auio.uio_iovcnt = 1;
2824 auio.uio_segflg = UIO_USERSPACE;
2825 auio.uio_rw = UIO_WRITE;
2827 auio.uio_offset = 0; /* XXX */
2829 len = auio.uio_resid = uap->mlen;
2830 CURVNET_SET(so->so_vnet);
2831 error = sctp_lower_sosend(so, to, &auio, (struct mbuf *)NULL,
2832 (struct mbuf *)NULL, uap->flags, u_sinfo, td);
/* A partial send before interruption counts as success. */
2835 if (auio.uio_resid != len && (error == ERESTART ||
2836 error == EINTR || error == EWOULDBLOCK))
2838 /* Generation of SIGPIPE can be controlled per socket. */
2839 if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
2840 !(uap->flags & MSG_NOSIGNAL)) {
2841 PROC_LOCK(td->td_proc);
2842 tdsignal(td, SIGPIPE);
2843 PROC_UNLOCK(td->td_proc);
2847 td->td_retval[0] = len - auio.uio_resid;
2849 if (ktruio != NULL) {
2850 ktruio->uio_resid = td->td_retval[0];
2851 ktrgenio(uap->sd, UIO_WRITE, ktruio, error);
2861 return (EOPNOTSUPP);
/*
 * sctp_generic_sendmsg_iov(2): scatter/gather variant of
 * sctp_generic_sendmsg(2) — the payload is described by a
 * user iovec array instead of a single buffer. Handles 32-bit
 * compat iovec layout under COMPAT_FREEBSD32. EOPNOTSUPP stub
 * without SCTP.
 */
2866 sys_sctp_generic_sendmsg_iov(td, uap)
2868 struct sctp_generic_sendmsg_iov_args /* {
2874 struct sctp_sndrcvinfo *sinfo,
2878 #if (defined(INET) || defined(INET6)) && defined(SCTP)
2879 struct sctp_sndrcvinfo sinfo, *u_sinfo = NULL;
2881 struct file *fp = NULL;
2882 struct sockaddr *to = NULL;
2884 struct uio *ktruio = NULL;
2887 struct iovec *iov, *tiov;
2888 cap_rights_t rights;
2892 if (uap->sinfo != NULL) {
2893 error = copyin(uap->sinfo, &sinfo, sizeof (sinfo));
/* An explicit destination address additionally requires CAP_CONNECT. */
2898 cap_rights_init(&rights, CAP_SEND);
2899 if (uap->tolen != 0) {
2900 error = getsockaddr(&to, uap->to, uap->tolen);
2905 cap_rights_set(&rights, CAP_CONNECT);
2908 AUDIT_ARG_FD(uap->sd);
2909 error = getsock_cap(td->td_proc->p_fd, uap->sd, &rights, &fp, NULL);
2913 #ifdef COMPAT_FREEBSD32
2914 if (SV_CURPROC_FLAG(SV_ILP32))
2915 error = freebsd32_copyiniov((struct iovec32 *)uap->iov,
2916 uap->iovlen, &iov, EMSGSIZE);
2919 error = copyiniov(uap->iov, uap->iovlen, &iov, EMSGSIZE);
2923 if (to && (KTRPOINT(td, KTR_STRUCT)))
2927 so = (struct socket *)fp->f_data;
2928 if (so->so_proto->pr_protocol != IPPROTO_SCTP) {
2933 error = mac_socket_check_send(td->td_ucred, so);
2939 auio.uio_iovcnt = uap->iovlen;
2940 auio.uio_segflg = UIO_USERSPACE;
2941 auio.uio_rw = UIO_WRITE;
2943 auio.uio_offset = 0; /* XXX */
/* Sum iovec lengths, rejecting totals that overflow a signed resid. */
2946 for (i = 0; i <uap->iovlen; i++, tiov++) {
2947 if ((auio.uio_resid += tiov->iov_len) < 0) {
2952 len = auio.uio_resid;
2953 CURVNET_SET(so->so_vnet);
2954 error = sctp_lower_sosend(so, to, &auio,
2955 (struct mbuf *)NULL, (struct mbuf *)NULL,
2956 uap->flags, u_sinfo, td);
/* A partial send before interruption counts as success. */
2959 if (auio.uio_resid != len && (error == ERESTART ||
2960 error == EINTR || error == EWOULDBLOCK))
2962 /* Generation of SIGPIPE can be controlled per socket */
2963 if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
2964 !(uap->flags & MSG_NOSIGNAL)) {
2965 PROC_LOCK(td->td_proc);
2966 tdsignal(td, SIGPIPE);
2967 PROC_UNLOCK(td->td_proc);
2971 td->td_retval[0] = len - auio.uio_resid;
2973 if (ktruio != NULL) {
2974 ktruio->uio_resid = td->td_retval[0];
2975 ktrgenio(uap->sd, UIO_WRITE, ktruio, error);
2987 return (EOPNOTSUPP);
/*
 * sctp_generic_recvmsg(2): receive a message on an SCTP socket into a
 * user iovec array, optionally returning the peer address, the
 * sctp_sndrcvinfo describing the message, and the receive msg_flags.
 * EOPNOTSUPP stub without SCTP.
 */
2992 sys_sctp_generic_recvmsg(td, uap)
2994 struct sctp_generic_recvmsg_args /* {
2998 struct sockaddr *from,
2999 __socklen_t *fromlenaddr,
3000 struct sctp_sndrcvinfo *sinfo,
3004 #if (defined(INET) || defined(INET6)) && defined(SCTP)
/* On-stack storage for the source address; avoids a malloc per call. */
3005 uint8_t sockbufstore[256];
3007 struct iovec *iov, *tiov;
3008 struct sctp_sndrcvinfo sinfo;
3010 struct file *fp = NULL;
3011 struct sockaddr *fromsa;
3012 cap_rights_t rights;
3014 struct uio *ktruio = NULL;
3017 int error, fromlen, i, msg_flags;
3019 AUDIT_ARG_FD(uap->sd);
3020 error = getsock_cap(td->td_proc->p_fd, uap->sd,
3021 cap_rights_init(&rights, CAP_RECV), &fp, NULL);
3024 #ifdef COMPAT_FREEBSD32
3025 if (SV_CURPROC_FLAG(SV_ILP32))
3026 error = freebsd32_copyiniov((struct iovec32 *)uap->iov,
3027 uap->iovlen, &iov, EMSGSIZE);
3030 error = copyiniov(uap->iov, uap->iovlen, &iov, EMSGSIZE);
3035 if (so->so_proto->pr_protocol != IPPROTO_SCTP) {
3040 error = mac_socket_check_receive(td->td_ucred, so);
3045 if (uap->fromlenaddr != NULL) {
3046 error = copyin(uap->fromlenaddr, &fromlen, sizeof (fromlen));
3052 if (uap->msg_flags) {
3053 error = copyin(uap->msg_flags, &msg_flags, sizeof (int));
3060 auio.uio_iovcnt = uap->iovlen;
3061 auio.uio_segflg = UIO_USERSPACE;
3062 auio.uio_rw = UIO_READ;
3064 auio.uio_offset = 0; /* XXX */
/* Sum iovec lengths, rejecting totals that overflow a signed resid. */
3067 for (i = 0; i <uap->iovlen; i++, tiov++) {
3068 if ((auio.uio_resid += tiov->iov_len) < 0) {
3073 len = auio.uio_resid;
3074 fromsa = (struct sockaddr *)sockbufstore;
3077 if (KTRPOINT(td, KTR_GENIO))
3078 ktruio = cloneuio(&auio);
3080 memset(&sinfo, 0, sizeof(struct sctp_sndrcvinfo));
3081 CURVNET_SET(so->so_vnet);
3082 error = sctp_sorecvmsg(so, &auio, (struct mbuf **)NULL,
3083 fromsa, fromlen, &msg_flags,
3084 (struct sctp_sndrcvinfo *)&sinfo, 1);
/* Partial data received before interruption counts as success. */
3087 if (auio.uio_resid != len && (error == ERESTART ||
3088 error == EINTR || error == EWOULDBLOCK))
3092 error = copyout(&sinfo, uap->sinfo, sizeof (sinfo));
3095 if (ktruio != NULL) {
3096 ktruio->uio_resid = len - auio.uio_resid;
3097 ktrgenio(uap->sd, UIO_READ, ktruio, error);
3102 td->td_retval[0] = len - auio.uio_resid;
3104 if (fromlen && uap->from) {
/* NOTE(review): 'fromsa == 0' compares a pointer against literal 0. */
3106 if (len <= 0 || fromsa == 0)
3109 len = MIN(len, fromsa->sa_len);
3110 error = copyout(fromsa, uap->from, (size_t)len);
3114 error = copyout(&len, uap->fromlenaddr, sizeof (socklen_t));
3119 if (KTRPOINT(td, KTR_STRUCT))
3120 ktrsockaddr(fromsa);
3122 if (uap->msg_flags) {
3123 error = copyout(&msg_flags, uap->msg_flags, sizeof (int));
3135 return (EOPNOTSUPP);