2 * Copyright (c) 1982, 1986, 1989, 1990, 1993
3 * The Regents of the University of California. All rights reserved.
5 * sendfile(2) and related extensions:
6 * Copyright (c) 1998, David Greenman. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * @(#)uipc_syscalls.c 8.4 (Berkeley) 2/21/94
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include "opt_capsicum.h"
40 #include "opt_inet6.h"
42 #include "opt_compat.h"
43 #include "opt_ktrace.h"
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/capsicum.h>
48 #include <sys/condvar.h>
49 #include <sys/kernel.h>
51 #include <sys/mutex.h>
52 #include <sys/sysproto.h>
53 #include <sys/malloc.h>
54 #include <sys/filedesc.h>
55 #include <sys/event.h>
57 #include <sys/fcntl.h>
59 #include <sys/filio.h>
62 #include <sys/mount.h>
64 #include <sys/protosw.h>
65 #include <sys/rwlock.h>
66 #include <sys/sf_buf.h>
67 #include <sys/sf_sync.h>
68 #include <sys/sf_base.h>
69 #include <sys/sysent.h>
70 #include <sys/socket.h>
71 #include <sys/socketvar.h>
72 #include <sys/signalvar.h>
73 #include <sys/syscallsubr.h>
74 #include <sys/sysctl.h>
76 #include <sys/vnode.h>
78 #include <sys/ktrace.h>
80 #ifdef COMPAT_FREEBSD32
81 #include <compat/freebsd32/freebsd32_util.h>
86 #include <security/audit/audit.h>
87 #include <security/mac/mac_framework.h>
90 #include <vm/vm_param.h>
91 #include <vm/vm_object.h>
92 #include <vm/vm_page.h>
93 #include <vm/vm_pager.h>
94 #include <vm/vm_kern.h>
95 #include <vm/vm_extern.h>
98 #if defined(INET) || defined(INET6)
100 #include <netinet/sctp.h>
101 #include <netinet/sctp_peeloff.h>
103 #endif /* INET || INET6 */
106 * Flags for accept1() and kern_accept4(), in addition to SOCK_CLOEXEC
109 #define ACCEPT4_INHERIT 0x1
110 #define ACCEPT4_COMPAT 0x2
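/*
 * ACCEPT4_INHERIT makes the accepted socket inherit the listener's
 * non-blocking/async state and signal ownership (the historical
 * accept(2) behaviour); ACCEPT4_COMPAT returns the peer address in the
 * old 4.3BSD osockaddr layout for the COMPAT_OLDSOCK entry points.
 */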
112 static int sendit(struct thread *td, int s, struct msghdr *mp, int flags);
113 static int recvit(struct thread *td, int s, struct msghdr *mp, void *namelenp);
115 static int accept1(struct thread *td, int s, struct sockaddr *uname,
116 socklen_t *anamelen, int flags);
117 static int do_sendfile(struct thread *td, struct sendfile_args *uap,
119 static int getsockname1(struct thread *td, struct getsockname_args *uap,
121 static int getpeername1(struct thread *td, struct getpeername_args *uap,
124 counter_u64_t sfstat[sizeof(struct sfstat) / sizeof(uint64_t)];
126 static int filt_sfsync_attach(struct knote *kn);
127 static void filt_sfsync_detach(struct knote *kn);
128 static int filt_sfsync(struct knote *kn, long hint);
131 * sendfile(2)-related variables and associated sysctls
133 static SYSCTL_NODE(_kern_ipc, OID_AUTO, sendfile, CTLFLAG_RW, 0,
134 "sendfile(2) tunables");
135 static int sfreadahead = 1;
136 SYSCTL_INT(_kern_ipc_sendfile, OID_AUTO, readahead, CTLFLAG_RW,
137 &sfreadahead, 0, "Number of sendfile(2) read-ahead MAXBSIZE blocks");
140 static int sf_sync_debug = 0;
141 SYSCTL_INT(_debug, OID_AUTO, sf_sync_debug, CTLFLAG_RW,
142 &sf_sync_debug, 0, "Output debugging during sf_sync lifecycle");
143 #define SFSYNC_DPRINTF(s, ...) \
146 printf((s), ##__VA_ARGS__); \
149 #define SFSYNC_DPRINTF(c, ...)
152 static uma_zone_t zone_sfsync;
154 static struct filterops sendfile_filtops = {
156 .f_attach = filt_sfsync_attach,
157 .f_detach = filt_sfsync_detach,
158 .f_event = filt_sfsync,
162 sfstat_init(const void *unused)
165 COUNTER_ARRAY_ALLOC(sfstat, sizeof(struct sfstat) / sizeof(uint64_t),
168 SYSINIT(sfstat, SI_SUB_MBUF, SI_ORDER_FIRST, sfstat_init, NULL);
171 sf_sync_init(const void *unused)
174 zone_sfsync = uma_zcreate("sendfile_sync", sizeof(struct sendfile_sync),
179 kqueue_add_filteropts(EVFILT_SENDFILE, &sendfile_filtops);
181 SYSINIT(sf_sync, SI_SUB_MBUF, SI_ORDER_FIRST, sf_sync_init, NULL);
184 sfstat_sysctl(SYSCTL_HANDLER_ARGS)
188 COUNTER_ARRAY_COPY(sfstat, &s, sizeof(s) / sizeof(uint64_t));
190 COUNTER_ARRAY_ZERO(sfstat, sizeof(s) / sizeof(uint64_t));
191 return (SYSCTL_OUT(req, &s, sizeof(s)));
193 SYSCTL_PROC(_kern_ipc, OID_AUTO, sfstat, CTLTYPE_OPAQUE | CTLFLAG_RW,
194 NULL, 0, sfstat_sysctl, "I", "sendfile statistics");
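/*
 * A minimal userland sketch for reading the counters above; the node
 * exports an opaque struct sfstat and, per sfstat_sysctl(), a write to
 * the node zeroes the statistics:
 *
 *	struct sfstat sf;
 *	size_t len = sizeof(sf);
 *	if (sysctlbyname("kern.ipc.sfstat", &sf, &len, NULL, 0) == 0)
 *		printf("%ju sendfile disk I/Os\n", (uintmax_t)sf.sf_iocnt);
 */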
197 * Convert a user file descriptor to a kernel file entry and check if required
198 * capability rights are present.
199 * A reference on the file entry is held upon returning.
202 getsock_cap(struct filedesc *fdp, int fd, cap_rights_t *rightsp,
203 struct file **fpp, u_int *fflagp)
208 error = fget_unlocked(fdp, fd, rightsp, 0, &fp, NULL);
211 if (fp->f_type != DTYPE_SOCKET) {
212 fdrop(fp, curthread);
216 *fflagp = fp->f_flag;
222 * System call interface to the socket abstraction.
224 #if defined(COMPAT_43)
225 #define COMPAT_OLDSOCK
231 struct socket_args /* {
239 int fd, error, type, oflag, fflag;
241 AUDIT_ARG_SOCKET(uap->domain, uap->type, uap->protocol);
246 if ((type & SOCK_CLOEXEC) != 0) {
247 type &= ~SOCK_CLOEXEC;
250 if ((type & SOCK_NONBLOCK) != 0) {
251 type &= ~SOCK_NONBLOCK;
256 error = mac_socket_check_create(td->td_ucred, uap->domain, type,
261 error = falloc(td, &fp, &fd, oflag);
264 /* An extra reference on `fp' has been held for us by falloc(). */
265 error = socreate(uap->domain, &so, type, uap->protocol,
268 fdclose(td->td_proc->p_fd, fp, fd, td);
270 finit(fp, FREAD | FWRITE | fflag, DTYPE_SOCKET, so, &socketops);
271 if ((fflag & FNONBLOCK) != 0)
272 (void) fo_ioctl(fp, FIONBIO, &fflag, td->td_ucred, td);
273 td->td_retval[0] = fd;
283 struct bind_args /* {
292 error = getsockaddr(&sa, uap->name, uap->namelen);
294 error = kern_bind(td, uap->s, sa);
301 kern_bindat(struct thread *td, int dirfd, int fd, struct sockaddr *sa)
309 AUDIT_ARG_SOCKADDR(td, dirfd, sa);
310 error = getsock_cap(td->td_proc->p_fd, fd,
311 cap_rights_init(&rights, CAP_BIND), &fp, NULL);
316 if (KTRPOINT(td, KTR_STRUCT))
320 error = mac_socket_check_bind(td->td_ucred, so, sa);
323 if (dirfd == AT_FDCWD)
324 error = sobind(so, sa, td);
326 error = sobindat(dirfd, so, sa, td);
335 kern_bind(struct thread *td, int fd, struct sockaddr *sa)
338 return (kern_bindat(td, AT_FDCWD, fd, sa));
345 struct bindat_args /* {
355 error = getsockaddr(&sa, uap->name, uap->namelen);
357 error = kern_bindat(td, uap->fd, uap->s, sa);
367 struct listen_args /* {
377 AUDIT_ARG_FD(uap->s);
378 error = getsock_cap(td->td_proc->p_fd, uap->s,
379 cap_rights_init(&rights, CAP_LISTEN), &fp, NULL);
383 error = mac_socket_check_listen(td->td_ucred, so);
386 error = solisten(so, uap->backlog, td);
396 accept1(td, s, uname, anamelen, flags)
399 struct sockaddr *uname;
403 struct sockaddr *name;
409 return (kern_accept4(td, s, NULL, NULL, flags, NULL));
411 error = copyin(anamelen, &namelen, sizeof (namelen));
415 error = kern_accept4(td, s, &name, &namelen, flags, &fp);
420 if (error == 0 && uname != NULL) {
421 #ifdef COMPAT_OLDSOCK
422 if (flags & ACCEPT4_COMPAT)
423 ((struct osockaddr *)name)->sa_family =
426 error = copyout(name, uname, namelen);
429 error = copyout(&namelen, anamelen,
432 fdclose(td->td_proc->p_fd, fp, td->td_retval[0], td);
434 free(name, M_SONAME);
439 kern_accept(struct thread *td, int s, struct sockaddr **name,
440 socklen_t *namelen, struct file **fp)
442 return (kern_accept4(td, s, name, namelen, ACCEPT4_INHERIT, fp));
446 kern_accept4(struct thread *td, int s, struct sockaddr **name,
447 socklen_t *namelen, int flags, struct file **fp)
449 struct filedesc *fdp;
450 struct file *headfp, *nfp = NULL;
451 struct sockaddr *sa = NULL;
452 struct socket *head, *so;
462 fdp = td->td_proc->p_fd;
463 error = getsock_cap(fdp, s, cap_rights_init(&rights, CAP_ACCEPT),
467 head = headfp->f_data;
468 if ((head->so_options & SO_ACCEPTCONN) == 0) {
473 error = mac_socket_check_accept(td->td_ucred, head);
477 error = falloc(td, &nfp, &fd, (flags & SOCK_CLOEXEC) ? O_CLOEXEC : 0);
481 if ((head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->so_comp)) {
486 while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0) {
487 if (head->so_rcv.sb_state & SBS_CANTRCVMORE) {
488 head->so_error = ECONNABORTED;
491 error = msleep(&head->so_timeo, &accept_mtx, PSOCK | PCATCH,
498 if (head->so_error) {
499 error = head->so_error;
504 so = TAILQ_FIRST(&head->so_comp);
505 KASSERT(!(so->so_qstate & SQ_INCOMP), ("accept1: so SQ_INCOMP"));
506 KASSERT(so->so_qstate & SQ_COMP, ("accept1: so not SQ_COMP"));
509 * Before changing the flags on the socket, we have to bump the
510 * reference count. Otherwise, if the protocol calls sofree(),
511 * the socket will be released due to a zero refcount.
513 SOCK_LOCK(so); /* soref() and so_state update */
514 soref(so); /* file descriptor reference */
516 TAILQ_REMOVE(&head->so_comp, so, so_list);
518 if (flags & ACCEPT4_INHERIT)
519 so->so_state |= (head->so_state & SS_NBIO);
521 so->so_state |= (flags & SOCK_NONBLOCK) ? SS_NBIO : 0;
522 so->so_qstate &= ~SQ_COMP;
528 /* An extra reference on `nfp' has been held for us by falloc(). */
529 td->td_retval[0] = fd;
531 /* connection has been removed from the listen queue */
532 KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0);
534 if (flags & ACCEPT4_INHERIT) {
535 pgid = fgetown(&head->so_sigio);
537 fsetown(pgid, &so->so_sigio);
539 fflag &= ~(FNONBLOCK | FASYNC);
540 if (flags & SOCK_NONBLOCK)
544 finit(nfp, fflag, DTYPE_SOCKET, so, &socketops);
545 /* Sync socket nonblocking/async state with file flags */
546 tmp = fflag & FNONBLOCK;
547 (void) fo_ioctl(nfp, FIONBIO, &tmp, td->td_ucred, td);
548 tmp = fflag & FASYNC;
549 (void) fo_ioctl(nfp, FIOASYNC, &tmp, td->td_ucred, td);
551 error = soaccept(so, &sa);
559 AUDIT_ARG_SOCKADDR(td, AT_FDCWD, sa);
561 /* check sa_len before it is destroyed */
562 if (*namelen > sa->sa_len)
563 *namelen = sa->sa_len;
565 if (KTRPOINT(td, KTR_STRUCT))
575 * close the new descriptor, assuming someone hasn't ripped it
579 fdclose(fdp, nfp, fd, td);
582 * Release explicitly held references before returning. We return
583 * a reference on nfp to the caller on success if they request it.
602 struct accept_args *uap;
605 return (accept1(td, uap->s, uap->name, uap->anamelen, ACCEPT4_INHERIT));
611 struct accept4_args *uap;
614 if (uap->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
617 return (accept1(td, uap->s, uap->name, uap->anamelen, uap->flags));
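/*
 * Userland sketch: accept4(2) applies the descriptor flags atomically,
 * avoiding a separate fcntl(2) round trip after accept(2):
 *
 *	int fd = accept4(s, NULL, NULL, SOCK_CLOEXEC | SOCK_NONBLOCK);
 */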
620 #ifdef COMPAT_OLDSOCK
624 struct accept_args *uap;
627 return (accept1(td, uap->s, uap->name, uap->anamelen,
628 ACCEPT4_INHERIT | ACCEPT4_COMPAT));
630 #endif /* COMPAT_OLDSOCK */
636 struct connect_args /* {
645 error = getsockaddr(&sa, uap->name, uap->namelen);
647 error = kern_connect(td, uap->s, sa);
654 kern_connectat(struct thread *td, int dirfd, int fd, struct sockaddr *sa)
659 int error, interrupted = 0;
662 AUDIT_ARG_SOCKADDR(td, dirfd, sa);
663 error = getsock_cap(td->td_proc->p_fd, fd,
664 cap_rights_init(&rights, CAP_CONNECT), &fp, NULL);
668 if (so->so_state & SS_ISCONNECTING) {
673 if (KTRPOINT(td, KTR_STRUCT))
677 error = mac_socket_check_connect(td->td_ucred, so, sa);
681 if (dirfd == AT_FDCWD)
682 error = soconnect(so, sa, td);
684 error = soconnectat(dirfd, so, sa, td);
687 if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
692 while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
693 error = msleep(&so->so_timeo, SOCK_MTX(so), PSOCK | PCATCH,
696 if (error == EINTR || error == ERESTART)
702 error = so->so_error;
708 so->so_state &= ~SS_ISCONNECTING;
709 if (error == ERESTART)
717 kern_connect(struct thread *td, int fd, struct sockaddr *sa)
720 return (kern_connectat(td, AT_FDCWD, fd, sa));
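/*
 * connectat(2)/bindat(2) differ from connect(2)/bind(2) only in taking a
 * directory descriptor against which UNIX-domain socket paths are
 * resolved; AT_FDCWD gives the classic relative-to-cwd behaviour.
 */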
725 sys_connectat(td, uap)
727 struct connectat_args /* {
737 error = getsockaddr(&sa, uap->name, uap->namelen);
739 error = kern_connectat(td, uap->fd, uap->s, sa);
746 kern_socketpair(struct thread *td, int domain, int type, int protocol,
749 struct filedesc *fdp = td->td_proc->p_fd;
750 struct file *fp1, *fp2;
751 struct socket *so1, *so2;
752 int fd, error, oflag, fflag;
754 AUDIT_ARG_SOCKET(domain, type, protocol);
758 if ((type & SOCK_CLOEXEC) != 0) {
759 type &= ~SOCK_CLOEXEC;
762 if ((type & SOCK_NONBLOCK) != 0) {
763 type &= ~SOCK_NONBLOCK;
767 /* We might want to have a separate check for socket pairs. */
768 error = mac_socket_check_create(td->td_ucred, domain, type,
773 error = socreate(domain, &so1, type, protocol, td->td_ucred, td);
776 error = socreate(domain, &so2, type, protocol, td->td_ucred, td);
779 /* On success an extra reference to `fp1' and `fp2' is set by falloc. */
780 error = falloc(td, &fp1, &fd, oflag);
784 fp1->f_data = so1; /* so1 already has ref count */
785 error = falloc(td, &fp2, &fd, oflag);
788 fp2->f_data = so2; /* so2 already has ref count */
790 error = soconnect2(so1, so2);
793 if (type == SOCK_DGRAM) {
795 * Datagram socket connection is asymmetric.
797 error = soconnect2(so2, so1);
801 finit(fp1, FREAD | FWRITE | fflag, DTYPE_SOCKET, fp1->f_data,
803 finit(fp2, FREAD | FWRITE | fflag, DTYPE_SOCKET, fp2->f_data,
805 if ((fflag & FNONBLOCK) != 0) {
806 (void) fo_ioctl(fp1, FIONBIO, &fflag, td->td_ucred, td);
807 (void) fo_ioctl(fp2, FIONBIO, &fflag, td->td_ucred, td);
813 fdclose(fdp, fp2, rsv[1], td);
816 fdclose(fdp, fp1, rsv[0], td);
828 sys_socketpair(struct thread *td, struct socketpair_args *uap)
832 error = kern_socketpair(td, uap->domain, uap->type,
836 error = copyout(sv, uap->rsv, 2 * sizeof(int));
838 (void)kern_close(td, sv[0]);
839 (void)kern_close(td, sv[1]);
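/*
 * Userland sketch: create a connected pair of UNIX-domain stream sockets
 * with close-on-exec applied at creation time:
 *
 *	int sv[2];
 *	if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, sv) == -1)
 *		err(1, "socketpair");
 */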
845 sendit(td, s, mp, flags)
851 struct mbuf *control;
855 #ifdef CAPABILITY_MODE
856 if (IN_CAPABILITY_MODE(td) && (mp->msg_name != NULL))
860 if (mp->msg_name != NULL) {
861 error = getsockaddr(&to, mp->msg_name, mp->msg_namelen);
871 if (mp->msg_control) {
872 if (mp->msg_controllen < sizeof(struct cmsghdr)
873 #ifdef COMPAT_OLDSOCK
874 && mp->msg_flags != MSG_COMPAT
880 error = sockargs(&control, mp->msg_control,
881 mp->msg_controllen, MT_CONTROL);
884 #ifdef COMPAT_OLDSOCK
885 if (mp->msg_flags == MSG_COMPAT) {
888 M_PREPEND(control, sizeof(*cm), M_WAITOK);
889 cm = mtod(control, struct cmsghdr *);
890 cm->cmsg_len = control->m_len;
891 cm->cmsg_level = SOL_SOCKET;
892 cm->cmsg_type = SCM_RIGHTS;
899 error = kern_sendit(td, s, mp, flags, control, UIO_USERSPACE);
907 kern_sendit(td, s, mp, flags, control, segflg)
912 struct mbuf *control;
921 struct uio *ktruio = NULL;
927 cap_rights_init(&rights, CAP_SEND);
928 if (mp->msg_name != NULL) {
929 AUDIT_ARG_SOCKADDR(td, AT_FDCWD, mp->msg_name);
930 cap_rights_set(&rights, CAP_CONNECT);
932 error = getsock_cap(td->td_proc->p_fd, s, &rights, &fp, NULL);
935 so = (struct socket *)fp->f_data;
938 if (mp->msg_name != NULL && KTRPOINT(td, KTR_STRUCT))
939 ktrsockaddr(mp->msg_name);
942 if (mp->msg_name != NULL) {
943 error = mac_socket_check_connect(td->td_ucred, so,
948 error = mac_socket_check_send(td->td_ucred, so);
953 auio.uio_iov = mp->msg_iov;
954 auio.uio_iovcnt = mp->msg_iovlen;
955 auio.uio_segflg = segflg;
956 auio.uio_rw = UIO_WRITE;
958 auio.uio_offset = 0; /* XXX */
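/*
 * Accumulate the iovec lengths into uio_resid; a negative running sum
 * means the total would overflow ssize_t and the send fails.
 */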
961 for (i = 0; i < mp->msg_iovlen; i++, iov++) {
962 if ((auio.uio_resid += iov->iov_len) < 0) {
968 if (KTRPOINT(td, KTR_GENIO))
969 ktruio = cloneuio(&auio);
971 len = auio.uio_resid;
972 error = sosend(so, mp->msg_name, &auio, 0, control, flags, td);
974 if (auio.uio_resid != len && (error == ERESTART ||
975 error == EINTR || error == EWOULDBLOCK))
977 /* Generation of SIGPIPE can be controlled per socket */
978 if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
979 !(flags & MSG_NOSIGNAL)) {
980 PROC_LOCK(td->td_proc);
981 tdsignal(td, SIGPIPE);
982 PROC_UNLOCK(td->td_proc);
986 td->td_retval[0] = len - auio.uio_resid;
988 if (ktruio != NULL) {
989 ktruio->uio_resid = td->td_retval[0];
990 ktrgenio(s, UIO_WRITE, ktruio, error);
1001 struct sendto_args /* {
1013 msg.msg_name = uap->to;
1014 msg.msg_namelen = uap->tolen;
1015 msg.msg_iov = &aiov;
1017 msg.msg_control = 0;
1018 #ifdef COMPAT_OLDSOCK
1021 aiov.iov_base = uap->buf;
1022 aiov.iov_len = uap->len;
1023 return (sendit(td, uap->s, &msg, uap->flags));
1026 #ifdef COMPAT_OLDSOCK
1030 struct osend_args /* {
1041 msg.msg_namelen = 0;
1042 msg.msg_iov = &aiov;
1044 aiov.iov_base = uap->buf;
1045 aiov.iov_len = uap->len;
1046 msg.msg_control = 0;
1048 return (sendit(td, uap->s, &msg, uap->flags));
1054 struct osendmsg_args /* {
1064 error = copyin(uap->msg, &msg, sizeof (struct omsghdr));
1067 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1071 msg.msg_flags = MSG_COMPAT;
1072 error = sendit(td, uap->s, &msg, uap->flags);
1079 sys_sendmsg(td, uap)
1081 struct sendmsg_args /* {
1091 error = copyin(uap->msg, &msg, sizeof (msg));
1094 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1098 #ifdef COMPAT_OLDSOCK
1101 error = sendit(td, uap->s, &msg, uap->flags);
1107 kern_recvit(td, s, mp, fromseg, controlp)
1111 enum uio_seg fromseg;
1112 struct mbuf **controlp;
1116 struct mbuf *m, *control = NULL;
1120 struct sockaddr *fromsa = NULL;
1121 cap_rights_t rights;
1123 struct uio *ktruio = NULL;
1128 if (controlp != NULL)
1132 error = getsock_cap(td->td_proc->p_fd, s,
1133 cap_rights_init(&rights, CAP_RECV), &fp, NULL);
1139 error = mac_socket_check_receive(td->td_ucred, so);
1146 auio.uio_iov = mp->msg_iov;
1147 auio.uio_iovcnt = mp->msg_iovlen;
1148 auio.uio_segflg = UIO_USERSPACE;
1149 auio.uio_rw = UIO_READ;
1151 auio.uio_offset = 0; /* XXX */
1154 for (i = 0; i < mp->msg_iovlen; i++, iov++) {
1155 if ((auio.uio_resid += iov->iov_len) < 0) {
1161 if (KTRPOINT(td, KTR_GENIO))
1162 ktruio = cloneuio(&auio);
1164 len = auio.uio_resid;
1165 error = soreceive(so, &fromsa, &auio, NULL,
1166 (mp->msg_control || controlp) ? &control : NULL,
1169 if (auio.uio_resid != len && (error == ERESTART ||
1170 error == EINTR || error == EWOULDBLOCK))
1174 AUDIT_ARG_SOCKADDR(td, AT_FDCWD, fromsa);
1176 if (ktruio != NULL) {
1177 ktruio->uio_resid = len - auio.uio_resid;
1178 ktrgenio(s, UIO_READ, ktruio, error);
1183 td->td_retval[0] = len - auio.uio_resid;
1185 len = mp->msg_namelen;
1186 if (len <= 0 || fromsa == NULL)
1189 /* save sa_len before it is destroyed by MSG_COMPAT */
1190 len = MIN(len, fromsa->sa_len);
1191 #ifdef COMPAT_OLDSOCK
1192 if (mp->msg_flags & MSG_COMPAT)
1193 ((struct osockaddr *)fromsa)->sa_family =
1196 if (fromseg == UIO_USERSPACE) {
1197 error = copyout(fromsa, mp->msg_name,
1202 bcopy(fromsa, mp->msg_name, len);
1204 mp->msg_namelen = len;
1206 if (mp->msg_control && controlp == NULL) {
1207 #ifdef COMPAT_OLDSOCK
1209 * We assume that old recvmsg calls won't receive access
1210 * rights and other control info, esp. as control info
1211 * is always optional and those options didn't exist in 4.3.
1212 * If we receive rights, trim the cmsghdr; anything else
1215 if (control && mp->msg_flags & MSG_COMPAT) {
1216 if (mtod(control, struct cmsghdr *)->cmsg_level !=
1218 mtod(control, struct cmsghdr *)->cmsg_type !=
1220 mp->msg_controllen = 0;
1223 control->m_len -= sizeof (struct cmsghdr);
1224 control->m_data += sizeof (struct cmsghdr);
1227 len = mp->msg_controllen;
1229 mp->msg_controllen = 0;
1230 ctlbuf = mp->msg_control;
1232 while (m && len > 0) {
1233 unsigned int tocopy;
1235 if (len >= m->m_len)
1238 mp->msg_flags |= MSG_CTRUNC;
1242 if ((error = copyout(mtod(m, caddr_t),
1243 ctlbuf, tocopy)) != 0)
1250 mp->msg_controllen = ctlbuf - (caddr_t)mp->msg_control;
1255 if (fromsa && KTRPOINT(td, KTR_STRUCT))
1256 ktrsockaddr(fromsa);
1258 free(fromsa, M_SONAME);
1260 if (error == 0 && controlp != NULL)
1261 *controlp = control;
1269 recvit(td, s, mp, namelenp)
1277 error = kern_recvit(td, s, mp, UIO_USERSPACE, NULL);
1280 if (namelenp != NULL) {
1281 error = copyout(&mp->msg_namelen, namelenp, sizeof (socklen_t));
1282 #ifdef COMPAT_OLDSOCK
1283 if (mp->msg_flags & MSG_COMPAT)
1284 error = 0; /* old recvfrom didn't check */
1291 sys_recvfrom(td, uap)
1293 struct recvfrom_args /* {
1298 struct sockaddr * __restrict from;
1299 socklen_t * __restrict fromlenaddr;
1306 if (uap->fromlenaddr) {
1307 error = copyin(uap->fromlenaddr,
1308 &msg.msg_namelen, sizeof (msg.msg_namelen));
1312 msg.msg_namelen = 0;
1314 msg.msg_name = uap->from;
1315 msg.msg_iov = &aiov;
1317 aiov.iov_base = uap->buf;
1318 aiov.iov_len = uap->len;
1319 msg.msg_control = 0;
1320 msg.msg_flags = uap->flags;
1321 error = recvit(td, uap->s, &msg, uap->fromlenaddr);
1326 #ifdef COMPAT_OLDSOCK
1330 struct recvfrom_args *uap;
1333 uap->flags |= MSG_COMPAT;
1334 return (sys_recvfrom(td, uap));
1338 #ifdef COMPAT_OLDSOCK
1342 struct orecv_args /* {
1353 msg.msg_namelen = 0;
1354 msg.msg_iov = &aiov;
1356 aiov.iov_base = uap->buf;
1357 aiov.iov_len = uap->len;
1358 msg.msg_control = 0;
1359 msg.msg_flags = uap->flags;
1360 return (recvit(td, uap->s, &msg, NULL));
1364 * Old recvmsg. This code takes advantage of the fact that the old msghdr
1365 * overlays the new one, missing only the flags, and with the (old) access
1366 * rights where the control fields are now.
1371 struct orecvmsg_args /* {
1373 struct omsghdr *msg;
1381 error = copyin(uap->msg, &msg, sizeof (struct omsghdr));
1384 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1387 msg.msg_flags = uap->flags | MSG_COMPAT;
1389 error = recvit(td, uap->s, &msg, &uap->msg->msg_namelen);
1390 if (msg.msg_controllen && error == 0)
1391 error = copyout(&msg.msg_controllen,
1392 &uap->msg->msg_accrightslen, sizeof (int));
1399 sys_recvmsg(td, uap)
1401 struct recvmsg_args /* {
1408 struct iovec *uiov, *iov;
1411 error = copyin(uap->msg, &msg, sizeof (msg));
1414 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1417 msg.msg_flags = uap->flags;
1418 #ifdef COMPAT_OLDSOCK
1419 msg.msg_flags &= ~MSG_COMPAT;
1423 error = recvit(td, uap->s, &msg, NULL);
1426 error = copyout(&msg, uap->msg, sizeof(msg));
1434 sys_shutdown(td, uap)
1436 struct shutdown_args /* {
1443 cap_rights_t rights;
1446 AUDIT_ARG_FD(uap->s);
1447 error = getsock_cap(td->td_proc->p_fd, uap->s,
1448 cap_rights_init(&rights, CAP_SHUTDOWN), &fp, NULL);
1451 error = soshutdown(so, uap->how);
1459 sys_setsockopt(td, uap)
1461 struct setsockopt_args /* {
1470 return (kern_setsockopt(td, uap->s, uap->level, uap->name,
1471 uap->val, UIO_USERSPACE, uap->valsize));
1475 kern_setsockopt(td, s, level, name, val, valseg, valsize)
1481 enum uio_seg valseg;
1486 struct sockopt sopt;
1487 cap_rights_t rights;
1490 if (val == NULL && valsize != 0)
1492 if ((int)valsize < 0)
1495 sopt.sopt_dir = SOPT_SET;
1496 sopt.sopt_level = level;
1497 sopt.sopt_name = name;
1498 sopt.sopt_val = val;
1499 sopt.sopt_valsize = valsize;
1505 sopt.sopt_td = NULL;
1508 panic("kern_setsockopt called with bad valseg");
1512 error = getsock_cap(td->td_proc->p_fd, s,
1513 cap_rights_init(&rights, CAP_SETSOCKOPT), &fp, NULL);
1516 error = sosetopt(so, &sopt);
1524 sys_getsockopt(td, uap)
1526 struct getsockopt_args /* {
1530 void * __restrict val;
1531 socklen_t * __restrict avalsize;
1538 error = copyin(uap->avalsize, &valsize, sizeof (valsize));
1543 error = kern_getsockopt(td, uap->s, uap->level, uap->name,
1544 uap->val, UIO_USERSPACE, &valsize);
1547 error = copyout(&valsize, uap->avalsize, sizeof (valsize));
1552 * Kernel version of getsockopt.
1553 * optval can be a userland or kernel pointer. optlen is always a kernel pointer.
1556 kern_getsockopt(td, s, level, name, val, valseg, valsize)
1562 enum uio_seg valseg;
1567 struct sockopt sopt;
1568 cap_rights_t rights;
1573 if ((int)*valsize < 0)
1576 sopt.sopt_dir = SOPT_GET;
1577 sopt.sopt_level = level;
1578 sopt.sopt_name = name;
1579 sopt.sopt_val = val;
1580 sopt.sopt_valsize = (size_t)*valsize; /* checked non-negative above */
1586 sopt.sopt_td = NULL;
1589 panic("kern_getsockopt called with bad valseg");
1593 error = getsock_cap(td->td_proc->p_fd, s,
1594 cap_rights_init(&rights, CAP_GETSOCKOPT), &fp, NULL);
1597 error = sogetopt(so, &sopt);
1598 *valsize = sopt.sopt_valsize;
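/*
 * In-kernel consumers can call kern_setsockopt()/kern_getsockopt()
 * directly with UIO_SYSSPACE and a kernel buffer, e.g. (sketch, assuming
 * 's' is a socket descriptor in td's file table):
 *
 *	int val = 1;
 *	error = kern_setsockopt(td, s, SOL_SOCKET, SO_KEEPALIVE,
 *	    &val, UIO_SYSSPACE, sizeof(val));
 */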
1605 * getsockname1() - Get socket name.
1609 getsockname1(td, uap, compat)
1611 struct getsockname_args /* {
1613 struct sockaddr * __restrict asa;
1614 socklen_t * __restrict alen;
1618 struct sockaddr *sa;
1622 error = copyin(uap->alen, &len, sizeof(len));
1626 error = kern_getsockname(td, uap->fdes, &sa, &len);
1631 #ifdef COMPAT_OLDSOCK
1633 ((struct osockaddr *)sa)->sa_family = sa->sa_family;
1635 error = copyout(sa, uap->asa, (u_int)len);
1639 error = copyout(&len, uap->alen, sizeof(len));
1644 kern_getsockname(struct thread *td, int fd, struct sockaddr **sa,
1649 cap_rights_t rights;
1654 error = getsock_cap(td->td_proc->p_fd, fd,
1655 cap_rights_init(&rights, CAP_GETSOCKNAME), &fp, NULL);
1660 CURVNET_SET(so->so_vnet);
1661 error = (*so->so_proto->pr_usrreqs->pru_sockaddr)(so, sa);
1668 len = MIN(*alen, (*sa)->sa_len);
1671 if (KTRPOINT(td, KTR_STRUCT))
1676 if (error != 0 && *sa != NULL) {
1677 free(*sa, M_SONAME);
1684 sys_getsockname(td, uap)
1686 struct getsockname_args *uap;
1689 return (getsockname1(td, uap, 0));
1692 #ifdef COMPAT_OLDSOCK
1694 ogetsockname(td, uap)
1696 struct getsockname_args *uap;
1699 return (getsockname1(td, uap, 1));
1701 #endif /* COMPAT_OLDSOCK */
1704 * getpeername1() - Get name of peer for connected socket.
1708 getpeername1(td, uap, compat)
1710 struct getpeername_args /* {
1712 struct sockaddr * __restrict asa;
1713 socklen_t * __restrict alen;
1717 struct sockaddr *sa;
1721 error = copyin(uap->alen, &len, sizeof (len));
1725 error = kern_getpeername(td, uap->fdes, &sa, &len);
1730 #ifdef COMPAT_OLDSOCK
1732 ((struct osockaddr *)sa)->sa_family = sa->sa_family;
1734 error = copyout(sa, uap->asa, (u_int)len);
1738 error = copyout(&len, uap->alen, sizeof(len));
1743 kern_getpeername(struct thread *td, int fd, struct sockaddr **sa,
1748 cap_rights_t rights;
1753 error = getsock_cap(td->td_proc->p_fd, fd,
1754 cap_rights_init(&rights, CAP_GETPEERNAME), &fp, NULL);
1758 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
1763 CURVNET_SET(so->so_vnet);
1764 error = (*so->so_proto->pr_usrreqs->pru_peeraddr)(so, sa);
1771 len = MIN(*alen, (*sa)->sa_len);
1774 if (KTRPOINT(td, KTR_STRUCT))
1778 if (error != 0 && *sa != NULL) {
1779 free(*sa, M_SONAME);
1788 sys_getpeername(td, uap)
1790 struct getpeername_args *uap;
1793 return (getpeername1(td, uap, 0));
1796 #ifdef COMPAT_OLDSOCK
1798 ogetpeername(td, uap)
1800 struct ogetpeername_args *uap;
1803 /* XXX uap should have type `getpeername_args *' to begin with. */
1804 return (getpeername1(td, (struct getpeername_args *)uap, 1));
1806 #endif /* COMPAT_OLDSOCK */
1809 sockargs(mp, buf, buflen, type)
1814 struct sockaddr *sa;
1818 if (buflen > MLEN) {
1819 #ifdef COMPAT_OLDSOCK
1820 if (type == MT_SONAME && buflen <= 112)
1821 buflen = MLEN; /* unix domain compat. hack */
1824 if (buflen > MCLBYTES)
1827 m = m_get2(buflen, M_WAITOK, type, 0);
1829 error = copyin(buf, mtod(m, caddr_t), (u_int)buflen);
1834 if (type == MT_SONAME) {
1835 sa = mtod(m, struct sockaddr *);
1837 #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
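/*
 * Old 4.3BSD sockaddrs carried a 16-bit sa_family where sa_len now
 * lives; on little-endian machines the family byte therefore shows up
 * in sa_len, so recover it when sa_family reads as zero.
 */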
1838 if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1839 sa->sa_family = sa->sa_len;
1841 sa->sa_len = buflen;
1848 getsockaddr(namp, uaddr, len)
1849 struct sockaddr **namp;
1853 struct sockaddr *sa;
1856 if (len > SOCK_MAXADDRLEN)
1857 return (ENAMETOOLONG);
1858 if (len < offsetof(struct sockaddr, sa_data[0]))
1860 sa = malloc(len, M_SONAME, M_WAITOK);
1861 error = copyin(uaddr, sa, len);
1865 #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
1866 if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1867 sa->sa_family = sa->sa_len;
1876 filt_sfsync_attach(struct knote *kn)
1878 struct sendfile_sync *sfs = (struct sendfile_sync *) kn->kn_sdata;
1879 struct knlist *knl = &sfs->klist;
1881 SFSYNC_DPRINTF("%s: kn=%p, sfs=%p\n", __func__, kn, sfs);
1884 * Validate that we actually received this via the kernel API.
1886 if ((kn->kn_flags & EV_FLAG1) == 0)
1889 kn->kn_ptr.p_v = sfs;
1890 kn->kn_flags &= ~EV_FLAG1;
1892 knl->kl_lock(knl->kl_lockarg);
1894 * If we're in the "freeing" state,
1895 * don't allow the add. That way we don't
1896 * end up racing with some other thread that
1897 * is trying to finish some setup.
1899 if (sfs->state == SF_STATE_FREEING) {
1900 knl->kl_unlock(knl->kl_lockarg);
1903 knlist_add(&sfs->klist, kn, 1);
1904 knl->kl_unlock(knl->kl_lockarg);
1910 * Called when a knote is being detached.
1913 filt_sfsync_detach(struct knote *kn)
1916 struct sendfile_sync *sfs;
1919 sfs = kn->kn_ptr.p_v;
1922 SFSYNC_DPRINTF("%s: kn=%p, sfs=%p\n", __func__, kn, sfs);
1924 knl->kl_lock(knl->kl_lockarg);
1925 if (!knlist_empty(knl))
1926 knlist_remove(knl, kn, 1);
1929 * If the list is empty _AND_ the refcount is 0
1930 * _AND_ we've finished the setup phase and now
1931 * we're in the running phase, we can free the
1932 * underlying sendfile_sync.
1934 * But we shouldn't do it before finishing the
1935 * underlying divorce from the knote.
1937 * So, we have the sfsync lock held; transition
1938 * it to "freeing", then unlock, then free
1941 if (knlist_empty(knl)) {
1942 if (sfs->state == SF_STATE_COMPLETED && sfs->count == 0) {
1943 SFSYNC_DPRINTF("%s: (%llu) sfs=%p; completed, "
1944 "count==0, empty list: time to free!\n",
1946 (unsigned long long) curthread->td_tid,
1948 sf_sync_set_state(sfs, SF_STATE_FREEING, 1);
1952 knl->kl_unlock(knl->kl_lockarg);
1955 * Only call free if we're the one who has transitioned things
1956 * to free. Otherwise we could race with another thread that
1957 * is currently tearing things down.
1960 SFSYNC_DPRINTF("%s: (%llu) sfs=%p, %s:%d\n",
1962 (unsigned long long) curthread->td_tid,
1971 filt_sfsync(struct knote *kn, long hint)
1973 struct sendfile_sync *sfs = (struct sendfile_sync *) kn->kn_ptr.p_v;
1976 SFSYNC_DPRINTF("%s: kn=%p, sfs=%p\n", __func__, kn, sfs);
1979 * XXX add a lock assertion here!
1981 ret = (sfs->count == 0 && sfs->state == SF_STATE_COMPLETED);
1987 * Add more references to a vm_page + sf_buf + sendfile_sync.
1990 sf_ext_ref(void *arg1, void *arg2)
1992 struct sf_buf *sf = arg1;
1993 struct sendfile_sync *sfs = arg2;
1994 vm_page_t pg = sf_buf_page(sf);
1996 /* XXXGL: there should be sf_buf_ref() */
1997 sf_buf_alloc(sf_buf_page(sf), SFB_NOWAIT);
2004 mtx_lock(&sfs->mtx);
2005 KASSERT(sfs->count > 0, ("Sendfile sync botchup count == 0"));
2007 mtx_unlock(&sfs->mtx);
2012 * Detach mapped page and release resources back to the system.
2015 sf_ext_free(void *arg1, void *arg2)
2017 struct sf_buf *sf = arg1;
2018 struct sendfile_sync *sfs = arg2;
2019 vm_page_t pg = sf_buf_page(sf);
2024 vm_page_unwire(pg, PQ_INACTIVE);
2026 * Check for the object going away on us. This can
2027 * happen since we don't hold a reference to it.
2028 * If so, we're responsible for freeing the page.
2030 if (pg->wire_count == 0 && pg->object == NULL)
2039 * Called to remove a reference to a sf_sync object.
2041 * This is generally done during the mbuf free path to signify
2042 * that one of the mbufs in the transaction has been completed.
2044 * If we're doing SF_SYNC and the refcount is zero then we'll wake
2047 * If we're doing SF_KQUEUE and the refcount is zero then we'll
2048 * fire off the knote.
2051 sf_sync_deref(struct sendfile_sync *sfs)
2058 mtx_lock(&sfs->mtx);
2059 KASSERT(sfs->count > 0, ("Sendfile sync botchup count == 0"));
2063 * Only fire off the wakeup / kqueue notification if
2064 * the transaction has completed.
2066 if (sfs->count == 0 && sfs->state == SF_STATE_COMPLETED) {
2067 if (sfs->flags & SF_SYNC)
2068 cv_signal(&sfs->cv);
2070 if (sfs->flags & SF_KQUEUE) {
2071 SFSYNC_DPRINTF("%s: (%llu) sfs=%p: knote!\n",
2073 (unsigned long long) curthread->td_tid,
2075 KNOTE_LOCKED(&sfs->klist, 1);
2079 * If we're not waiting around for a sync,
2080 * check if the knote list is empty.
2081 * If it is, we transition to free.
2083 * XXX I think it's about time I added some state
2084 * or flag that says whether we're supposed to be
2085 * waiting around until we've done a signal.
2087 * XXX Ie, the reason that I don't free it here
2088 * is because the caller will free the last reference,
2089 * not us. That should be codified in some flag
2090 * that indicates "self-free" rather than checking
2091 * for SF_SYNC all the time.
2093 if ((sfs->flags & SF_SYNC) == 0 && knlist_empty(&sfs->klist)) {
2094 SFSYNC_DPRINTF("%s: (%llu) sfs=%p; completed, "
2095 "count==0, empty list: time to free!\n",
2097 (unsigned long long) curthread->td_tid,
2099 sf_sync_set_state(sfs, SF_STATE_FREEING, 1);
2104 mtx_unlock(&sfs->mtx);
2107 * Attempt to do a free here.
2109 * We do this outside of the lock because it may destroy the
2110 * lock in question as it frees things. We can optimise this
2113 * XXX yes, we should make it a requirement to hold the
2114 * lock across sf_sync_free().
2117 SFSYNC_DPRINTF("%s: (%llu) sfs=%p\n",
2119 (unsigned long long) curthread->td_tid,
2126 * Allocate a sendfile_sync state structure.
2128 * For now this only knows about the "sleep" sync, but later it will
2129 * grow various other personalities.
2131 struct sendfile_sync *
2132 sf_sync_alloc(uint32_t flags)
2134 struct sendfile_sync *sfs;
2136 sfs = uma_zalloc(zone_sfsync, M_WAITOK | M_ZERO);
2137 mtx_init(&sfs->mtx, "sendfile", NULL, MTX_DEF);
2138 cv_init(&sfs->cv, "sendfile");
2140 sfs->state = SF_STATE_SETUP;
2141 knlist_init_mtx(&sfs->klist, &sfs->mtx);
2143 SFSYNC_DPRINTF("%s: sfs=%p, flags=0x%08x\n", __func__, sfs, sfs->flags);
2149 * Take a reference to a sfsync instance.
2151 * This has to map 1:1 to free calls coming in via sf_ext_free(),
2152 * so typically this will be referenced once for each mbuf allocated.
2155 sf_sync_ref(struct sendfile_sync *sfs)
2161 mtx_lock(&sfs->mtx);
2163 mtx_unlock(&sfs->mtx);
2167 sf_sync_syscall_wait(struct sendfile_sync *sfs)
2173 KASSERT(mtx_owned(&sfs->mtx), ("%s: sfs=%p: not locked but should be!",
2178 * If we're not requested to wait during the syscall,
2179 * don't bother waiting.
2181 if ((sfs->flags & SF_SYNC) == 0)
2185 * This is a bit suboptimal and confusing, so bear with me.
2187 * Ideally sf_sync_syscall_wait() will wait until
2188 * all pending mbuf transmit operations are done.
2189 * This means that when sendfile becomes async, it'll
2190 * run in the background and will transition from
2191 * RUNNING to COMPLETED when it's finished acquiring
2192 * new things to send. Then, when the mbufs finish
2193 * sending, COMPLETED + sfs->count == 0 is enough to
2194 * know that no further work is being done.
2196 * So, we will sleep on both RUNNING and COMPLETED.
2197 * It's up to the (in progress) async sendfile loop
2198 * to transition the sf_sync from RUNNING to
2199 * COMPLETED so the wakeup above will actually
2200 * do the cv_signal() call.
2202 if (sfs->state != SF_STATE_COMPLETED && sfs->state != SF_STATE_RUNNING)
2205 if (sfs->count != 0)
2206 cv_wait(&sfs->cv, &sfs->mtx);
2207 KASSERT(sfs->count == 0, ("sendfile sync still busy"));
2214 * Free an sf_sync if it's appropriate to do so.
2217 sf_sync_free(struct sendfile_sync *sfs)
2223 SFSYNC_DPRINTF("%s: (%lld) sfs=%p; called; state=%d, flags=0x%08x "
2226 (long long) curthread->td_tid,
2232 mtx_lock(&sfs->mtx);
2235 * We keep the sf_sync around if the state is active,
2236 * we are doing kqueue notification and we have active
2239 * If the caller wants to free us right this second it
2240 * should transition this to the freeing state.
2242 * So, complain loudly if they break this rule.
2244 if (sfs->state != SF_STATE_FREEING) {
2245 printf("%s: (%llu) sfs=%p; not freeing; let's wait!\n",
2247 (unsigned long long) curthread->td_tid,
2249 mtx_unlock(&sfs->mtx);
2253 KASSERT(sfs->count == 0, ("sendfile sync still busy"));
2254 cv_destroy(&sfs->cv);
2256 * This doesn't call knlist_detach() on each knote; it just frees
2259 knlist_delete(&sfs->klist, curthread, 1);
2260 mtx_destroy(&sfs->mtx);
2261 SFSYNC_DPRINTF("%s: (%llu) sfs=%p; freeing\n",
2263 (unsigned long long) curthread->td_tid,
2265 uma_zfree(zone_sfsync, sfs);
2269 * Set up an sf_sync to post a kqueue notification when things are complete.
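 *
 * The caller supplies an extended sf_hdtr_kq (copied in by do_sendfile()
 * when SF_KQUEUE is set); we register an EVFILT_SENDFILE knote on the
 * given kqueue fd.  EV_FLAG1 marks the registration as coming from the
 * kernel, which filt_sfsync_attach() verifies.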
2272 sf_sync_kqueue_setup(struct sendfile_sync *sfs, struct sf_hdtr_kq *sfkq)
2277 sfs->flags |= SF_KQUEUE;
2279 /* Check the flags are valid */
2280 if ((sfkq->kq_flags & ~(EV_CLEAR | EV_DISPATCH | EV_ONESHOT)) != 0)
2283 SFSYNC_DPRINTF("%s: sfs=%p: kqfd=%d, flags=0x%08x, ident=%p, udata=%p\n",
2288 (void *) sfkq->kq_ident,
2289 (void *) sfkq->kq_udata);
2291 /* Set up and register a knote on the given kqfd. */
2292 kev.ident = (uintptr_t) sfkq->kq_ident;
2293 kev.filter = EVFILT_SENDFILE;
2294 kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1 | sfkq->kq_flags;
2295 kev.data = (intptr_t) sfs;
2296 kev.udata = sfkq->kq_udata;
2298 error = kqfd_register(sfkq->kq_fd, &kev, curthread, 1);
2300 SFSYNC_DPRINTF("%s: returned %d\n", __func__, error);
2306 sf_sync_set_state(struct sendfile_sync *sfs, sendfile_sync_state_t state,
2309 sendfile_sync_state_t old_state;
2312 mtx_lock(&sfs->mtx);
2315 * Update our current state.
2317 old_state = sfs->state;
2319 SFSYNC_DPRINTF("%s: (%llu) sfs=%p; going from %d to %d\n",
2321 (unsigned long long) curthread->td_tid,
2327 * If we're transitioning from RUNNING to COMPLETED and the count is
2328 * zero, then post the knote. The caller may have completed the
2329 * send before we updated the state to COMPLETED and we need to make
2330 * sure this is communicated.
2332 if (old_state == SF_STATE_RUNNING
2333 && state == SF_STATE_COMPLETED
2335 && sfs->flags & SF_KQUEUE) {
2336 SFSYNC_DPRINTF("%s: (%llu) sfs=%p: triggering knote!\n",
2338 (unsigned long long) curthread->td_tid,
2340 KNOTE_LOCKED(&sfs->klist, 1);
2344 mtx_unlock(&sfs->mtx);
2348 * Set the retval/errno for the given transaction.
2350 * This will eventually/ideally be used when the KNOTE is fired off
2351 * to signify the completion of this transaction.
2353 * The sfsync lock should be held before entering this function.
2356 sf_sync_set_retval(struct sendfile_sync *sfs, off_t retval, int xerrno)
2359 KASSERT(mtx_owned(&sfs->mtx), ("%s: sfs=%p: not locked but should be!",
2363 SFSYNC_DPRINTF("%s: (%llu) sfs=%p: errno=%d, retval=%jd\n",
2365 (unsigned long long) curthread->td_tid,
2370 sfs->retval = retval;
2371 sfs->xerrno = xerrno;
2377 * int sendfile(int fd, int s, off_t offset, size_t nbytes,
2378 * struct sf_hdtr *hdtr, off_t *sbytes, int flags)
2380 * Send a file specified by 'fd' and starting at 'offset' to a socket
2381 * specified by 's'. Send only 'nbytes' of the file or until EOF if nbytes ==
2382 * 0. Optionally add a header and/or trailer to the socket output. If
2383 * specified, write the total number of bytes sent into *sbytes.
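 *
 * Userland sketch (assuming 'fd' is an open regular file and 's' a
 * connected stream socket):
 *
 *	off_t sent;
 *	if (sendfile(fd, s, 0, 0, NULL, &sent, 0) == -1)
 *		err(1, "sendfile");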
2386 sys_sendfile(struct thread *td, struct sendfile_args *uap)
2389 return (do_sendfile(td, uap, 0));
2393 _do_sendfile(struct thread *td, int src_fd, int sock_fd, int flags,
2394 int compat, off_t offset, size_t nbytes, off_t *sbytes,
2395 struct uio *hdr_uio,
2396 struct uio *trl_uio, struct sf_hdtr_kq *hdtr_kq)
2398 cap_rights_t rights;
2399 struct sendfile_sync *sfs = NULL;
2405 AUDIT_ARG_FD(src_fd);
2407 if (hdtr_kq != NULL)
2411 * sendfile(2) can start at any offset within a file so we require
2412 * CAP_READ+CAP_SEEK = CAP_PREAD.
2414 if ((error = fget_read(td, src_fd,
2415 cap_rights_init(&rights, CAP_PREAD), &fp)) != 0) {
2420 * If SF_KQUEUE is set but we haven't copied in anything for
2421 * kqueue data, error out.
2423 if (flags & SF_KQUEUE && do_kqueue == 0) {
2424 SFSYNC_DPRINTF("%s: SF_KQUEUE but no KQUEUE data!\n", __func__);
2429 * If we need to wait for completion, initialise the sfsync
2432 if (flags & (SF_SYNC | SF_KQUEUE))
2433 sfs = sf_sync_alloc(flags & (SF_SYNC | SF_KQUEUE));
2435 if (flags & SF_KQUEUE) {
2436 error = sf_sync_kqueue_setup(sfs, hdtr_kq);
2438 SFSYNC_DPRINTF("%s: (%llu) error; sfs=%p\n",
2440 (unsigned long long) curthread->td_tid,
2442 sf_sync_set_state(sfs, SF_STATE_FREEING, 0);
2449 * Do the sendfile call.
2451 * If this fails, it'll free the mbuf chain which will free up the
2452 * sendfile_sync references.
2454 error = fo_sendfile(fp, sock_fd, hdr_uio, trl_uio, offset,
2455 nbytes, sbytes, flags, compat ? SFK_COMPAT : 0, sfs, td);
2458 * If the sendfile call succeeded, transition the sf_sync state
2459 * to RUNNING, then COMPLETED.
2461 * If the sendfile call failed, then the sendfile call may have
2462 * actually sent some data first - so we check to see whether
2463 * any data was sent. If some data was queued (ie, count > 0)
2464 * then we can't call free; we have to wait until the partial
2465 * transaction completes before we continue along.
2467 * This has the side effect of firing off the knote
2468 * if the refcount has hit zero by the time we get here.
2471 mtx_lock(&sfs->mtx);
2472 if (error == 0 || sfs->count > 0) {
2474 * When it's time to do async sendfile, the transition
2475 * to RUNNING signifies that we're actually actively
2476 * adding and completing mbufs. When the last disk
2477 * buffer is read (ie, when we're not doing any
2478 * further read IO and all subsequent stuff is mbuf
2479 * transmissions) we'll transition to COMPLETED
2480 * and when the final mbuf is freed, the completion
2483 sf_sync_set_state(sfs, SF_STATE_RUNNING, 1);
2486 * Set the retval before we signal completed.
2487 * If we do it the other way around then transitioning to
2488 * COMPLETED may post the knote before you set the return
2491 * XXX for now, errno is always 0, as we don't post
2492 * knotes if sendfile failed. Maybe that'll change later.
2494 sf_sync_set_retval(sfs, *sbytes, error);
2497 * And now transition to completed, which will kick off
2498 * the knote if required.
2500 sf_sync_set_state(sfs, SF_STATE_COMPLETED, 1);
2503 * The error is non-zero and sfs->count is zero, so nothing
2504 * else will come along later to wake things up.
2507 sf_sync_set_state(sfs, SF_STATE_FREEING, 1);
2512 * Next - wait if appropriate.
2514 sf_sync_syscall_wait(sfs);
2517 * If we're not doing kqueue notifications, we can
2518 * transition this immediately to the freeing state.
2520 if ((sfs->flags & SF_KQUEUE) == 0) {
2521 sf_sync_set_state(sfs, SF_STATE_FREEING, 1);
2525 mtx_unlock(&sfs->mtx);
2529 * If do_free is set, free here.
2531 * If we're doing sleep notification only (no kqueue), we also
2532 * free here; it's the only chance we have.
2534 if (sfs != NULL && do_free == 1) {
2539 * XXX Should we wait until the send has completed before freeing the source
2540 * file handle? It's the previous behaviour, sure, but is it required?
2541 * We've wired down the page references after all.
2552 do_sendfile(struct thread *td, struct sendfile_args *uap, int compat)
2554 struct sf_hdtr hdtr;
2555 struct sf_hdtr_kq hdtr_kq;
2556 struct uio *hdr_uio, *trl_uio;
2562 * File offset must not be negative. If it goes beyond EOF
2563 * we send only the header/trailer and no payload data.
2565 if (uap->offset < 0)
2568 hdr_uio = trl_uio = NULL;
2570 if (uap->hdtr != NULL) {
2571 error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
2574 if (hdtr.headers != NULL) {
2575 error = copyinuio(hdtr.headers, hdtr.hdr_cnt, &hdr_uio);
2579 if (hdtr.trailers != NULL) {
2580 error = copyinuio(hdtr.trailers, hdtr.trl_cnt, &trl_uio);
2586 * If SF_KQUEUE is set, then we need to also copy in
2587 * the kqueue data after the normal hdtr set and set
2590 if (uap->flags & SF_KQUEUE) {
2591 error = copyin(((char *) uap->hdtr) + sizeof(hdtr),
2601 error = _do_sendfile(td, uap->fd, uap->s, uap->flags, compat,
2602 uap->offset, uap->nbytes, &sbytes, hdr_uio, trl_uio, &hdtr_kq);
2604 if (uap->sbytes != NULL) {
2605 copyout(&sbytes, uap->sbytes, sizeof(off_t));
2608 free(hdr_uio, M_IOV);
2609 free(trl_uio, M_IOV);
2613 #ifdef COMPAT_FREEBSD4
2615 freebsd4_sendfile(struct thread *td, struct freebsd4_sendfile_args *uap)
2617 struct sendfile_args args;
2621 args.offset = uap->offset;
2622 args.nbytes = uap->nbytes;
2623 args.hdtr = uap->hdtr;
2624 args.sbytes = uap->sbytes;
2625 args.flags = uap->flags;
2627 return (do_sendfile(td, &args, 1));
2629 #endif /* COMPAT_FREEBSD4 */
2632 sendfile_readpage(vm_object_t obj, struct vnode *vp, int nd,
2633 off_t off, int xfsize, int bsize, struct thread *td, vm_page_t *res)
2638 int error, readahead, rv;
2640 pindex = OFF_TO_IDX(off);
2641 VM_OBJECT_WLOCK(obj);
2642 m = vm_page_grab(obj, pindex, (vp != NULL ? VM_ALLOC_NOBUSY |
2643 VM_ALLOC_IGN_SBUSY : 0) | VM_ALLOC_WIRED | VM_ALLOC_NORMAL);
2646 * Check if page is valid for what we need, otherwise initiate I/O.
2648 * A non-zero nd argument prevents disk I/O; instead we return
2649 * nd itself to the caller. In particular, if we have already
2650 * turned some pages into mbufs, nd == EAGAIN and the main
2651 * function sends those pages before we come here again and
2652 * block.
2654 if (m->valid != 0 && vm_page_is_valid(m, off & PAGE_MASK, xfsize)) {
2657 VM_OBJECT_WUNLOCK(obj);
2660 } else if (nd != 0) {
2668 * Get the page from backing store.
2672 VM_OBJECT_WUNLOCK(obj);
2673 readahead = sfreadahead * MAXBSIZE;
2676 * Use vn_rdwr() instead of the pager interface for
2677 * the vnode, to allow the read-ahead.
2679 * XXXMAC: Because we don't have fp->f_cred here, we
2680 * pass in NOCRED. This is probably wrong, but is
2681 * consistent with our original implementation.
2683 error = vn_rdwr(UIO_READ, vp, NULL, readahead, trunc_page(off),
2684 UIO_NOCOPY, IO_NODELOCKED | IO_VMIO | ((readahead /
2685 bsize) << IO_SEQSHIFT), td->td_ucred, NOCRED, &resid, td);
2686 SFSTAT_INC(sf_iocnt);
2687 VM_OBJECT_WLOCK(obj);
2689 if (vm_pager_has_page(obj, pindex, NULL, NULL)) {
2690 rv = vm_pager_get_pages(obj, &m, 1, 0);
2691 SFSTAT_INC(sf_iocnt);
2692 m = vm_page_lookup(obj, pindex);
2695 else if (rv != VM_PAGER_OK) {
2704 m->valid = VM_PAGE_BITS_ALL;
2712 } else if (m != NULL) {
2715 vm_page_unwire(m, PQ_INACTIVE);
2718 * See if anyone else might know about this page. If
2719 * not and it is not valid, then free it.
2721 if (m->wire_count == 0 && m->valid == 0 && !vm_page_busied(m))
2725 KASSERT(error != 0 || (m->wire_count > 0 &&
2726 vm_page_is_valid(m, off & PAGE_MASK, xfsize)),
2727 ("wrong page state m %p off %#jx xfsize %d", m, (uintmax_t)off,
2729 VM_OBJECT_WUNLOCK(obj);
2734 sendfile_getobj(struct thread *td, struct file *fp, vm_object_t *obj_res,
2735 struct vnode **vp_res, struct shmfd **shmfd_res, off_t *obj_size,
2741 struct shmfd *shmfd;
2744 vp = *vp_res = NULL;
2746 shmfd = *shmfd_res = NULL;
2750 * The file descriptor must be a regular file and have a
2751 * backing VM object.
2753 if (fp->f_type == DTYPE_VNODE) {
2755 vn_lock(vp, LK_SHARED | LK_RETRY);
2756 if (vp->v_type != VREG) {
2760 *bsize = vp->v_mount->mnt_stat.f_iosize;
2761 error = VOP_GETATTR(vp, &va, td->td_ucred);
2764 *obj_size = va.va_size;
2770 } else if (fp->f_type == DTYPE_SHM) {
2772 obj = shmfd->shm_object;
2773 *obj_size = shmfd->shm_size;
2779 VM_OBJECT_WLOCK(obj);
2780 if ((obj->flags & OBJ_DEAD) != 0) {
2781 VM_OBJECT_WUNLOCK(obj);
2787 * Temporarily increase the backing VM object's reference
2788 * count so that a forced reclamation of its vnode does not
2789 * immediately destroy it.
2791 vm_object_reference_locked(obj);
2792 VM_OBJECT_WUNLOCK(obj);
2804 kern_sendfile_getsock(struct thread *td, int s, struct file **sock_fp,
2807 cap_rights_t rights;
2814 * The socket must be a stream socket and connected.
2816 error = getsock_cap(td->td_proc->p_fd, s, cap_rights_init(&rights,
2817 CAP_SEND), sock_fp, NULL);
2820 *so = (*sock_fp)->f_data;
2821 if ((*so)->so_type != SOCK_STREAM)
2823 if (((*so)->so_state & SS_ISCONNECTED) == 0)
2829 vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
2830 struct uio *trl_uio, off_t offset, size_t nbytes, off_t *sent, int flags,
2831 int kflags, struct sendfile_sync *sfs, struct thread *td)
2833 struct file *sock_fp;
2835 struct vm_object *obj;
2840 struct shmfd *shmfd;
2842 off_t off, xfsize, fsbytes, sbytes, rem, obj_size;
2843 int error, bsize, nd, hdrlen, mnw;
2849 fsbytes = sbytes = 0;
2854 error = sendfile_getobj(td, fp, &obj, &vp, &shmfd, &obj_size, &bsize);
2860 error = kern_sendfile_getsock(td, sockfd, &sock_fp, &so);
2865 * Do not wait on memory allocations but return ENOMEM for
2866 * caller to retry later.
2867 * XXX: Experimental.
2869 if (flags & SF_MNOWAIT)
2873 error = mac_socket_check_send(td->td_ucred, so);
2878 /* If headers are specified copy them into mbufs. */
2879 if (hdr_uio != NULL) {
2880 hdr_uio->uio_td = td;
2881 hdr_uio->uio_rw = UIO_WRITE;
2882 if (hdr_uio->uio_resid > 0) {
2884 * In FBSD < 5.0 the nbytes to send also included
2885 * the header. If compat is specified subtract the
2886 * header size from nbytes.
2888 if (kflags & SFK_COMPAT) {
2889 if (nbytes > hdr_uio->uio_resid)
2890 nbytes -= hdr_uio->uio_resid;
2894 m = m_uiotombuf(hdr_uio, (mnw ? M_NOWAIT : M_WAITOK),
2897 error = mnw ? EAGAIN : ENOBUFS;
2900 hdrlen = m_length(m, NULL);
2905 * Protect against multiple writers to the socket.
2907 * XXXRW: Historically this has assumed non-interruptibility, so now
2908 * we implement that, but possibly shouldn't.
2910 (void)sblock(&so->so_snd, SBL_WAIT | SBL_NOINTR);
2913 * Loop through the pages of the file, starting with the requested
2914 * offset. Get a file page (do I/O if necessary), map the file page
2915 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
2917 * This is done in two loops. The inner loop turns as many pages
2918 * as it can, up to available socket buffer space, without blocking
2919 * into mbufs to have it bulk delivered into the socket send buffer.
2920 * The outer loop checks the state and available space of the socket
2921 * and takes care of the overall progress.
2923 for (off = offset; ; ) {
2929 if ((nbytes != 0 && nbytes == fsbytes) ||
2930 (nbytes == 0 && obj_size == fsbytes))
2939 * Check the socket state for ongoing connection,
2940 * no errors and space in socket buffer.
2941 * If space is low allow for the remainder of the
2942 * file to be processed if it fits the socket buffer.
2943 * Otherwise block in waiting for sufficient space
2944 * to proceed, or if the socket is nonblocking, return
2945 * to userland with EAGAIN while reporting how far
2947 * We wait until the socket buffer has significant free
2948 * space to do bulk sends. This makes good use of file
2949 * system read ahead and allows packet segmentation
2950 * offloading hardware to take over lots of work. If
2951 * we were not careful here we would send off only one
2954 SOCKBUF_LOCK(&so->so_snd);
2955 if (so->so_snd.sb_lowat < so->so_snd.sb_hiwat / 2)
2956 so->so_snd.sb_lowat = so->so_snd.sb_hiwat / 2;
2958 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
2960 SOCKBUF_UNLOCK(&so->so_snd);
2962 } else if (so->so_error) {
2963 error = so->so_error;
2965 SOCKBUF_UNLOCK(&so->so_snd);
2968 space = sbspace(&so->so_snd);
2971 space < so->so_snd.sb_lowat)) {
2972 if (so->so_state & SS_NBIO) {
2973 SOCKBUF_UNLOCK(&so->so_snd);
2978 * sbwait drops the lock while sleeping.
2979 * When we loop back to retry_space the
2980 * state may have changed and we retest
2983 error = sbwait(&so->so_snd);
2985 * An error from sbwait usually indicates that we've
2986 * been interrupted by a signal. If we've sent anything
2987 * then return bytes sent, otherwise return the error.
2990 SOCKBUF_UNLOCK(&so->so_snd);
2995 SOCKBUF_UNLOCK(&so->so_snd);
2998 * Reduce space in the socket buffer by the size of
2999 * the header mbuf chain.
3000 * hdrlen is set to 0 after the first loop.
3005 error = vn_lock(vp, LK_SHARED);
3008 error = VOP_GETATTR(vp, &va, td->td_ucred);
3009 if (error != 0 || off >= va.va_size) {
3013 obj_size = va.va_size;
3017 * Loop and construct maximum sized mbuf chain to be bulk
3018 * dumped into socket buffer.
3020 while (space > loopbytes) {
3025 * Calculate the amount to transfer.
3026 * Not to exceed a page, the EOF,
3027 * or the passed in nbytes.
3029 pgoff = (vm_offset_t)(off & PAGE_MASK);
3030 rem = obj_size - offset;
3032 rem = omin(rem, nbytes);
3033 rem -= fsbytes + loopbytes;
3034 xfsize = omin(PAGE_SIZE - pgoff, rem);
3035 xfsize = omin(space - loopbytes, xfsize);
3037 done = 1; /* all data sent */
3042 * Attempt to look up the page. Allocate
3043 * if not found or wait and loop if busy.
3046 nd = EAGAIN; /* send what we already got */
3047 else if ((flags & SF_NODISKIO) != 0)
3051 error = sendfile_readpage(obj, vp, nd, off,
3052 xfsize, bsize, td, &pg);
3054 if (error == EAGAIN)
3055 error = 0; /* not a real error */
3060 * Get a sendfile buf. When allocating the
3061 * first buffer for mbuf chain, we usually
3062 * wait as long as necessary, but this wait
3063 * can be interrupted. For subsequent
3064 * buffers, do not sleep, since several
3065 * threads might exhaust the buffers and then
3068 sf = sf_buf_alloc(pg, (mnw || m != NULL) ? SFB_NOWAIT :
3071 SFSTAT_INC(sf_allocfail);
3073 vm_page_unwire(pg, PQ_INACTIVE);
3074 KASSERT(pg->object != NULL,
3075 ("%s: object disappeared", __func__));
3078 error = (mnw ? EAGAIN : EINTR);
3083 * Get an mbuf and set it up as having
3086 m0 = m_get((mnw ? M_NOWAIT : M_WAITOK), MT_DATA);
3088 error = (mnw ? EAGAIN : ENOBUFS);
3089 sf_ext_free(sf, NULL);
3093 * Attach EXT_SFBUF external storage.
3095 m0->m_ext.ext_buf = (caddr_t )sf_buf_kva(sf);
3096 m0->m_ext.ext_size = PAGE_SIZE;
3097 m0->m_ext.ext_arg1 = sf;
3098 m0->m_ext.ext_arg2 = sfs;
3099 m0->m_ext.ext_type = EXT_SFBUF;
3100 m0->m_ext.ext_flags = 0;
3101 m0->m_flags |= (M_EXT|M_RDONLY);
3102 m0->m_data = (char *)sf_buf_kva(sf) + pgoff;
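/*
 * With EXT_SFBUF storage attached, the mbuf code calls sf_ext_ref()
 * and sf_ext_free() above as the chain is copied and freed, which
 * keeps the sendfile_sync refcount in step with in-flight data.
 */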
3105 /* Append to mbuf chain. */
3109 m_last(m)->m_next = m0;
3114 /* Keep track of bytes processed. */
3115 loopbytes += xfsize;
3119 * XXX eventually this should be a sfsync
3129 /* Add the buffer chain to the socket buffer. */
3133 mlen = m_length(m, NULL);
3134 SOCKBUF_LOCK(&so->so_snd);
3135 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
3137 SOCKBUF_UNLOCK(&so->so_snd);
3140 SOCKBUF_UNLOCK(&so->so_snd);
3141 CURVNET_SET(so->so_vnet);
3142 /* Avoid error aliasing. */
3143 err = (*so->so_proto->pr_usrreqs->pru_send)
3144 (so, 0, m, NULL, NULL, td);
3148 * We need two counters to get the
3149 * file offset and nbytes to send
3151 * - sbytes contains the total amount
3152 * of bytes sent, including headers.
3153 * - fsbytes contains the total amount
3154 * of bytes sent from the file.
3162 } else if (error == 0)
3164 m = NULL; /* pru_send always consumes */
3167 /* Quit outer loop on error or when we're done. */
3175 * Send trailers. Wimp out and use writev(2).
3177 if (trl_uio != NULL) {
3178 sbunlock(&so->so_snd);
3179 error = kern_writev(td, sockfd, trl_uio);
3181 sbytes += td->td_retval[0];
3186 sbunlock(&so->so_snd);
3189 * If there was no error we have to clear td->td_retval[0]
3190 * because it may have been set by writev.
3193 td->td_retval[0] = 0;
3199 vm_object_deallocate(obj);
3205 if (error == ERESTART)
3213 * Functionality only compiled in if SCTP is defined in the kernel Makefile,
3214 * otherwise all return EOPNOTSUPP.
3215 * XXX: We should make this loadable one day.
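 *
 * sctp_peeloff(2) detaches a single association from a one-to-many SCTP
 * socket and returns it to the caller as its own descriptor; a userland
 * sketch:
 *
 *	int afd = sctp_peeloff(sd, assoc_id);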
3218 sys_sctp_peeloff(td, uap)
3220 struct sctp_peeloff_args /* {
3225 #if (defined(INET) || defined(INET6)) && defined(SCTP)
3226 struct file *nfp = NULL;
3227 struct socket *head, *so;
3228 cap_rights_t rights;
3232 AUDIT_ARG_FD(uap->sd);
3233 error = fgetsock(td, uap->sd, cap_rights_init(&rights, CAP_PEELOFF),
3237 if (head->so_proto->pr_protocol != IPPROTO_SCTP) {
3241 error = sctp_can_peel_off(head, (sctp_assoc_t)uap->name);
3245 * At this point we know we have an assoc to pull;
3246 * we proceed to get the fd set up. This may block
3250 error = falloc(td, &nfp, &fd, 0);
3253 td->td_retval[0] = fd;
3255 CURVNET_SET(head->so_vnet);
3256 so = sonewconn(head, SS_ISCONNECTED);
3262 * Before changing the flags on the socket, we have to bump the
3263 * reference count. Otherwise, if the protocol calls sofree(),
3264 * the socket will be released due to a zero refcount.
3267 soref(so); /* file descriptor reference */
3272 TAILQ_REMOVE(&head->so_comp, so, so_list);
3274 so->so_state |= (head->so_state & SS_NBIO);
3275 so->so_state &= ~SS_NOFDREF;
3276 so->so_qstate &= ~SQ_COMP;
3279 finit(nfp, fflag, DTYPE_SOCKET, so, &socketops);
3280 error = sctp_do_peeloff(head, so, (sctp_assoc_t)uap->name);
3283 if (head->so_sigio != NULL)
3284 fsetown(fgetown(&head->so_sigio), &so->so_sigio);
3288 * close the new descriptor, assuming someone hasn't ripped it
3289 * out from under us.
3292 fdclose(td->td_proc->p_fd, nfp, fd, td);
3295 * Release explicitly held references before returning.
3305 return (EOPNOTSUPP);
3310 sys_sctp_generic_sendmsg (td, uap)
3312 struct sctp_generic_sendmsg_args /* {
3318 struct sctp_sndrcvinfo *sinfo,
3322 #if (defined(INET) || defined(INET6)) && defined(SCTP)
3323 struct sctp_sndrcvinfo sinfo, *u_sinfo = NULL;
3325 struct file *fp = NULL;
3326 struct sockaddr *to = NULL;
3328 struct uio *ktruio = NULL;
3331 struct iovec iov[1];
3332 cap_rights_t rights;
3335 if (uap->sinfo != NULL) {
3336 error = copyin(uap->sinfo, &sinfo, sizeof (sinfo));
3342 cap_rights_init(&rights, CAP_SEND);
3343 if (uap->tolen != 0) {
3344 error = getsockaddr(&to, uap->to, uap->tolen);
3349 cap_rights_set(&rights, CAP_CONNECT);
3352 AUDIT_ARG_FD(uap->sd);
3353 error = getsock_cap(td->td_proc->p_fd, uap->sd, &rights, &fp, NULL);
3357 if (to && (KTRPOINT(td, KTR_STRUCT)))
3361 iov[0].iov_base = uap->msg;
3362 iov[0].iov_len = uap->mlen;
3364 so = (struct socket *)fp->f_data;
3365 if (so->so_proto->pr_protocol != IPPROTO_SCTP) {
3370 error = mac_socket_check_send(td->td_ucred, so);
3376 auio.uio_iovcnt = 1;
3377 auio.uio_segflg = UIO_USERSPACE;
3378 auio.uio_rw = UIO_WRITE;
3380 auio.uio_offset = 0; /* XXX */
3382 len = auio.uio_resid = uap->mlen;
3383 CURVNET_SET(so->so_vnet);
3384 error = sctp_lower_sosend(so, to, &auio, (struct mbuf *)NULL,
3385 (struct mbuf *)NULL, uap->flags, u_sinfo, td);
3388 if (auio.uio_resid != len && (error == ERESTART ||
3389 error == EINTR || error == EWOULDBLOCK))
3391 /* Generation of SIGPIPE can be controlled per socket. */
3392 if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
3393 !(uap->flags & MSG_NOSIGNAL)) {
3394 PROC_LOCK(td->td_proc);
3395 tdsignal(td, SIGPIPE);
3396 PROC_UNLOCK(td->td_proc);
3400 td->td_retval[0] = len - auio.uio_resid;
3402 if (ktruio != NULL) {
3403 ktruio->uio_resid = td->td_retval[0];
3404 ktrgenio(uap->sd, UIO_WRITE, ktruio, error);
3414 return (EOPNOTSUPP);
3419 sys_sctp_generic_sendmsg_iov(td, uap)
3421 struct sctp_generic_sendmsg_iov_args /* {
3427 struct sctp_sndrcvinfo *sinfo,
3431 #if (defined(INET) || defined(INET6)) && defined(SCTP)
3432 struct sctp_sndrcvinfo sinfo, *u_sinfo = NULL;
3434 struct file *fp = NULL;
3435 struct sockaddr *to = NULL;
3437 struct uio *ktruio = NULL;
3440 struct iovec *iov, *tiov;
3441 cap_rights_t rights;
3445 if (uap->sinfo != NULL) {
3446 error = copyin(uap->sinfo, &sinfo, sizeof (sinfo));
3451 cap_rights_init(&rights, CAP_SEND);
3452 if (uap->tolen != 0) {
3453 error = getsockaddr(&to, uap->to, uap->tolen);
3458 cap_rights_set(&rights, CAP_CONNECT);
3461 AUDIT_ARG_FD(uap->sd);
3462 error = getsock_cap(td->td_proc->p_fd, uap->sd, &rights, &fp, NULL);
3466 #ifdef COMPAT_FREEBSD32
3467 if (SV_CURPROC_FLAG(SV_ILP32))
3468 error = freebsd32_copyiniov((struct iovec32 *)uap->iov,
3469 uap->iovlen, &iov, EMSGSIZE);
3472 error = copyiniov(uap->iov, uap->iovlen, &iov, EMSGSIZE);
3476 if (to && (KTRPOINT(td, KTR_STRUCT)))
3480 so = (struct socket *)fp->f_data;
3481 if (so->so_proto->pr_protocol != IPPROTO_SCTP) {
3486 error = mac_socket_check_send(td->td_ucred, so);
3492 auio.uio_iovcnt = uap->iovlen;
3493 auio.uio_segflg = UIO_USERSPACE;
3494 auio.uio_rw = UIO_WRITE;
3496 auio.uio_offset = 0; /* XXX */
3499 for (i = 0; i < uap->iovlen; i++, tiov++) {
3500 if ((auio.uio_resid += tiov->iov_len) < 0) {
3505 len = auio.uio_resid;
3506 CURVNET_SET(so->so_vnet);
3507 error = sctp_lower_sosend(so, to, &auio,
3508 (struct mbuf *)NULL, (struct mbuf *)NULL,
3509 uap->flags, u_sinfo, td);
3512 if (auio.uio_resid != len && (error == ERESTART ||
3513 error == EINTR || error == EWOULDBLOCK))
3515 /* Generation of SIGPIPE can be controlled per socket */
3516 if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
3517 !(uap->flags & MSG_NOSIGNAL)) {
3518 PROC_LOCK(td->td_proc);
3519 tdsignal(td, SIGPIPE);
3520 PROC_UNLOCK(td->td_proc);
3524 td->td_retval[0] = len - auio.uio_resid;
3526 if (ktruio != NULL) {
3527 ktruio->uio_resid = td->td_retval[0];
3528 ktrgenio(uap->sd, UIO_WRITE, ktruio, error);
3540 return (EOPNOTSUPP);
3545 sys_sctp_generic_recvmsg(td, uap)
3547 struct sctp_generic_recvmsg_args /* {
3551 struct sockaddr *from,
3552 __socklen_t *fromlenaddr,
3553 struct sctp_sndrcvinfo *sinfo,
3557 #if (defined(INET) || defined(INET6)) && defined(SCTP)
3558 uint8_t sockbufstore[256];
3560 struct iovec *iov, *tiov;
3561 struct sctp_sndrcvinfo sinfo;
3563 struct file *fp = NULL;
3564 struct sockaddr *fromsa;
3565 cap_rights_t rights;
3567 struct uio *ktruio = NULL;
3570 int error, fromlen, i, msg_flags;
3572 AUDIT_ARG_FD(uap->sd);
3573 error = getsock_cap(td->td_proc->p_fd, uap->sd,
3574 cap_rights_init(&rights, CAP_RECV), &fp, NULL);
3577 #ifdef COMPAT_FREEBSD32
3578 if (SV_CURPROC_FLAG(SV_ILP32))
3579 error = freebsd32_copyiniov((struct iovec32 *)uap->iov,
3580 uap->iovlen, &iov, EMSGSIZE);
3583 error = copyiniov(uap->iov, uap->iovlen, &iov, EMSGSIZE);
3588 if (so->so_proto->pr_protocol != IPPROTO_SCTP) {
3593 error = mac_socket_check_receive(td->td_ucred, so);
3598 if (uap->fromlenaddr != NULL) {
3599 error = copyin(uap->fromlenaddr, &fromlen, sizeof (fromlen));
3605 if (uap->msg_flags) {
3606 error = copyin(uap->msg_flags, &msg_flags, sizeof (int));
3613 auio.uio_iovcnt = uap->iovlen;
3614 auio.uio_segflg = UIO_USERSPACE;
3615 auio.uio_rw = UIO_READ;
3617 auio.uio_offset = 0; /* XXX */
3620 for (i = 0; i < uap->iovlen; i++, tiov++) {
3621 if ((auio.uio_resid += tiov->iov_len) < 0) {
3626 len = auio.uio_resid;
3627 fromsa = (struct sockaddr *)sockbufstore;
3630 if (KTRPOINT(td, KTR_GENIO))
3631 ktruio = cloneuio(&auio);
3633 memset(&sinfo, 0, sizeof(struct sctp_sndrcvinfo));
3634 CURVNET_SET(so->so_vnet);
3635 error = sctp_sorecvmsg(so, &auio, (struct mbuf **)NULL,
3636 fromsa, fromlen, &msg_flags,
3637 (struct sctp_sndrcvinfo *)&sinfo, 1);
3640 if (auio.uio_resid != len && (error == ERESTART ||
3641 error == EINTR || error == EWOULDBLOCK))
3645 error = copyout(&sinfo, uap->sinfo, sizeof (sinfo));
3648 if (ktruio != NULL) {
3649 ktruio->uio_resid = len - auio.uio_resid;
3650 ktrgenio(uap->sd, UIO_READ, ktruio, error);
3655 td->td_retval[0] = len - auio.uio_resid;
3657 if (fromlen && uap->from) {
3659 if (len <= 0 || fromsa == 0)
3662 len = MIN(len, fromsa->sa_len);
3663 error = copyout(fromsa, uap->from, (size_t)len);
3667 error = copyout(&len, uap->fromlenaddr, sizeof (socklen_t));
3672 if (KTRPOINT(td, KTR_STRUCT))
3673 ktrsockaddr(fromsa);
3675 if (uap->msg_flags) {
3676 error = copyout(&msg_flags, uap->msg_flags, sizeof (int));
3688 return (EOPNOTSUPP);