2 * Copyright (c) 1982, 1986, 1989, 1990, 1993
3 * The Regents of the University of California. All rights reserved.
5 * sendfile(2) and related extensions:
6 * Copyright (c) 1998, David Greenman. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * @(#)uipc_syscalls.c 8.4 (Berkeley) 2/21/94
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
39 #include "opt_compat.h"
40 #include "opt_ktrace.h"
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
47 #include <sys/mutex.h>
48 #include <sys/sysproto.h>
49 #include <sys/malloc.h>
50 #include <sys/filedesc.h>
51 #include <sys/event.h>
53 #include <sys/fcntl.h>
55 #include <sys/filio.h>
56 #include <sys/mount.h>
58 #include <sys/protosw.h>
59 #include <sys/sf_buf.h>
60 #include <sys/socket.h>
61 #include <sys/socketvar.h>
62 #include <sys/signalvar.h>
63 #include <sys/syscallsubr.h>
64 #include <sys/sysctl.h>
66 #include <sys/vnode.h>
68 #include <sys/ktrace.h>
71 #include <security/mac/mac_framework.h>
74 #include <vm/vm_object.h>
75 #include <vm/vm_page.h>
76 #include <vm/vm_pageout.h>
77 #include <vm/vm_kern.h>
78 #include <vm/vm_extern.h>
81 #include <netinet/sctp.h>
82 #include <netinet/sctp_peeloff.h>
/*
 * Forward declarations for the static syscall helpers implemented below,
 * followed by the kern.ipc sysctls that export sendfile(2) sf_buf
 * allocation statistics (tunable max, peak, and current usage).
 * NOTE(review): fragmentary listing -- intervening source lines elided.
 */
85 static int sendit(struct thread *td, int s, struct msghdr *mp, int flags);
86 static int recvit(struct thread *td, int s, struct msghdr *mp, void *namelenp);
88 static int accept1(struct thread *td, struct accept_args *uap, int compat);
89 static int do_sendfile(struct thread *td, struct sendfile_args *uap, int compat);
90 static int getsockname1(struct thread *td, struct getsockname_args *uap,
92 static int getpeername1(struct thread *td, struct getpeername_args *uap,
96 * NSFBUFS-related variables and associated sysctls
102 SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufs, CTLFLAG_RDTUN, &nsfbufs, 0,
103 "Maximum number of sendfile(2) sf_bufs available");
104 SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufspeak, CTLFLAG_RD, &nsfbufspeak, 0,
105 "Number of sendfile(2) sf_bufs at peak usage");
106 SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufsused, CTLFLAG_RD, &nsfbufsused, 0,
107 "Number of sendfile(2) sf_bufs in use");
/*
 * getsock() - look up fd in fdp via fget_locked() under the filedesc
 * shared lock, reject descriptors whose f_type is not DTYPE_SOCKET, and
 * optionally report the file's f_flag through *fflagp.
 */
110 * Convert a user file descriptor to a kernel file entry. A reference on the
111 * file entry is held upon returning. This is lighter weight than
112 * fgetsock(), which bumps the socket reference and drops the file reference
113 * count instead, as this approach avoids several additional mutex operations
114 * associated with the additional reference count. If requested, return the
118 getsock(struct filedesc *fdp, int fd, struct file **fpp, u_int *fflagp)
128 fp = fget_locked(fdp, fd);
131 else if (fp->f_type != DTYPE_SOCKET) {
/* Report the file flags only when the caller asked for them. */
137 *fflagp = fp->f_flag;
140 FILEDESC_SUNLOCK(fdp);
/*
 * socket(2): MAC-check the create, allocate a descriptor with falloc(),
 * create the protocol socket with socreate(), and wire the socket into the
 * new struct file. On socreate() failure the half-built descriptor is torn
 * down with fdclose(); on success the fd is returned in td_retval[0].
 */
147 * System call interface to the socket abstraction.
149 #if defined(COMPAT_43)
150 #define COMPAT_OLDSOCK
156 struct socket_args /* {
162 struct filedesc *fdp;
168 error = mac_socket_check_create(td->td_ucred, uap->domain, uap->type,
173 fdp = td->td_proc->p_fd;
174 error = falloc(td, &fp, &fd);
177 /* An extra reference on `fp' has been held for us by falloc(). */
178 error = socreate(uap->domain, &so, uap->type, uap->protocol,
181 fdclose(fdp, fp, fd, td);
184 fp->f_data = so; /* already has ref count */
185 fp->f_flag = FREAD|FWRITE;
186 fp->f_type = DTYPE_SOCKET;
187 fp->f_ops = &socketops;
189 td->td_retval[0] = fd;
/*
 * bind(2): copy the user sockaddr into the kernel with getsockaddr(),
 * then hand off to kern_bind().
 */
199 struct bind_args /* {
208 if ((error = getsockaddr(&sa, uap->name, uap->namelen)) != 0)
211 error = kern_bind(td, uap->s, sa);
/*
 * kern_bind() - resolve fd to a socket via getsock(), MAC-check, and
 * perform the bind with sobind(). `sa' is a kernel-space sockaddr.
 */
217 kern_bind(td, fd, sa)
226 error = getsock(td->td_proc->p_fd, fd, &fp, NULL);
232 error = mac_socket_check_bind(td->td_ucred, so, sa);
237 error = sobind(so, sa, td);
/*
 * listen(2): resolve the descriptor, MAC-check, and mark the socket as
 * accepting connections with the requested backlog via solisten().
 */
249 struct listen_args /* {
258 error = getsock(td->td_proc->p_fd, uap->s, &fp, NULL);
263 error = mac_socket_check_listen(td->td_ucred, so);
268 error = solisten(so, uap->backlog, td);
/*
 * accept1() - user-space marshalling wrapper around kern_accept().
 * When no name buffer is supplied the fast path calls kern_accept() with
 * NULL output pointers. Otherwise copy in the caller's namelen, accept,
 * optionally rewrite sa_family for 4.3BSD compat, and copy the address
 * and its length back out. Any copyout failure closes the new descriptor.
 * `compat' selects the old-style (osockaddr) behavior.
 */
281 accept1(td, uap, compat)
283 struct accept_args /* {
285 struct sockaddr * __restrict name;
286 socklen_t * __restrict anamelen;
290 struct sockaddr *name;
295 if (uap->name == NULL)
296 return (kern_accept(td, uap->s, NULL, NULL, NULL));
298 error = copyin(uap->anamelen, &namelen, sizeof (namelen));
302 error = kern_accept(td, uap->s, &name, &namelen, &fp);
305 * return a namelen of zero for older code which might
306 * ignore the return value from accept.
309 (void) copyout(&namelen,
310 uap->anamelen, sizeof(*uap->anamelen));
314 if (error == 0 && name != NULL) {
315 #ifdef COMPAT_OLDSOCK
317 ((struct osockaddr *)name)->sa_family =
320 error = copyout(name, uap->name, namelen);
323 error = copyout(&namelen, uap->anamelen,
/* On copyout failure, undo: close the freshly installed descriptor. */
326 fdclose(td->td_proc->p_fd, fp, td->td_retval[0], td);
328 free(name, M_SONAME);
/*
 * kern_accept() - core of accept(2). Validates that the listening socket
 * has SO_ACCEPTCONN set, allocates the new descriptor up front with
 * falloc(), then waits (msleep on so_timeo under accept_mtx) for a
 * completed connection unless the socket is non-blocking and so_comp is
 * empty. The accepted socket is ref'd, unlinked from so_comp, inherits
 * SS_NBIO and the listener's sigio ownership, and is installed in the new
 * file. Returns the peer address via *name/*namelen and, if requested,
 * the held file via *fp.
 * NOTE(review): fragmentary listing -- error/unwind paths are partially
 * elided here.
 */
333 kern_accept(struct thread *td, int s, struct sockaddr **name,
334 socklen_t *namelen, struct file **fp)
336 struct filedesc *fdp;
337 struct file *headfp, *nfp = NULL;
338 struct sockaddr *sa = NULL;
340 struct socket *head, *so;
352 fdp = td->td_proc->p_fd;
353 error = getsock(fdp, s, &headfp, &fflag);
356 head = headfp->f_data;
357 if ((head->so_options & SO_ACCEPTCONN) == 0) {
363 error = mac_socket_check_accept(td->td_ucred, head);
368 error = falloc(td, &nfp, &fd);
/* Non-blocking listener with nothing queued: fail immediately. */
372 if ((head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->so_comp)) {
377 while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0) {
378 if (head->so_rcv.sb_state & SBS_CANTRCVMORE) {
379 head->so_error = ECONNABORTED;
382 error = msleep(&head->so_timeo, &accept_mtx, PSOCK | PCATCH,
389 if (head->so_error) {
390 error = head->so_error;
395 so = TAILQ_FIRST(&head->so_comp);
396 KASSERT(!(so->so_qstate & SQ_INCOMP), ("accept1: so SQ_INCOMP"));
397 KASSERT(so->so_qstate & SQ_COMP, ("accept1: so not SQ_COMP"));
400 * Before changing the flags on the socket, we have to bump the
401 * reference count. Otherwise, if the protocol calls sofree(),
402 * the socket will be released due to a zero refcount.
404 SOCK_LOCK(so); /* soref() and so_state update */
405 soref(so); /* file descriptor reference */
407 TAILQ_REMOVE(&head->so_comp, so, so_list);
409 so->so_state |= (head->so_state & SS_NBIO);
410 so->so_qstate &= ~SQ_COMP;
416 /* An extra reference on `nfp' has been held for us by falloc(). */
417 td->td_retval[0] = fd;
419 /* connection has been removed from the listen queue */
420 KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0);
422 pgid = fgetown(&head->so_sigio);
424 fsetown(pgid, &so->so_sigio);
427 nfp->f_data = so; /* nfp has ref count from falloc */
429 nfp->f_type = DTYPE_SOCKET;
430 nfp->f_ops = &socketops;
432 /* Sync socket nonblocking/async state with file flags */
433 tmp = fflag & FNONBLOCK;
434 (void) fo_ioctl(nfp, FIONBIO, &tmp, td->td_ucred, td);
435 tmp = fflag & FASYNC;
436 (void) fo_ioctl(nfp, FIOASYNC, &tmp, td->td_ucred, td);
438 error = soaccept(so, &sa);
441 * return a namelen of zero for older code which might
442 * ignore the return value from accept.
454 /* check sa_len before it is destroyed */
455 if (*namelen > sa->sa_len)
456 *namelen = sa->sa_len;
465 * close the new descriptor, assuming someone hasn't ripped it
469 fdclose(fdp, nfp, fd, td);
472 * Release explicitly held references before returning. We return
473 * a reference on nfp to the caller on success if they request it.
/* accept(2) entry point: new-style (non-compat) variant of accept1(). */
492 struct accept_args *uap;
495 return (accept1(td, uap, 0));
/* oaccept(): 4.3BSD-compatible accept; accept1() with compat = 1. */
498 #ifdef COMPAT_OLDSOCK
502 struct accept_args *uap;
505 return (accept1(td, uap, 1));
507 #endif /* COMPAT_OLDSOCK */
/*
 * connect(2): copy the user sockaddr in with getsockaddr() and delegate
 * to kern_connect().
 */
513 struct connect_args /* {
522 error = getsockaddr(&sa, uap->name, uap->namelen);
526 error = kern_connect(td, uap->s, sa);
/*
 * kern_connect() - core of connect(2). Rejects a socket already in
 * SS_ISCONNECTING, MAC-checks, starts the connection with soconnect(),
 * and for blocking sockets sleeps on so_timeo until SS_ISCONNECTING
 * clears or so_error is set. Non-blocking sockets return while the
 * connection is still in progress. An interrupted wait (EINTR/ERESTART)
 * leaves the connect in flight; ERESTART is remapped below so the
 * syscall is not restarted.
 */
533 kern_connect(td, fd, sa)
543 error = getsock(td->td_proc->p_fd, fd, &fp, NULL);
547 if (so->so_state & SS_ISCONNECTING) {
553 error = mac_socket_check_connect(td->td_ucred, so, sa);
558 error = soconnect(so, sa, td);
561 if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
566 while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
567 error = msleep(&so->so_timeo, SOCK_MTX(so), PSOCK | PCATCH,
570 if (error == EINTR || error == ERESTART)
576 error = so->so_error;
582 so->so_state &= ~SS_ISCONNECTING;
583 if (error == ERESTART)
/*
 * socketpair(2): create two sockets with socreate(), allocate a
 * descriptor for each with falloc(), connect them with soconnect2()
 * (both directions for SOCK_DGRAM, whose connection state is
 * asymmetric), initialize both files as read/write sockets, and copy
 * the descriptor pair out to uap->rsv. The unwind path closes fp2 then
 * fp1 via fdclose().
 */
593 struct socketpair_args /* {
600 struct filedesc *fdp = td->td_proc->p_fd;
601 struct file *fp1, *fp2;
602 struct socket *so1, *so2;
603 int fd, error, sv[2];
606 /* We might want to have a separate check for socket pairs. */
607 error = mac_socket_check_create(td->td_ucred, uap->domain, uap->type,
613 error = socreate(uap->domain, &so1, uap->type, uap->protocol,
617 error = socreate(uap->domain, &so2, uap->type, uap->protocol,
621 /* On success extra reference to `fp1' and 'fp2' is set by falloc. */
622 error = falloc(td, &fp1, &fd);
626 fp1->f_data = so1; /* so1 already has ref count */
627 error = falloc(td, &fp2, &fd);
630 fp2->f_data = so2; /* so2 already has ref count */
632 error = soconnect2(so1, so2);
635 if (uap->type == SOCK_DGRAM) {
637 * Datagram socket connection is asymmetric.
639 error = soconnect2(so2, so1);
644 fp1->f_flag = FREAD|FWRITE;
645 fp1->f_type = DTYPE_SOCKET;
646 fp1->f_ops = &socketops;
649 fp2->f_flag = FREAD|FWRITE;
650 fp2->f_type = DTYPE_SOCKET;
651 fp2->f_ops = &socketops;
654 error = copyout(sv, uap->rsv, 2 * sizeof (int));
661 fdclose(fdp, fp2, sv[1], td);
664 fdclose(fdp, fp1, sv[0], td);
/*
 * sendit() - user-space marshalling front end for the send*() family.
 * Copies in the destination address (if any) and the control message
 * buffer. Under COMPAT_OLDSOCK a MSG_COMPAT control buffer carries raw
 * 4.3BSD access rights, so a cmsghdr (SOL_SOCKET/SCM_RIGHTS) is
 * prepended with M_PREPEND before passing everything to kern_sendit().
 */
676 sendit(td, s, mp, flags)
682 struct mbuf *control;
686 if (mp->msg_name != NULL) {
687 error = getsockaddr(&to, mp->msg_name, mp->msg_namelen);
697 if (mp->msg_control) {
/* Reject control buffers too small to hold a cmsghdr (non-compat). */
698 if (mp->msg_controllen < sizeof(struct cmsghdr)
699 #ifdef COMPAT_OLDSOCK
700 && mp->msg_flags != MSG_COMPAT
706 error = sockargs(&control, mp->msg_control,
707 mp->msg_controllen, MT_CONTROL);
710 #ifdef COMPAT_OLDSOCK
711 if (mp->msg_flags == MSG_COMPAT) {
714 M_PREPEND(control, sizeof(*cm), M_TRYWAIT);
719 cm = mtod(control, struct cmsghdr *);
720 cm->cmsg_len = control->m_len;
721 cm->cmsg_level = SOL_SOCKET;
722 cm->cmsg_type = SCM_RIGHTS;
730 error = kern_sendit(td, s, mp, flags, control, UIO_USERSPACE);
/*
 * kern_sendit() - core of the send path. Builds a struct uio over the
 * caller's iovec array (rejecting overall resid overflow), optionally
 * clones the uio for ktrace, and calls sosend(). A partially completed
 * transfer suppresses ERESTART/EINTR/EWOULDBLOCK so the byte count is
 * returned instead. EPIPE raises SIGPIPE unless SO_NOSIGPIPE or
 * MSG_NOSIGNAL is set. Bytes sent are reported via td_retval[0].
 */
739 kern_sendit(td, s, mp, flags, control, segflg)
744 struct mbuf *control;
754 struct uio *ktruio = NULL;
757 error = getsock(td->td_proc->p_fd, s, &fp, NULL);
760 so = (struct socket *)fp->f_data;
764 error = mac_socket_check_send(td->td_ucred, so);
770 auio.uio_iov = mp->msg_iov;
771 auio.uio_iovcnt = mp->msg_iovlen;
772 auio.uio_segflg = segflg;
773 auio.uio_rw = UIO_WRITE;
775 auio.uio_offset = 0; /* XXX */
/* Accumulate resid; a negative sum means the iovec lengths overflowed. */
778 for (i = 0; i < mp->msg_iovlen; i++, iov++) {
779 if ((auio.uio_resid += iov->iov_len) < 0) {
785 if (KTRPOINT(td, KTR_GENIO))
786 ktruio = cloneuio(&auio);
788 len = auio.uio_resid;
789 error = sosend(so, mp->msg_name, &auio, 0, control, flags, td);
791 if (auio.uio_resid != len && (error == ERESTART ||
792 error == EINTR || error == EWOULDBLOCK))
794 /* Generation of SIGPIPE can be controlled per socket */
795 if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
796 !(flags & MSG_NOSIGNAL)) {
797 PROC_LOCK(td->td_proc);
798 psignal(td->td_proc, SIGPIPE);
799 PROC_UNLOCK(td->td_proc);
803 td->td_retval[0] = len - auio.uio_resid;
805 if (ktruio != NULL) {
806 ktruio->uio_resid = td->td_retval[0];
807 ktrgenio(s, UIO_WRITE, ktruio, error);
/*
 * sendto(2): build a msghdr around the single user buffer and the
 * destination address, then dispatch through sendit().
 */
818 struct sendto_args /* {
831 msg.msg_name = uap->to;
832 msg.msg_namelen = uap->tolen;
836 #ifdef COMPAT_OLDSOCK
839 aiov.iov_base = uap->buf;
840 aiov.iov_len = uap->len;
841 error = sendit(td, uap->s, &msg, uap->flags);
/* osend(): 4.3BSD send() -- single-buffer msghdr routed via sendit(). */
845 #ifdef COMPAT_OLDSOCK
849 struct osend_args /* {
864 aiov.iov_base = uap->buf;
865 aiov.iov_len = uap->len;
868 error = sendit(td, uap->s, &msg, uap->flags);
/*
 * osendmsg(): 4.3BSD sendmsg(). Copies in an omsghdr and the iovec
 * array, forces MSG_COMPAT so sendit() applies old-style control-
 * message translation, then dispatches via sendit().
 */
875 struct osendmsg_args /* {
885 error = copyin(uap->msg, &msg, sizeof (struct omsghdr));
888 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
892 msg.msg_flags = MSG_COMPAT;
893 error = sendit(td, uap->s, &msg, uap->flags);
/*
 * sendmsg(2): copy in the msghdr and its iovec array (bounded by
 * EMSGSIZE) and dispatch through sendit().
 */
902 struct sendmsg_args /* {
912 error = copyin(uap->msg, &msg, sizeof (msg));
915 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
919 #ifdef COMPAT_OLDSOCK
922 error = sendit(td, uap->s, &msg, uap->flags);
/*
 * kern_recvit() - core of the receive path. Builds a uio over the
 * caller's iovecs, calls soreceive() (requesting control mbufs only if
 * the caller supplied msg_control or a controlp), and then copies the
 * source address and control data back out. `fromseg' selects copyout
 * vs. bcopy for the name; if `controlp' is non-NULL the control chain
 * is handed to the caller instead of being copied out. A partial
 * transfer suppresses ERESTART/EINTR/EWOULDBLOCK. Control data that
 * does not fit sets MSG_CTRUNC. Under COMPAT_OLDSOCK, SCM_RIGHTS
 * control messages have their cmsghdr stripped; other control data is
 * dropped for old callers.
 * NOTE(review): fragmentary listing -- several cleanup lines elided.
 */
928 kern_recvit(td, s, mp, fromseg, controlp)
932 enum uio_seg fromseg;
933 struct mbuf **controlp;
940 struct mbuf *m, *control = 0;
944 struct sockaddr *fromsa = 0;
946 struct uio *ktruio = NULL;
952 error = getsock(td->td_proc->p_fd, s, &fp, NULL);
959 error = mac_socket_check_receive(td->td_ucred, so);
967 auio.uio_iov = mp->msg_iov;
968 auio.uio_iovcnt = mp->msg_iovlen;
969 auio.uio_segflg = UIO_USERSPACE;
970 auio.uio_rw = UIO_READ;
972 auio.uio_offset = 0; /* XXX */
/* Accumulate resid; a negative sum means the iovec lengths overflowed. */
975 for (i = 0; i < mp->msg_iovlen; i++, iov++) {
976 if ((auio.uio_resid += iov->iov_len) < 0) {
982 if (KTRPOINT(td, KTR_GENIO))
983 ktruio = cloneuio(&auio);
985 len = auio.uio_resid;
986 error = soreceive(so, &fromsa, &auio, (struct mbuf **)0,
987 (mp->msg_control || controlp) ? &control : (struct mbuf **)0,
990 if (auio.uio_resid != (int)len && (error == ERESTART ||
991 error == EINTR || error == EWOULDBLOCK))
995 if (ktruio != NULL) {
996 ktruio->uio_resid = (int)len - auio.uio_resid;
997 ktrgenio(s, UIO_READ, ktruio, error);
1002 td->td_retval[0] = (int)len - auio.uio_resid;
1004 len = mp->msg_namelen;
1005 if (len <= 0 || fromsa == 0)
1008 /* save sa_len before it is destroyed by MSG_COMPAT */
1009 len = MIN(len, fromsa->sa_len);
1010 #ifdef COMPAT_OLDSOCK
1011 if (mp->msg_flags & MSG_COMPAT)
1012 ((struct osockaddr *)fromsa)->sa_family =
1015 if (fromseg == UIO_USERSPACE) {
1016 error = copyout(fromsa, mp->msg_name,
1021 bcopy(fromsa, mp->msg_name, len);
1023 mp->msg_namelen = len;
1025 if (mp->msg_control && controlp == NULL) {
1026 #ifdef COMPAT_OLDSOCK
1028 * We assume that old recvmsg calls won't receive access
1029 * rights and other control info, esp. as control info
1030 * is always optional and those options didn't exist in 4.3.
1031 * If we receive rights, trim the cmsghdr; anything else
1034 if (control && mp->msg_flags & MSG_COMPAT) {
1035 if (mtod(control, struct cmsghdr *)->cmsg_level !=
1037 mtod(control, struct cmsghdr *)->cmsg_type !=
1039 mp->msg_controllen = 0;
1042 control->m_len -= sizeof (struct cmsghdr);
1043 control->m_data += sizeof (struct cmsghdr);
1046 len = mp->msg_controllen;
1048 mp->msg_controllen = 0;
1049 ctlbuf = mp->msg_control;
/* Walk the control mbuf chain, copying out until the buffer is full. */
1051 while (m && len > 0) {
1052 unsigned int tocopy;
1054 if (len >= m->m_len)
1057 mp->msg_flags |= MSG_CTRUNC;
1061 if ((error = copyout(mtod(m, caddr_t),
1062 ctlbuf, tocopy)) != 0)
1069 mp->msg_controllen = ctlbuf - (caddr_t)mp->msg_control;
1074 FREE(fromsa, M_SONAME);
1076 if (error == 0 && controlp != NULL)
1077 *controlp = control;
/*
 * recvit() - thin wrapper over kern_recvit() that also copies the
 * resulting msg_namelen back to user space when `namelenp' is given.
 * Old (MSG_COMPAT) recvfrom ignored copyout failures here, so the error
 * is cleared for compat callers.
 */
1085 recvit(td, s, mp, namelenp)
1093 error = kern_recvit(td, s, mp, UIO_USERSPACE, NULL);
1097 error = copyout(&mp->msg_namelen, namelenp, sizeof (socklen_t));
1098 #ifdef COMPAT_OLDSOCK
1099 if (mp->msg_flags & MSG_COMPAT)
1100 error = 0; /* old recvfrom didn't check */
/*
 * recvfrom(2): build a single-iovec msghdr; copy in the caller's
 * fromlen (if supplied) as the name buffer size, then dispatch through
 * recvit(), which writes the updated length back via fromlenaddr.
 */
1109 struct recvfrom_args /* {
1114 struct sockaddr * __restrict from;
1115 socklen_t * __restrict fromlenaddr;
1122 if (uap->fromlenaddr) {
1123 error = copyin(uap->fromlenaddr,
1124 &msg.msg_namelen, sizeof (msg.msg_namelen));
1128 msg.msg_namelen = 0;
1130 msg.msg_name = uap->from;
1131 msg.msg_iov = &aiov;
1133 aiov.iov_base = uap->buf;
1134 aiov.iov_len = uap->len;
1135 msg.msg_control = 0;
1136 msg.msg_flags = uap->flags;
1137 error = recvit(td, uap->s, &msg, uap->fromlenaddr);
/* orecvfrom(): 4.3BSD recvfrom -- forces MSG_COMPAT, then recvfrom(). */
1142 #ifdef COMPAT_OLDSOCK
1146 struct recvfrom_args *uap;
1149 uap->flags |= MSG_COMPAT;
1150 return (recvfrom(td, uap));
/*
 * orecv(): 4.3BSD recv() -- single-buffer msghdr with no name or
 * control, dispatched through recvit().
 */
1154 #ifdef COMPAT_OLDSOCK
1158 struct orecv_args /* {
1170 msg.msg_namelen = 0;
1171 msg.msg_iov = &aiov;
1173 aiov.iov_base = uap->buf;
1174 aiov.iov_len = uap->len;
1175 msg.msg_control = 0;
1176 msg.msg_flags = uap->flags;
1177 error = recvit(td, uap->s, &msg, NULL);
/*
 * orecvmsg(): 4.3BSD recvmsg(). Relies on the omsghdr layout overlaying
 * the modern msghdr. Forces MSG_COMPAT, receives via recvit() (writing
 * msg_namelen straight into the user's omsghdr), and copies the control
 * length into the old msg_accrightslen field on success.
 */
1182 * Old recvmsg. This code takes advantage of the fact that the old msghdr
1183 * overlays the new one, missing only the flags, and with the (old) access
1184 * rights where the control fields are now.
1189 struct orecvmsg_args /* {
1191 struct omsghdr *msg;
1199 error = copyin(uap->msg, &msg, sizeof (struct omsghdr));
1202 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1205 msg.msg_flags = uap->flags | MSG_COMPAT;
1207 error = recvit(td, uap->s, &msg, &uap->msg->msg_namelen);
1208 if (msg.msg_controllen && error == 0)
1209 error = copyout(&msg.msg_controllen,
1210 &uap->msg->msg_accrightslen, sizeof (int));
/*
 * recvmsg(2): copy in the msghdr and iovec array, strip any MSG_COMPAT
 * bit a user may have set, receive via recvit(), and copy the updated
 * msghdr (lengths and flags) back out.
 */
1219 struct recvmsg_args /* {
1226 struct iovec *uiov, *iov;
1229 error = copyin(uap->msg, &msg, sizeof (msg));
1232 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1235 msg.msg_flags = uap->flags;
1236 #ifdef COMPAT_OLDSOCK
1237 msg.msg_flags &= ~MSG_COMPAT;
1241 error = recvit(td, uap->s, &msg, NULL);
1244 error = copyout(&msg, uap->msg, sizeof(msg));
/* shutdown(2): resolve the descriptor and call soshutdown(). */
1254 struct shutdown_args /* {
1263 error = getsock(td->td_proc->p_fd, uap->s, &fp, NULL);
1266 error = soshutdown(so, uap->how);
/* setsockopt(2): forward to kern_setsockopt() with a user-space value. */
1276 struct setsockopt_args /* {
1285 return (kern_setsockopt(td, uap->s, uap->level, uap->name,
1286 uap->val, UIO_USERSPACE, uap->valsize));
/*
 * kern_setsockopt() - core of setsockopt(2). Validates val/valsize,
 * builds a struct sockopt (sopt_td is NULL for kernel-space values,
 * selected by `valseg'; an invalid valseg panics), resolves the socket,
 * and applies the option with sosetopt().
 */
1290 kern_setsockopt(td, s, level, name, val, valseg, valsize)
1296 enum uio_seg valseg;
1302 struct sockopt sopt;
1304 if (val == NULL && valsize != 0)
1306 if ((int)valsize < 0)
1309 sopt.sopt_dir = SOPT_SET;
1310 sopt.sopt_level = level;
1311 sopt.sopt_name = name;
1312 sopt.sopt_val = val;
1313 sopt.sopt_valsize = valsize;
1319 sopt.sopt_td = NULL;
1322 panic("kern_setsockopt called with bad valseg");
1325 error = getsock(td->td_proc->p_fd, s, &fp, NULL);
1328 error = sosetopt(so, &sopt);
/*
 * getsockopt(2): copy in the caller's value-buffer size, call
 * kern_getsockopt() with a user-space value pointer, and copy the
 * (possibly shortened) size back out.
 */
1338 struct getsockopt_args /* {
1342 void * __restrict val;
1343 socklen_t * __restrict avalsize;
1350 error = copyin(uap->avalsize, &valsize, sizeof (valsize));
1355 error = kern_getsockopt(td, uap->s, uap->level, uap->name,
1356 uap->val, UIO_USERSPACE, &valsize);
1359 error = copyout(&valsize, uap->avalsize, sizeof (valsize));
1364 * Kernel version of getsockopt.
1365 * optval can be a userland or kernel address. optlen is always a kernel pointer.
1368 kern_getsockopt(td, s, level, name, val, valseg, valsize)
1374 enum uio_seg valseg;
1380 struct sockopt sopt;
/* Reject a negative length smuggled through the unsigned valsize. */
1384 if ((int)*valsize < 0)
1387 sopt.sopt_dir = SOPT_GET;
1388 sopt.sopt_level = level;
1389 sopt.sopt_name = name;
1390 sopt.sopt_val = val;
1391 sopt.sopt_valsize = (size_t)*valsize; /* checked non-negative above */
1397 sopt.sopt_td = NULL;
1400 panic("kern_getsockopt called with bad valseg");
1403 error = getsock(td->td_proc->p_fd, s, &fp, NULL);
1406 error = sogetopt(so, &sopt);
/* Report back how many bytes sogetopt() actually produced. */
1407 *valsize = sopt.sopt_valsize;
/*
 * getsockname1() - marshalling wrapper for getsockname(2): copy in the
 * buffer length, fetch the local address via kern_getsockname(),
 * rewrite sa_family in place for 4.3BSD compat callers, then copy the
 * address and final length back out.
 */
1414 * getsockname1() - Get socket name.
1418 getsockname1(td, uap, compat)
1420 struct getsockname_args /* {
1422 struct sockaddr * __restrict asa;
1423 socklen_t * __restrict alen;
1427 struct sockaddr *sa;
1431 error = copyin(uap->alen, &len, sizeof(len));
1435 error = kern_getsockname(td, uap->fdes, &sa, &len);
1440 #ifdef COMPAT_OLDSOCK
1442 ((struct osockaddr *)sa)->sa_family = sa->sa_family;
1444 error = copyout(sa, uap->asa, (u_int)len);
1448 error = copyout(&len, uap->alen, sizeof(len));
/*
 * kern_getsockname() - fetch the local address through the protocol's
 * pru_sockaddr entry point; clamp the returned length to the caller's
 * buffer (*alen) with MIN. The M_SONAME allocation made by the protocol
 * is freed on the failure path.
 */
1453 kern_getsockname(struct thread *td, int fd, struct sockaddr **sa,
1464 error = getsock(td->td_proc->p_fd, fd, &fp, NULL);
1469 error = (*so->so_proto->pr_usrreqs->pru_sockaddr)(so, sa);
1475 len = MIN(*alen, (*sa)->sa_len);
1480 free(*sa, M_SONAME);
/* getsockname(2) entry point: non-compat variant of getsockname1(). */
1487 getsockname(td, uap)
1489 struct getsockname_args *uap;
1492 return (getsockname1(td, uap, 0));
/* ogetsockname(): 4.3BSD-compatible variant (compat = 1). */
1495 #ifdef COMPAT_OLDSOCK
1497 ogetsockname(td, uap)
1499 struct getsockname_args *uap;
1502 return (getsockname1(td, uap, 1));
1504 #endif /* COMPAT_OLDSOCK */
/*
 * getpeername1() - marshalling wrapper for getpeername(2); mirrors
 * getsockname1() but fetches the remote address via kern_getpeername().
 */
1507 * getpeername1() - Get name of peer for connected socket.
1511 getpeername1(td, uap, compat)
1513 struct getpeername_args /* {
1515 struct sockaddr * __restrict asa;
1516 socklen_t * __restrict alen;
1520 struct sockaddr *sa;
1524 error = copyin(uap->alen, &len, sizeof (len));
1528 error = kern_getpeername(td, uap->fdes, &sa, &len);
1533 #ifdef COMPAT_OLDSOCK
1535 ((struct osockaddr *)sa)->sa_family = sa->sa_family;
1537 error = copyout(sa, uap->asa, (u_int)len);
1541 error = copyout(&len, uap->alen, sizeof(len));
/*
 * kern_getpeername() - requires the socket to be connected (or
 * confirming); fetches the peer address via pru_peeraddr and clamps the
 * length to the caller's buffer. The M_SONAME allocation is freed on
 * the failure path.
 */
1546 kern_getpeername(struct thread *td, int fd, struct sockaddr **sa,
1557 error = getsock(td->td_proc->p_fd, fd, &fp, NULL);
1561 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
1566 error = (*so->so_proto->pr_usrreqs->pru_peeraddr)(so, sa);
1572 len = MIN(*alen, (*sa)->sa_len);
1576 free(*sa, M_SONAME);
/* getpeername(2) entry point: non-compat variant of getpeername1(). */
1585 getpeername(td, uap)
1587 struct getpeername_args *uap;
1590 return (getpeername1(td, uap, 0));
/* ogetpeername(): 4.3BSD-compatible variant; casts the old args struct. */
1593 #ifdef COMPAT_OLDSOCK
1595 ogetpeername(td, uap)
1597 struct ogetpeername_args *uap;
1600 /* XXX uap should have type `getpeername_args *' to begin with. */
1601 return (getpeername1(td, (struct getpeername_args *)uap, 1));
1603 #endif /* COMPAT_OLDSOCK */
/*
 * sockargs() - copy a user buffer into a freshly allocated mbuf of the
 * given type (cluster-backed when it exceeds MLEN, capped at MCLBYTES).
 * For MT_SONAME the result is patched up: a zero sa_family with a small
 * sa_len is treated as an old 4.3BSD sockaddr (family stored in the
 * length byte, little-endian only), and sa_len is forced to buflen.
 * The <= 112 check is a historical unix-domain compat allowance.
 */
1606 sockargs(mp, buf, buflen, type)
1611 struct sockaddr *sa;
1615 if ((u_int)buflen > MLEN) {
1616 #ifdef COMPAT_OLDSOCK
1617 if (type == MT_SONAME && (u_int)buflen <= 112)
1618 buflen = MLEN; /* unix domain compat. hack */
1621 if ((u_int)buflen > MCLBYTES)
1624 m = m_get(M_TRYWAIT, type);
1627 if ((u_int)buflen > MLEN) {
1628 MCLGET(m, M_TRYWAIT);
1629 if ((m->m_flags & M_EXT) == 0) {
1635 error = copyin(buf, mtod(m, caddr_t), (u_int)buflen);
1640 if (type == MT_SONAME) {
1641 sa = mtod(m, struct sockaddr *);
1643 #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
1644 if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1645 sa->sa_family = sa->sa_len;
1647 sa->sa_len = buflen;
/*
 * getsockaddr() - copy a sockaddr of length `len' in from user space
 * into a malloc'd (M_SONAME) buffer. Rejects lengths larger than
 * SOCK_MAXADDRLEN (ENAMETOOLONG) or shorter than the fixed sockaddr
 * header. Applies the same old-4.3BSD sa_family fixup as sockargs().
 * Caller owns and must free the returned buffer.
 */
1654 getsockaddr(namp, uaddr, len)
1655 struct sockaddr **namp;
1659 struct sockaddr *sa;
1662 if (len > SOCK_MAXADDRLEN)
1663 return (ENAMETOOLONG);
1664 if (len < offsetof(struct sockaddr, sa_data[0]))
1666 MALLOC(sa, struct sockaddr *, len, M_SONAME, M_WAITOK);
1667 error = copyin(uaddr, sa, len);
1671 #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
1672 if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1673 sa->sa_family = sa->sa_len;
/*
 * sf_buf_mext() - external-storage free routine for sendfile mbufs:
 * unwire the page backing the sf_buf and, if it ended up unreferenced
 * and orphaned from its VM object, free it ourselves.
 */
1682 * Detach mapped page and release resources back to the system.
1685 sf_buf_mext(void *addr, void *args)
1689 m = sf_buf_page(args);
1691 vm_page_lock_queues();
1692 vm_page_unwire(m, 0);
1694 * Check for the object going away on us. This can
1695 * happen since we don't hold a reference to it.
1696 * If so, we're responsible for freeing the page.
1698 if (m->wire_count == 0 && m->object == NULL)
1700 vm_page_unlock_queues();
/* sendfile(2) entry point: non-compat variant of do_sendfile(). */
1706 * int sendfile(int fd, int s, off_t offset, size_t nbytes,
1707 * struct sf_hdtr *hdtr, off_t *sbytes, int flags)
1709 * Send a file specified by 'fd' and starting at 'offset' to a socket
1710 * specified by 's'. Send only 'nbytes' of the file or until EOF if nbytes ==
1711 * 0. Optionally add a header and/or trailer to the socket output. If
1712 * specified, write the total number of bytes sent into *sbytes.
1715 sendfile(struct thread *td, struct sendfile_args *uap)
1718 return (do_sendfile(td, uap, 0));
/*
 * do_sendfile() - copy in the optional sf_hdtr and convert its header
 * and trailer iovec arrays to kernel uios with copyinuio(), invoke
 * kern_sendfile(), then free both uios (allocated from M_IOV).
 */
1722 do_sendfile(struct thread *td, struct sendfile_args *uap, int compat)
1724 struct sf_hdtr hdtr;
1725 struct uio *hdr_uio, *trl_uio;
1728 hdr_uio = trl_uio = NULL;
1730 if (uap->hdtr != NULL) {
1731 error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
1734 if (hdtr.headers != NULL) {
1735 error = copyinuio(hdtr.headers, hdtr.hdr_cnt, &hdr_uio);
1739 if (hdtr.trailers != NULL) {
1740 error = copyinuio(hdtr.trailers, hdtr.trl_cnt, &trl_uio);
1747 error = kern_sendfile(td, uap, hdr_uio, trl_uio, compat);
1750 free(hdr_uio, M_IOV);
1752 free(trl_uio, M_IOV);
/*
 * freebsd4_sendfile(): FreeBSD 4.x compatible sendfile. Repacks the old
 * argument struct into a current sendfile_args and calls do_sendfile()
 * with compat = 1 (headers counted against nbytes, pre-5.0 semantics).
 */
1756 #ifdef COMPAT_FREEBSD4
1758 freebsd4_sendfile(struct thread *td, struct freebsd4_sendfile_args *uap)
1760 struct sendfile_args args;
1764 args.offset = uap->offset;
1765 args.nbytes = uap->nbytes;
1766 args.hdtr = uap->hdtr;
1767 args.sbytes = uap->sbytes;
1768 args.flags = uap->flags;
1770 return (do_sendfile(td, &args, 1));
1772 #endif /* COMPAT_FREEBSD4 */
1775 kern_sendfile(struct thread *td, struct sendfile_args *uap,
1776 struct uio *hdr_uio, struct uio *trl_uio, int compat)
1778 struct file *sock_fp;
1780 struct vm_object *obj = NULL;
1781 struct socket *so = NULL;
1782 struct mbuf *m = NULL;
1785 off_t off, xfsize, fsbytes = 0, sbytes = 0, rem = 0;
1786 int error, hdrlen = 0, mnw = 0;
1790 * The file descriptor must be a regular file and have a
1791 * backing VM object.
1792 * File offset must be positive. If it goes beyond EOF
1793 * we send only the header/trailer and no payload data.
1795 if ((error = fgetvp_read(td, uap->fd, &vp)) != 0)
1797 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
1798 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1802 * Temporarily increase the backing VM object's reference
1803 * count so that a forced reclamation of its vnode does not
1804 * immediately destroy it.
1806 VM_OBJECT_LOCK(obj);
1807 if ((obj->flags & OBJ_DEAD) == 0) {
1808 vm_object_reference_locked(obj);
1809 VM_OBJECT_UNLOCK(obj);
1811 VM_OBJECT_UNLOCK(obj);
1815 VOP_UNLOCK(vp, 0, td);
1816 VFS_UNLOCK_GIANT(vfslocked);
1821 if (uap->offset < 0) {
1827 * The socket must be a stream socket and connected.
1828 * Remember if it a blocking or non-blocking socket.
1830 if ((error = getsock(td->td_proc->p_fd, uap->s, &sock_fp,
1833 so = sock_fp->f_data;
1834 if (so->so_type != SOCK_STREAM) {
1838 if ((so->so_state & SS_ISCONNECTED) == 0) {
1843 * Do not wait on memory allocations but return ENOMEM for
1844 * caller to retry later.
1845 * XXX: Experimental.
1847 if (uap->flags & SF_MNOWAIT)
1852 error = mac_socket_check_send(td->td_ucred, so);
1858 /* If headers are specified copy them into mbufs. */
1859 if (hdr_uio != NULL) {
1860 hdr_uio->uio_td = td;
1861 hdr_uio->uio_rw = UIO_WRITE;
1862 if (hdr_uio->uio_resid > 0) {
1864 * In FBSD < 5.0 the nbytes to send also included
1865 * the header. If compat is specified subtract the
1866 * header size from nbytes.
1869 if (uap->nbytes > hdr_uio->uio_resid)
1870 uap->nbytes -= hdr_uio->uio_resid;
1874 m = m_uiotombuf(hdr_uio, (mnw ? M_NOWAIT : M_WAITOK),
1877 error = mnw ? EAGAIN : ENOBUFS;
1880 hdrlen = m_length(m, NULL);
1884 /* Protect against multiple writers to the socket. */
1885 (void) sblock(&so->so_snd, M_WAITOK);
1888 * Loop through the pages of the file, starting with the requested
1889 * offset. Get a file page (do I/O if necessary), map the file page
1890 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
1892 * This is done in two loops. The inner loop turns as many pages
1893 * as it can, up to available socket buffer space, without blocking
1894 * into mbufs to have it bulk delivered into the socket send buffer.
1895 * The outer loop checks the state and available space of the socket
1896 * and takes care of the overall progress.
1898 for (off = uap->offset, rem = uap->nbytes; ; ) {
1904 * Check the socket state for ongoing connection,
1905 * no errors and space in socket buffer.
1906 * If space is low allow for the remainder of the
1907 * file to be processed if it fits the socket buffer.
1908 * Otherwise block in waiting for sufficient space
1909 * to proceed, or if the socket is nonblocking, return
1910 * to userland with EAGAIN while reporting how far
1912 * We wait until the socket buffer has significant free
1913 * space to do bulk sends. This makes good use of file
1914 * system read ahead and allows packet segmentation
1915 * offloading hardware to take over lots of work. If
1916 * we were not careful here we would send off only one
1919 SOCKBUF_LOCK(&so->so_snd);
1920 if (so->so_snd.sb_lowat < so->so_snd.sb_hiwat / 2)
1921 so->so_snd.sb_lowat = so->so_snd.sb_hiwat / 2;
1923 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
1925 SOCKBUF_UNLOCK(&so->so_snd);
1927 } else if (so->so_error) {
1928 error = so->so_error;
1930 SOCKBUF_UNLOCK(&so->so_snd);
1933 space = sbspace(&so->so_snd);
1936 space < so->so_snd.sb_lowat)) {
1937 if (so->so_state & SS_NBIO) {
1938 SOCKBUF_UNLOCK(&so->so_snd);
1943 * sbwait drops the lock while sleeping.
1944 * When we loop back to retry_space the
1945 * state may have changed and we retest
1948 error = sbwait(&so->so_snd);
1950 * An error from sbwait usually indicates that we've
1951 * been interrupted by a signal. If we've sent anything
1952 * then return bytes sent, otherwise return the error.
1955 SOCKBUF_UNLOCK(&so->so_snd);
1960 SOCKBUF_UNLOCK(&so->so_snd);
1963 * Reduce space in the socket buffer by the size of
1964 * the header mbuf chain.
1965 * hdrlen is set to 0 after the first loop.
1970 * Loop and construct maximum sized mbuf chain to be bulk
1971 * dumped into socket buffer.
1973 while(space > loopbytes) {
1978 VM_OBJECT_LOCK(obj);
1980 * Calculate the amount to transfer.
1981 * Not to exceed a page, the EOF,
1982 * or the passed in nbytes.
1984 pgoff = (vm_offset_t)(off & PAGE_MASK);
1985 xfsize = omin(PAGE_SIZE - pgoff,
1986 obj->un_pager.vnp.vnp_size - uap->offset -
1987 fsbytes - loopbytes);
1989 rem = (uap->nbytes - fsbytes - loopbytes);
1991 rem = obj->un_pager.vnp.vnp_size -
1992 uap->offset - fsbytes - loopbytes;
1993 xfsize = omin(rem, xfsize);
1995 VM_OBJECT_UNLOCK(obj);
1996 done = 1; /* all data sent */
2000 * Don't overflow the send buffer.
2001 * Stop here and send out what we've
2004 if (space < loopbytes + xfsize) {
2005 VM_OBJECT_UNLOCK(obj);
2010 * Attempt to look up the page. Allocate
2011 * if not found or wait and loop if busy.
2013 pindex = OFF_TO_IDX(off);
2014 pg = vm_page_grab(obj, pindex, VM_ALLOC_NOBUSY |
2015 VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_RETRY);
2018 * Check if page is valid for what we need,
2019 * otherwise initiate I/O.
2020 * If we already turned some pages into mbufs,
2021 * send them off before we come here again and
2024 if (pg->valid && vm_page_is_valid(pg, pgoff, xfsize))
2025 VM_OBJECT_UNLOCK(obj);
2027 error = EAGAIN; /* send what we already got */
2028 else if (uap->flags & SF_NODISKIO)
2034 * Ensure that our page is still around
2035 * when the I/O completes.
2037 vm_page_io_start(pg);
2038 VM_OBJECT_UNLOCK(obj);
2041 * Get the page from backing store.
2043 bsize = vp->v_mount->mnt_stat.f_iosize;
2044 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
2045 vn_lock(vp, LK_SHARED | LK_RETRY, td);
2048 * XXXMAC: Because we don't have fp->f_cred
2049 * here, we pass in NOCRED. This is probably
2050 * wrong, but is consistent with our original
2053 error = vn_rdwr(UIO_READ, vp, NULL, MAXBSIZE,
2054 trunc_page(off), UIO_NOCOPY, IO_NODELOCKED |
2055 IO_VMIO | ((MAXBSIZE / bsize) << IO_SEQSHIFT),
2056 td->td_ucred, NOCRED, &resid, td);
2057 VOP_UNLOCK(vp, 0, td);
2058 VFS_UNLOCK_GIANT(vfslocked);
2059 VM_OBJECT_LOCK(obj);
2060 vm_page_io_finish(pg);
2062 VM_OBJECT_UNLOCK(obj);
2066 vm_page_lock_queues();
2067 vm_page_unwire(pg, 0);
2069 * See if anyone else might know about
2070 * this page. If not and it is not valid,
2073 if (pg->wire_count == 0 && pg->valid == 0 &&
2074 pg->busy == 0 && !(pg->oflags & VPO_BUSY) &&
2075 pg->hold_count == 0) {
2078 vm_page_unlock_queues();
2079 VM_OBJECT_UNLOCK(obj);
2080 if (error == EAGAIN)
2081 error = 0; /* not a real error */
2086 * Get a sendfile buf. We usually wait as long
2087 * as necessary, but this wait can be interrupted.
2089 if ((sf = sf_buf_alloc(pg,
2090 (mnw ? SFB_NOWAIT : SFB_CATCH))) == NULL) {
2091 mbstat.sf_allocfail++;
2092 vm_page_lock_queues();
2093 vm_page_unwire(pg, 0);
2095 * XXX: Not same check as above!?
2097 if (pg->wire_count == 0 && pg->object == NULL)
2099 vm_page_unlock_queues();
2100 error = (mnw ? EAGAIN : EINTR);
2105 * Get an mbuf and set it up as having
2108 m0 = m_get((mnw ? M_NOWAIT : M_WAITOK), MT_DATA);
2110 error = (mnw ? EAGAIN : ENOBUFS);
2111 sf_buf_mext((void *)sf_buf_kva(sf), sf);
2114 MEXTADD(m0, sf_buf_kva(sf), PAGE_SIZE, sf_buf_mext,
2115 sf, M_RDONLY, EXT_SFBUF);
2116 m0->m_data = (char *)sf_buf_kva(sf) + pgoff;
2119 /* Append to mbuf chain. */
2125 /* Keep track of bits processed. */
2126 loopbytes += xfsize;
2130 /* Add the buffer chain to the socket buffer. */
2134 mlen = m_length(m, NULL);
2135 SOCKBUF_LOCK(&so->so_snd);
2136 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
2138 SOCKBUF_UNLOCK(&so->so_snd);
2141 SOCKBUF_UNLOCK(&so->so_snd);
2142 /* Avoid error aliasing. */
2143 err = (*so->so_proto->pr_usrreqs->pru_send)
2144 (so, 0, m, NULL, NULL, td);
2147 * We need two counters to get the
2148 * file offset and nbytes to send
2150 * - sbytes contains the total amount
2151 * of bytes sent, including headers.
2152 * - fsbytes contains the total amount
2153 * of bytes sent from the file.
2161 } else if (error == 0)
2163 m = NULL; /* pru_send always consumes */
2166 /* Quit outer loop on error or when we're done. */
2172 * Send trailers. Wimp out and use writev(2).
2174 if (trl_uio != NULL) {
2175 error = kern_writev(td, uap->s, trl_uio);
2178 sbytes += td->td_retval[0];
2182 sbunlock(&so->so_snd);
2185 * If there was no error we have to clear td->td_retval[0]
2186 * because it may have been set by writev.
2189 td->td_retval[0] = 0;
2191 if (uap->sbytes != NULL) {
2192 copyout(&sbytes, uap->sbytes, sizeof(off_t));
2195 vm_object_deallocate(obj);
2197 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
2199 VFS_UNLOCK_GIANT(vfslocked);
2206 if (error == ERESTART)
2214 * Functionality only compiled in if SCTP is defined in the kernel Makefile,
2215 * otherwise all return EOPNOTSUPP.
2216 * XXX: We should make this loadable one day.
/*
 * sctp_peeloff(2): detach ("peel off") one SCTP association, identified by
 * uap->name, from a one-to-many SCTP socket uap->sd into its own socket
 * with a freshly allocated file descriptor (returned in td->td_retval[0]).
 *
 * NOTE(review): this is an elided listing -- the embedded numbers are the
 * original file's line numbers, and intermediate lines (the `int` return
 * type, braces, error/goto paths, and the #if SCTP / #else split) are not
 * shown. Comments below only describe the visible statements.
 */
2219 sctp_peeloff(td, uap)
2221 struct sctp_peeloff_args /* {
2227 struct filedesc *fdp;
2228 struct file *nfp = NULL;
2230 struct socket *head, *so;
/* Hold the listening ("head") socket and ask SCTP whether the named
 * association exists and may be peeled off. */
2234 fdp = td->td_proc->p_fd;
2235 error = fgetsock(td, uap->sd, &head, &fflag);
2238 error = sctp_can_peel_off(head, (sctp_assoc_t)uap->name);
2242 * At this point we know we do have an assoc to pull
2243 * we proceed to get the fd setup. This may block
/* Allocate the new descriptor + struct file, publish the fd to the
 * caller, and create a companion socket off the head. */
2247 error = falloc(td, &nfp, &fd);
2250 td->td_retval[0] = fd;
2252 so = sonewconn(head, SS_ISCONNECTED);
2256 * Before changing the flags on the socket, we have to bump the
2257 * reference count. Otherwise, if the protocol calls sofree(),
2258 * the socket will be released due to a zero refcount.
2261 soref(so); /* file descriptor reference */
/* Pull the new socket off the head's completion queue; inherit the
 * head's non-blocking mode and clear the no-fd / on-queue state bits. */
2266 TAILQ_REMOVE(&head->so_comp, so, so_list);
2268 so->so_state |= (head->so_state & SS_NBIO);
2269 so->so_state &= ~SS_NOFDREF;
2270 so->so_qstate &= ~SQ_COMP;
/* Wire the new socket into the new struct file and perform the actual
 * protocol-level peel-off. */
2275 nfp->f_flag = fflag;
2276 nfp->f_type = DTYPE_SOCKET;
2277 nfp->f_ops = &socketops;
2279 error = sctp_do_peeloff(head, so, (sctp_assoc_t)uap->name);
/* Propagate SIGIO/async ownership from the head socket, if set. */
2282 if (head->so_sigio != NULL)
2283 fsetown(fgetown(&head->so_sigio), &so->so_sigio);
2287 * close the new descriptor, assuming someone hasn't ripped it
2288 * out from under us.
/* Error path (presumably, per the comment above): undo the fd setup. */
2291 fdclose(fdp, nfp, fd, td);
2294 * Release explicitly held references before returning.
/* #else branch: kernel built without "options SCTP" -- unsupported. */
2303 return (EOPNOTSUPP);
/*
 * sctp_generic_sendmsg(2): send a single buffer (uap->msg / uap->mlen) on
 * SCTP socket uap->sd, with optional destination address (uap->to) and
 * optional per-message struct sctp_sndrcvinfo (uap->sinfo). Returns the
 * number of bytes sent in td->td_retval[0].
 *
 * NOTE(review): elided listing -- embedded numbers are original line
 * numbers; error checks, braces, and the #if SCTP / #else split between
 * consecutive visible lines are not shown.
 */
2308 sctp_generic_sendmsg (td, uap)
2310 struct sctp_generic_sendmsg_args /* {
2316 struct sctp_sndrcvinfo *sinfo,
2321 struct sctp_sndrcvinfo sinfo, *u_sinfo = NULL;
2323 struct file *fp = NULL;
2324 int use_rcvinfo = 1;
2326 struct sockaddr *to = NULL;
2328 struct uio *ktruio = NULL;
2331 struct iovec iov[1];
/* Copy in the optional sndrcvinfo and destination address, then hold
 * the socket's file. */
2334 error = copyin(uap->sinfo, &sinfo, sizeof (sinfo));
2340 error = getsockaddr(&to, uap->to, uap->tolen);
2347 error = getsock(td->td_proc->p_fd, uap->sd, &fp, NULL);
/* Build a single-element userspace iovec/uio over the caller's buffer. */
2351 iov[0].iov_base = uap->msg;
2352 iov[0].iov_len = uap->mlen;
2354 so = (struct socket *)fp->f_data;
/* MAC framework permission check (presumably under #ifdef MAC). */
2357 error = mac_socket_check_send(td->td_ucred, so);
2364 auio.uio_iovcnt = 1;
2365 auio.uio_segflg = UIO_USERSPACE;
2366 auio.uio_rw = UIO_WRITE;
2368 auio.uio_offset = 0; /* XXX */
2370 len = auio.uio_resid = uap->mlen;
/* Hand off to the SCTP send path proper. */
2371 error = sctp_lower_sosend(so, to, &auio,
2372 (struct mbuf *)NULL, (struct mbuf *)NULL,
2373 uap->flags, use_rcvinfo, u_sinfo, td);
/* If some data went out, suppress interruption-style errors so the
 * caller sees the partial count (the suppression itself is elided). */
2375 if (auio.uio_resid != len && (error == ERESTART ||
2376 error == EINTR || error == EWOULDBLOCK))
2378 /* Generation of SIGPIPE can be controlled per socket. */
2379 if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
2380 !(uap->flags & MSG_NOSIGNAL)) {
2381 PROC_LOCK(td->td_proc);
2382 psignal(td->td_proc, SIGPIPE);
2383 PROC_UNLOCK(td->td_proc);
/* Return the number of bytes actually consumed from the buffer. */
2387 td->td_retval[0] = len - auio.uio_resid;
/* ktrace: record the I/O if a genio trace uio was cloned earlier. */
2389 if (ktruio != NULL) {
2390 ktruio->uio_resid = td->td_retval[0];
2391 ktrgenio(uap->sd, UIO_WRITE, ktruio, error);
/* #else branch: kernel built without "options SCTP" -- unsupported. */
2402 return (EOPNOTSUPP);
/*
 * sctp_generic_sendmsg_iov(2): scatter/gather variant of
 * sctp_generic_sendmsg() -- sends uap->iovlen user iovecs on SCTP socket
 * uap->sd, with optional destination address and sndrcvinfo. Returns the
 * number of bytes sent in td->td_retval[0].
 *
 * NOTE(review): elided listing -- embedded numbers are original line
 * numbers; error checks, braces, and the #if SCTP / #else split between
 * consecutive visible lines are not shown.
 */
2407 sctp_generic_sendmsg_iov(td, uap)
2409 struct sctp_generic_sendmsg_iov_args /* {
2415 struct sctp_sndrcvinfo *sinfo,
2420 struct sctp_sndrcvinfo sinfo, *u_sinfo = NULL;
2422 struct file *fp = NULL;
2423 int use_rcvinfo = 1;
2424 int error=0, len, i;
2425 struct sockaddr *to = NULL;
2427 struct uio *ktruio = NULL;
2430 struct iovec *iov, *tiov;
/* Copy in the optional sndrcvinfo and destination, hold the socket's
 * file, and copy the caller's iovec array into kernel memory. */
2433 error = copyin(uap->sinfo, &sinfo, sizeof (sinfo));
2439 error = getsockaddr(&to, uap->to, uap->tolen);
2446 error = getsock(td->td_proc->p_fd, uap->sd, &fp, NULL);
2450 error = copyiniov(uap->iov, uap->iovlen, &iov, EMSGSIZE);
2454 so = (struct socket *)fp->f_data;
/* MAC framework permission check (presumably under #ifdef MAC). */
2457 error = mac_socket_check_send(td->td_ucred, so);
2464 auio.uio_iovcnt = uap->iovlen;
2465 auio.uio_segflg = UIO_USERSPACE;
2466 auio.uio_rw = UIO_WRITE;
2468 auio.uio_offset = 0; /* XXX */
/* Sum the iovec lengths into uio_resid; a negative total means the
 * caller's lengths overflowed (EINVAL path elided). */
2471 for (i = 0; i <uap->iovlen; i++, tiov++) {
2472 if ((auio.uio_resid += tiov->iov_len) < 0) {
2477 len = auio.uio_resid;
/* Hand off to the SCTP send path proper. */
2478 error = sctp_lower_sosend(so, to, &auio,
2479 (struct mbuf *)NULL, (struct mbuf *)NULL,
2480 uap->flags, use_rcvinfo, u_sinfo, td);
/* If some data went out, suppress interruption-style errors so the
 * caller sees the partial count (the suppression itself is elided). */
2482 if (auio.uio_resid != len && (error == ERESTART ||
2483 error == EINTR || error == EWOULDBLOCK))
2485 /* Generation of SIGPIPE can be controlled per socket */
2486 if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
2487 !(uap->flags & MSG_NOSIGNAL)) {
2488 PROC_LOCK(td->td_proc);
2489 psignal(td->td_proc, SIGPIPE);
2490 PROC_UNLOCK(td->td_proc);
/* Return the number of bytes actually consumed from the iovecs. */
2494 td->td_retval[0] = len - auio.uio_resid;
/* ktrace: record the I/O if a genio trace uio was cloned earlier. */
2496 if (ktruio != NULL) {
2497 ktruio->uio_resid = td->td_retval[0];
2498 ktrgenio(uap->sd, UIO_WRITE, ktruio, error);
/* #else branch: kernel built without "options SCTP" -- unsupported. */
2511 return (EOPNOTSUPP);
/*
 * sctp_generic_recvmsg(2): receive into uap->iovlen user iovecs from SCTP
 * socket uap->sd. Optionally copies out the peer address (uap->from /
 * uap->fromlenaddr), the received sctp_sndrcvinfo (uap->sinfo), and the
 * message flags (uap->msg_flags). Byte count goes in td->td_retval[0].
 *
 * NOTE(review): elided listing -- embedded numbers are original line
 * numbers; error checks, braces, and the #if SCTP / #else split between
 * consecutive visible lines are not shown.
 */
2516 sctp_generic_recvmsg(td, uap)
2518 struct sctp_generic_recvmsg_args /* {
2522 struct sockaddr *from,
2523 __socklen_t *fromlenaddr,
2524 struct sctp_sndrcvinfo *sinfo,
/* On-stack buffer that backs the returned source address (see line
 * 2594 below); avoids a separate allocation. */
2529 u_int8_t sockbufstore[256];
2531 struct iovec *iov, *tiov;
2532 struct sctp_sndrcvinfo sinfo;
2534 struct file *fp = NULL;
2535 struct sockaddr *fromsa;
2537 int len, i, msg_flags;
2540 struct uio *ktruio = NULL;
/* Hold the socket's file and copy the iovec array into the kernel. */
2542 error = getsock(td->td_proc->p_fd, uap->sd, &fp, NULL);
2546 error = copyiniov(uap->iov, uap->iovlen, &iov, EMSGSIZE);
/* MAC framework permission check (presumably under #ifdef MAC). */
2554 error = mac_socket_check_receive(td->td_ucred, so);
/* Copy in the caller's address-length and msg_flags values, when the
 * corresponding user pointers were supplied. */
2562 if (uap->fromlenaddr) {
2563 error = copyin(uap->fromlenaddr,
2564 &fromlen, sizeof (fromlen));
2571 if(uap->msg_flags) {
2572 error = copyin(uap->msg_flags, &msg_flags, sizeof (int));
2580 auio.uio_iovcnt = uap->iovlen;
2581 auio.uio_segflg = UIO_USERSPACE;
2582 auio.uio_rw = UIO_READ;
2584 auio.uio_offset = 0; /* XXX */
/* Sum the iovec lengths into uio_resid; a negative total means the
 * caller's lengths overflowed (EINVAL path elided). */
2587 for (i = 0; i <uap->iovlen; i++, tiov++) {
2588 if ((auio.uio_resid += tiov->iov_len) < 0) {
2593 len = auio.uio_resid;
2594 fromsa = (struct sockaddr *)sockbufstore;
/* ktrace: clone the uio now so the trace reflects the pre-receive state. */
2597 if (KTRPOINT(td, KTR_GENIO))
2598 ktruio = cloneuio(&auio);
/* Hand off to the SCTP receive path proper. */
2600 error = sctp_sorecvmsg(so, &auio, (struct mbuf **)NULL,
2601 fromsa, fromlen, &msg_flags,
2602 (struct sctp_sndrcvinfo *)&sinfo, 1);
/* If some data arrived, suppress interruption-style errors so the
 * caller sees the partial count (the suppression itself is elided). */
2604 if (auio.uio_resid != (int)len && (error == ERESTART ||
2605 error == EINTR || error == EWOULDBLOCK))
/* Copy the received sndrcvinfo back to the caller (guard elided). */
2609 error = copyout(&sinfo, uap->sinfo, sizeof (sinfo));
2612 if (ktruio != NULL) {
2613 ktruio->uio_resid = (int)len - auio.uio_resid;
2614 ktrgenio(uap->sd, UIO_READ, ktruio, error);
/* Return the number of bytes actually received. */
2619 td->td_retval[0] = (int)len - auio.uio_resid;
/* Copy out the source address, truncated to the caller's buffer length
 * (`len` is reused here as the address length), then its length. */
2621 if (fromlen && uap->from) {
2623 if (len <= 0 || fromsa == 0)
2626 len = MIN(len, fromsa->sa_len);
2627 error = copyout(fromsa, uap->from, (unsigned)len);
2631 error = copyout(&len, uap->fromlenaddr, sizeof (socklen_t));
/* Copy out the resulting message flags, when requested. */
2636 if (uap->msg_flags) {
2637 error = copyout(&msg_flags, uap->msg_flags, sizeof (int));
/* #else branch: kernel built without "options SCTP" -- unsupported. */
2650 return (EOPNOTSUPP);