2 * Copyright (c) 1982, 1986, 1989, 1990, 1993
3 * The Regents of the University of California. All rights reserved.
5 * sendfile(2) and related extensions:
6 * Copyright (c) 1998, David Greenman. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * @(#)uipc_syscalls.c 8.4 (Berkeley) 2/21/94
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include "opt_capsicum.h"
40 #include "opt_inet6.h"
42 #include "opt_compat.h"
43 #include "opt_ktrace.h"
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/capability.h>
48 #include <sys/kernel.h>
50 #include <sys/mutex.h>
51 #include <sys/sysproto.h>
52 #include <sys/malloc.h>
53 #include <sys/filedesc.h>
54 #include <sys/event.h>
56 #include <sys/fcntl.h>
58 #include <sys/filio.h>
60 #include <sys/mount.h>
62 #include <sys/protosw.h>
63 #include <sys/sf_buf.h>
64 #include <sys/sysent.h>
65 #include <sys/socket.h>
66 #include <sys/socketvar.h>
67 #include <sys/signalvar.h>
68 #include <sys/syscallsubr.h>
69 #include <sys/sysctl.h>
71 #include <sys/vnode.h>
73 #include <sys/ktrace.h>
75 #ifdef COMPAT_FREEBSD32
76 #include <compat/freebsd32/freebsd32_util.h>
81 #include <security/audit/audit.h>
82 #include <security/mac/mac_framework.h>
85 #include <vm/vm_param.h>
86 #include <vm/vm_object.h>
87 #include <vm/vm_page.h>
88 #include <vm/vm_pageout.h>
89 #include <vm/vm_kern.h>
90 #include <vm/vm_extern.h>
92 #if defined(INET) || defined(INET6)
94 #include <netinet/sctp.h>
95 #include <netinet/sctp_peeloff.h>
97 #endif /* INET || INET6 */
99 static int sendit(struct thread *td, int s, struct msghdr *mp, int flags);
100 static int recvit(struct thread *td, int s, struct msghdr *mp, void *namelenp);
102 static int accept1(struct thread *td, struct accept_args *uap, int compat);
103 static int do_sendfile(struct thread *td, struct sendfile_args *uap, int compat);
104 static int getsockname1(struct thread *td, struct getsockname_args *uap,
106 static int getpeername1(struct thread *td, struct getpeername_args *uap,
110 * NSFBUFS-related variables and associated sysctls
116 SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufs, CTLFLAG_RDTUN, &nsfbufs, 0,
117 "Maximum number of sendfile(2) sf_bufs available");
118 SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufspeak, CTLFLAG_RD, &nsfbufspeak, 0,
119 "Number of sendfile(2) sf_bufs at peak usage");
120 SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufsused, CTLFLAG_RD, &nsfbufsused, 0,
121 "Number of sendfile(2) sf_bufs in use");
124 * Convert a user file descriptor to a kernel file entry and check that, if
125 * it is a capability, the right rights are present. A reference on the file
126 * entry is held upon returning.
/*
 * getsock_cap(): resolve a descriptor to its struct file, unwrap a
 * Capsicum capability (checking `rights'), and reject anything that is
 * not a socket.  On success a reference on the file entry is held.
 * NOTE(review): interior lines (returns/braces) are missing from this
 * excerpt; code left byte-identical.
 */
129 getsock_cap(struct filedesc *fdp, int fd, cap_rights_t rights,
130 struct file **fpp, u_int *fflagp)
134 struct file *fp_fromcap;
139 if ((fdp == NULL) || ((fp = fget_unlocked(fdp, fd)) == NULL))
143 * If the file descriptor is for a capability, test rights and use
144 * the file descriptor referenced by the capability.
146 error = cap_funwrap(fp, rights, &fp_fromcap);
/* cap_funwrap() failure: drop the base reference before returning. */
148 fdrop(fp, curthread);
151 if (fp != fp_fromcap) {
153 fdrop(fp, curthread);
156 #endif /* CAPABILITIES */
/* Only sockets pass; everything else is rejected here. */
157 if (fp->f_type != DTYPE_SOCKET) {
158 fdrop(fp, curthread);
162 *fflagp = fp->f_flag;
168 * System call interface to the socket abstraction.
170 #if defined(COMPAT_43)
171 #define COMPAT_OLDSOCK
/*
 * socket(2): MAC create check, allocate a descriptor via falloc(),
 * create the socket with socreate(), and attach it with finit().
 * NOTE(review): the function header and several lines are missing from
 * this excerpt; comments are hedged accordingly.
 */
177 struct socket_args /* {
183 struct filedesc *fdp;
188 AUDIT_ARG_SOCKET(uap->domain, uap->type, uap->protocol);
190 error = mac_socket_check_create(td->td_ucred, uap->domain, uap->type,
195 fdp = td->td_proc->p_fd;
196 error = falloc(td, &fp, &fd, 0);
199 /* An extra reference on `fp' has been held for us by falloc(). */
200 error = socreate(uap->domain, &so, uap->type, uap->protocol,
/* socreate() failed: close the freshly allocated descriptor. */
203 fdclose(fdp, fp, fd, td);
205 finit(fp, FREAD | FWRITE, DTYPE_SOCKET, so, &socketops);
206 td->td_retval[0] = fd;
/*
 * bind(2): copy the sockaddr in from userland, then defer to
 * kern_bind().
 */
216 struct bind_args /* {
225 if ((error = getsockaddr(&sa, uap->name, uap->namelen)) != 0)
228 error = kern_bind(td, uap->s, sa);
/*
 * kern_bind(): resolve fd (CAP_BIND required), run ktrace/MAC checks,
 * then sobind().  NOTE(review): excerpt is missing interior lines.
 */
234 kern_bind(td, fd, sa)
244 error = getsock_cap(td->td_proc->p_fd, fd, CAP_BIND, &fp, NULL);
249 if (KTRPOINT(td, KTR_STRUCT))
253 error = mac_socket_check_bind(td->td_ucred, so, sa);
256 error = sobind(so, sa, td);
/*
 * listen(2): audit the fd, resolve it with CAP_LISTEN, MAC-check,
 * then solisten() with the caller-supplied backlog.
 */
265 struct listen_args /* {
274 AUDIT_ARG_FD(uap->s);
275 error = getsock_cap(td->td_proc->p_fd, uap->s, CAP_LISTEN, &fp, NULL);
279 error = mac_socket_check_listen(td->td_ucred, so);
282 error = solisten(so, uap->backlog, td);
/*
 * accept1(): shared body for accept(2) and the 4.3BSD-compatible
 * oaccept(); `compat' selects the old osockaddr layout on copyout.
 * NOTE(review): excerpt is missing interior lines; code byte-identical.
 */
292 accept1(td, uap, compat)
294 struct accept_args /* {
296 struct sockaddr * __restrict name;
297 socklen_t * __restrict anamelen;
301 struct sockaddr *name;
/* Caller passed no name buffer: skip the address copyout entirely. */
306 if (uap->name == NULL)
307 return (kern_accept(td, uap->s, NULL, NULL, NULL));
309 error = copyin(uap->anamelen, &namelen, sizeof (namelen));
313 error = kern_accept(td, uap->s, &name, &namelen, &fp);
316 * return a namelen of zero for older code which might
317 * ignore the return value from accept.
320 (void) copyout(&namelen,
321 uap->anamelen, sizeof(*uap->anamelen));
325 if (error == 0 && name != NULL) {
326 #ifdef COMPAT_OLDSOCK
328 ((struct osockaddr *)name)->sa_family =
331 error = copyout(name, uap->name, namelen);
334 error = copyout(&namelen, uap->anamelen,
/* Copyout failed: close the descriptor kern_accept() installed. */
337 fdclose(td->td_proc->p_fd, fp, td->td_retval[0], td);
339 free(name, M_SONAME);
/*
 * kern_accept(): core accept path.  Waits (unless non-blocking) for a
 * completed connection on the listening socket's so_comp queue,
 * detaches it, installs it in a new descriptor, and optionally returns
 * the peer address and a referenced file entry to the caller.
 * NOTE(review): many interior lines (locking, error paths) are missing
 * from this excerpt; code left byte-identical.
 */
344 kern_accept(struct thread *td, int s, struct sockaddr **name,
345 socklen_t *namelen, struct file **fp)
347 struct filedesc *fdp;
348 struct file *headfp, *nfp = NULL;
349 struct sockaddr *sa = NULL;
351 struct socket *head, *so;
364 fdp = td->td_proc->p_fd;
365 error = getsock_cap(fdp, s, CAP_ACCEPT, &headfp, &fflag);
368 head = headfp->f_data;
/* Socket must actually be listening (SO_ACCEPTCONN set). */
369 if ((head->so_options & SO_ACCEPTCONN) == 0) {
374 error = mac_socket_check_accept(td->td_ucred, head);
378 error = falloc(td, &nfp, &fd, 0);
/* Non-blocking with an empty completion queue: bail out early. */
382 if ((head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->so_comp)) {
387 while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0) {
388 if (head->so_rcv.sb_state & SBS_CANTRCVMORE) {
389 head->so_error = ECONNABORTED;
392 error = msleep(&head->so_timeo, &accept_mtx, PSOCK | PCATCH,
399 if (head->so_error) {
400 error = head->so_error;
405 so = TAILQ_FIRST(&head->so_comp);
406 KASSERT(!(so->so_qstate & SQ_INCOMP), ("accept1: so SQ_INCOMP"));
407 KASSERT(so->so_qstate & SQ_COMP, ("accept1: so not SQ_COMP"));
410 * Before changing the flags on the socket, we have to bump the
411 * reference count.  Otherwise, if the protocol calls sofree(),
412 * the socket will be released due to a zero refcount.
414 SOCK_LOCK(so); /* soref() and so_state update */
415 soref(so); /* file descriptor reference */
417 TAILQ_REMOVE(&head->so_comp, so, so_list);
/* Accepted socket inherits the listener's non-blocking state. */
419 so->so_state |= (head->so_state & SS_NBIO);
420 so->so_qstate &= ~SQ_COMP;
426 /* An extra reference on `nfp' has been held for us by falloc(). */
427 td->td_retval[0] = fd;
429 /* connection has been removed from the listen queue */
430 KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0);
432 pgid = fgetown(&head->so_sigio);
434 fsetown(pgid, &so->so_sigio);
436 finit(nfp, fflag, DTYPE_SOCKET, so, &socketops);
437 /* Sync socket nonblocking/async state with file flags */
438 tmp = fflag & FNONBLOCK;
439 (void) fo_ioctl(nfp, FIONBIO, &tmp, td->td_ucred, td);
440 tmp = fflag & FASYNC;
441 (void) fo_ioctl(nfp, FIOASYNC, &tmp, td->td_ucred, td);
443 error = soaccept(so, &sa);
446 * return a namelen of zero for older code which might
447 * ignore the return value from accept.
459 /* check sa_len before it is destroyed */
460 if (*namelen > sa->sa_len)
461 *namelen = sa->sa_len;
463 if (KTRPOINT(td, KTR_STRUCT))
474 * close the new descriptor, assuming someone hasn't ripped it
478 fdclose(fdp, nfp, fd, td);
481 * Release explicitly held references before returning.  We return
482 * a reference on nfp to the caller on success if they request it.
/* sys_accept(): new-style accept(2) entry point (compat == 0). */
501 struct accept_args *uap;
504 return (accept1(td, uap, 0));
507 #ifdef COMPAT_OLDSOCK
/* oaccept(): 4.3BSD-compatible accept using the old osockaddr layout. */
511 struct accept_args *uap;
514 return (accept1(td, uap, 1));
516 #endif /* COMPAT_OLDSOCK */
/*
 * connect(2): copy in the target sockaddr and defer to kern_connect().
 */
522 struct connect_args /* {
531 error = getsockaddr(&sa, uap->name, uap->namelen);
535 error = kern_connect(td, uap->s, sa);
/*
 * kern_connect(): start the connection with soconnect() and, for
 * blocking sockets, sleep until it completes or fails.
 * NOTE(review): excerpt is missing interior lines (locking/returns).
 */
542 kern_connect(td, fd, sa)
553 error = getsock_cap(td->td_proc->p_fd, fd, CAP_CONNECT, &fp, NULL);
/* A connect already in progress is an error (EALREADY-style path). */
557 if (so->so_state & SS_ISCONNECTING) {
562 if (KTRPOINT(td, KTR_STRUCT))
566 error = mac_socket_check_connect(td->td_ucred, so, sa);
570 error = soconnect(so, sa, td);
/* Non-blocking: return immediately while the connect is in flight. */
573 if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
578 while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
579 error = msleep(&so->so_timeo, SOCK_MTX(so), PSOCK | PCATCH,
582 if (error == EINTR || error == ERESTART)
588 error = so->so_error;
594 so->so_state &= ~SS_ISCONNECTING;
595 if (error == ERESTART)
/*
 * kern_socketpair(): create two sockets, allocate descriptors for
 * both, and connect them with soconnect2() (both directions for
 * datagram sockets, whose connection state is asymmetric).
 * NOTE(review): excerpt is missing interior lines; code byte-identical.
 */
603 kern_socketpair(struct thread *td, int domain, int type, int protocol,
606 struct filedesc *fdp = td->td_proc->p_fd;
607 struct file *fp1, *fp2;
608 struct socket *so1, *so2;
611 AUDIT_ARG_SOCKET(domain, type, protocol);
613 /* We might want to have a separate check for socket pairs. */
614 error = mac_socket_check_create(td->td_ucred, domain, type,
619 error = socreate(domain, &so1, type, protocol, td->td_ucred, td);
622 error = socreate(domain, &so2, type, protocol, td->td_ucred, td);
625 /* On success extra reference to `fp1' and 'fp2' is set by falloc. */
626 error = falloc(td, &fp1, &fd, 0);
630 fp1->f_data = so1; /* so1 already has ref count */
631 error = falloc(td, &fp2, &fd, 0);
634 fp2->f_data = so2; /* so2 already has ref count */
636 error = soconnect2(so1, so2);
639 if (type == SOCK_DGRAM) {
641 * Datagram socket connection is asymmetric.
643 error = soconnect2(so2, so1);
647 finit(fp1, FREAD | FWRITE, DTYPE_SOCKET, fp1->f_data, &socketops);
648 finit(fp2, FREAD | FWRITE, DTYPE_SOCKET, fp2->f_data, &socketops);
/* Error unwind: close both descriptors in reverse order. */
653 fdclose(fdp, fp2, rsv[1], td);
656 fdclose(fdp, fp1, rsv[0], td);
/*
 * socketpair(2): wrapper that copies the two descriptors out to
 * userland, closing them again if the copyout fails.
 */
668 sys_socketpair(struct thread *td, struct socketpair_args *uap)
672 error = kern_socketpair(td, uap->domain, uap->type,
676 error = copyout(sv, uap->rsv, 2 * sizeof(int));
678 (void)kern_close(td, sv[0]);
679 (void)kern_close(td, sv[1]);
/*
 * sendit(): userland-msghdr front end for kern_sendit().  Copies in
 * the destination address and control data; in capability mode a
 * msg_name (explicit destination) is disallowed.
 * NOTE(review): excerpt is missing interior lines; code byte-identical.
 */
685 sendit(td, s, mp, flags)
691 struct mbuf *control;
695 #ifdef CAPABILITY_MODE
696 if (IN_CAPABILITY_MODE(td) && (mp->msg_name != NULL))
700 if (mp->msg_name != NULL) {
701 error = getsockaddr(&to, mp->msg_name, mp->msg_namelen);
711 if (mp->msg_control) {
/* Control data must be at least one cmsghdr (old-style exempted). */
712 if (mp->msg_controllen < sizeof(struct cmsghdr)
713 #ifdef COMPAT_OLDSOCK
714 && mp->msg_flags != MSG_COMPAT
720 error = sockargs(&control, mp->msg_control,
721 mp->msg_controllen, MT_CONTROL);
724 #ifdef COMPAT_OLDSOCK
/* Old access-rights format: synthesize a SCM_RIGHTS cmsghdr. */
725 if (mp->msg_flags == MSG_COMPAT) {
728 M_PREPEND(control, sizeof(*cm), M_WAIT);
729 cm = mtod(control, struct cmsghdr *);
730 cm->cmsg_len = control->m_len;
731 cm->cmsg_level = SOL_SOCKET;
732 cm->cmsg_type = SCM_RIGHTS;
739 error = kern_sendit(td, s, mp, flags, control, UIO_USERSPACE);
/*
 * kern_sendit(): build a uio from the msghdr iovecs and hand it to
 * sosend().  CAP_CONNECT is additionally required when an explicit
 * destination address is supplied.  Delivers SIGPIPE on EPIPE unless
 * suppressed per-socket or per-call.
 * NOTE(review): excerpt is missing interior lines; code byte-identical.
 */
748 kern_sendit(td, s, mp, flags, control, segflg)
753 struct mbuf *control;
764 struct uio *ktruio = NULL;
769 if (mp->msg_name != NULL)
770 rights |= CAP_CONNECT;
771 error = getsock_cap(td->td_proc->p_fd, s, rights, &fp, NULL);
774 so = (struct socket *)fp->f_data;
777 if (mp->msg_name != NULL && KTRPOINT(td, KTR_STRUCT))
778 ktrsockaddr(mp->msg_name);
781 if (mp->msg_name != NULL) {
782 error = mac_socket_check_connect(td->td_ucred, so,
787 error = mac_socket_check_send(td->td_ucred, so);
792 auio.uio_iov = mp->msg_iov;
793 auio.uio_iovcnt = mp->msg_iovlen;
794 auio.uio_segflg = segflg;
795 auio.uio_rw = UIO_WRITE;
797 auio.uio_offset = 0; /* XXX */
/* Guard against iovec length totals overflowing the resid counter. */
800 for (i = 0; i < mp->msg_iovlen; i++, iov++) {
801 if ((auio.uio_resid += iov->iov_len) < 0) {
807 if (KTRPOINT(td, KTR_GENIO))
808 ktruio = cloneuio(&auio);
810 len = auio.uio_resid;
811 error = sosend(so, mp->msg_name, &auio, 0, control, flags, td);
/* A partial send interrupted by a signal still reports bytes sent. */
813 if (auio.uio_resid != len && (error == ERESTART ||
814 error == EINTR || error == EWOULDBLOCK))
816 /* Generation of SIGPIPE can be controlled per socket */
817 if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
818 !(flags & MSG_NOSIGNAL)) {
819 PROC_LOCK(td->td_proc);
820 tdsignal(td, SIGPIPE);
821 PROC_UNLOCK(td->td_proc);
825 td->td_retval[0] = len - auio.uio_resid;
827 if (ktruio != NULL) {
828 ktruio->uio_resid = td->td_retval[0];
829 ktrgenio(s, UIO_WRITE, ktruio, error);
/*
 * sendto(2): single-buffer wrapper that builds a msghdr and calls
 * sendit().  NOTE(review): excerpt missing interior lines.
 */
840 struct sendto_args /* {
853 msg.msg_name = uap->to;
854 msg.msg_namelen = uap->tolen;
858 #ifdef COMPAT_OLDSOCK
861 aiov.iov_base = uap->buf;
862 aiov.iov_len = uap->len;
863 error = sendit(td, uap->s, &msg, uap->flags);
867 #ifdef COMPAT_OLDSOCK
/* osend(): old 4.3BSD send() — no destination address. */
871 struct osend_args /* {
886 aiov.iov_base = uap->buf;
887 aiov.iov_len = uap->len;
890 error = sendit(td, uap->s, &msg, uap->flags);
/* osendmsg(): old msghdr layout; marks the message MSG_COMPAT. */
897 struct osendmsg_args /* {
907 error = copyin(uap->msg, &msg, sizeof (struct omsghdr));
910 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
914 msg.msg_flags = MSG_COMPAT;
915 error = sendit(td, uap->s, &msg, uap->flags);
/* sendmsg(2): copy in the msghdr and iovecs, then sendit(). */
924 struct sendmsg_args /* {
934 error = copyin(uap->msg, &msg, sizeof (msg));
937 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
941 #ifdef COMPAT_OLDSOCK
944 error = sendit(td, uap->s, &msg, uap->flags);
/*
 * kern_recvit(): core receive path.  Builds a uio from the msghdr,
 * calls soreceive(), then copies the source address and any control
 * (cmsg) data back out — either to userland or, when `controlp' is
 * given, to an in-kernel caller.
 * NOTE(review): excerpt is missing interior lines; code byte-identical.
 */
950 kern_recvit(td, s, mp, fromseg, controlp)
954 enum uio_seg fromseg;
955 struct mbuf **controlp;
962 struct mbuf *m, *control = 0;
966 struct sockaddr *fromsa = 0;
968 struct uio *ktruio = NULL;
971 if (controlp != NULL)
975 error = getsock_cap(td->td_proc->p_fd, s, CAP_READ, &fp, NULL);
981 error = mac_socket_check_receive(td->td_ucred, so);
988 auio.uio_iov = mp->msg_iov;
989 auio.uio_iovcnt = mp->msg_iovlen;
990 auio.uio_segflg = UIO_USERSPACE;
991 auio.uio_rw = UIO_READ;
993 auio.uio_offset = 0; /* XXX */
/* Reject iovec totals that would overflow the resid counter. */
996 for (i = 0; i < mp->msg_iovlen; i++, iov++) {
997 if ((auio.uio_resid += iov->iov_len) < 0) {
1003 if (KTRPOINT(td, KTR_GENIO))
1004 ktruio = cloneuio(&auio);
1006 len = auio.uio_resid;
/* Only ask soreceive() for control data when someone wants it. */
1007 error = soreceive(so, &fromsa, &auio, (struct mbuf **)0,
1008 (mp->msg_control || controlp) ? &control : (struct mbuf **)0,
1011 if (auio.uio_resid != len && (error == ERESTART ||
1012 error == EINTR || error == EWOULDBLOCK))
1016 if (ktruio != NULL) {
1017 ktruio->uio_resid = len - auio.uio_resid;
1018 ktrgenio(s, UIO_READ, ktruio, error);
1023 td->td_retval[0] = len - auio.uio_resid;
1025 len = mp->msg_namelen;
1026 if (len <= 0 || fromsa == 0)
1029 /* save sa_len before it is destroyed by MSG_COMPAT */
1030 len = MIN(len, fromsa->sa_len);
1031 #ifdef COMPAT_OLDSOCK
1032 if (mp->msg_flags & MSG_COMPAT)
1033 ((struct osockaddr *)fromsa)->sa_family =
1036 if (fromseg == UIO_USERSPACE) {
1037 error = copyout(fromsa, mp->msg_name,
1042 bcopy(fromsa, mp->msg_name, len);
1044 mp->msg_namelen = len;
1046 if (mp->msg_control && controlp == NULL) {
1047 #ifdef COMPAT_OLDSOCK
1049 * We assume that old recvmsg calls won't receive access
1050 * rights and other control info, esp. as control info
1051 * is always optional and those options didn't exist in 4.3.
1052 * If we receive rights, trim the cmsghdr; anything else
1055 if (control && mp->msg_flags & MSG_COMPAT) {
1056 if (mtod(control, struct cmsghdr *)->cmsg_level !=
1058 mtod(control, struct cmsghdr *)->cmsg_type !=
1060 mp->msg_controllen = 0;
1063 control->m_len -= sizeof (struct cmsghdr);
1064 control->m_data += sizeof (struct cmsghdr);
1067 len = mp->msg_controllen;
1069 mp->msg_controllen = 0;
1070 ctlbuf = mp->msg_control;
/* Walk the control mbuf chain, truncating with MSG_CTRUNC if short. */
1072 while (m && len > 0) {
1073 unsigned int tocopy;
1075 if (len >= m->m_len)
1078 mp->msg_flags |= MSG_CTRUNC;
1082 if ((error = copyout(mtod(m, caddr_t),
1083 ctlbuf, tocopy)) != 0)
1090 mp->msg_controllen = ctlbuf - (caddr_t)mp->msg_control;
1095 if (fromsa && KTRPOINT(td, KTR_STRUCT))
1096 ktrsockaddr(fromsa);
1099 free(fromsa, M_SONAME);
1101 if (error == 0 && controlp != NULL)
1102 *controlp = control;
/*
 * recvit(): userland wrapper around kern_recvit() that writes the
 * resulting namelen back to `namelenp' when requested.
 * NOTE(review): excerpt is missing interior lines; code byte-identical.
 */
1110 recvit(td, s, mp, namelenp)
1118 error = kern_recvit(td, s, mp, UIO_USERSPACE, NULL);
1122 error = copyout(&mp->msg_namelen, namelenp, sizeof (socklen_t));
1123 #ifdef COMPAT_OLDSOCK
1124 if (mp->msg_flags & MSG_COMPAT)
1125 error = 0; /* old recvfrom didn't check */
/* recvfrom(2): single-buffer wrapper that builds a msghdr. */
1132 sys_recvfrom(td, uap)
1134 struct recvfrom_args /* {
1139 struct sockaddr * __restrict from;
1140 socklen_t * __restrict fromlenaddr;
1147 if (uap->fromlenaddr) {
1148 error = copyin(uap->fromlenaddr,
1149 &msg.msg_namelen, sizeof (msg.msg_namelen));
1153 msg.msg_namelen = 0;
1155 msg.msg_name = uap->from;
1156 msg.msg_iov = &aiov;
1158 aiov.iov_base = uap->buf;
1159 aiov.iov_len = uap->len;
1160 msg.msg_control = 0;
1161 msg.msg_flags = uap->flags;
1162 error = recvit(td, uap->s, &msg, uap->fromlenaddr);
1167 #ifdef COMPAT_OLDSOCK
/* orecvfrom(): old recvfrom — just sets MSG_COMPAT and delegates. */
1171 struct recvfrom_args *uap;
1174 uap->flags |= MSG_COMPAT;
1175 return (sys_recvfrom(td, uap));
1179 #ifdef COMPAT_OLDSOCK
/* orecv(): old 4.3BSD recv() — no source address wanted. */
1183 struct orecv_args /* {
1195 msg.msg_namelen = 0;
1196 msg.msg_iov = &aiov;
1198 aiov.iov_base = uap->buf;
1199 aiov.iov_len = uap->len;
1200 msg.msg_control = 0;
1201 msg.msg_flags = uap->flags;
1202 error = recvit(td, uap->s, &msg, NULL);
1207 * Old recvmsg.  This code takes advantage of the fact that the old msghdr
1208 * overlays the new one, missing only the flags, and with the (old) access
1209 * rights where the control fields are now.
1214 struct orecvmsg_args /* {
1216 struct omsghdr *msg;
1224 error = copyin(uap->msg, &msg, sizeof (struct omsghdr));
1227 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1230 msg.msg_flags = uap->flags | MSG_COMPAT;
1232 error = recvit(td, uap->s, &msg, &uap->msg->msg_namelen);
1233 if (msg.msg_controllen && error == 0)
1234 error = copyout(&msg.msg_controllen,
1235 &uap->msg->msg_accrightslen, sizeof (int));
/* recvmsg(2): copy in msghdr/iovecs, receive, copy msghdr back out. */
1242 sys_recvmsg(td, uap)
1244 struct recvmsg_args /* {
1251 struct iovec *uiov, *iov;
1254 error = copyin(uap->msg, &msg, sizeof (msg));
1257 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1260 msg.msg_flags = uap->flags;
1261 #ifdef COMPAT_OLDSOCK
1262 msg.msg_flags &= ~MSG_COMPAT;
1266 error = recvit(td, uap->s, &msg, NULL);
1269 error = copyout(&msg, uap->msg, sizeof(msg));
/*
 * shutdown(2): resolve the fd with CAP_SHUTDOWN and call soshutdown().
 * NOTE(review): excerpt is missing interior lines; code byte-identical.
 */
1277 sys_shutdown(td, uap)
1279 struct shutdown_args /* {
1288 AUDIT_ARG_FD(uap->s);
1289 error = getsock_cap(td->td_proc->p_fd, uap->s, CAP_SHUTDOWN, &fp,
1293 error = soshutdown(so, uap->how);
/* setsockopt(2): thin wrapper over kern_setsockopt(). */
1301 sys_setsockopt(td, uap)
1303 struct setsockopt_args /* {
1312 return (kern_setsockopt(td, uap->s, uap->level, uap->name,
1313 uap->val, UIO_USERSPACE, uap->valsize));
/*
 * kern_setsockopt(): validate the value pointer/length, fill in a
 * struct sockopt, and call sosetopt().  `valseg' selects whether the
 * value lives in user or kernel space.
 */
1317 kern_setsockopt(td, s, level, name, val, valseg, valsize)
1323 enum uio_seg valseg;
1329 struct sockopt sopt;
1331 if (val == NULL && valsize != 0)
1333 if ((int)valsize < 0)
1336 sopt.sopt_dir = SOPT_SET;
1337 sopt.sopt_level = level;
1338 sopt.sopt_name = name;
1339 sopt.sopt_val = val;
1340 sopt.sopt_valsize = valsize;
1346 sopt.sopt_td = NULL;
1349 panic("kern_setsockopt called with bad valseg");
1353 error = getsock_cap(td->td_proc->p_fd, s, CAP_SETSOCKOPT, &fp, NULL);
1356 error = sosetopt(so, &sopt);
/* getsockopt(2): copy in/out the value length around kern_getsockopt(). */
1364 sys_getsockopt(td, uap)
1366 struct getsockopt_args /* {
1370 void * __restrict val;
1371 socklen_t * __restrict avalsize;
1378 error = copyin(uap->avalsize, &valsize, sizeof (valsize));
1383 error = kern_getsockopt(td, uap->s, uap->level, uap->name,
1384 uap->val, UIO_USERSPACE, &valsize);
1387 error = copyout(&valsize, uap->avalsize, sizeof (valsize));
1392 * Kernel version of getsockopt.
1393 * optval can be a userland or userspace. optlen is always a kernel pointer.
1396 kern_getsockopt(td, s, level, name, val, valseg, valsize)
1402 enum uio_seg valseg;
1408 struct sockopt sopt;
1412 if ((int)*valsize < 0)
1415 sopt.sopt_dir = SOPT_GET;
1416 sopt.sopt_level = level;
1417 sopt.sopt_name = name;
1418 sopt.sopt_val = val;
1419 sopt.sopt_valsize = (size_t)*valsize; /* checked non-negative above */
1425 sopt.sopt_td = NULL;
1428 panic("kern_getsockopt called with bad valseg");
1432 error = getsock_cap(td->td_proc->p_fd, s, CAP_GETSOCKOPT, &fp, NULL);
1435 error = sogetopt(so, &sopt);
1436 *valsize = sopt.sopt_valsize;
1443 * getsockname1() - Get socket name.
/*
 * Shared body for getsockname(2)/ogetsockname(); `compat' selects the
 * old osockaddr layout on copyout.
 * NOTE(review): excerpt is missing interior lines; code byte-identical.
 */
1447 getsockname1(td, uap, compat)
1449 struct getsockname_args /* {
1451 struct sockaddr * __restrict asa;
1452 socklen_t * __restrict alen;
1456 struct sockaddr *sa;
1460 error = copyin(uap->alen, &len, sizeof(len));
1464 error = kern_getsockname(td, uap->fdes, &sa, &len);
1469 #ifdef COMPAT_OLDSOCK
1471 ((struct osockaddr *)sa)->sa_family = sa->sa_family;
1473 error = copyout(sa, uap->asa, (u_int)len);
1477 error = copyout(&len, uap->alen, sizeof(len));
/*
 * kern_getsockname(): query the protocol (pru_sockaddr) for the local
 * address; `*alen' is clamped to the returned sa_len.
 */
1482 kern_getsockname(struct thread *td, int fd, struct sockaddr **sa,
1494 error = getsock_cap(td->td_proc->p_fd, fd, CAP_GETSOCKNAME, &fp, NULL);
1499 CURVNET_SET(so->so_vnet);
1500 error = (*so->so_proto->pr_usrreqs->pru_sockaddr)(so, sa);
1507 len = MIN(*alen, (*sa)->sa_len);
1510 if (KTRPOINT(td, KTR_STRUCT))
1516 free(*sa, M_SONAME);
/* getsockname(2) / old-style wrapper entry points. */
1523 sys_getsockname(td, uap)
1525 struct getsockname_args *uap;
1528 return (getsockname1(td, uap, 0));
1531 #ifdef COMPAT_OLDSOCK
1533 ogetsockname(td, uap)
1535 struct getsockname_args *uap;
1538 return (getsockname1(td, uap, 1));
1540 #endif /* COMPAT_OLDSOCK */
1543 * getpeername1() - Get name of peer for connected socket.
/* Same structure as getsockname1(), but for the peer address. */
1547 getpeername1(td, uap, compat)
1549 struct getpeername_args /* {
1551 struct sockaddr * __restrict asa;
1552 socklen_t * __restrict alen;
1556 struct sockaddr *sa;
1560 error = copyin(uap->alen, &len, sizeof (len));
1564 error = kern_getpeername(td, uap->fdes, &sa, &len);
1569 #ifdef COMPAT_OLDSOCK
1571 ((struct osockaddr *)sa)->sa_family = sa->sa_family;
1573 error = copyout(sa, uap->asa, (u_int)len);
1577 error = copyout(&len, uap->alen, sizeof(len));
/*
 * kern_getpeername(): requires a connected (or confirming) socket,
 * then queries the protocol via pru_peeraddr.
 */
1582 kern_getpeername(struct thread *td, int fd, struct sockaddr **sa,
1594 error = getsock_cap(td->td_proc->p_fd, fd, CAP_GETPEERNAME, &fp, NULL);
1598 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
1603 CURVNET_SET(so->so_vnet);
1604 error = (*so->so_proto->pr_usrreqs->pru_peeraddr)(so, sa);
1611 len = MIN(*alen, (*sa)->sa_len);
1614 if (KTRPOINT(td, KTR_STRUCT))
1619 free(*sa, M_SONAME);
/* getpeername(2) / old-style wrapper entry points. */
1628 sys_getpeername(td, uap)
1630 struct getpeername_args *uap;
1633 return (getpeername1(td, uap, 0));
1636 #ifdef COMPAT_OLDSOCK
1638 ogetpeername(td, uap)
1640 struct ogetpeername_args *uap;
1643 /* XXX uap should have type `getpeername_args *' to begin with. */
1644 return (getpeername1(td, (struct getpeername_args *)uap, 1));
1646 #endif /* COMPAT_OLDSOCK */
/*
 * sockargs(): copy a userland buffer (sockaddr or control data) into a
 * freshly allocated mbuf.  For MT_SONAME the sa_len field is forced to
 * the actual buffer length, with an old-binary endianness fixup.
 * NOTE(review): excerpt is missing interior lines; code byte-identical.
 */
1649 sockargs(mp, buf, buflen, type)
1654 struct sockaddr *sa;
1658 if ((u_int)buflen > MLEN) {
1659 #ifdef COMPAT_OLDSOCK
1660 if (type == MT_SONAME && (u_int)buflen <= 112)
1661 buflen = MLEN; /* unix domain compat. hack */
1664 if ((u_int)buflen > MCLBYTES)
1667 m = m_get(M_WAIT, type);
1668 if ((u_int)buflen > MLEN)
1671 error = copyin(buf, mtod(m, caddr_t), (u_int)buflen);
1676 if (type == MT_SONAME) {
1677 sa = mtod(m, struct sockaddr *);
/* 4.3BSD put the family in what is now sa_len; swap it back. */
1679 #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
1680 if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1681 sa->sa_family = sa->sa_len;
1683 sa->sa_len = buflen;
/*
 * getsockaddr(): malloc'd-copy of a userland sockaddr with length
 * bounds checking; the caller frees with M_SONAME.
 */
1690 getsockaddr(namp, uaddr, len)
1691 struct sockaddr **namp;
1695 struct sockaddr *sa;
1698 if (len > SOCK_MAXADDRLEN)
1699 return (ENAMETOOLONG);
1700 if (len < offsetof(struct sockaddr, sa_data[0]))
1702 sa = malloc(len, M_SONAME, M_WAITOK);
1703 error = copyin(uaddr, sa, len);
1707 #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
1708 if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1709 sa->sa_family = sa->sa_len;
1717 #include <sys/condvar.h>
/* Per-call SF_SYNC bookkeeping; definition is incomplete in this excerpt. */
1719 struct sendfile_sync {
1726 * Detach mapped page and release resources back to the system.
/*
 * sf_buf_mext(): mbuf external-storage free routine for sendfile
 * pages.  Unwires the page and, when SF_SYNC accounting is active,
 * decrements the outstanding count, waking the waiter at zero.
 * NOTE(review): excerpt is missing interior lines; code byte-identical.
 */
1729 sf_buf_mext(void *addr, void *args)
1732 struct sendfile_sync *sfs;
1734 m = sf_buf_page(args);
1737 vm_page_unwire(m, 0);
1739 * Check for the object going away on us.  This can
1740 * happen since we don't hold a reference to it.
1741 * If so, we're responsible for freeing the page.
1743 if (m->wire_count == 0 && m->object == NULL)
1749 mtx_lock(&sfs->mtx);
1750 KASSERT(sfs->count> 0, ("Sendfile sync botchup count == 0"));
1751 if (--sfs->count == 0)
1752 cv_signal(&sfs->cv);
1753 mtx_unlock(&sfs->mtx);
1759 * int sendfile(int fd, int s, off_t offset, size_t nbytes,
1760 * struct sf_hdtr *hdtr, off_t *sbytes, int flags)
1762 * Send a file specified by 'fd' and starting at 'offset' to a socket
1763 * specified by 's'. Send only 'nbytes' of the file or until EOF if nbytes ==
1764 * 0. Optionally add a header and/or trailer to the socket output. If
1765 * specified, write the total number of bytes sent into *sbytes.
/* sendfile(2): modern entry point (compat == 0). */
1768 sys_sendfile(struct thread *td, struct sendfile_args *uap)
1771 return (do_sendfile(td, uap, 0));
/*
 * do_sendfile(): copy in the optional sf_hdtr header/trailer iovecs,
 * run kern_sendfile(), and free the uios afterwards.
 * NOTE(review): excerpt is missing interior lines; code byte-identical.
 */
1775 do_sendfile(struct thread *td, struct sendfile_args *uap, int compat)
1777 struct sf_hdtr hdtr;
1778 struct uio *hdr_uio, *trl_uio;
1781 hdr_uio = trl_uio = NULL;
1783 if (uap->hdtr != NULL) {
1784 error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
1787 if (hdtr.headers != NULL) {
1788 error = copyinuio(hdtr.headers, hdtr.hdr_cnt, &hdr_uio);
1792 if (hdtr.trailers != NULL) {
1793 error = copyinuio(hdtr.trailers, hdtr.trl_cnt, &trl_uio);
1800 error = kern_sendfile(td, uap, hdr_uio, trl_uio, compat);
1803 free(hdr_uio, M_IOV);
1805 free(trl_uio, M_IOV);
1809 #ifdef COMPAT_FREEBSD4
/* freebsd4_sendfile(): pre-5.0 ABI; header bytes count against nbytes. */
1811 freebsd4_sendfile(struct thread *td, struct freebsd4_sendfile_args *uap)
1813 struct sendfile_args args;
1817 args.offset = uap->offset;
1818 args.nbytes = uap->nbytes;
1819 args.hdtr = uap->hdtr;
1820 args.sbytes = uap->sbytes;
1821 args.flags = uap->flags;
1823 return (do_sendfile(td, &args, 1));
1825 #endif /* COMPAT_FREEBSD4 */
1828 kern_sendfile(struct thread *td, struct sendfile_args *uap,
1829 struct uio *hdr_uio, struct uio *trl_uio, int compat)
1831 struct file *sock_fp;
1833 struct vm_object *obj = NULL;
1834 struct socket *so = NULL;
1835 struct mbuf *m = NULL;
1839 off_t off, xfsize, fsbytes = 0, sbytes = 0, rem = 0;
1840 int error, hdrlen = 0, mnw = 0;
1843 struct sendfile_sync *sfs = NULL;
1846 * The file descriptor must be a regular file and have a
1847 * backing VM object.
1848 * File offset must be positive. If it goes beyond EOF
1849 * we send only the header/trailer and no payload data.
1851 AUDIT_ARG_FD(uap->fd);
1852 if ((error = fgetvp_read(td, uap->fd, CAP_READ, &vp)) != 0)
1854 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
1855 vn_lock(vp, LK_SHARED | LK_RETRY);
1856 if (vp->v_type == VREG) {
1857 bsize = vp->v_mount->mnt_stat.f_iosize;
1858 if (uap->nbytes == 0) {
1859 error = VOP_GETATTR(vp, &va, td->td_ucred);
1862 VFS_UNLOCK_GIANT(vfslocked);
1872 * Temporarily increase the backing VM
1873 * object's reference count so that a forced
1874 * reclamation of its vnode does not
1875 * immediately destroy it.
1877 VM_OBJECT_LOCK(obj);
1878 if ((obj->flags & OBJ_DEAD) == 0) {
1879 vm_object_reference_locked(obj);
1880 VM_OBJECT_UNLOCK(obj);
1882 VM_OBJECT_UNLOCK(obj);
1887 bsize = 0; /* silence gcc */
1889 VFS_UNLOCK_GIANT(vfslocked);
1894 if (uap->offset < 0) {
1900 * The socket must be a stream socket and connected.
1901 * Remember if it a blocking or non-blocking socket.
1903 if ((error = getsock_cap(td->td_proc->p_fd, uap->s, CAP_WRITE,
1904 &sock_fp, NULL)) != 0)
1906 so = sock_fp->f_data;
1907 if (so->so_type != SOCK_STREAM) {
1911 if ((so->so_state & SS_ISCONNECTED) == 0) {
1916 * Do not wait on memory allocations but return ENOMEM for
1917 * caller to retry later.
1918 * XXX: Experimental.
1920 if (uap->flags & SF_MNOWAIT)
1923 if (uap->flags & SF_SYNC) {
1924 sfs = malloc(sizeof *sfs, M_TEMP, M_WAITOK | M_ZERO);
1925 mtx_init(&sfs->mtx, "sendfile", NULL, MTX_DEF);
1926 cv_init(&sfs->cv, "sendfile");
1930 error = mac_socket_check_send(td->td_ucred, so);
1935 /* If headers are specified copy them into mbufs. */
1936 if (hdr_uio != NULL) {
1937 hdr_uio->uio_td = td;
1938 hdr_uio->uio_rw = UIO_WRITE;
1939 if (hdr_uio->uio_resid > 0) {
1941 * In FBSD < 5.0 the nbytes to send also included
1942 * the header. If compat is specified subtract the
1943 * header size from nbytes.
1946 if (uap->nbytes > hdr_uio->uio_resid)
1947 uap->nbytes -= hdr_uio->uio_resid;
1951 m = m_uiotombuf(hdr_uio, (mnw ? M_NOWAIT : M_WAITOK),
1954 error = mnw ? EAGAIN : ENOBUFS;
1957 hdrlen = m_length(m, NULL);
1962 * Protect against multiple writers to the socket.
1964 * XXXRW: Historically this has assumed non-interruptibility, so now
1965 * we implement that, but possibly shouldn't.
1967 (void)sblock(&so->so_snd, SBL_WAIT | SBL_NOINTR);
1970 * Loop through the pages of the file, starting with the requested
1971 * offset. Get a file page (do I/O if necessary), map the file page
1972 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
1974 * This is done in two loops. The inner loop turns as many pages
1975 * as it can, up to available socket buffer space, without blocking
/*
 * NOTE(review): tail of kern_sendfile() — the function opens before this
 * excerpt, and the embedded original line numbers at the head of each line
 * are non-contiguous, so intervening statements (error checks, labels,
 * gotos, closing braces) have been elided from this view. The annotations
 * below describe only what is visible here; do not assume elided behavior.
 */
1976 * into mbufs to have it bulk delivered into the socket send buffer.
1977 * The outer loop checks the state and available space of the socket
1978 * and takes care of the overall progress.
/* Outer loop: one iteration per bulk chunk, advancing 'off' through the file. */
1980 for (off = uap->offset; ; ) {
/* Termination test: either the requested nbytes or (nbytes==0) the whole file has been sent. */
1986 if ((uap->nbytes != 0 && uap->nbytes == fsbytes) ||
1987 (uap->nbytes == 0 && va.va_size == fsbytes))
1996 * Check the socket state for ongoing connection,
1997 * no errors and space in socket buffer.
1998 * If space is low allow for the remainder of the
1999 * file to be processed if it fits the socket buffer.
2000 * Otherwise block in waiting for sufficient space
2001 * to proceed, or if the socket is nonblocking, return
2002 * to userland with EAGAIN while reporting how far
2004 * We wait until the socket buffer has significant free
2005 * space to do bulk sends. This makes good use of file
2006 * system read ahead and allows packet segmentation
2007 * offloading hardware to take over lots of work. If
2008 * we were not careful here we would send off only one
/*
 * Raise the send-buffer low-water mark to half the high-water mark so
 * sbwait() below only wakes us when a large bulk write is possible.
 */
2011 SOCKBUF_LOCK(&so->so_snd);
2012 if (so->so_snd.sb_lowat < so->so_snd.sb_hiwat / 2)
2013 so->so_snd.sb_lowat = so->so_snd.sb_hiwat / 2;
/* Peer shut down writing, or an asynchronous socket error is pending: bail out. */
2015 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
2017 SOCKBUF_UNLOCK(&so->so_snd);
2019 } else if (so->so_error) {
2020 error = so->so_error;
2022 SOCKBUF_UNLOCK(&so->so_snd);
2025 space = sbspace(&so->so_snd);
2028 space < so->so_snd.sb_lowat)) {
/* Non-blocking socket with insufficient space: return (elided path presumably sets EAGAIN — TODO confirm against full source). */
2029 if (so->so_state & SS_NBIO) {
2030 SOCKBUF_UNLOCK(&so->so_snd);
2035 * sbwait drops the lock while sleeping.
2036 * When we loop back to retry_space the
2037 * state may have changed and we retest
2040 error = sbwait(&so->so_snd);
2042 * An error from sbwait usually indicates that we've
2043 * been interrupted by a signal. If we've sent anything
2044 * then return bytes sent, otherwise return the error.
2047 SOCKBUF_UNLOCK(&so->so_snd);
2052 SOCKBUF_UNLOCK(&so->so_snd);
2055 * Reduce space in the socket buffer by the size of
2056 * the header mbuf chain.
2057 * hdrlen is set to 0 after the first loop.
/*
 * Re-validate the file under the vnode lock each pass: the file may have
 * been truncated while we slept, so VOP_GETATTR() refreshes va.va_size
 * and we stop if 'off' has run past EOF.
 */
2061 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
2062 error = vn_lock(vp, LK_SHARED);
2064 VFS_UNLOCK_GIANT(vfslocked);
2067 error = VOP_GETATTR(vp, &va, td->td_ucred);
2068 if (error != 0 || off >= va.va_size) {
2070 VFS_UNLOCK_GIANT(vfslocked);
2073 VFS_UNLOCK_GIANT(vfslocked);
2076 * Loop and construct maximum sized mbuf chain to be bulk
2077 * dumped into socket buffer.
/* Inner loop: map one page per iteration into an mbuf until the chain fills the available space. */
2079 while (space > loopbytes) {
2085 * Calculate the amount to transfer.
2086 * Not to exceed a page, the EOF,
2087 * or the passed in nbytes.
2089 pgoff = (vm_offset_t)(off & PAGE_MASK);
2091 rem = (uap->nbytes - fsbytes - loopbytes);
2094 uap->offset - fsbytes - loopbytes;
/* xfsize = min(rest of this page, remaining request, remaining socket space). */
2095 xfsize = omin(PAGE_SIZE - pgoff, rem);
2096 xfsize = omin(space - loopbytes, xfsize);
2098 done = 1; /* all data sent */
2103 * Attempt to look up the page. Allocate
2104 * if not found or wait and loop if busy.
/* Grab (and wire) the VM page backing file offset 'off'. */
2106 pindex = OFF_TO_IDX(off);
2107 VM_OBJECT_LOCK(obj);
2108 pg = vm_page_grab(obj, pindex, VM_ALLOC_NOBUSY |
2109 VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_RETRY);
2112 * Check if page is valid for what we need,
2113 * otherwise initiate I/O.
2114 * If we already turned some pages into mbufs,
2115 * send them off before we come here again and
2118 if (pg->valid && vm_page_is_valid(pg, pgoff, xfsize))
2119 VM_OBJECT_UNLOCK(obj);
2121 error = EAGAIN; /* send what we already got */
/* SF_NODISKIO: caller forbids blocking on disk reads (elided branch — presumably returns EBUSY; TODO confirm). */
2122 else if (uap->flags & SF_NODISKIO)
2128 * Ensure that our page is still around
2129 * when the I/O completes.
2131 vm_page_io_start(pg);
2132 VM_OBJECT_UNLOCK(obj);
2135 * Get the page from backing store.
2136 * XXXMAC: Because we don't have fp->f_cred
2137 * here, we pass in NOCRED. This is probably
2138 * wrong, but is consistent with our original
/*
 * UIO_NOCOPY read: pulls the data from disk straight into the VM
 * object's pages (no copy into a user buffer); the IO_SEQSHIFT hint
 * encourages read-ahead.
 */
2141 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
2142 error = vn_rdwr(UIO_READ, vp, NULL, MAXBSIZE,
2143 trunc_page(off), UIO_NOCOPY, IO_NODELOCKED |
2144 IO_VMIO | ((MAXBSIZE / bsize) << IO_SEQSHIFT),
2145 td->td_ucred, NOCRED, &resid, td);
2146 VFS_UNLOCK_GIANT(vfslocked);
2147 VM_OBJECT_LOCK(obj);
2148 vm_page_io_finish(pg);
2150 VM_OBJECT_UNLOCK(obj);
/* I/O failure path: unwire and possibly free the page nobody else references. */
2155 vm_page_unwire(pg, 0);
2157 * See if anyone else might know about
2158 * this page. If not and it is not valid,
2161 if (pg->wire_count == 0 && pg->valid == 0 &&
2162 pg->busy == 0 && !(pg->oflags & VPO_BUSY))
2165 VM_OBJECT_UNLOCK(obj);
2166 if (error == EAGAIN)
2167 error = 0; /* not a real error */
2172 * Get a sendfile buf. When allocating the
2173 * first buffer for mbuf chain, we usually
2174 * wait as long as necessary, but this wait
2175 * can be interrupted. For consequent
2176 * buffers, do not sleep, since several
2177 * threads might exhaust the buffers and then
/* Map the wired page into KVA via an sf_buf; only the first buffer of a chain may sleep. */
2180 sf = sf_buf_alloc(pg, (mnw || m != NULL) ? SFB_NOWAIT :
2183 mbstat.sf_allocfail++;
2185 vm_page_unwire(pg, 0);
2186 KASSERT(pg->object != NULL,
2187 ("kern_sendfile: object disappeared"));
2190 error = (mnw ? EAGAIN : EINTR);
2195 * Get an mbuf and set it up as having
/* Wrap the sf_buf in a zero-copy external mbuf; sf_buf_mext is the free routine. */
2198 m0 = m_get((mnw ? M_NOWAIT : M_WAITOK), MT_DATA);
2200 error = (mnw ? EAGAIN : ENOBUFS);
2201 sf_buf_mext(NULL, sf);
2204 MEXTADD(m0, sf_buf_kva(sf), PAGE_SIZE, sf_buf_mext,
2205 sfs, sf, M_RDONLY, EXT_SFBUF);
2206 m0->m_data = (char *)sf_buf_kva(sf) + pgoff;
2209 /* Append to mbuf chain. */
2213 m_last(m)->m_next = m0;
2218 /* Keep track of bits processed. */
2219 loopbytes += xfsize;
/* SF_SYNC bookkeeping: count outstanding external-buffer references under sfs->mtx. */
2223 mtx_lock(&sfs->mtx);
2225 mtx_unlock(&sfs->mtx);
2231 /* Add the buffer chain to the socket buffer. */
2235 mlen = m_length(m, NULL);
2236 SOCKBUF_LOCK(&so->so_snd);
2237 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
2239 SOCKBUF_UNLOCK(&so->so_snd);
2242 SOCKBUF_UNLOCK(&so->so_snd);
2243 CURVNET_SET(so->so_vnet);
2244 /* Avoid error aliasing. */
2245 err = (*so->so_proto->pr_usrreqs->pru_send)
2246 (so, 0, m, NULL, NULL, td);
2250 * We need two counters to get the
2251 * file offset and nbytes to send
2253 * - sbytes contains the total amount
2254 * of bytes sent, including headers.
2255 * - fsbytes contains the total amount
2256 * of bytes sent from the file.
2264 } else if (error == 0)
2266 m = NULL; /* pru_send always consumes */
2269 /* Quit outer loop on error or when we're done. */
2277 * Send trailers. Wimp out and use writev(2).
2279 if (trl_uio != NULL) {
2280 sbunlock(&so->so_snd);
2281 error = kern_writev(td, uap->s, trl_uio);
2283 sbytes += td->td_retval[0];
2288 sbunlock(&so->so_snd);
2291 * If there was no error we have to clear td->td_retval[0]
2292 * because it may have been set by writev.
2295 td->td_retval[0] = 0;
/* Report the total byte count back to the caller's *sbytes pointer (copyout result intentionally ignored here). */
2297 if (uap->sbytes != NULL) {
2298 copyout(&sbytes, uap->sbytes, sizeof(off_t));
2301 vm_object_deallocate(obj);
2303 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
2305 VFS_UNLOCK_GIANT(vfslocked);
/*
 * SF_SYNC teardown: wait until every zero-copy mbuf referencing our
 * pages has been freed (sfs->count reaches 0) before destroying the
 * condvar and mutex.
 */
2313 mtx_lock(&sfs->mtx);
2314 if (sfs->count != 0)
2315 cv_wait(&sfs->cv, &sfs->mtx);
2316 KASSERT(sfs->count == 0, ("sendfile sync still busy"));
2317 cv_destroy(&sfs->cv);
2318 mtx_destroy(&sfs->mtx);
/* ERESTART from a signal is mapped (elided) so userland sees a restartable result — TODO confirm mapping (likely EINTR). */
2322 if (error == ERESTART)
2330 * Functionality only compiled in if SCTP is defined in the kernel Makefile,
2331 * otherwise all return EOPNOTSUPP.
2332 * XXX: We should make this loadable one day.
/*
 * sctp_peeloff(2): detach ("peel off") one association from a one-to-many
 * SCTP socket into its own socket and return a new file descriptor for it.
 * Compiled to a real implementation only with INET/INET6 + SCTP; otherwise
 * returns EOPNOTSUPP.
 *
 * NOTE(review): K&R-style definition; the return-type line and several
 * interior lines (error checks, gotos, #else) are elided in this excerpt —
 * the embedded original line numbers are non-contiguous.
 */
2335 sys_sctp_peeloff(td, uap)
2337 struct sctp_peeloff_args /* {
2342 #if (defined(INET) || defined(INET6)) && defined(SCTP)
2343 struct filedesc *fdp;
2344 struct file *nfp = NULL;
2346 struct socket *head, *so;
/* Look up the listening ("head") socket, requiring the CAP_PEELOFF capability right. */
2350 fdp = td->td_proc->p_fd;
2351 AUDIT_ARG_FD(uap->sd);
2352 error = fgetsock(td, uap->sd, CAP_PEELOFF, &head, &fflag);
2355 if (head->so_proto->pr_protocol != IPPROTO_SCTP) {
/* Verify the association id actually exists on this socket before allocating anything. */
2359 error = sctp_can_peel_off(head, (sctp_assoc_t)uap->name);
2363 * At this point we know we do have a assoc to pull
2364 * we proceed to get the fd setup. This may block
/* Allocate the new descriptor and report it to userland via td_retval[0]. */
2368 error = falloc(td, &nfp, &fd, 0);
2371 td->td_retval[0] = fd;
/* Create the peeled-off socket as an already-connected child of 'head'. */
2373 CURVNET_SET(head->so_vnet);
2374 so = sonewconn(head, SS_ISCONNECTED);
2380 * Before changing the flags on the socket, we have to bump the
2381 * reference count. Otherwise, if the protocol calls sofree(),
2382 * the socket will be released due to a zero refcount.
2385 soref(so); /* file descriptor reference */
/* Detach the child from the head's completion queue and inherit non-blocking mode. */
2390 TAILQ_REMOVE(&head->so_comp, so, so_list);
2392 so->so_state |= (head->so_state & SS_NBIO);
2393 so->so_state &= ~SS_NOFDREF;
2394 so->so_qstate &= ~SQ_COMP;
/* Bind the new socket to the descriptor, then let SCTP move the association over. */
2397 finit(nfp, fflag, DTYPE_SOCKET, so, &socketops);
2398 error = sctp_do_peeloff(head, so, (sctp_assoc_t)uap->name);
/* Propagate SIGIO ownership from the parent socket, if any. */
2401 if (head->so_sigio != NULL)
2402 fsetown(fgetown(&head->so_sigio), &so->so_sigio);
2406 * close the new descriptor, assuming someone hasn't ripped it
2407 * out from under us.
2410 fdclose(fdp, nfp, fd, td);
2413 * Release explicitly held references before returning.
/* Non-SCTP build (#else branch, directive elided): the syscall is unsupported. */
2423 return (EOPNOTSUPP);
/*
 * sctp_generic_sendmsg(2): send a single buffer (uap->msg/uap->mlen) on an
 * SCTP socket, optionally with a user-supplied struct sctp_sndrcvinfo and a
 * destination address. Returns bytes sent via td_retval[0]. EOPNOTSUPP
 * when the kernel lacks SCTP support.
 *
 * NOTE(review): interior lines (declarations, error branches, labels,
 * capability-rights initialization) are elided in this excerpt.
 */
2428 sys_sctp_generic_sendmsg (td, uap)
2430 struct sctp_generic_sendmsg_args /* {
2436 struct sctp_sndrcvinfo *sinfo,
2440 #if (defined(INET) || defined(INET6)) && defined(SCTP)
2441 struct sctp_sndrcvinfo sinfo, *u_sinfo = NULL;
2443 struct file *fp = NULL;
2445 struct sockaddr *to = NULL;
2447 struct uio *ktruio = NULL;
2450 struct iovec iov[1];
2451 cap_rights_t rights;
/* Copy in the optional per-message send info from userland. */
2454 error = copyin(uap->sinfo, &sinfo, sizeof (sinfo));
/* If a destination address was supplied, copy it in and widen the required capability rights to include CAP_CONNECT. */
2462 error = getsockaddr(&to, uap->to, uap->tolen);
2467 rights |= CAP_CONNECT;
2470 AUDIT_ARG_FD(uap->sd);
2471 error = getsock_cap(td->td_proc->p_fd, uap->sd, rights, &fp, NULL);
/* ktrace: record the destination sockaddr (elided call — presumably ktrsockaddr(); TODO confirm). */
2475 if (to && (KTRPOINT(td, KTR_STRUCT)))
/* Build a one-element iovec over the user's buffer. */
2479 iov[0].iov_base = uap->msg;
2480 iov[0].iov_len = uap->mlen;
2482 so = (struct socket *)fp->f_data;
2483 if (so->so_proto->pr_protocol != IPPROTO_SCTP) {
/* MAC framework hook (compiled conditionally; surrounding #ifdef elided). */
2488 error = mac_socket_check_send(td->td_ucred, so);
/* Assemble the uio describing a userspace write of mlen bytes. */
2494 auio.uio_iovcnt = 1;
2495 auio.uio_segflg = UIO_USERSPACE;
2496 auio.uio_rw = UIO_WRITE;
2498 auio.uio_offset = 0; /* XXX */
2500 len = auio.uio_resid = uap->mlen;
2501 CURVNET_SET(so->so_vnet);
2502 error = sctp_lower_sosend(so, to, &auio,
2503 (struct mbuf *)NULL, (struct mbuf *)NULL,
2504 uap->flags, u_sinfo, td);
/* A partial send interrupted by a signal or blocking still counts as success. */
2507 if (auio.uio_resid != len && (error == ERESTART ||
2508 error == EINTR || error == EWOULDBLOCK))
2510 /* Generation of SIGPIPE can be controlled per socket. */
2511 if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
2512 !(uap->flags & MSG_NOSIGNAL)) {
2513 PROC_LOCK(td->td_proc);
2514 tdsignal(td, SIGPIPE);
2515 PROC_UNLOCK(td->td_proc);
/* Return the number of bytes actually consumed from the buffer. */
2519 td->td_retval[0] = len - auio.uio_resid;
/* ktrace genio record reflects the true byte count, not the requested length. */
2521 if (ktruio != NULL) {
2522 ktruio->uio_resid = td->td_retval[0];
2523 ktrgenio(uap->sd, UIO_WRITE, ktruio, error);
/* Non-SCTP build (#else branch, directive elided). */
2534 return (EOPNOTSUPP);
/*
 * sctp_generic_sendmsg_iov(2): scatter/gather variant of
 * sctp_generic_sendmsg(2) — sends a user-supplied iovec array instead of a
 * single buffer. Otherwise mirrors the single-buffer path: optional
 * sctp_sndrcvinfo, optional destination address (adds CAP_CONNECT),
 * per-socket SIGPIPE suppression, ktrace genio accounting.
 *
 * NOTE(review): interior lines are elided in this excerpt (non-contiguous
 * embedded line numbers); error branches and cleanup labels are not visible.
 */
2539 sys_sctp_generic_sendmsg_iov(td, uap)
2541 struct sctp_generic_sendmsg_iov_args /* {
2547 struct sctp_sndrcvinfo *sinfo,
2551 #if (defined(INET) || defined(INET6)) && defined(SCTP)
2552 struct sctp_sndrcvinfo sinfo, *u_sinfo = NULL;
2554 struct file *fp = NULL;
2557 struct sockaddr *to = NULL;
2559 struct uio *ktruio = NULL;
2562 struct iovec *iov, *tiov;
2563 cap_rights_t rights;
2566 error = copyin(uap->sinfo, &sinfo, sizeof (sinfo));
/* Optional destination address: copy in and require CAP_CONNECT as well. */
2573 error = getsockaddr(&to, uap->to, uap->tolen);
2578 rights |= CAP_CONNECT;
2581 AUDIT_ARG_FD(uap->sd);
2582 error = getsock_cap(td->td_proc->p_fd, uap->sd, rights, &fp, NULL);
/* Copy in the iovec array, using the 32-bit layout for 32-bit processes. */
2586 #ifdef COMPAT_FREEBSD32
2587 if (SV_CURPROC_FLAG(SV_ILP32))
2588 error = freebsd32_copyiniov((struct iovec32 *)uap->iov,
2589 uap->iovlen, &iov, EMSGSIZE);
2592 error = copyiniov(uap->iov, uap->iovlen, &iov, EMSGSIZE);
2596 if (to && (KTRPOINT(td, KTR_STRUCT)))
2600 so = (struct socket *)fp->f_data;
2601 if (so->so_proto->pr_protocol != IPPROTO_SCTP) {
2606 error = mac_socket_check_send(td->td_ucred, so);
/* Build the uio over the copied-in iovec array. */
2612 auio.uio_iovcnt = uap->iovlen;
2613 auio.uio_segflg = UIO_USERSPACE;
2614 auio.uio_rw = UIO_WRITE;
2616 auio.uio_offset = 0; /* XXX */
/* Sum iov_len into uio_resid, rejecting totals that overflow to negative. */
2619 for (i = 0; i <uap->iovlen; i++, tiov++) {
2620 if ((auio.uio_resid += tiov->iov_len) < 0) {
2625 len = auio.uio_resid;
2626 CURVNET_SET(so->so_vnet);
2627 error = sctp_lower_sosend(so, to, &auio,
2628 (struct mbuf *)NULL, (struct mbuf *)NULL,
2629 uap->flags, u_sinfo, td);
/* A partial send interrupted by signal/blocking is still reported as success. */
2632 if (auio.uio_resid != len && (error == ERESTART ||
2633 error == EINTR || error == EWOULDBLOCK))
2635 /* Generation of SIGPIPE can be controlled per socket */
2636 if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
2637 !(uap->flags & MSG_NOSIGNAL)) {
2638 PROC_LOCK(td->td_proc);
2639 tdsignal(td, SIGPIPE);
2640 PROC_UNLOCK(td->td_proc);
2644 td->td_retval[0] = len - auio.uio_resid;
2646 if (ktruio != NULL) {
2647 ktruio->uio_resid = td->td_retval[0];
2648 ktrgenio(uap->sd, UIO_WRITE, ktruio, error);
/* Non-SCTP build (#else branch, directive elided). */
2661 return (EOPNOTSUPP);
/*
 * sctp_generic_recvmsg(2): receive a message on an SCTP socket into a
 * user iovec array, optionally returning the sender address, message
 * flags, and a struct sctp_sndrcvinfo. Requires CAP_READ on the
 * descriptor. Returns bytes received via td_retval[0]; EOPNOTSUPP when
 * the kernel lacks SCTP support.
 *
 * NOTE(review): interior lines and the closing brace are elided in this
 * excerpt; error branches and cleanup labels are not visible.
 */
2666 sys_sctp_generic_recvmsg(td, uap)
2668 struct sctp_generic_recvmsg_args /* {
2672 struct sockaddr *from,
2673 __socklen_t *fromlenaddr,
2674 struct sctp_sndrcvinfo *sinfo,
2678 #if (defined(INET) || defined(INET6)) && defined(SCTP)
/* Stack buffer used as scratch storage for the sender's sockaddr. */
2679 uint8_t sockbufstore[256];
2681 struct iovec *iov, *tiov;
2682 struct sctp_sndrcvinfo sinfo;
2684 struct file *fp = NULL;
2685 struct sockaddr *fromsa;
2691 struct uio *ktruio = NULL;
2694 AUDIT_ARG_FD(uap->sd);
2695 error = getsock_cap(td->td_proc->p_fd, uap->sd, CAP_READ, &fp, NULL);
/* Copy in the iovec array, honoring the 32-bit layout for 32-bit processes. */
2699 #ifdef COMPAT_FREEBSD32
2700 if (SV_CURPROC_FLAG(SV_ILP32))
2701 error = freebsd32_copyiniov((struct iovec32 *)uap->iov,
2702 uap->iovlen, &iov, EMSGSIZE);
2705 error = copyiniov(uap->iov, uap->iovlen, &iov, EMSGSIZE);
2710 if (so->so_proto->pr_protocol != IPPROTO_SCTP) {
2715 error = mac_socket_check_receive(td->td_ucred, so);
/* Copy in the caller-provided sender-address buffer length, if requested. */
2721 if (uap->fromlenaddr) {
2722 error = copyin(uap->fromlenaddr,
2723 &fromlen, sizeof (fromlen));
/* Copy in the initial msg_flags value (e.g. MSG_* receive modifiers), if requested. */
2730 if (uap->msg_flags) {
2731 error = copyin(uap->msg_flags, &msg_flags, sizeof (int));
/* Build the uio describing the userspace read. */
2739 auio.uio_iovcnt = uap->iovlen;
2740 auio.uio_segflg = UIO_USERSPACE;
2741 auio.uio_rw = UIO_READ;
2743 auio.uio_offset = 0; /* XXX */
/* Sum iov_len into uio_resid, rejecting totals that overflow to negative. */
2746 for (i = 0; i <uap->iovlen; i++, tiov++) {
2747 if ((auio.uio_resid += tiov->iov_len) < 0) {
2752 len = auio.uio_resid;
2753 fromsa = (struct sockaddr *)sockbufstore;
2756 if (KTRPOINT(td, KTR_GENIO))
2757 ktruio = cloneuio(&auio);
2759 memset(&sinfo, 0, sizeof(struct sctp_sndrcvinfo));
2760 CURVNET_SET(so->so_vnet);
2761 error = sctp_sorecvmsg(so, &auio, (struct mbuf **)NULL,
2762 fromsa, fromlen, &msg_flags,
2763 (struct sctp_sndrcvinfo *)&sinfo, 1);
/* A partial receive interrupted by signal/blocking is still reported as success. */
2766 if (auio.uio_resid != len && (error == ERESTART ||
2767 error == EINTR || error == EWOULDBLOCK))
/* Copy the populated sndrcvinfo back to userland (guarding condition elided). */
2771 error = copyout(&sinfo, uap->sinfo, sizeof (sinfo));
2774 if (ktruio != NULL) {
2775 ktruio->uio_resid = len - auio.uio_resid;
2776 ktrgenio(uap->sd, UIO_READ, ktruio, error);
2781 td->td_retval[0] = len - auio.uio_resid;
/* Copy the (truncated) sender address and its actual length back to userland. */
2783 if (fromlen && uap->from) {
2785 if (len <= 0 || fromsa == 0)
2788 len = MIN(len, fromsa->sa_len);
2789 error = copyout(fromsa, uap->from, (size_t)len);
2793 error = copyout(&len, uap->fromlenaddr, sizeof (socklen_t));
2799 if (KTRPOINT(td, KTR_STRUCT))
2800 ktrsockaddr(fromsa);
/* Copy the final message flags back, if the caller asked for them. */
2802 if (uap->msg_flags) {
2803 error = copyout(&msg_flags, uap->msg_flags, sizeof (int));
/* Non-SCTP build (#else branch, directive elided). */
2816 return (EOPNOTSUPP);