2 * Copyright (c) 1982, 1986, 1989, 1990, 1993
3 * The Regents of the University of California. All rights reserved.
5 * sendfile(2) and related extensions:
6 * Copyright (c) 1998, David Greenman. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * @(#)uipc_syscalls.c 8.4 (Berkeley) 2/21/94
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include "opt_compat.h"
39 #include "opt_ktrace.h"
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
47 #include <sys/mutex.h>
48 #include <sys/sysproto.h>
49 #include <sys/malloc.h>
50 #include <sys/filedesc.h>
51 #include <sys/event.h>
53 #include <sys/fcntl.h>
55 #include <sys/filio.h>
56 #include <sys/mount.h>
58 #include <sys/protosw.h>
59 #include <sys/sf_buf.h>
60 #include <sys/socket.h>
61 #include <sys/socketvar.h>
62 #include <sys/signalvar.h>
63 #include <sys/syscallsubr.h>
64 #include <sys/sysctl.h>
66 #include <sys/vnode.h>
68 #include <sys/ktrace.h>
72 #include <vm/vm_object.h>
73 #include <vm/vm_page.h>
74 #include <vm/vm_pageout.h>
75 #include <vm/vm_kern.h>
76 #include <vm/vm_extern.h>
78 static int sendit(struct thread *td, int s, struct msghdr *mp, int flags);
79 static int recvit(struct thread *td, int s, struct msghdr *mp, void *namelenp);
81 static int accept1(struct thread *td, struct accept_args *uap, int compat);
82 static int do_sendfile(struct thread *td, struct sendfile_args *uap, int compat);
83 static int getsockname1(struct thread *td, struct getsockname_args *uap,
85 static int getpeername1(struct thread *td, struct getpeername_args *uap,
89 * NSFBUFS-related variables and associated sysctls
95 SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufs, CTLFLAG_RDTUN, &nsfbufs, 0,
96 "Maximum number of sendfile(2) sf_bufs available");
97 SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufspeak, CTLFLAG_RD, &nsfbufspeak, 0,
98 "Number of sendfile(2) sf_bufs at peak usage");
99 SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufsused, CTLFLAG_RD, &nsfbufsused, 0,
100 "Number of sendfile(2) sf_bufs in use");
103 * Convert a user file descriptor to a kernel file entry. A reference on the
104 * file entry is held upon returning. This is lighter weight than
105 * fgetsock(), which bumps the socket reference and drops the file reference
106 * count instead, as this approach avoids several additional mutex operations
107 * associated with the additional reference count. If requested, return the
111 getsock(struct filedesc *fdp, int fd, struct file **fpp, u_int *fflagp)
120 FILEDESC_LOCK_FAST(fdp);
121 fp = fget_locked(fdp, fd);
124 else if (fp->f_type != DTYPE_SOCKET) {
130 *fflagp = fp->f_flag;
133 FILEDESC_UNLOCK_FAST(fdp);
140 * System call interface to the socket abstraction.
142 #if defined(COMPAT_43)
143 #define COMPAT_OLDSOCK
152 register struct socket_args /* {
158 struct filedesc *fdp;
164 error = mac_check_socket_create(td->td_ucred, uap->domain, uap->type,
169 fdp = td->td_proc->p_fd;
170 error = falloc(td, &fp, &fd);
173 /* An extra reference on `fp' has been held for us by falloc(). */
175 error = socreate(uap->domain, &so, uap->type, uap->protocol,
179 fdclose(fdp, fp, fd, td);
181 FILEDESC_LOCK_FAST(fdp);
182 fp->f_data = so; /* already has ref count */
183 fp->f_flag = FREAD|FWRITE;
184 fp->f_ops = &socketops;
185 fp->f_type = DTYPE_SOCKET;
186 FILEDESC_UNLOCK_FAST(fdp);
187 td->td_retval[0] = fd;
200 register struct bind_args /* {
209 if ((error = getsockaddr(&sa, uap->name, uap->namelen)) != 0)
212 error = kern_bind(td, uap->s, sa);
218 kern_bind(td, fd, sa)
228 error = getsock(td->td_proc->p_fd, fd, &fp, NULL);
234 error = mac_check_socket_bind(td->td_ucred, so, sa);
239 error = sobind(so, sa, td);
256 register struct listen_args /* {
266 error = getsock(td->td_proc->p_fd, uap->s, &fp, NULL);
271 error = mac_check_socket_listen(td->td_ucred, so);
276 error = solisten(so, uap->backlog, td);
291 accept1(td, uap, compat)
293 register struct accept_args /* {
295 struct sockaddr * __restrict name;
296 socklen_t * __restrict anamelen;
300 struct sockaddr *name;
305 if (uap->name == NULL)
306 return (kern_accept(td, uap->s, NULL, NULL, NULL));
308 error = copyin(uap->anamelen, &namelen, sizeof (namelen));
312 error = kern_accept(td, uap->s, &name, &namelen, &fp);
315 * return a namelen of zero for older code which might
316 * ignore the return value from accept.
319 (void) copyout(&namelen,
320 uap->anamelen, sizeof(*uap->anamelen));
324 if (error == 0 && name != NULL) {
325 #ifdef COMPAT_OLDSOCK
327 ((struct osockaddr *)name)->sa_family =
330 error = copyout(name, uap->name, namelen);
333 error = copyout(&namelen, uap->anamelen,
336 fdclose(td->td_proc->p_fd, fp, td->td_retval[0], td);
338 free(name, M_SONAME);
343 kern_accept(struct thread *td, int s, struct sockaddr **name,
344 socklen_t *namelen, struct file **fp)
346 struct filedesc *fdp;
347 struct file *headfp, *nfp = NULL;
348 struct sockaddr *sa = NULL;
350 struct socket *head, *so;
362 fdp = td->td_proc->p_fd;
364 error = getsock(fdp, s, &headfp, &fflag);
367 head = headfp->f_data;
368 if ((head->so_options & SO_ACCEPTCONN) == 0) {
374 error = mac_check_socket_accept(td->td_ucred, head);
379 error = falloc(td, &nfp, &fd);
383 if ((head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->so_comp)) {
388 while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0) {
389 if (head->so_rcv.sb_state & SBS_CANTRCVMORE) {
390 head->so_error = ECONNABORTED;
393 error = msleep(&head->so_timeo, &accept_mtx, PSOCK | PCATCH,
400 if (head->so_error) {
401 error = head->so_error;
406 so = TAILQ_FIRST(&head->so_comp);
407 KASSERT(!(so->so_qstate & SQ_INCOMP), ("accept1: so SQ_INCOMP"));
408 KASSERT(so->so_qstate & SQ_COMP, ("accept1: so not SQ_COMP"));
411 * Before changing the flags on the socket, we have to bump the
412 * reference count. Otherwise, if the protocol calls sofree(),
413 * the socket will be released due to a zero refcount.
415 SOCK_LOCK(so); /* soref() and so_state update */
416 soref(so); /* file descriptor reference */
418 TAILQ_REMOVE(&head->so_comp, so, so_list);
420 so->so_state |= (head->so_state & SS_NBIO);
421 so->so_qstate &= ~SQ_COMP;
427 /* An extra reference on `nfp' has been held for us by falloc(). */
428 td->td_retval[0] = fd;
430 /* connection has been removed from the listen queue */
431 KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0);
433 pgid = fgetown(&head->so_sigio);
435 fsetown(pgid, &so->so_sigio);
438 nfp->f_data = so; /* nfp has ref count from falloc */
440 nfp->f_ops = &socketops;
441 nfp->f_type = DTYPE_SOCKET;
443 /* Sync socket nonblocking/async state with file flags */
444 tmp = fflag & FNONBLOCK;
445 (void) fo_ioctl(nfp, FIONBIO, &tmp, td->td_ucred, td);
446 tmp = fflag & FASYNC;
447 (void) fo_ioctl(nfp, FIOASYNC, &tmp, td->td_ucred, td);
449 error = soaccept(so, &sa);
452 * return a namelen of zero for older code which might
453 * ignore the return value from accept.
465 /* check sa_len before it is destroyed */
466 if (*namelen > sa->sa_len)
467 *namelen = sa->sa_len;
476 * close the new descriptor, assuming someone hasn't ripped it
480 fdclose(fdp, nfp, fd, td);
483 * Release explicitly held references before returning. We return
484 * a reference on nfp to the caller on success if they request it.
503 * MPSAFE (accept1() is MPSAFE)
508 struct accept_args *uap;
511 return (accept1(td, uap, 0));
514 #ifdef COMPAT_OLDSOCK
516 * MPSAFE (accept1() is MPSAFE)
521 struct accept_args *uap;
524 return (accept1(td, uap, 1));
526 #endif /* COMPAT_OLDSOCK */
535 register struct connect_args /* {
544 error = getsockaddr(&sa, uap->name, uap->namelen);
548 error = kern_connect(td, uap->s, sa);
555 kern_connect(td, fd, sa)
566 error = getsock(td->td_proc->p_fd, fd, &fp, NULL);
570 if (so->so_state & SS_ISCONNECTING) {
576 error = mac_check_socket_connect(td->td_ucred, so, sa);
581 error = soconnect(so, sa, td);
584 if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
589 while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
590 error = msleep(&so->so_timeo, SOCK_MTX(so), PSOCK | PCATCH,
593 if (error == EINTR || error == ERESTART)
599 error = so->so_error;
605 so->so_state &= ~SS_ISCONNECTING;
606 if (error == ERESTART)
621 register struct socketpair_args /* {
628 register struct filedesc *fdp = td->td_proc->p_fd;
629 struct file *fp1, *fp2;
630 struct socket *so1, *so2;
631 int fd, error, sv[2];
634 /* We might want to have a separate check for socket pairs. */
635 error = mac_check_socket_create(td->td_ucred, uap->domain, uap->type,
642 error = socreate(uap->domain, &so1, uap->type, uap->protocol,
646 error = socreate(uap->domain, &so2, uap->type, uap->protocol,
650 /* On success extra reference to `fp1' and 'fp2' is set by falloc. */
651 error = falloc(td, &fp1, &fd);
655 fp1->f_data = so1; /* so1 already has ref count */
656 error = falloc(td, &fp2, &fd);
659 fp2->f_data = so2; /* so2 already has ref count */
661 error = soconnect2(so1, so2);
664 if (uap->type == SOCK_DGRAM) {
666 * Datagram socket connection is asymmetric.
668 error = soconnect2(so2, so1);
673 fp1->f_flag = FREAD|FWRITE;
674 fp1->f_ops = &socketops;
675 fp1->f_type = DTYPE_SOCKET;
678 fp2->f_flag = FREAD|FWRITE;
679 fp2->f_ops = &socketops;
680 fp2->f_type = DTYPE_SOCKET;
682 error = copyout(sv, uap->rsv, 2 * sizeof (int));
687 fdclose(fdp, fp2, sv[1], td);
690 fdclose(fdp, fp1, sv[0], td);
702 sendit(td, s, mp, flags)
703 register struct thread *td;
705 register struct msghdr *mp;
708 struct mbuf *control;
712 if (mp->msg_name != NULL) {
713 error = getsockaddr(&to, mp->msg_name, mp->msg_namelen);
723 if (mp->msg_control) {
724 if (mp->msg_controllen < sizeof(struct cmsghdr)
725 #ifdef COMPAT_OLDSOCK
726 && mp->msg_flags != MSG_COMPAT
732 error = sockargs(&control, mp->msg_control,
733 mp->msg_controllen, MT_CONTROL);
736 #ifdef COMPAT_OLDSOCK
737 if (mp->msg_flags == MSG_COMPAT) {
738 register struct cmsghdr *cm;
740 M_PREPEND(control, sizeof(*cm), M_TRYWAIT);
745 cm = mtod(control, struct cmsghdr *);
746 cm->cmsg_len = control->m_len;
747 cm->cmsg_level = SOL_SOCKET;
748 cm->cmsg_type = SCM_RIGHTS;
756 error = kern_sendit(td, s, mp, flags, control, UIO_USERSPACE);
765 kern_sendit(td, s, mp, flags, control, segflg)
770 struct mbuf *control;
780 struct uio *ktruio = NULL;
784 error = getsock(td->td_proc->p_fd, s, &fp, NULL);
787 so = (struct socket *)fp->f_data;
791 error = mac_check_socket_send(td->td_ucred, so);
797 auio.uio_iov = mp->msg_iov;
798 auio.uio_iovcnt = mp->msg_iovlen;
799 auio.uio_segflg = segflg;
800 auio.uio_rw = UIO_WRITE;
802 auio.uio_offset = 0; /* XXX */
805 for (i = 0; i < mp->msg_iovlen; i++, iov++) {
806 if ((auio.uio_resid += iov->iov_len) < 0) {
812 if (KTRPOINT(td, KTR_GENIO))
813 ktruio = cloneuio(&auio);
815 len = auio.uio_resid;
816 error = sosend(so, mp->msg_name, &auio, 0, control, flags, td);
818 if (auio.uio_resid != len && (error == ERESTART ||
819 error == EINTR || error == EWOULDBLOCK))
821 /* Generation of SIGPIPE can be controlled per socket */
822 if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
823 !(flags & MSG_NOSIGNAL)) {
824 PROC_LOCK(td->td_proc);
825 psignal(td->td_proc, SIGPIPE);
826 PROC_UNLOCK(td->td_proc);
830 td->td_retval[0] = len - auio.uio_resid;
832 if (ktruio != NULL) {
833 ktruio->uio_resid = td->td_retval[0];
834 ktrgenio(s, UIO_WRITE, ktruio, error);
850 register struct sendto_args /* {
863 msg.msg_name = uap->to;
864 msg.msg_namelen = uap->tolen;
868 #ifdef COMPAT_OLDSOCK
871 aiov.iov_base = uap->buf;
872 aiov.iov_len = uap->len;
873 error = sendit(td, uap->s, &msg, uap->flags);
877 #ifdef COMPAT_OLDSOCK
884 register struct osend_args /* {
899 aiov.iov_base = uap->buf;
900 aiov.iov_len = uap->len;
903 error = sendit(td, uap->s, &msg, uap->flags);
913 struct osendmsg_args /* {
923 error = copyin(uap->msg, &msg, sizeof (struct omsghdr));
926 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
930 msg.msg_flags = MSG_COMPAT;
931 error = sendit(td, uap->s, &msg, uap->flags);
943 struct sendmsg_args /* {
953 error = copyin(uap->msg, &msg, sizeof (msg));
956 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
960 #ifdef COMPAT_OLDSOCK
963 error = sendit(td, uap->s, &msg, uap->flags);
969 kern_recvit(td, s, mp, fromseg, controlp)
973 enum uio_seg fromseg;
974 struct mbuf **controlp;
981 struct mbuf *m, *control = 0;
985 struct sockaddr *fromsa = 0;
987 struct uio *ktruio = NULL;
994 error = getsock(td->td_proc->p_fd, s, &fp, NULL);
1003 error = mac_check_socket_receive(td->td_ucred, so);
1012 auio.uio_iov = mp->msg_iov;
1013 auio.uio_iovcnt = mp->msg_iovlen;
1014 auio.uio_segflg = UIO_USERSPACE;
1015 auio.uio_rw = UIO_READ;
1017 auio.uio_offset = 0; /* XXX */
1020 for (i = 0; i < mp->msg_iovlen; i++, iov++) {
1021 if ((auio.uio_resid += iov->iov_len) < 0) {
1028 if (KTRPOINT(td, KTR_GENIO))
1029 ktruio = cloneuio(&auio);
1031 len = auio.uio_resid;
1032 error = soreceive(so, &fromsa, &auio, (struct mbuf **)0,
1033 (mp->msg_control || controlp) ? &control : (struct mbuf **)0,
1036 if (auio.uio_resid != (int)len && (error == ERESTART ||
1037 error == EINTR || error == EWOULDBLOCK))
1041 if (ktruio != NULL) {
1042 ktruio->uio_resid = (int)len - auio.uio_resid;
1043 ktrgenio(s, UIO_READ, ktruio, error);
1048 td->td_retval[0] = (int)len - auio.uio_resid;
1050 len = mp->msg_namelen;
1051 if (len <= 0 || fromsa == 0)
1054 /* save sa_len before it is destroyed by MSG_COMPAT */
1055 len = MIN(len, fromsa->sa_len);
1056 #ifdef COMPAT_OLDSOCK
1057 if (mp->msg_flags & MSG_COMPAT)
1058 ((struct osockaddr *)fromsa)->sa_family =
1061 if (fromseg == UIO_USERSPACE) {
1062 error = copyout(fromsa, mp->msg_name,
1067 bcopy(fromsa, mp->msg_name, len);
1069 mp->msg_namelen = len;
1071 if (mp->msg_control && controlp == NULL) {
1072 #ifdef COMPAT_OLDSOCK
1074 * We assume that old recvmsg calls won't receive access
1075 * rights and other control info, esp. as control info
1076 * is always optional and those options didn't exist in 4.3.
1077 * If we receive rights, trim the cmsghdr; anything else
1080 if (control && mp->msg_flags & MSG_COMPAT) {
1081 if (mtod(control, struct cmsghdr *)->cmsg_level !=
1083 mtod(control, struct cmsghdr *)->cmsg_type !=
1085 mp->msg_controllen = 0;
1088 control->m_len -= sizeof (struct cmsghdr);
1089 control->m_data += sizeof (struct cmsghdr);
1092 len = mp->msg_controllen;
1094 mp->msg_controllen = 0;
1095 ctlbuf = mp->msg_control;
1097 while (m && len > 0) {
1098 unsigned int tocopy;
1100 if (len >= m->m_len)
1103 mp->msg_flags |= MSG_CTRUNC;
1107 if ((error = copyout(mtod(m, caddr_t),
1108 ctlbuf, tocopy)) != 0)
1115 mp->msg_controllen = ctlbuf - (caddr_t)mp->msg_control;
1121 FREE(fromsa, M_SONAME);
1123 if (error == 0 && controlp != NULL)
1124 *controlp = control;
1132 recvit(td, s, mp, namelenp)
1140 error = kern_recvit(td, s, mp, UIO_USERSPACE, NULL);
1144 error = copyout(&mp->msg_namelen, namelenp, sizeof (socklen_t));
1145 #ifdef COMPAT_OLDSOCK
1146 if (mp->msg_flags & MSG_COMPAT)
1147 error = 0; /* old recvfrom didn't check */
1159 register struct recvfrom_args /* {
1164 struct sockaddr * __restrict from;
1165 socklen_t * __restrict fromlenaddr;
1172 if (uap->fromlenaddr) {
1173 error = copyin(uap->fromlenaddr,
1174 &msg.msg_namelen, sizeof (msg.msg_namelen));
1178 msg.msg_namelen = 0;
1180 msg.msg_name = uap->from;
1181 msg.msg_iov = &aiov;
1183 aiov.iov_base = uap->buf;
1184 aiov.iov_len = uap->len;
1185 msg.msg_control = 0;
1186 msg.msg_flags = uap->flags;
1187 error = recvit(td, uap->s, &msg, uap->fromlenaddr);
1192 #ifdef COMPAT_OLDSOCK
1199 struct recvfrom_args *uap;
1202 uap->flags |= MSG_COMPAT;
1203 return (recvfrom(td, uap));
1208 #ifdef COMPAT_OLDSOCK
1215 register struct orecv_args /* {
1227 msg.msg_namelen = 0;
1228 msg.msg_iov = &aiov;
1230 aiov.iov_base = uap->buf;
1231 aiov.iov_len = uap->len;
1232 msg.msg_control = 0;
1233 msg.msg_flags = uap->flags;
1234 error = recvit(td, uap->s, &msg, NULL);
1239 * Old recvmsg. This code takes advantage of the fact that the old msghdr
1240 * overlays the new one, missing only the flags, and with the (old) access
1241 * rights where the control fields are now.
1248 struct orecvmsg_args /* {
1250 struct omsghdr *msg;
1258 error = copyin(uap->msg, &msg, sizeof (struct omsghdr));
1261 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1264 msg.msg_flags = uap->flags | MSG_COMPAT;
1266 error = recvit(td, uap->s, &msg, &uap->msg->msg_namelen);
1267 if (msg.msg_controllen && error == 0)
1268 error = copyout(&msg.msg_controllen,
1269 &uap->msg->msg_accrightslen, sizeof (int));
1281 struct recvmsg_args /* {
1288 struct iovec *uiov, *iov;
1291 error = copyin(uap->msg, &msg, sizeof (msg));
1294 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1297 msg.msg_flags = uap->flags;
1298 #ifdef COMPAT_OLDSOCK
1299 msg.msg_flags &= ~MSG_COMPAT;
1303 error = recvit(td, uap->s, &msg, NULL);
1306 error = copyout(&msg, uap->msg, sizeof(msg));
1319 register struct shutdown_args /* {
1329 error = getsock(td->td_proc->p_fd, uap->s, &fp, NULL);
1332 error = soshutdown(so, uap->how);
1346 register struct setsockopt_args /* {
1355 return (kern_setsockopt(td, uap->s, uap->level, uap->name,
1356 uap->val, UIO_USERSPACE, uap->valsize));
1360 kern_setsockopt(td, s, level, name, val, valseg, valsize)
1366 enum uio_seg valseg;
1372 struct sockopt sopt;
1374 if (val == NULL && valsize != 0)
1376 if ((int)valsize < 0)
1379 sopt.sopt_dir = SOPT_SET;
1380 sopt.sopt_level = level;
1381 sopt.sopt_name = name;
1382 sopt.sopt_val = val;
1383 sopt.sopt_valsize = valsize;
1389 sopt.sopt_td = NULL;
1392 panic("kern_setsockopt called with bad valseg");
1396 error = getsock(td->td_proc->p_fd, s, &fp, NULL);
1399 error = sosetopt(so, &sopt);
1413 register struct getsockopt_args /* {
1417 void * __restrict val;
1418 socklen_t * __restrict avalsize;
1425 error = copyin(uap->avalsize, &valsize, sizeof (valsize));
1430 error = kern_getsockopt(td, uap->s, uap->level, uap->name,
1431 uap->val, UIO_USERSPACE, &valsize);
1434 error = copyout(&valsize, uap->avalsize, sizeof (valsize));
1439 * Kernel version of getsockopt.
1440 * optval can be a userland or kernel address. optlen is always a kernel pointer.
1443 kern_getsockopt(td, s, level, name, val, valseg, valsize)
1449 enum uio_seg valseg;
1455 struct sockopt sopt;
1459 if ((int)*valsize < 0)
1462 sopt.sopt_dir = SOPT_GET;
1463 sopt.sopt_level = level;
1464 sopt.sopt_name = name;
1465 sopt.sopt_val = val;
1466 sopt.sopt_valsize = (size_t)*valsize; /* checked non-negative above */
1472 sopt.sopt_td = NULL;
1475 panic("kern_getsockopt called with bad valseg");
1479 error = getsock(td->td_proc->p_fd, s, &fp, NULL);
1482 error = sogetopt(so, &sopt);
1483 *valsize = sopt.sopt_valsize;
1491 * getsockname1() - Get socket name.
1497 getsockname1(td, uap, compat)
1499 register struct getsockname_args /* {
1501 struct sockaddr * __restrict asa;
1502 socklen_t * __restrict alen;
1506 struct sockaddr *sa;
1510 error = copyin(uap->alen, &len, sizeof(len));
1514 error = kern_getsockname(td, uap->fdes, &sa, &len);
1519 #ifdef COMPAT_OLDSOCK
1521 ((struct osockaddr *)sa)->sa_family = sa->sa_family;
1523 error = copyout(sa, uap->asa, (u_int)len);
1527 error = copyout(&len, uap->alen, sizeof(len));
1532 kern_getsockname(struct thread *td, int fd, struct sockaddr **sa,
1544 error = getsock(td->td_proc->p_fd, fd, &fp, NULL);
1549 error = (*so->so_proto->pr_usrreqs->pru_sockaddr)(so, sa);
1555 len = MIN(*alen, (*sa)->sa_len);
1560 free(*sa, M_SONAME);
1572 getsockname(td, uap)
1574 struct getsockname_args *uap;
1577 return (getsockname1(td, uap, 0));
1580 #ifdef COMPAT_OLDSOCK
1585 ogetsockname(td, uap)
1587 struct getsockname_args *uap;
1590 return (getsockname1(td, uap, 1));
1592 #endif /* COMPAT_OLDSOCK */
1595 * getpeername1() - Get name of peer for connected socket.
1601 getpeername1(td, uap, compat)
1603 register struct getpeername_args /* {
1605 struct sockaddr * __restrict asa;
1606 socklen_t * __restrict alen;
1610 struct sockaddr *sa;
1614 error = copyin(uap->alen, &len, sizeof (len));
1618 error = kern_getpeername(td, uap->fdes, &sa, &len);
1623 #ifdef COMPAT_OLDSOCK
1625 ((struct osockaddr *)sa)->sa_family = sa->sa_family;
1627 error = copyout(sa, uap->asa, (u_int)len);
1631 error = copyout(&len, uap->alen, sizeof(len));
1636 kern_getpeername(struct thread *td, int fd, struct sockaddr **sa,
1648 error = getsock(td->td_proc->p_fd, fd, &fp, NULL);
1652 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
1657 error = (*so->so_proto->pr_usrreqs->pru_peeraddr)(so, sa);
1663 len = MIN(*alen, (*sa)->sa_len);
1667 free(*sa, M_SONAME);
1681 getpeername(td, uap)
1683 struct getpeername_args *uap;
1686 return (getpeername1(td, uap, 0));
1689 #ifdef COMPAT_OLDSOCK
1694 ogetpeername(td, uap)
1696 struct ogetpeername_args *uap;
1699 /* XXX uap should have type `getpeername_args *' to begin with. */
1700 return (getpeername1(td, (struct getpeername_args *)uap, 1));
1702 #endif /* COMPAT_OLDSOCK */
1705 sockargs(mp, buf, buflen, type)
1710 register struct sockaddr *sa;
1711 register struct mbuf *m;
1714 if ((u_int)buflen > MLEN) {
1715 #ifdef COMPAT_OLDSOCK
1716 if (type == MT_SONAME && (u_int)buflen <= 112)
1717 buflen = MLEN; /* unix domain compat. hack */
1720 if ((u_int)buflen > MCLBYTES)
1723 m = m_get(M_TRYWAIT, type);
1726 if ((u_int)buflen > MLEN) {
1727 MCLGET(m, M_TRYWAIT);
1728 if ((m->m_flags & M_EXT) == 0) {
1734 error = copyin(buf, mtod(m, caddr_t), (u_int)buflen);
1739 if (type == MT_SONAME) {
1740 sa = mtod(m, struct sockaddr *);
1742 #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
1743 if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1744 sa->sa_family = sa->sa_len;
1746 sa->sa_len = buflen;
1753 getsockaddr(namp, uaddr, len)
1754 struct sockaddr **namp;
1758 struct sockaddr *sa;
1761 if (len > SOCK_MAXADDRLEN)
1762 return (ENAMETOOLONG);
1763 if (len < offsetof(struct sockaddr, sa_data[0]))
1765 MALLOC(sa, struct sockaddr *, len, M_SONAME, M_WAITOK);
1766 error = copyin(uaddr, sa, len);
1770 #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
1771 if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1772 sa->sa_family = sa->sa_len;
1781 * Detach mapped page and release resources back to the system.
1784 sf_buf_mext(void *addr, void *args)
1788 m = sf_buf_page(args);
1790 vm_page_lock_queues();
1791 vm_page_unwire(m, 0);
1793 * Check for the object going away on us. This can
1794 * happen since we don't hold a reference to it.
1795 * If so, we're responsible for freeing the page.
1797 if (m->wire_count == 0 && m->object == NULL)
1799 vm_page_unlock_queues();
1807 * int sendfile(int fd, int s, off_t offset, size_t nbytes,
1808 * struct sf_hdtr *hdtr, off_t *sbytes, int flags)
1810 * Send a file specified by 'fd' and starting at 'offset' to a socket
1811 * specified by 's'. Send only 'nbytes' of the file or until EOF if
1812 * nbytes == 0. Optionally add a header and/or trailer to the socket
1813 * output. If specified, write the total number of bytes sent into *sbytes.
1817 sendfile(struct thread *td, struct sendfile_args *uap)
1820 return (do_sendfile(td, uap, 0));
1824 do_sendfile(struct thread *td, struct sendfile_args *uap, int compat)
1826 struct sf_hdtr hdtr;
1827 struct uio *hdr_uio, *trl_uio;
1830 hdr_uio = trl_uio = NULL;
1832 if (uap->hdtr != NULL) {
1833 error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
1836 if (hdtr.headers != NULL) {
1837 error = copyinuio(hdtr.headers, hdtr.hdr_cnt, &hdr_uio);
1841 if (hdtr.trailers != NULL) {
1842 error = copyinuio(hdtr.trailers, hdtr.trl_cnt, &trl_uio);
1849 error = kern_sendfile(td, uap, hdr_uio, trl_uio, compat);
1852 free(hdr_uio, M_IOV);
1854 free(trl_uio, M_IOV);
1858 #ifdef COMPAT_FREEBSD4
1860 freebsd4_sendfile(struct thread *td, struct freebsd4_sendfile_args *uap)
1862 struct sendfile_args args;
1866 args.offset = uap->offset;
1867 args.nbytes = uap->nbytes;
1868 args.hdtr = uap->hdtr;
1869 args.sbytes = uap->sbytes;
1870 args.flags = uap->flags;
1872 return (do_sendfile(td, &args, 1));
1874 #endif /* COMPAT_FREEBSD4 */
1877 kern_sendfile(struct thread *td, struct sendfile_args *uap,
1878 struct uio *hdr_uio, struct uio *trl_uio, int compat)
1880 struct file *sock_fp;
1882 struct vm_object *obj = NULL;
1883 struct socket *so = NULL;
1884 struct mbuf *m, *m_header = NULL;
1887 off_t off, xfsize, hdtr_size, sbytes = 0;
1888 int error, headersize = 0, headersent = 0;
1896 * The descriptor must be a regular file and have a backing VM object.
1898 if ((error = fgetvp_read(td, uap->fd, &vp)) != 0)
1900 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
1901 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1905 * Temporarily increase the backing VM object's reference
1906 * count so that a forced reclamation of its vnode does not
1907 * immediately destroy it.
1909 VM_OBJECT_LOCK(obj);
1910 if ((obj->flags & OBJ_DEAD) == 0) {
1911 vm_object_reference_locked(obj);
1912 VM_OBJECT_UNLOCK(obj);
1914 VM_OBJECT_UNLOCK(obj);
1918 VOP_UNLOCK(vp, 0, td);
1919 VFS_UNLOCK_GIANT(vfslocked);
1924 if ((error = getsock(td->td_proc->p_fd, uap->s, &sock_fp, NULL)) != 0)
1926 so = sock_fp->f_data;
1927 if (so->so_type != SOCK_STREAM) {
1931 if ((so->so_state & SS_ISCONNECTED) == 0) {
1935 if (uap->offset < 0) {
1942 error = mac_check_socket_send(td->td_ucred, so);
1949 * If specified, get the pointer to the sf_hdtr struct for
1950 * any headers/trailers.
1952 if (hdr_uio != NULL) {
1953 hdr_uio->uio_td = td;
1954 hdr_uio->uio_rw = UIO_WRITE;
1955 if (hdr_uio->uio_resid > 0) {
1956 m_header = m_uiotombuf(hdr_uio, M_DONTWAIT, 0, 0);
1957 if (m_header == NULL)
1959 headersize = m_header->m_pkthdr.len;
1961 sbytes += headersize;
1966 * Protect against multiple writers to the socket.
1968 SOCKBUF_LOCK(&so->so_snd);
1969 (void) sblock(&so->so_snd, M_WAITOK);
1970 SOCKBUF_UNLOCK(&so->so_snd);
1973 * Loop through the pages in the file, starting with the requested
1974 * offset. Get a file page (do I/O if necessary), map the file page
1975 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
1978 for (off = uap->offset; ; off += xfsize, sbytes += xfsize) {
1982 pindex = OFF_TO_IDX(off);
1983 VM_OBJECT_LOCK(obj);
1986 * Calculate the amount to transfer. Not to exceed a page,
1987 * the EOF, or the passed in nbytes.
1989 xfsize = obj->un_pager.vnp.vnp_size - off;
1990 VM_OBJECT_UNLOCK(obj);
1991 if (xfsize > PAGE_SIZE)
1993 pgoff = (vm_offset_t)(off & PAGE_MASK);
1994 if (PAGE_SIZE - pgoff < xfsize)
1995 xfsize = PAGE_SIZE - pgoff;
1996 if (uap->nbytes && xfsize > (uap->nbytes - sbytes))
1997 xfsize = uap->nbytes - sbytes;
1999 if (m_header != NULL) {
2002 SOCKBUF_LOCK(&so->so_snd);
2008 * Optimize the non-blocking case by looking at the socket space
2009 * before going to the extra work of constituting the sf_buf.
2011 SOCKBUF_LOCK(&so->so_snd);
2012 if ((so->so_state & SS_NBIO) && sbspace(&so->so_snd) <= 0) {
2013 if (so->so_snd.sb_state & SBS_CANTSENDMORE)
2017 sbunlock(&so->so_snd);
2018 SOCKBUF_UNLOCK(&so->so_snd);
2021 SOCKBUF_UNLOCK(&so->so_snd);
2022 VM_OBJECT_LOCK(obj);
2024 * Attempt to look up the page.
2026 * Allocate if not found
2028 * Wait and loop if busy.
2030 pg = vm_page_lookup(obj, pindex);
2033 pg = vm_page_alloc(obj, pindex, VM_ALLOC_NOBUSY |
2034 VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
2036 VM_OBJECT_UNLOCK(obj);
2038 VM_OBJECT_LOCK(obj);
2041 } else if (vm_page_sleep_if_busy(pg, TRUE, "sfpbsy"))
2045 * Wire the page so it does not get ripped out from
2048 vm_page_lock_queues();
2050 vm_page_unlock_queues();
2054 * If page is not valid for what we need, initiate I/O
2057 if (pg->valid && vm_page_is_valid(pg, pgoff, xfsize)) {
2058 VM_OBJECT_UNLOCK(obj);
2059 } else if (uap->flags & SF_NODISKIO) {
2065 * Ensure that our page is still around when the I/O
2068 vm_page_io_start(pg);
2069 VM_OBJECT_UNLOCK(obj);
2072 * Get the page from backing store.
2074 bsize = vp->v_mount->mnt_stat.f_iosize;
2075 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
2076 vn_lock(vp, LK_SHARED | LK_RETRY, td);
2078 * XXXMAC: Because we don't have fp->f_cred here,
2079 * we pass in NOCRED. This is probably wrong, but
2080 * is consistent with our original implementation.
2082 error = vn_rdwr(UIO_READ, vp, NULL, MAXBSIZE,
2083 trunc_page(off), UIO_NOCOPY, IO_NODELOCKED |
2084 IO_VMIO | ((MAXBSIZE / bsize) << IO_SEQSHIFT),
2085 td->td_ucred, NOCRED, &resid, td);
2086 VOP_UNLOCK(vp, 0, td);
2087 VFS_UNLOCK_GIANT(vfslocked);
2088 VM_OBJECT_LOCK(obj);
2089 vm_page_io_finish(pg);
2091 VM_OBJECT_UNLOCK(obj);
2096 vm_page_lock_queues();
2097 vm_page_unwire(pg, 0);
2099 * See if anyone else might know about this page.
2100 * If not and it is not valid, then free it.
2102 if (pg->wire_count == 0 && pg->valid == 0 &&
2103 pg->busy == 0 && !(pg->flags & PG_BUSY) &&
2104 pg->hold_count == 0) {
2107 vm_page_unlock_queues();
2108 VM_OBJECT_UNLOCK(obj);
2109 SOCKBUF_LOCK(&so->so_snd);
2110 sbunlock(&so->so_snd);
2111 SOCKBUF_UNLOCK(&so->so_snd);
2116 * Get a sendfile buf. We usually wait as long as necessary,
2117 * but this wait can be interrupted.
2119 if ((sf = sf_buf_alloc(pg, SFB_CATCH)) == NULL) {
2120 mbstat.sf_allocfail++;
2121 vm_page_lock_queues();
2122 vm_page_unwire(pg, 0);
2123 if (pg->wire_count == 0 && pg->object == NULL)
2125 vm_page_unlock_queues();
2126 SOCKBUF_LOCK(&so->so_snd);
2127 sbunlock(&so->so_snd);
2128 SOCKBUF_UNLOCK(&so->so_snd);
2134 * Get an mbuf header and set it up as having external storage.
2137 MGET(m, M_TRYWAIT, MT_DATA);
2139 MGETHDR(m, M_TRYWAIT, MT_DATA);
2142 sf_buf_mext((void *)sf_buf_kva(sf), sf);
2143 SOCKBUF_LOCK(&so->so_snd);
2144 sbunlock(&so->so_snd);
2145 SOCKBUF_UNLOCK(&so->so_snd);
2149 * Setup external storage for mbuf.
2151 MEXTADD(m, sf_buf_kva(sf), PAGE_SIZE, sf_buf_mext, sf, M_RDONLY,
2153 m->m_data = (char *)sf_buf_kva(sf) + pgoff;
2154 m->m_pkthdr.len = m->m_len = xfsize;
2164 * Add the buffer to the socket buffer chain.
2166 SOCKBUF_LOCK(&so->so_snd);
2169 * Make sure that the socket is still able to take more data.
2170 * CANTSENDMORE being true usually means that the connection
2171 * was closed. so_error is true when an error was sensed after
2173 * The state is checked after the page mapping and buffer
2174 * allocation above since those operations may block and make
2175 * any socket checks stale. From this point forward, nothing
2176 * blocks before the pru_send (or more accurately, any blocking
2177 * results in a loop back to here to re-check).
2179 SOCKBUF_LOCK_ASSERT(&so->so_snd);
2180 if ((so->so_snd.sb_state & SBS_CANTSENDMORE) || so->so_error) {
2181 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
2184 error = so->so_error;
2188 sbunlock(&so->so_snd);
2189 SOCKBUF_UNLOCK(&so->so_snd);
2193 * Wait for socket space to become available. We do this just
2194 * after checking the connection state above in order to avoid
2195 * a race condition with sbwait().
2197 if (sbspace(&so->so_snd) < so->so_snd.sb_lowat) {
2198 if (so->so_state & SS_NBIO) {
2200 sbunlock(&so->so_snd);
2201 SOCKBUF_UNLOCK(&so->so_snd);
2205 error = sbwait(&so->so_snd);
2207 * An error from sbwait usually indicates that we've
2208 * been interrupted by a signal. If we've sent anything
2209 * then return bytes sent, otherwise return the error.
2213 sbunlock(&so->so_snd);
2214 SOCKBUF_UNLOCK(&so->so_snd);
2219 SOCKBUF_UNLOCK(&so->so_snd);
2220 error = (*so->so_proto->pr_usrreqs->pru_send)(so, 0, m, 0, 0, td);
2222 SOCKBUF_LOCK(&so->so_snd);
2223 sbunlock(&so->so_snd);
2224 SOCKBUF_UNLOCK(&so->so_snd);
2229 SOCKBUF_LOCK(&so->so_snd);
2230 sbunlock(&so->so_snd);
2231 SOCKBUF_UNLOCK(&so->so_snd);
2234 * Send trailers. Wimp out and use writev(2).
2236 if (trl_uio != NULL) {
2237 error = kern_writev(td, uap->s, trl_uio);
2241 sbytes += td->td_retval[0];
2243 hdtr_size += td->td_retval[0];
2249 hdtr_size += headersize;
2252 sbytes -= headersize;
2255 * If there was no error we have to clear td->td_retval[0]
2256 * because it may have been set by writev.
2259 td->td_retval[0] = 0;
2261 if (uap->sbytes != NULL) {
2263 sbytes += hdtr_size;
2264 copyout(&sbytes, uap->sbytes, sizeof(off_t));
2267 vm_object_deallocate(obj);
2269 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
2271 VFS_UNLOCK_GIANT(vfslocked);
2280 if (error == ERESTART)