 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/event.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <machine/limits.h>
static int do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
static void filt_sordetach(struct knote *kn);
static int filt_soread(struct knote *kn, long hint);
static void filt_sowdetach(struct knote *kn);
static int filt_sowrite(struct knote *kn, long hint);
static int filt_solisten(struct knote *kn, long hint);
static struct filterops solisten_filtops =
	{ 1, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ 1, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ 1, NULL, filt_sowdetach, filt_sowrite };
uma_zone_t socket_zone;
so_gen_t so_gencnt;	/* generation count for sockets */
MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
SYSCTL_DECL(_kern_ipc);
static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");
static int numopensockets;
SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
    &numopensockets, 0, "Number of open sockets");
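/*
 * Illustrative note (not part of the original source): these knobs surface
 * under the kern.ipc sysctl tree, so the listen(2) clamp and the open-socket
 * count can be inspected, and somaxconn tuned, from user space, e.g.
 * (assuming the traditional SOMAXCONN default of 128):
 *
 *	$ sysctl kern.ipc.somaxconn
 *	kern.ipc.somaxconn: 128
 *	# sysctl kern.ipc.somaxconn=1024
 *	$ sysctl -n kern.ipc.numopensockets
 */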
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
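/*
 * Illustrative sketch (not part of the original source): every routine in
 * this layer ends up dispatching through the protocol's pr_usrreqs switch,
 * so a generic operation such as connect ultimately boils down to
 *
 *	error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, td);
 *
 * with the protocol (e.g. TCP's usrreqs table) supplying the pru_*
 * implementations.
 */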
 * Get a socket structure from our zone, and initialize it.
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 * soalloc() returns a socket with a ref count of 0.
so = uma_zalloc(socket_zone, flag);
/* XXX race condition for reentrant kernel */
bzero(so, sizeof *so);
so->so_gencnt = ++so_gencnt;
/* sx_init(&so->so_sxlock, "socket sxlock"); */
TAILQ_INIT(&so->so_aiojobq);
 * socreate returns a socket with a ref count of 1.  The socket should be
 * closed with soclose().
socreate(dom, aso, type, proto, cred, td)
register struct protosw *prp;
register struct socket *so;
prp = pffindproto(dom, proto, type);
prp = pffindtype(dom, type);
if (prp == 0 || prp->pr_usrreqs->pru_attach == 0)
return (EPROTONOSUPPORT);
if (jailed(td->td_ucred) && jail_socket_unixiproute_only &&
prp->pr_domain->dom_family != PF_LOCAL &&
prp->pr_domain->dom_family != PF_INET &&
prp->pr_domain->dom_family != PF_ROUTE) {
return (EPROTONOSUPPORT);
if (prp->pr_type != type)
so = soalloc(td != 0);
TAILQ_INIT(&so->so_incomp);
TAILQ_INIT(&so->so_comp);
so->so_cred = crhold(cred);
error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
so->so_state |= SS_NOFDREF;
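/*
 * Illustrative sketch (not part of the original source): an in-kernel
 * consumer would typically pair socreate() with soclose(), e.g. to open a
 * TCP socket on behalf of the current thread:
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
 *	    td->td_ucred, td);
 *	if (error == 0)
 *		error = soclose(so);
 *
 * socreate() hands back a socket with a reference count of 1, so the
 * matching soclose() releases that last reference.
 */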
struct sockaddr *nam;
error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td);
sodealloc(struct socket *so)
KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
so->so_gencnt = ++so_gencnt;
if (so->so_rcv.sb_hiwat)
(void)chgsbsize(so->so_cred->cr_uidinfo,
    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
if (so->so_snd.sb_hiwat)
(void)chgsbsize(so->so_cred->cr_uidinfo,
    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
if (so->so_accf != NULL) {
if (so->so_accf->so_accept_filter != NULL &&
    so->so_accf->so_accept_filter->accf_destroy != NULL) {
so->so_accf->so_accept_filter->accf_destroy(so);
if (so->so_accf->so_accept_filter_str != NULL)
FREE(so->so_accf->so_accept_filter_str, M_ACCF);
FREE(so->so_accf, M_ACCF);
/* sx_destroy(&so->so_sxlock); */
uma_zfree(socket_zone, so);
solisten(so, backlog, td)
register struct socket *so;
error = (*so->so_proto->pr_usrreqs->pru_listen)(so, td);
if (TAILQ_EMPTY(&so->so_comp))
so->so_options |= SO_ACCEPTCONN;
if (backlog < 0 || backlog > somaxconn)
so->so_qlimit = backlog;
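/*
 * Illustrative note (not part of the original source): the backlog passed
 * to listen(2) is clamped against the kern.ipc.somaxconn sysctl above, so
 * from user space a call such as
 *
 *	listen(s, 4096);
 *
 * leaves so_qlimit at somaxconn when 4096 exceeds it; a negative backlog
 * is likewise raised to somaxconn.
 */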
register struct socket *so;
struct socket *head = so->so_head;
KASSERT(so->so_count == 0, ("socket %p so_count not 0", so));
if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
if (so->so_state & SS_INCOMP) {
TAILQ_REMOVE(&head->so_incomp, so, so_list);
} else if (so->so_state & SS_COMP) {
 * We must not decommission a socket that's
 * on the accept(2) queue.  If we do, then
 * accept(2) may hang after select(2) indicated
 * that the listening socket was ready.
panic("sofree: not queued");
so->so_state &= ~SS_INCOMP;
sbrelease(&so->so_snd, so);
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 * This function will sorele() the socket.  Note that soclose() may be
 * called prior to the ref count reaching zero.  The actual socket
 * structure will not be freed until the ref count reaches zero.
register struct socket *so;
int s = splnet();		/* conservative */
funsetown(&so->so_sigio);
if (so->so_options & SO_ACCEPTCONN) {
struct socket *sp, *sonext;
sp = TAILQ_FIRST(&so->so_incomp);
for (; sp != NULL; sp = sonext) {
sonext = TAILQ_NEXT(sp, so_list);
for (sp = TAILQ_FIRST(&so->so_comp); sp != NULL; sp = sonext) {
sonext = TAILQ_NEXT(sp, so_list);
/* Dequeue from so_comp since sofree() won't do it */
TAILQ_REMOVE(&so->so_comp, sp, so_list);
sp->so_state &= ~SS_COMP;
if (so->so_state & SS_ISCONNECTED) {
if ((so->so_state & SS_ISDISCONNECTING) == 0) {
error = sodisconnect(so);
if (so->so_options & SO_LINGER) {
if ((so->so_state & SS_ISDISCONNECTING) &&
    (so->so_state & SS_NBIO))
while (so->so_state & SS_ISCONNECTED) {
error = tsleep((caddr_t)&so->so_timeo,
    PSOCK | PCATCH, "soclos", so->so_linger * hz);
int error2 = (*so->so_proto->pr_usrreqs->pru_detach)(so);
if (so->so_state & SS_NOFDREF)
panic("soclose: NOFDREF");
so->so_state |= SS_NOFDREF;
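/*
 * Illustrative sketch (not part of the original source): the lingering
 * close above is what a user process opts into with SO_LINGER, e.g.:
 *
 *	struct linger l;
 *
 *	l.l_onoff = 1;
 *	l.l_linger = 5;
 *	setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
 *	close(s);
 *
 * With the socket blocking, soclose() then tsleep()s for up to
 * so_linger * hz ticks waiting for SS_ISCONNECTED to clear.
 */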
 * Must be called at splnet...
error = (*so->so_proto->pr_usrreqs->pru_abort)(so);
sotryfree(so);	/* note: does not decrement the ref count */
register struct socket *so;
struct sockaddr **nam;
if ((so->so_state & SS_NOFDREF) == 0)
panic("soaccept: !NOFDREF");
so->so_state &= ~SS_NOFDREF;
error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
soconnect(so, nam, td)
register struct socket *so;
struct sockaddr *nam;
if (so->so_options & SO_ACCEPTCONN)
 * If protocol is connection-based, can only connect once.
 * Otherwise, if connected, try to disconnect first.
 * This allows user to disconnect by connecting to, e.g.,
if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
    (error = sodisconnect(so))))
error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, td);
register struct socket *so1;
error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2);
register struct socket *so;
if ((so->so_state & SS_ISCONNECTED) == 0) {
if (so->so_state & SS_ISDISCONNECTING) {
error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
sosend(so, addr, uio, top, control, flags, td)
register struct socket *so;
struct sockaddr *addr;
struct mbuf *control;
register struct mbuf *m;
register long space, len, resid;
int clen = 0, error, s, dontroute, mlen;
int atomic = sosendallatonce(so) || top;
resid = uio->uio_resid;
resid = top->m_pkthdr.len;
 * In theory resid should be unsigned.
 * However, space must be signed, as it might be less than 0
 * if we over-committed, and we must use a signed comparison
 * of space and resid.  On the other hand, a negative resid
 * causes us to loop sending 0-length segments to the protocol.
 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
 * type sockets since that's an error.
if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
(flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
    (so->so_proto->pr_flags & PR_ATOMIC);
td->td_proc->p_stats->p_ru.ru_msgsnd++;
clen = control->m_len;
#define	snderr(errno)	{ error = errno; splx(s); goto release; }
error = sblock(&so->so_snd, SBLOCKWAIT(flags));
if (so->so_state & SS_CANTSENDMORE)
error = so->so_error;
if ((so->so_state & SS_ISCONNECTED) == 0) {
 * `sendto' and `sendmsg' are allowed on a connection-
 * based socket if it supports implied connect.
 * Return ENOTCONN if not connected and no address is
if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
if ((so->so_state & SS_ISCONFIRMING) == 0 &&
    !(resid == 0 && clen != 0))
} else if (addr == 0)
snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
    ENOTCONN : EDESTADDRREQ);
space = sbspace(&so->so_snd);
if ((atomic && resid > so->so_snd.sb_hiwat) ||
    clen > so->so_snd.sb_hiwat)
if (space < resid + clen &&
    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
if (so->so_state & SS_NBIO)
sbunlock(&so->so_snd);
error = sbwait(&so->so_snd);
 * Data is prepackaged in "top".
top->m_flags |= M_EOR;
MGETHDR(m, M_TRYWAIT, MT_DATA);
m->m_pkthdr.rcvif = (struct ifnet *)0;
MGET(m, M_TRYWAIT, MT_DATA);
if (resid >= MINCLSIZE) {
MCLGET(m, M_TRYWAIT);
if ((m->m_flags & M_EXT) == 0)
len = min(min(mlen, resid), space);
len = min(min(mlen, resid), space);
 * For datagram protocols, leave room
 * for protocol headers in first mbuf.
if (atomic && top == 0 && len < mlen)
error = uiomove(mtod(m, caddr_t), (int)len, uio);
resid = uio->uio_resid;
top->m_pkthdr.len += len;
top->m_flags |= M_EOR;
} while (space > 0 && atomic);
so->so_options |= SO_DONTROUTE;
s = splnet();				/* XXX */
 * XXX all the SS_CANTSENDMORE checks previously
 * done could be out of date.  We could have received
 * a reset packet in an interrupt or maybe we slept
 * while doing page faults in uiomove() etc.  We could
 * probably recheck again inside the splnet() protection
 * here, but there are probably other places where this
 * also happens.  We must rethink this.
error = (*so->so_proto->pr_usrreqs->pru_send)(so,
    (flags & MSG_OOB) ? PRUS_OOB :
 * If the user set MSG_EOF, the protocol
 * understands this flag, and there is nothing left to
 * send, then use PRU_SEND_EOF instead of PRU_SEND.
((flags & MSG_EOF) &&
    (so->so_proto->pr_flags & PR_IMPLOPCL) &&
/* If there is more to send, set PRUS_MORETOCOME */
    (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
    top, addr, control, td);
so->so_options &= ~SO_DONTROUTE;
} while (resid && space > 0);
sbunlock(&so->so_snd);
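/*
 * Illustrative note (not part of the original source): for an atomic
 * (PR_ATOMIC) protocol such as UDP, the "must go all at once" checks above
 * mean a datagram larger than the send buffer fails immediately rather
 * than blocking:
 *
 *	int bufsz = 2048;
 *	char big[4096];
 *
 *	setsockopt(s, SOL_SOCKET, SO_SNDBUF, &bufsz, sizeof(bufsz));
 *	send(s, big, sizeof(big), 0);
 *
 * The send() here fails with EMSGSIZE because resid exceeds
 * so_snd.sb_hiwat for an atomic send, whereas a stream socket would block
 * (or return EWOULDBLOCK under SS_NBIO/MSG_DONTWAIT) until space appears.
 */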
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
soreceive(so, psa, uio, mp0, controlp, flagsp)
register struct socket *so;
struct sockaddr **psa;
struct mbuf **controlp;
struct mbuf *m, **mp;
register int flags, len, error, s, offset;
struct protosw *pr = so->so_proto;
struct mbuf *nextrecord;
int orig_resid = uio->uio_resid;
flags = *flagsp &~ MSG_EOR;
if (flags & MSG_OOB) {
m = m_get(M_TRYWAIT, MT_DATA);
error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
error = uiomove(mtod(m, caddr_t),
    (int) min(uio->uio_resid, m->m_len), uio);
} while (uio->uio_resid && error == 0 && m);
*mp = (struct mbuf *)0;
if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
(*pr->pr_usrreqs->pru_rcvd)(so, 0);
error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
m = so->so_rcv.sb_mb;
 * If we have less data than requested, block awaiting more
 * (subject to any timeout) if:
 *   1. the current count is less than the low water mark, or
 *   2. MSG_WAITALL is set, and it is possible to do the entire
 *	receive operation at once if we block (resid <= hiwat).
 *   3. MSG_DONTWAIT is not set
 * If MSG_WAITALL is set but resid is larger than the receive buffer,
 * we have to do the receive in sections, and thus risk returning
 * a short count if a timeout or signal occurs after we start.
if (m == 0 || (((flags & MSG_DONTWAIT) == 0 &&
    so->so_rcv.sb_cc < uio->uio_resid) &&
    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
KASSERT(m != 0 || !so->so_rcv.sb_cc,
    ("receive: m == %p so->so_rcv.sb_cc == %lu",
    m, so->so_rcv.sb_cc));
error = so->so_error;
if ((flags & MSG_PEEK) == 0)
if (so->so_state & SS_CANTRCVMORE) {
for (; m; m = m->m_next)
if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
m = so->so_rcv.sb_mb;
if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
if (uio->uio_resid == 0)
if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
sbunlock(&so->so_rcv);
error = sbwait(&so->so_rcv);
uio->uio_td->td_proc->p_stats->p_ru.ru_msgrcv++;
nextrecord = m->m_nextpkt;
if (pr->pr_flags & PR_ADDR) {
KASSERT(m->m_type == MT_SONAME,
    ("m->m_type == %d", m->m_type));
*psa = dup_sockaddr(mtod(m, struct sockaddr *),
if (flags & MSG_PEEK) {
sbfree(&so->so_rcv, m);
so->so_rcv.sb_mb = m_free(m);
m = so->so_rcv.sb_mb;
while (m && m->m_type == MT_CONTROL && error == 0) {
if (flags & MSG_PEEK) {
*controlp = m_copy(m, 0, m->m_len);
sbfree(&so->so_rcv, m);
so->so_rcv.sb_mb = m->m_next;
if (pr->pr_domain->dom_externalize)
(*pr->pr_domain->dom_externalize)(m, controlp);
m = so->so_rcv.sb_mb;
controlp = &(*controlp)->m_next;
while (*controlp != NULL);
if ((flags & MSG_PEEK) == 0)
m->m_nextpkt = nextrecord;
if (type == MT_OOBDATA)
while (m && uio->uio_resid > 0 && error == 0) {
if (m->m_type == MT_OOBDATA) {
if (type != MT_OOBDATA)
} else if (type == MT_OOBDATA)
KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
    ("m->m_type == %d", m->m_type));
so->so_state &= ~SS_RCVATMARK;
len = uio->uio_resid;
if (so->so_oobmark && len > so->so_oobmark - offset)
len = so->so_oobmark - offset;
if (len > m->m_len - moff)
len = m->m_len - moff;
 * If mp is set, just pass back the mbufs.
 * Otherwise copy them out via the uio, then free.
 * The sockbuf must be consistent here (sb_mb points to the
 * current mbuf, m_nextpkt to the next record) when we drop
 * priority; we must note any additions to the sockbuf when we
 * block interrupts again.
error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
uio->uio_resid -= len;
if (len == m->m_len - moff) {
if (m->m_flags & M_EOR)
if (flags & MSG_PEEK) {
nextrecord = m->m_nextpkt;
sbfree(&so->so_rcv, m);
so->so_rcv.sb_mb = m = m->m_next;
*mp = (struct mbuf *)0;
so->so_rcv.sb_mb = m_free(m);
m = so->so_rcv.sb_mb;
m->m_nextpkt = nextrecord;
if (flags & MSG_PEEK)
*mp = m_copym(m, 0, len, M_TRYWAIT);
so->so_rcv.sb_cc -= len;
if (so->so_oobmark) {
if ((flags & MSG_PEEK) == 0) {
so->so_oobmark -= len;
if (so->so_oobmark == 0) {
so->so_state |= SS_RCVATMARK;
if (offset == so->so_oobmark)
 * If the MSG_WAITALL flag is set (for non-atomic socket),
 * we must not quit until "uio->uio_resid == 0" or an error
 * termination.  If a signal/timeout occurs, return
 * with a short count but without error.
 * Keep sockbuf locked against other readers.
while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
    !sosendallatonce(so) && !nextrecord) {
if (so->so_error || so->so_state & SS_CANTRCVMORE)
 * Notify the protocol that some data has been
 * drained before blocking.
if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
(*pr->pr_usrreqs->pru_rcvd)(so, flags);
error = sbwait(&so->so_rcv);
sbunlock(&so->so_rcv);
m = so->so_rcv.sb_mb;
nextrecord = m->m_nextpkt;
if (m && pr->pr_flags & PR_ATOMIC) {
if ((flags & MSG_PEEK) == 0)
(void) sbdroprecord(&so->so_rcv);
if ((flags & MSG_PEEK) == 0) {
so->so_rcv.sb_mb = nextrecord;
if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
(*pr->pr_usrreqs->pru_rcvd)(so, flags);
if (orig_resid == uio->uio_resid && orig_resid &&
    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
sbunlock(&so->so_rcv);
sbunlock(&so->so_rcv);
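/*
 * Illustrative note (not part of the original source): the MSG_WAITALL
 * loop above is what lets a stream reader ask for an exact amount:
 *
 *	char hdr[8];
 *	ssize_t n;
 *
 *	n = recv(s, hdr, sizeof(hdr), MSG_WAITALL);
 *
 * soreceive() keeps sleeping in sbwait() until all 8 bytes have arrived,
 * the connection is closed, or an error/signal occurs, in which case a
 * short count is returned without error (as the comment above notes).
 */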
register struct socket *so;
register struct protosw *pr = so->so_proto;
if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
return ((*pr->pr_usrreqs->pru_shutdown)(so));
register struct socket *so;
register struct sockbuf *sb = &so->so_rcv;
register struct protosw *pr = so->so_proto;
sb->sb_flags |= SB_NOINTR;
(void) sblock(sb, M_WAITOK);
bzero((caddr_t)sb, sizeof (*sb));
if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
(*pr->pr_domain->dom_dispose)(asb.sb_mb);
sbrelease(&asb, so);
do_setopt_accept_filter(so, sopt)
struct sockopt *sopt;
struct accept_filter_arg *afap = NULL;
struct accept_filter *afp;
struct so_accf *af = so->so_accf;
/* do not set/remove accept filters on non-listening sockets */
if ((so->so_options & SO_ACCEPTCONN) == 0) {
/* removing the filter */
if (af->so_accept_filter != NULL &&
    af->so_accept_filter->accf_destroy != NULL) {
af->so_accept_filter->accf_destroy(so);
if (af->so_accept_filter_str != NULL) {
FREE(af->so_accept_filter_str, M_ACCF);
so->so_options &= ~SO_ACCEPTFILTER;
/* adding a filter */
/* must remove previous filter first */
/* don't put large objects on the kernel stack */
MALLOC(afap, struct accept_filter_arg *, sizeof(*afap), M_TEMP, M_WAITOK);
error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
afap->af_name[sizeof(afap->af_name)-1] = '\0';
afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
afp = accept_filt_get(afap->af_name);
MALLOC(af, struct so_accf *, sizeof(*af), M_ACCF, M_WAITOK | M_ZERO);
if (afp->accf_create != NULL) {
if (afap->af_name[0] != '\0') {
int len = strlen(afap->af_name) + 1;
MALLOC(af->so_accept_filter_str, char *, len, M_ACCF, M_WAITOK);
strcpy(af->so_accept_filter_str, afap->af_name);
af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
if (af->so_accept_filter_arg == NULL) {
FREE(af->so_accept_filter_str, M_ACCF);
af->so_accept_filter = afp;
so->so_options |= SO_ACCEPTFILTER;
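/*
 * Illustrative sketch (not part of the original source): a listening
 * process installs one of these accept filters with SO_ACCEPTFILTER,
 * e.g. the accf_data(9) "dataready" filter:
 *
 *	struct accept_filter_arg afa;
 *
 *	bzero(&afa, sizeof(afa));
 *	strcpy(afa.af_name, "dataready");
 *	setsockopt(s, SOL_SOCKET, SO_ACCEPTFILTER, &afa, sizeof(afa));
 *
 * The filter must be set after listen(2) (the SO_ACCEPTCONN check above),
 * and accept(2) then only returns connections the filter has released.
 */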
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
sooptcopyin(sopt, buf, len, minlen)
struct sockopt *sopt;
 * If the user gives us more than we wanted, we ignore it,
 * but if we don't get the minimum length the caller
 * wants, we return EINVAL.  On success, sopt->sopt_valsize
 * is set to however much we actually retrieved.
if ((valsize = sopt->sopt_valsize) < minlen)
sopt->sopt_valsize = valsize = len;
if (sopt->sopt_td != 0)
return (copyin(sopt->sopt_val, buf, valsize));
bcopy(sopt->sopt_val, buf, valsize);
struct sockopt *sopt;
if (sopt->sopt_level != SOL_SOCKET) {
if (so->so_proto && so->so_proto->pr_ctloutput)
return ((*so->so_proto->pr_ctloutput)
error = ENOPROTOOPT;
switch (sopt->sopt_name) {
case SO_ACCEPTFILTER:
error = do_setopt_accept_filter(so, sopt);
error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
so->so_linger = l.l_linger;
so->so_options |= SO_LINGER;
so->so_options &= ~SO_LINGER;
case SO_USELOOPBACK:
error = sooptcopyin(sopt, &optval, sizeof optval,
so->so_options |= sopt->sopt_name;
so->so_options &= ~sopt->sopt_name;
error = sooptcopyin(sopt, &optval, sizeof optval,
 * Values < 1 make no sense for any of these
 * options, so disallow them.
switch (sopt->sopt_name) {
if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
    &so->so_snd : &so->so_rcv, (u_long)optval,
    so, curthread) == 0) {
 * Make sure the low-water is never greater than
so->so_snd.sb_lowat =
    (optval > so->so_snd.sb_hiwat) ?
    so->so_snd.sb_hiwat : optval;
so->so_rcv.sb_lowat =
    (optval > so->so_rcv.sb_hiwat) ?
    so->so_rcv.sb_hiwat : optval;
error = sooptcopyin(sopt, &tv, sizeof tv,
/* assert(hz > 0); */
if (tv.tv_sec < 0 || tv.tv_sec > SHRT_MAX / hz ||
    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
/* assert(tick > 0); */
/* assert(ULONG_MAX - SHRT_MAX >= 1000000); */
val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
if (val > SHRT_MAX) {
switch (sopt->sopt_name) {
so->so_snd.sb_timeo = val;
so->so_rcv.sb_timeo = val;
error = ENOPROTOOPT;
if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
(void) ((*so->so_proto->pr_ctloutput)
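/*
 * Illustrative arithmetic (not part of the original source): the
 * SO_SNDTIMEO/SO_RCVTIMEO conversion above turns a struct timeval into
 * scheduler ticks as
 *
 *	val = tv.tv_sec * hz + tv.tv_usec / tick;
 *
 * so with hz = 100 (tick = 10000 us), a 2.5 second timeout becomes
 * 2 * 100 + 500000 / 10000 = 250 ticks; out-of-range values (and results
 * above SHRT_MAX) are rejected with EDOM.
 */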
/* Helper routine for getsockopt */
sooptcopyout(sopt, buf, len)
struct sockopt *sopt;
 * Documented get behavior is that we always return a value,
 * possibly truncated to fit in the user's buffer.
 * Traditional behavior is that we always tell the user
 * precisely how much we copied, rather than something useful
 * like the total amount we had available for her.
 * Note that this interface is not idempotent; the entire answer must
 * be generated ahead of time.
valsize = min(len, sopt->sopt_valsize);
sopt->sopt_valsize = valsize;
if (sopt->sopt_val != 0) {
if (sopt->sopt_td != 0)
error = copyout(buf, sopt->sopt_val, valsize);
bcopy(buf, sopt->sopt_val, valsize);
struct sockopt *sopt;
struct accept_filter_arg *afap;
if (sopt->sopt_level != SOL_SOCKET) {
if (so->so_proto && so->so_proto->pr_ctloutput) {
return ((*so->so_proto->pr_ctloutput)
return (ENOPROTOOPT);
switch (sopt->sopt_name) {
case SO_ACCEPTFILTER:
if ((so->so_options & SO_ACCEPTCONN) == 0)
MALLOC(afap, struct accept_filter_arg *, sizeof(*afap),
    M_TEMP, M_WAITOK | M_ZERO);
if ((so->so_options & SO_ACCEPTFILTER) != 0) {
strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
if (so->so_accf->so_accept_filter_str != NULL)
strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
error = sooptcopyout(sopt, afap, sizeof(*afap));
l.l_onoff = so->so_options & SO_LINGER;
l.l_linger = so->so_linger;
error = sooptcopyout(sopt, &l, sizeof l);
case SO_USELOOPBACK:
optval = so->so_options & sopt->sopt_name;
error = sooptcopyout(sopt, &optval, sizeof optval);
optval = so->so_type;
optval = so->so_error;
optval = so->so_snd.sb_hiwat;
optval = so->so_rcv.sb_hiwat;
optval = so->so_snd.sb_lowat;
optval = so->so_rcv.sb_lowat;
optval = (sopt->sopt_name == SO_SNDTIMEO ?
    so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
tv.tv_sec = optval / hz;
tv.tv_usec = (optval % hz) * tick;
error = sooptcopyout(sopt, &tv, sizeof tv);
error = ENOPROTOOPT;
/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
struct mbuf *m, *m_prev;
int sopt_size = sopt->sopt_valsize;
MGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT, MT_DATA);
if (sopt_size > MLEN) {
MCLGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT);
if ((m->m_flags & M_EXT) == 0) {
m->m_len = min(MCLBYTES, sopt_size);
m->m_len = min(MLEN, sopt_size);
sopt_size -= m->m_len;
MGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT, MT_DATA);
if (sopt_size > MLEN) {
MCLGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT);
if ((m->m_flags & M_EXT) == 0) {
m->m_len = min(MCLBYTES, sopt_size);
m->m_len = min(MLEN, sopt_size);
sopt_size -= m->m_len;
/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
struct mbuf *m0 = m;
if (sopt->sopt_val == NULL)
while (m != NULL && sopt->sopt_valsize >= m->m_len) {
if (sopt->sopt_td != NULL) {
error = copyin(sopt->sopt_val, mtod(m, char *),
bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
sopt->sopt_valsize -= m->m_len;
(caddr_t)sopt->sopt_val += m->m_len;
if (m != NULL)	/* should have been allocated with enough space at ip6_sooptmcopyin() */
panic("ip6_sooptmcopyin");
/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
struct mbuf *m0 = m;
if (sopt->sopt_val == NULL)
while (m != NULL && sopt->sopt_valsize >= m->m_len) {
if (sopt->sopt_td != NULL) {
error = copyout(mtod(m, char *), sopt->sopt_val,
bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
sopt->sopt_valsize -= m->m_len;
(caddr_t)sopt->sopt_val += m->m_len;
valsize += m->m_len;
/* a sufficiently large soopt buffer should be supplied from user-land */
sopt->sopt_valsize = valsize;
register struct socket *so;
if (so->so_sigio != NULL)
pgsigio(&so->so_sigio, SIGURG, 0);
selwakeup(&so->so_rcv.sb_sel);
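/*
 * Illustrative note (not part of the original source): the SIGURG/select
 * wakeup above is what out-of-band aware readers rely on; a process that
 * wants the signal arranges socket ownership first:
 *
 *	fcntl(s, F_SETOWN, getpid());
 *
 * and then handles SIGURG (or polls with POLLPRI, see sopoll() below) and
 * pulls the urgent byte with recv(s, &c, 1, MSG_OOB).
 */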
sopoll(struct socket *so, int events, struct ucred *cred, struct thread *td)
if (events & (POLLIN | POLLRDNORM))
revents |= events & (POLLIN | POLLRDNORM);
if (events & POLLINIGNEOF)
if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat ||
    !TAILQ_EMPTY(&so->so_comp) || so->so_error)
revents |= POLLINIGNEOF;
if (events & (POLLOUT | POLLWRNORM))
if (sowriteable(so))
revents |= events & (POLLOUT | POLLWRNORM);
if (events & (POLLPRI | POLLRDBAND))
if (so->so_oobmark || (so->so_state & SS_RCVATMARK))
revents |= events & (POLLPRI | POLLRDBAND);
    (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM |
selrecord(td, &so->so_rcv.sb_sel);
so->so_rcv.sb_flags |= SB_SEL;
if (events & (POLLOUT | POLLWRNORM)) {
selrecord(td, &so->so_snd.sb_sel);
so->so_snd.sb_flags |= SB_SEL;
sokqfilter(struct file *fp, struct knote *kn)
struct socket *so = (struct socket *)kn->kn_fp->f_data;
switch (kn->kn_filter) {
if (so->so_options & SO_ACCEPTCONN)
kn->kn_fop = &solisten_filtops;
kn->kn_fop = &soread_filtops;
kn->kn_fop = &sowrite_filtops;
SLIST_INSERT_HEAD(&sb->sb_sel.si_note, kn, kn_selnext);
sb->sb_flags |= SB_KNOTE;
filt_sordetach(struct knote *kn)
struct socket *so = (struct socket *)kn->kn_fp->f_data;
SLIST_REMOVE(&so->so_rcv.sb_sel.si_note, kn, knote, kn_selnext);
if (SLIST_EMPTY(&so->so_rcv.sb_sel.si_note))
so->so_rcv.sb_flags &= ~SB_KNOTE;
filt_soread(struct knote *kn, long hint)
struct socket *so = (struct socket *)kn->kn_fp->f_data;
kn->kn_data = so->so_rcv.sb_cc;
if (so->so_state & SS_CANTRCVMORE) {
kn->kn_flags |= EV_EOF;
kn->kn_fflags = so->so_error;
if (so->so_error)	/* temporary udp error */
if (kn->kn_sfflags & NOTE_LOWAT)
return (kn->kn_data >= kn->kn_sdata);
return (kn->kn_data >= so->so_rcv.sb_lowat);
filt_sowdetach(struct knote *kn)
struct socket *so = (struct socket *)kn->kn_fp->f_data;
SLIST_REMOVE(&so->so_snd.sb_sel.si_note, kn, knote, kn_selnext);
if (SLIST_EMPTY(&so->so_snd.sb_sel.si_note))
so->so_snd.sb_flags &= ~SB_KNOTE;
filt_sowrite(struct knote *kn, long hint)
struct socket *so = (struct socket *)kn->kn_fp->f_data;
kn->kn_data = sbspace(&so->so_snd);
if (so->so_state & SS_CANTSENDMORE) {
kn->kn_flags |= EV_EOF;
kn->kn_fflags = so->so_error;
if (so->so_error)	/* temporary udp error */
if (((so->so_state & SS_ISCONNECTED) == 0) &&
    (so->so_proto->pr_flags & PR_CONNREQUIRED))
if (kn->kn_sfflags & NOTE_LOWAT)
return (kn->kn_data >= kn->kn_sdata);
return (kn->kn_data >= so->so_snd.sb_lowat);
filt_solisten(struct knote *kn, long hint)
struct socket *so = (struct socket *)kn->kn_fp->f_data;
kn->kn_data = so->so_qlen;
return (! TAILQ_EMPTY(&so->so_comp));
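/*
 * Illustrative sketch (not part of the original source): for a listening
 * socket, EVFILT_READ is routed to filt_solisten(), so kn_data reports the
 * length of the completed-connection queue:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, s, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * A returned event's data field then tells the server roughly how many
 * accept(2) calls can be made without blocking.
 */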
socheckuid(struct socket *so, uid_t uid)
if (so->so_cred->cr_uid == uid)