/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>

#include <machine/limits.h>
static int	do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static struct filterops solisten_filtops =
	{ 1, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ 1, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ 1, NULL, filt_sowdetach, filt_sowrite };
uma_zone_t socket_zone;
so_gen_t so_gencnt;		/* generation count for sockets */

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

SYSCTL_DECL(_kern_ipc);

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");
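
/*
 * Editorial note, not from the original source: because the OID above is
 * declared CTLFLAG_RW, the ceiling applied to the listen(2) backlog in
 * solisten() below can be retuned at runtime from userland, e.g.:
 *
 *	sysctl kern.ipc.somaxconn=1024
 */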
static int numopensockets;
SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
    &numopensockets, 0, "Number of open sockets");
/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

/*
 * Get a socket structure from our zone, and initialize it.
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 *
 * soalloc() returns a socket with a ref count of 0.
 */
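
/*
 * Illustrative sketch, not part of the original file: a caller such as
 * socreate() below is expected to use soalloc() roughly as follows; the
 * error handling shown is an assumption, and soref() is assumed to be the
 * reference-count macro from <sys/socketvar.h>.
 *
 *	struct socket *so;
 *
 *	so = soalloc(1);	// nonzero flag: allocation may sleep
 *	if (so == NULL)
 *		return (ENOBUFS);
 *	soref(so);		// take the first reference (0 -> 1)
 */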
struct socket *
soalloc(flag)
	int flag;
{
	struct socket *so;

	so = uma_zalloc(socket_zone, flag);
	if (so) {
		/* XXX race condition for reentrant kernel */
		bzero(so, sizeof *so);
		so->so_gencnt = ++so_gencnt;
		/* sx_init(&so->so_sxlock, "socket sxlock"); */
		TAILQ_INIT(&so->so_aiojobq);
		++numopensockets;
	}
	return so;
}
/*
 * socreate() returns a socket with a ref count of 1.  The socket should be
 * closed with soclose().
 */
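
/*
 * Illustrative sketch, not part of the original file: typical in-kernel
 * pairing of socreate() and soclose(); the TCP parameters and the "td"
 * thread pointer are example assumptions.
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
 *	    td->td_ucred, td);
 *	if (error)
 *		return (error);
 *	// ... use the socket ...
 *	soclose(so);		// drops the single reference from socreate()
 */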
int
socreate(dom, aso, type, proto, cred, td)
	int dom;
	struct socket **aso;
	register int type;
	int proto;
	struct ucred *cred;
	struct thread *td;
{
	register struct protosw *prp;
	register struct socket *so;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == 0 || prp->pr_usrreqs->pru_attach == 0)
		return (EPROTONOSUPPORT);

	if (jailed(td->td_ucred) && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}
	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(td != 0);
	if (so == NULL)
		return (ENOBUFS);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(cred);
	so->so_proto = prp;
	error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
	if (error) {
		so->so_state |= SS_NOFDREF;
		sorele(so);
		return (error);
	}
	*aso = so;
	return (0);
}
int
sobind(so, nam, td)
	struct socket *so;
	struct sockaddr *nam;
	struct thread *td;
{
	int s = splnet();
	int error;

	error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td);
	splx(s);
	return (error);
}
static void
sodealloc(struct socket *so)
{

	KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
	so->so_gencnt = ++so_gencnt;
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
	if (so->so_accf != NULL) {
		if (so->so_accf->so_accept_filter != NULL &&
		    so->so_accf->so_accept_filter->accf_destroy != NULL) {
			so->so_accf->so_accept_filter->accf_destroy(so);
		}
		if (so->so_accf->so_accept_filter_str != NULL)
			FREE(so->so_accf->so_accept_filter_str, M_ACCF);
		FREE(so->so_accf, M_ACCF);
	}
	/* sx_destroy(&so->so_sxlock); */
	uma_zfree(socket_zone, so);
}
int
solisten(so, backlog, td)
	register struct socket *so;
	int backlog;
	struct thread *td;
{
	int error;

	error = (*so->so_proto->pr_usrreqs->pru_listen)(so, td);

	if (TAILQ_EMPTY(&so->so_comp))
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	return (error);
}
void
sofree(so)
	register struct socket *so;
{
	struct socket *head = so->so_head;

	KASSERT(so->so_count == 0, ("socket %p so_count not 0", so));

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
		return;

	if (so->so_state & SS_INCOMP) {
		TAILQ_REMOVE(&head->so_incomp, so, so_list);
	} else if (so->so_state & SS_COMP) {
		/*
		 * We must not decommission a socket that's
		 * on the accept(2) queue.  If we do, then
		 * accept(2) may hang after select(2) indicated
		 * that the listening socket was ready.
		 */
		return;
	} else {
		panic("sofree: not queued");
	}

	so->so_state &= ~SS_INCOMP;

	sbrelease(&so->so_snd, so);
/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 *
 * This function will sorele() the socket.  Note that soclose() may be
 * called prior to the ref count reaching zero.  The actual socket
 * structure will not be freed until the ref count reaches zero.
 */
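
/*
 * Illustrative sketch, not part of the original file: a subsystem that
 * caches a socket pointer should hold its own reference so the structure
 * survives soclose(); soref()/sorele() are assumed to be the standard
 * reference-count macros from <sys/socketvar.h>.
 *
 *	soref(so);		// pin the socket
 *	...
 *	soclose(so);		// may return before the final free
 *	...
 *	sorele(so);		// last reference: structure freed here
 */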
int
soclose(so)
	register struct socket *so;
{
	int s = splnet();		/* conservative */
	int error = 0;

	funsetown(so->so_sigio);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp, *sonext;

		sp = TAILQ_FIRST(&so->so_incomp);
		for (; sp != NULL; sp = sonext) {
			sonext = TAILQ_NEXT(sp, so_list);
			(void) soabort(sp);
		}
		for (sp = TAILQ_FIRST(&so->so_comp); sp != NULL; sp = sonext) {
			sonext = TAILQ_NEXT(sp, so_list);
			/* Dequeue from so_comp since sofree() won't do it */
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			sp->so_state &= ~SS_COMP;
			(void) soabort(sp);
		}
	}
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep((caddr_t)&so->so_timeo,
				    PSOCK | PCATCH, "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2 = (*so->so_proto->pr_usrreqs->pru_detach)(so);
		if (error == 0)
			error = error2;
	}
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;
	sorele(so);
	splx(s);
	return (error);
}
/*
 * Must be called at splnet...
 */
int
soabort(so)
	struct socket *so;
{
	int error;

	error = (*so->so_proto->pr_usrreqs->pru_abort)(so);
	if (error) {
		sotryfree(so);	/* note: does not decrement the ref count */
		return error;
	}
	return (0);
}
int
soaccept(so, nam)
	register struct socket *so;
	struct sockaddr **nam;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;
	error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
	splx(s);
	return (error);
}
int
soconnect(so, nam, td)
	register struct socket *so;
	struct sockaddr *nam;
	struct thread *td;
{
	int s = splnet();
	int error;

	if (so->so_options & SO_ACCEPTCONN) {
		splx(s);
		return (EOPNOTSUPP);
	}
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, td);
	splx(s);
	return (error);
}
int
soconnect2(so1, so2)
	register struct socket *so1;
	struct socket *so2;
{
	int s = splnet();
	int error;

	error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2);
	splx(s);
	return (error);
}

int
sodisconnect(so)
	register struct socket *so;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
bad:
	splx(s);
	return (error);
}
#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
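
/*
 * Illustrative sketch, not part of the original file: per the contract
 * above, a caller must treat EINTR/ERESTART as possibly-partial sends and
 * derive the short count from the residual itself; "uio" is assumed to be
 * a fully initialized struct uio.
 *
 *	int done = uio->uio_resid;
 *
 *	error = sosend(so, NULL, uio, NULL, NULL, 0, td);
 *	done -= uio->uio_resid;		// bytes actually handed to the protocol
 *	if ((error == EINTR || error == ERESTART) && done > 0)
 *		error = 0;		// report the short count, not the signal
 */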
int
sosend(so, addr, uio, top, control, flags, td)
	register struct socket *so;
	struct sockaddr *addr;
	struct uio *uio;
	struct mbuf *top;
	struct mbuf *control;
	int flags;
	struct thread *td;
{
	register struct mbuf *m;
	register long space, len, resid;
	int clen = 0, error, s, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid.  On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
		error = EINVAL;
		goto out;
	}
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td)
		td->td_proc->p_stats->p_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	snderr(errno)	{ error = errno; splx(s); goto release; }

restart:
	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;
	do {
		s = splnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			splx(s);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					snderr(ENOTCONN);
			} else if (addr == 0)
				snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
				    ENOTCONN : EDESTADDRREQ);
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat)
			snderr(EMSGSIZE);
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if (so->so_state & SS_NBIO)
				snderr(EWOULDBLOCK);
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			splx(s);
			if (error)
				goto out;
			goto restart;
		}
		splx(s);
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				if (top == 0) {
					MGETHDR(m, M_TRYWAIT, MT_DATA);
					if (m == NULL) {
						error = ENOBUFS;
						goto release;
					}
					mlen = MHLEN;
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = (struct ifnet *)0;
				} else {
					MGET(m, M_TRYWAIT, MT_DATA);
					if (m == NULL) {
						error = ENOBUFS;
						goto release;
					}
					mlen = MLEN;
				}
				if (resid >= MINCLSIZE) {
					MCLGET(m, M_TRYWAIT);
					if ((m->m_flags & M_EXT) == 0)
						goto nopages;
					mlen = MCLBYTES;
					len = min(min(mlen, resid), space);
				} else {
nopages:
					len = min(min(mlen, resid), space);
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == 0 && len < mlen)
						MH_ALIGN(m, len);
				}
				space -= len;
				error = uiomove(mtod(m, caddr_t), (int)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid <= 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			s = splnet();			/* XXX */
			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			error = (*so->so_proto->pr_usrreqs->pru_send)(so,
			    (flags & MSG_OOB) ? PRUS_OOB :
			/*
			 * If the user set MSG_EOF, the protocol
			 * understands this flag and nothing left to
			 * send then use PRU_SEND_EOF instead of PRU_SEND.
			 */
			    ((flags & MSG_EOF) &&
			     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			     (resid <= 0)) ?
				PRUS_EOF :
			    /* If there is more to send set PRUS_MORETOCOME */
			    (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
			    top, addr, control, td);
			splx(s);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			top = 0;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	sbunlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
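
/*
 * Illustrative sketch, not part of the original file: receiving one record
 * as a raw mbuf chain through mp0, with the uio contributing only
 * uio_resid; the caller-side initialization shown is an assumption.
 *
 *	struct uio auio;
 *	struct mbuf *m = NULL;
 *	int error;
 *
 *	bzero(&auio, sizeof(auio));
 *	auio.uio_resid = 65536;		// upper bound on bytes wanted
 *	error = soreceive(so, NULL, &auio, &m, NULL, NULL);
 *	// on success "m" holds the chain; no data was copied to user space
 */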
int
soreceive(so, psa, uio, mp0, controlp, flagsp)
	register struct socket *so;
	struct sockaddr **psa;
	struct uio *uio;
	struct mbuf **mp0;
	struct mbuf **controlp;
	int *flagsp;
{
	struct mbuf *m, **mp;
	register int flags, len, error, s, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff, type = 0;
	int orig_resid = uio->uio_resid;

	mp = mp0;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_TRYWAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		do {
			error = uiomove(mtod(m, caddr_t),
			    (int) min(uio->uio_resid, m->m_len), uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if (mp)
		*mp = (struct mbuf *)0;
	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
		(*pr->pr_usrreqs->pru_rcvd)(so, 0);

restart:
	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		return (error);
	s = splnet();

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == 0 || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != 0 || !so->so_rcv.sb_cc,
		    ("receive: m == %p so->so_rcv.sb_cc == %lu",
		    m, so->so_rcv.sb_cc));
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		splx(s);
		if (error)
			return (error);
		goto restart;
	}
dontblock:
	if (uio->uio_td)
		uio->uio_td->td_proc->p_stats->p_ru.ru_msgrcv++;
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME,
		    ("m->m_type == %d", m->m_type));
		orig_resid = 0;
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *),
			    mp0 == 0);
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			so->so_rcv.sb_mb = m_free(m);
			m = so->so_rcv.sb_mb;
		}
	}
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			so->so_rcv.sb_mb = m->m_next;
			if (pr->pr_domain->dom_externalize)
				error =
				    (*pr->pr_domain->dom_externalize)(m, controlp);
			else if (controlp)
				*controlp = m;
			else
				m_freem(m);
			m = so->so_rcv.sb_mb;
		}
		if (controlp) {
			orig_resid = 0;
			do
				controlp = &(*controlp)->m_next;
			while (*controlp != NULL);
		}
	}
	if (m) {
		if ((flags & MSG_PEEK) == 0)
			m->m_nextpkt = nextrecord;
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}
	moff = 0;
	offset = 0;
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
			KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
			    ("m->m_type == %d", m->m_type));
		so->so_state &= ~SS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * Sockbuf must be consistent here (sb_mb points to the
		 * current mbuf, m_nextpkt to the next record) when we drop
		 * priority; we must note any additions to the sockbuf when
		 * we block interrupts again.
		 */
		if (mp == 0) {
			splx(s);
			error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
			s = splnet();
			if (error)
				goto release;
		} else
			uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = (struct mbuf *)0;
				} else {
					so->so_rcv.sb_mb = m_free(m);
					m = so->so_rcv.sb_mb;
				}
				if (m)
					m->m_nextpkt = nextrecord;
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp)
					*mp = m_copym(m, 0, len, M_TRYWAIT);
				m->m_len -= len;
				m->m_data += len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * Notify the protocol that some data has been
			 * drained before blocking.
			 */
			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
				(*pr->pr_usrreqs->pru_rcvd)(so, flags);
			error = sbwait(&so->so_rcv);
			if (error) {
				sbunlock(&so->so_rcv);
				splx(s);
				return (0);
			}
			m = so->so_rcv.sb_mb;
			if (m)
				nextrecord = m->m_nextpkt;
		}
	}

	if (m && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == 0)
			so->so_rcv.sb_mb = nextrecord;
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreqs->pru_rcvd)(so, flags);
	}
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		splx(s);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	sbunlock(&so->so_rcv);
	splx(s);
	return (error);
}
int
soshutdown(so, how)
	register struct socket *so;
	register int how;
{
	register struct protosw *pr = so->so_proto;

	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);
	if (how != SHUT_WR)
		sorflush(so);
	if (how != SHUT_RD)
		return ((*pr->pr_usrreqs->pru_shutdown)(so));
	return (0);
}

void
sorflush(so)
	register struct socket *so;
{
	register struct sockbuf *sb = &so->so_rcv;
	register struct protosw *pr = so->so_proto;
	struct sockbuf asb;

	sb->sb_flags |= SB_NOINTR;
	(void) sblock(sb, M_WAITOK);

	asb = *sb;
	bzero((caddr_t)sb, sizeof (*sb));

	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb, so);
}
static int
do_setopt_accept_filter(so, sopt)
	struct socket *so;
	struct sockopt *sopt;
{
	struct accept_filter_arg *afap = NULL;
	struct accept_filter *afp;
	struct so_accf *af = so->so_accf;
	int error = 0;

	/* do not set/remove accept filters on non listen sockets */
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto out;
	}

	/* removing the filter */
	if (sopt == NULL) {
		if (af != NULL) {
			if (af->so_accept_filter != NULL &&
			    af->so_accept_filter->accf_destroy != NULL) {
				af->so_accept_filter->accf_destroy(so);
			}
			if (af->so_accept_filter_str != NULL) {
				FREE(af->so_accept_filter_str, M_ACCF);
			}
			FREE(af, M_ACCF);
			so->so_accf = NULL;
		}
		so->so_options &= ~SO_ACCEPTFILTER;
		return (0);
	}
	/* adding a filter */
	/* must remove previous filter first */
	if (af != NULL) {
		error = EINVAL;
		goto out;
	}
	/* don't put large objects on the kernel stack */
	MALLOC(afap, struct accept_filter_arg *, sizeof(*afap), M_TEMP, M_WAITOK);
	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
	afap->af_name[sizeof(afap->af_name)-1] = '\0';
	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
	if (error)
		goto out;
	afp = accept_filt_get(afap->af_name);
	if (afp == NULL) {
		error = ENOENT;
		goto out;
	}
	MALLOC(af, struct so_accf *, sizeof(*af), M_ACCF, M_WAITOK | M_ZERO);
	if (afp->accf_create != NULL) {
		if (afap->af_name[0] != '\0') {
			int len = strlen(afap->af_name) + 1;

			MALLOC(af->so_accept_filter_str, char *, len, M_ACCF, M_WAITOK);
			strcpy(af->so_accept_filter_str, afap->af_name);
		}
		af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
		if (af->so_accept_filter_arg == NULL) {
			FREE(af->so_accept_filter_str, M_ACCF);
			FREE(af, M_ACCF);
			so->so_accf = NULL;
			error = EINVAL;
			goto out;
		}
	}
	af->so_accept_filter = afp;
	so->so_accf = af;
	so->so_options |= SO_ACCEPTFILTER;
out:
	if (afap != NULL)
		FREE(afap, M_TEMP);
	return (error);
}
/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
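
/*
 * Illustrative sketch, not part of the original file: how a
 * protocol-level pr_ctloutput() handler is expected to call
 * sooptcopyin() for a fixed-size integer option (the surrounding
 * handler is hypothetical).
 *
 *	int optval, error;
 *
 *	error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
 *	if (error)
 *		return (error);
 *	// ... validate optval, then apply it ...
 */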
int
sooptcopyin(sopt, buf, len, minlen)
	struct sockopt *sopt;
	void *buf;
	size_t len;
	size_t minlen;
{
	size_t valsize;

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return (EINVAL);
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	if (sopt->sopt_td != 0)
		return (copyin(sopt->sopt_val, buf, valsize));

	bcopy(sopt->sopt_val, buf, valsize);
	return (0);
}
int
sosetopt(so, sopt)
	struct socket *so;
	struct sockopt *sopt;
{
	int error, optval;
	struct linger l;
	struct timeval tv;
	u_long val;

	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
				  (so, sopt));
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {

		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			break;

		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			break;

		case SO_USELOOPBACK:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so, curthread) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				so->so_snd.sb_lowat =
				    (optval > so->so_snd.sb_hiwat) ?
				    so->so_snd.sb_hiwat : optval;
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat =
				    (optval > so->so_rcv.sb_hiwat) ?
				    so->so_rcv.sb_hiwat : optval;
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
					    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > SHRT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - SHRT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
			if (val > SHRT_MAX) {
				error = EDOM;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) ((*so->so_proto->pr_ctloutput)
				  (so, sopt));
		}
	}
bad:
	return (error);
}
/* Helper routine for getsockopt */
int
sooptcopyout(sopt, buf, len)
	struct sockopt *sopt;
	void *buf;
	size_t len;
{
	int error;
	size_t valsize;

	error = 0;

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer must
	 * be generated ahead of time.
	 */
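
/*
 * Illustrative sketch, not part of the original file: since the interface
 * is not idempotent, a getsockopt handler builds the complete value first
 * and hands it over in a single sooptcopyout() call; truncation to the
 * user's buffer happens here, not in the caller.
 *
 *	int optval;
 *
 *	optval = so->so_linger;		// example value only
 *	return (sooptcopyout(sopt, &optval, sizeof optval));
 */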
	valsize = min(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != 0) {
		if (sopt->sopt_td != 0)
			error = copyout(buf, sopt->sopt_val, valsize);
		else
			bcopy(buf, sopt->sopt_val, valsize);
	}
	return (error);
}
int
sogetopt(so, sopt)
	struct socket *so;
	struct sockopt *sopt;
{
	int error, optval;
	struct linger l;
	struct timeval tv;
	struct accept_filter_arg *afap;

	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
				  (so, sopt));
		} else
			return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {

		case SO_ACCEPTFILTER:
			if ((so->so_options & SO_ACCEPTCONN) == 0)
				return (EINVAL);
			MALLOC(afap, struct accept_filter_arg *, sizeof(*afap),
			    M_TEMP, M_WAITOK | M_ZERO);
			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
				strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
				if (so->so_accf->so_accept_filter_str != NULL)
					strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
			}
			error = sooptcopyout(sopt, afap, sizeof(*afap));
			FREE(afap, M_TEMP);
			break;

		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.sb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.sb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.sb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.sb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
				  so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * tick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		return (error);
	}
}
/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize;

	MGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT, MT_DATA);
	if (m == 0)
		return ENOBUFS;
	if (sopt_size > MLEN) {
		MCLGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return ENOBUFS;
		}
		m->m_len = min(MCLBYTES, sopt_size);
	} else {
		m->m_len = min(MLEN, sopt_size);
	}
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size) {
		MGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT, MT_DATA);
		if (m == 0) {
			m_freem(*mp);
			return ENOBUFS;
		}
		if (sopt_size > MLEN) {
			MCLGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(*mp);
				return ENOBUFS;
			}
			m->m_len = min(MCLBYTES, sopt_size);
		} else {
			m->m_len = min(MLEN, sopt_size);
		}
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return 0;
}
/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;

	if (sopt->sopt_val == NULL)
		return 0;
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			int error;

			error = copyin(sopt->sopt_val, mtod(m, char *),
			    m->m_len);
			if (error != 0) {
				m_freem(m0);
				return (error);
			}
		} else
			bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
		sopt->sopt_valsize -= m->m_len;
		(caddr_t)sopt->sopt_val += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) /* the chain should have been allocated large enough at ip6_sooptmcopyin() */
		panic("ip6_sooptmcopyin");
	return 0;
}
/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;

	if (sopt->sopt_val == NULL)
		return 0;
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			int error;

			error = copyout(mtod(m, char *), sopt->sopt_val,
			    m->m_len);
			if (error != 0) {
				m_freem(m0);
				return (error);
			}
		} else
			bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
		sopt->sopt_valsize -= m->m_len;
		(caddr_t)sopt->sopt_val += m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* a sufficiently large soopt buffer should be given from user-land */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}
void
sohasoutofband(so)
	register struct socket *so;
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);
	selwakeup(&so->so_rcv.sb_sel);
}
int
sopoll(struct socket *so, int events, struct ucred *cred, struct thread *td)
{
	int revents = 0;
	int s = splnet();

	if (events & (POLLIN | POLLRDNORM))
		if (soreadable(so))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & POLLINIGNEOF)
		if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat ||
		    !TAILQ_EMPTY(&so->so_comp) || so->so_error)
			revents |= POLLINIGNEOF;

	if (events & (POLLOUT | POLLWRNORM))
		if (sowriteable(so))
			revents |= events & (POLLOUT | POLLWRNORM);

	if (events & (POLLPRI | POLLRDBAND))
		if (so->so_oobmark || (so->so_state & SS_RCVATMARK))
			revents |= events & (POLLPRI | POLLRDBAND);

	if (revents == 0) {
		if (events &
		    (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM |
		     POLLRDBAND)) {
			selrecord(td, &so->so_rcv.sb_sel);
			so->so_rcv.sb_flags |= SB_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(td, &so->so_snd.sb_sel);
			so->so_snd.sb_flags |= SB_SEL;
		}
	}

	splx(s);
	return (revents);
}
int
sokqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	struct sockbuf *sb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		sb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		sb = &so->so_snd;
		break;
	default:
		return (1);
	}

	SLIST_INSERT_HEAD(&sb->sb_sel.si_note, kn, kn_selnext);
	sb->sb_flags |= SB_KNOTE;
	return (0);
}
static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	SLIST_REMOVE(&so->so_rcv.sb_sel.si_note, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_rcv.sb_sel.si_note))
		so->so_rcv.sb_flags &= ~SB_KNOTE;
}

/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = so->so_rcv.sb_cc;
	if (so->so_state & SS_CANTRCVMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_rcv.sb_lowat);
}

static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	SLIST_REMOVE(&so->so_snd.sb_sel.si_note, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_snd.sb_sel.si_note))
		so->so_snd.sb_flags &= ~SB_KNOTE;
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = sbspace(&so->so_snd);
	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.sb_lowat);
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen - so->so_incqlen;
	return (! TAILQ_EMPTY(&so->so_comp));
}
int
socheckuid(struct socket *so, uid_t uid)
{

	if (so == NULL)
		return (EPERM);
	if (so->so_cred->cr_uid == uid)
		return (0);
	return (EPERM);
}