 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <vm/vm_zone.h>
#include <machine/limits.h>
struct vm_zone *socket_zone;
so_gen_t	so_gencnt;	/* generation count for sockets */

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

SYSCTL_DECL(_kern_ipc);

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");
/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */
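/*
 * For illustration: the "switching out" mentioned above is a call through
 * the protocol switch's user-request table; soconnect() below, for example,
 * hands its work to the protocol with
 *
 *	error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, p);
 *
 * and the other entry points in this file follow the same pattern.
 */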
/*
 * Get a socket structure from our zone, and initialize it.
 * We don't implement `waitok' yet (see comments in uipc_domain.c).
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
	so = zalloci(socket_zone);
	/* XXX race condition for reentrant kernel */
	bzero(so, sizeof *so);
	so->so_gencnt = ++so_gencnt;
	so->so_zone = socket_zone;
socreate(dom, aso, type, proto, p)
	register struct protosw *prp;
	register struct socket *so;

	prp = pffindproto(dom, proto, type);
	prp = pffindtype(dom, type);
	if (prp == 0 || prp->pr_usrreqs->pru_attach == 0)
		return (EPROTONOSUPPORT);
	if (prp->pr_type != type)
	so = soalloc(p != 0);
	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_cred = p->p_ucred;
	error = (*prp->pr_usrreqs->pru_attach)(so, proto, p);
	so->so_state |= SS_NOFDREF;
	struct sockaddr *nam;
	error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, p);

	so->so_gencnt = ++so_gencnt;
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uid,
		    -(rlim_t)so->so_rcv.sb_hiwat);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uid,
		    -(rlim_t)so->so_snd.sb_hiwat);
	zfreei(so->so_zone, so);
solisten(so, backlog, p)
	register struct socket *so;

	error = (*so->so_proto->pr_usrreqs->pru_listen)(so, p);
	if (so->so_comp.tqh_first == NULL)
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0 || backlog > somaxconn)
	so->so_qlimit = backlog;
	register struct socket *so;
	struct socket *head = so->so_head;

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
	if (so->so_state & SS_INCOMP) {
		TAILQ_REMOVE(&head->so_incomp, so, so_list);
	} else if (so->so_state & SS_COMP) {
		/*
		 * We must not decommission a socket that's
		 * on the accept(2) queue.  If we do, then
		 * accept(2) may hang after select(2) indicated
		 * that the listening socket was ready.
		 */
		panic("sofree: not queued");
	so->so_state &= ~SS_INCOMP;
	sbrelease(&so->so_snd, so);
/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
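/*
 * For illustration only (a hypothetical userspace caller, not kernel code):
 * whether the close path below lingers is controlled by SO_LINGER, which an
 * application would arm before close(2), e.g.:
 *
 *	struct linger l;
 *	l.l_onoff = 1;		-- enable lingering on close
 *	l.l_linger = 5;		-- let close(2) block for up to 5 seconds
 *	if (setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof l) < 0)
 *		err(1, "setsockopt");
 *	close(s);		-- may now sleep in soclose() below
 */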
	register struct socket *so;
	int s = splnet();		/* conservative */

	funsetown(so->so_sigio);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp, *sonext;
		for (sp = so->so_incomp.tqh_first; sp != NULL; sp = sonext) {
			sonext = sp->so_list.tqe_next;
		for (sp = so->so_comp.tqh_first; sp != NULL; sp = sonext) {
			sonext = sp->so_list.tqe_next;
			/* Dequeue from so_comp since sofree() won't do it */
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			sp->so_state &= ~SS_COMP;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep((caddr_t)&so->so_timeo,
				    PSOCK | PCATCH, "soclos", so->so_linger * hz);
	int error2 = (*so->so_proto->pr_usrreqs->pru_detach)(so);
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;

/*
 * Must be called at splnet...
 */
	return (*so->so_proto->pr_usrreqs->pru_abort)(so);

	register struct socket *so;
	struct sockaddr **nam;

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;
	if ((so->so_state & SS_ISDISCONNECTED) == 0)
		error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
soconnect(so, nam, p)
	register struct socket *so;
	struct sockaddr *nam;

	if (so->so_options & SO_ACCEPTCONN)
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
	error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, p);

	register struct socket *so1;
	error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2);

	register struct socket *so;
	if ((so->so_state & SS_ISCONNECTED) == 0) {
	if (so->so_state & SS_ISDISCONNECTING) {
	error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
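/*
 * For illustration only (a hypothetical userspace caller, not part of the
 * kernel): because the send path can return EINTR/ERESTART with a short
 * count, a robust application retries until the whole buffer is written:
 *
 *	ssize_t n;
 *	size_t off = 0;
 *	while (off < len) {
 *		n = send(s, buf + off, len - off, 0);
 *		if (n < 0) {
 *			if (errno == EINTR)
 *				continue;	-- interrupted, retry
 *			break;			-- real error
 *		}
 *		off += n;			-- account for a short write
 *	}
 */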
sosend(so, addr, uio, top, control, flags, p)
	register struct socket *so;
	struct sockaddr *addr;
	struct mbuf *control;
	register struct mbuf *m;
	register long space, len, resid;
	int clen = 0, error, s, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;

	resid = uio->uio_resid;
	resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid.  On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
		p->p_stats->p_ru.ru_msgsnd++;
		clen = control->m_len;
#define	snderr(errno)	{ error = errno; splx(s); goto release; }
	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (so->so_state & SS_CANTSENDMORE)
	error = so->so_error;
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		/*
		 * `sendto' and `sendmsg' are allowed on a connection-
		 * based socket if it supports implied connect.
		 * Return ENOTCONN if not connected and no address is
		 * supplied.
		 */
		if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
		    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
			if ((so->so_state & SS_ISCONFIRMING) == 0 &&
			    !(resid == 0 && clen != 0))
		} else if (addr == 0)
			snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
			    ENOTCONN : EDESTADDRREQ);
	space = sbspace(&so->so_snd);
	if ((atomic && resid > so->so_snd.sb_hiwat) ||
	    clen > so->so_snd.sb_hiwat)
	if (space < resid + clen && uio &&
	    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
		if (so->so_state & SS_NBIO)
		sbunlock(&so->so_snd);
		error = sbwait(&so->so_snd);
		/*
		 * Data is prepackaged in "top".
		 */
		top->m_flags |= M_EOR;
		MGETHDR(m, M_WAIT, MT_DATA);
		m->m_pkthdr.rcvif = (struct ifnet *)0;
		MGET(m, M_WAIT, MT_DATA);
		if (resid >= MINCLSIZE) {
			if ((m->m_flags & M_EXT) == 0)
			len = min(min(mlen, resid), space);
		len = min(min(mlen, resid), space);
		/*
		 * For datagram protocols, leave room
		 * for protocol headers in first mbuf.
		 */
		if (atomic && top == 0 && len < mlen)
		error = uiomove(mtod(m, caddr_t), (int)len, uio);
		resid = uio->uio_resid;
		top->m_pkthdr.len += len;
		top->m_flags |= M_EOR;
	} while (space > 0 && atomic);
		so->so_options |= SO_DONTROUTE;
	s = splnet();				/* XXX */
	/*
	 * XXX all the SS_CANTSENDMORE checks previously
	 * done could be out of date.  We could have received
	 * a reset packet in an interrupt or maybe we slept
	 * while doing page faults in uiomove() etc.  We could
	 * probably recheck again inside the splnet() protection
	 * here, but there are probably other places that this
	 * also happens.  We must rethink this.
	 */
	error = (*so->so_proto->pr_usrreqs->pru_send)(so,
	    (flags & MSG_OOB) ? PRUS_OOB :
		/*
		 * If the user set MSG_EOF, the protocol
		 * understands this flag, and there is nothing
		 * left to send, then use PRU_SEND_EOF instead
		 * of PRU_SEND.
		 */
	    ((flags & MSG_EOF) &&
	     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
	    /* If there is more to send, set PRUS_MORETOCOME */
	    (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
	    top, addr, control, p);
		so->so_options &= ~SO_DONTROUTE;
	} while (resid && space > 0);
	sbunlock(&so->so_snd);
/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
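/*
 * For illustration only (a hypothetical userspace caller, not part of the
 * kernel): a datagram delivered by the receive path below carries its
 * pieces in exactly the record order described above, which recvmsg(2)
 * exposes as source address, ancillary data, and payload:
 *
 *	struct sockaddr_in from;
 *	char data[2048], cbuf[256];
 *	struct iovec iov = { data, sizeof data };
 *	struct msghdr msg;
 *	memset(&msg, 0, sizeof msg);
 *	msg.msg_name = &from;			-- the leading MT_SONAME mbuf
 *	msg.msg_namelen = sizeof from;
 *	msg.msg_iov = &iov;			-- the MT_DATA mbufs
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = cbuf;			-- the MT_CONTROL mbufs
 *	msg.msg_controllen = sizeof cbuf;
 *	if (recvmsg(s, &msg, 0) < 0)
 *		err(1, "recvmsg");
 */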
soreceive(so, psa, uio, mp0, controlp, flagsp)
	register struct socket *so;
	struct sockaddr **psa;
	struct mbuf **controlp;
	register struct mbuf *m, **mp;
	register int flags, len, error, s, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int orig_resid = uio->uio_resid;

	flags = *flagsp &~ MSG_EOR;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
		error = uiomove(mtod(m, caddr_t),
		    (int) min(uio->uio_resid, m->m_len), uio);
		} while (uio->uio_resid && error == 0 && m);
	*mp = (struct mbuf *)0;
	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
		(*pr->pr_usrreqs->pru_rcvd)(so, 0);
	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == 0 || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != 0 || !so->so_rcv.sb_cc, ("receive 1"));
		error = so->so_error;
		if ((flags & MSG_PEEK) == 0)
		if (so->so_state & SS_CANTRCVMORE) {
			for (; m; m = m->m_next)
				if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
			m = so->so_rcv.sb_mb;
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
		if (uio->uio_resid == 0)
		if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
	uio->uio_procp->p_stats->p_ru.ru_msgrcv++;
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
			*psa = dup_sockaddr(mtod(m, struct sockaddr *),
		if (flags & MSG_PEEK) {
			sbfree(&so->so_rcv, m);
			MFREE(m, so->so_rcv.sb_mb);
			m = so->so_rcv.sb_mb;
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
				*controlp = m_copy(m, 0, m->m_len);
			sbfree(&so->so_rcv, m);
			if (pr->pr_domain->dom_externalize &&
			    mtod(m, struct cmsghdr *)->cmsg_type ==
				error = (*pr->pr_domain->dom_externalize)(m);
			so->so_rcv.sb_mb = m->m_next;
			m = so->so_rcv.sb_mb;
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			controlp = &(*controlp)->m_next;
	if ((flags & MSG_PEEK) == 0)
		m->m_nextpkt = nextrecord;
	if (type == MT_OOBDATA)
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
		} else if (type == MT_OOBDATA)
		KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
		so->so_state &= ~SS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * Sockbuf must be consistent here (it must point to the
		 * current mbuf, which in turn points to the next record)
		 * when we drop priority; we must note any additions to
		 * the sockbuf when we block interrupts again.
		 */
			error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
		uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
			if (flags & MSG_PEEK) {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				so->so_rcv.sb_mb = m = m->m_next;
					*mp = (struct mbuf *)0;
					MFREE(m, so->so_rcv.sb_mb);
					m = so->so_rcv.sb_mb;
				m->m_nextpkt = nextrecord;
			if (flags & MSG_PEEK)
				*mp = m_copym(m, 0, len, M_WAIT);
			so->so_rcv.sb_cc -= len;
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
				if (offset == so->so_oobmark)
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
			error = sbwait(&so->so_rcv);
				sbunlock(&so->so_rcv);
			m = so->so_rcv.sb_mb;
				nextrecord = m->m_nextpkt;
	if (m && pr->pr_flags & PR_ATOMIC) {
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	if ((flags & MSG_PEEK) == 0) {
		so->so_rcv.sb_mb = nextrecord;
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreqs->pru_rcvd)(so, flags);
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
	sbunlock(&so->so_rcv);
	register struct socket *so;
	register struct protosw *pr = so->so_proto;
		return ((*pr->pr_usrreqs->pru_shutdown)(so));

	register struct socket *so;
	register struct sockbuf *sb = &so->so_rcv;
	register struct protosw *pr = so->so_proto;

	sb->sb_flags |= SB_NOINTR;
	(void) sblock(sb, M_WAITOK);
	bzero((caddr_t)sb, sizeof (*sb));
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
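/*
 * For illustration (a hypothetical protocol-level caller; the xx_*
 * helpers are made-up names): a pr_ctloutput() routine that takes a
 * single integer option would typically lean on sooptcopyin() and
 * sooptcopyout() the same way sosetopt() and sogetopt() below do:
 *
 *	int optval, error;
 *
 *	-- setting the option: pull in exactly one int
 *	error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
 *	if (error)
 *		return (error);
 *	if (optval < 0)
 *		return (EINVAL);
 *	xx_setoption(optval);		-- hypothetical per-protocol helper
 *
 *	-- getting the option: hand one int back to the caller
 *	optval = xx_getoption();
 *	return (sooptcopyout(sopt, &optval, sizeof optval));
 */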
sooptcopyin(sopt, buf, len, minlen)
	struct sockopt *sopt;

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
	sopt->sopt_valsize = valsize = len;
	if (sopt->sopt_p != 0)
		return (copyin(sopt->sopt_val, buf, valsize));
	bcopy(sopt->sopt_val, buf, valsize);
	struct sockopt *sopt;

	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
	switch (sopt->sopt_name) {
		error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
		so->so_linger = l.l_linger;
			so->so_options |= SO_LINGER;
			so->so_options &= ~SO_LINGER;
	case SO_USELOOPBACK:
		error = sooptcopyin(sopt, &optval, sizeof optval,
			so->so_options |= sopt->sopt_name;
			so->so_options &= ~sopt->sopt_name;
		error = sooptcopyin(sopt, &optval, sizeof optval,
		/*
		 * Values < 1 make no sense for any of these
		 * options, so disallow them.
		 */
		switch (sopt->sopt_name) {
			if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
			    &so->so_snd : &so->so_rcv, (u_long)optval,
			    so, curproc) == 0) {
			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			so->so_snd.sb_lowat =
			    (optval > so->so_snd.sb_hiwat) ?
			    so->so_snd.sb_hiwat : optval;
			so->so_rcv.sb_lowat =
			    (optval > so->so_rcv.sb_hiwat) ?
			    so->so_rcv.sb_hiwat : optval;
		error = sooptcopyin(sopt, &tv, sizeof tv,
		/* assert(hz > 0); */
		if (tv.tv_sec < 0 || tv.tv_sec > SHRT_MAX / hz ||
		    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
		/* assert(tick > 0); */
		/* assert(ULONG_MAX - SHRT_MAX >= 1000000); */
		val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
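		/*
		 * Worked example (illustrative; assumes the common HZ=100
		 * configuration, i.e. hz = 100 and tick = 10000 usec/tick):
		 * a timeout of 2.5 seconds arrives as tv_sec = 2 and
		 * tv_usec = 500000, and converts to
		 * 2 * 100 + 500000 / 10000 = 250 clock ticks, well under
		 * the SHRT_MAX limit checked below.
		 */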
		if (val > SHRT_MAX) {
		switch (sopt->sopt_name) {
			so->so_snd.sb_timeo = val;
			so->so_rcv.sb_timeo = val;
		error = ENOPROTOOPT;
	if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
		(void) ((*so->so_proto->pr_ctloutput)
/* Helper routine for getsockopt */
sooptcopyout(sopt, buf, len)
	struct sockopt *sopt;

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer
	 * must be generated ahead of time.
	 */
	valsize = min(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != 0) {
		if (sopt->sopt_p != 0)
			error = copyout(buf, sopt->sopt_val, valsize);
		bcopy(buf, sopt->sopt_val, valsize);
	struct sockopt *sopt;

	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
		return (ENOPROTOOPT);
	switch (sopt->sopt_name) {
		l.l_onoff = so->so_options & SO_LINGER;
		l.l_linger = so->so_linger;
		error = sooptcopyout(sopt, &l, sizeof l);
	case SO_USELOOPBACK:
		optval = so->so_options & sopt->sopt_name;
		error = sooptcopyout(sopt, &optval, sizeof optval);
		optval = so->so_type;
		optval = so->so_error;
		optval = so->so_snd.sb_hiwat;
		optval = so->so_rcv.sb_hiwat;
		optval = so->so_snd.sb_lowat;
		optval = so->so_rcv.sb_lowat;
		optval = (sopt->sopt_name == SO_SNDTIMEO ?
		    so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
		tv.tv_sec = optval / hz;
		tv.tv_usec = (optval % hz) * tick;
		error = sooptcopyout(sopt, &tv, sizeof tv);
		error = ENOPROTOOPT;
	register struct socket *so;

	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);
	selwakeup(&so->so_rcv.sb_sel);

sopoll(struct socket *so, int events, struct ucred *cred, struct proc *p)
	if (events & (POLLIN | POLLRDNORM))
			revents |= events & (POLLIN | POLLRDNORM);
	if (events & (POLLOUT | POLLWRNORM))
		if (sowriteable(so))
			revents |= events & (POLLOUT | POLLWRNORM);
	if (events & (POLLPRI | POLLRDBAND))
		if (so->so_oobmark || (so->so_state & SS_RCVATMARK))
			revents |= events & (POLLPRI | POLLRDBAND);
	if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
		selrecord(p, &so->so_rcv.sb_sel);
		so->so_rcv.sb_flags |= SB_SEL;
	if (events & (POLLOUT | POLLWRNORM)) {
		selrecord(p, &so->so_snd.sb_sel);
		so->so_snd.sb_flags |= SB_SEL;