/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>

#include <vm/vm_zone.h>

#include <machine/limits.h>

struct vm_zone *socket_zone;
so_gen_t so_gencnt;	/* generation count for sockets */

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

SYSCTL_DECL(_kern_ipc);

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");
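
/*
 * Illustrative usage (not part of this file): the limit above is a
 * writable tunable, so it can be raised from the shell, e.g.:
 *
 *	sysctl kern.ipc.somaxconn=256
 */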

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

/*
 * Get a socket structure from our zone, and initialize it.
 * We don't implement `waitok' yet (see comments in uipc_domain.c).
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */

	so = zalloci(socket_zone);
	if (so) {
		/* XXX race condition for reentrant kernel */
		bzero(so, sizeof *so);
		so->so_gencnt = ++so_gencnt;
		so->so_zone = socket_zone;
	}

socreate(dom, aso, type, proto, p)
	register struct protosw *prp;
	register struct socket *so;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == 0 || prp->pr_usrreqs->pru_attach == 0)
		return (EPROTONOSUPPORT);
	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(p != 0);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_cred = p->p_ucred;

	error = (*prp->pr_usrreqs->pru_attach)(so, proto, p);
	if (error) {
		so->so_state |= SS_NOFDREF;

	struct sockaddr *nam;

	error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, p);

	so->so_gencnt = ++so_gencnt;
	zfreei(so->so_zone, so);

solisten(so, backlog, p)
	register struct socket *so;

	error = (*so->so_proto->pr_usrreqs->pru_listen)(so, p);
	if (so->so_comp.tqh_first == NULL)
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
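
	/*
	 * Illustrative user-level sketch (hypothetical code, not part of
	 * this file): a backlog outside [0, somaxconn] is clamped above,
	 * so
	 *
	 *	listen(s, INT_MAX);
	 *
	 * simply leaves so_qlimit at somaxconn rather than failing.
	 */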

	register struct socket *so;

	struct socket *head = so->so_head;

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
		return;
	if (so->so_state & SS_INCOMP) {
		TAILQ_REMOVE(&head->so_incomp, so, so_list);
	} else if (so->so_state & SS_COMP) {
		/*
		 * We must not decommission a socket that's
		 * on the accept(2) queue.  If we do, then
		 * accept(2) may hang after select(2) indicated
		 * that the listening socket was ready.
		 */
		return;
	} else
		panic("sofree: not queued");
	so->so_state &= ~SS_INCOMP;
	sbrelease(&so->so_snd);

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
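
/*
 * Illustrative user-level sketch (hypothetical code, not part of this
 * file): with SO_LINGER enabled and a nonzero l_linger, the close(2)
 * below may block in the tsleep() loop further down for up to
 * so_linger seconds while the disconnect completes:
 *
 *	struct linger l;
 *
 *	l.l_onoff = 1;
 *	l.l_linger = 10;
 *	setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof l);
 *	close(s);
 */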

	register struct socket *so;

	int s = splnet();		/* conservative */

	funsetown(so->so_sigio);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp, *sonext;

		for (sp = so->so_incomp.tqh_first; sp != NULL; sp = sonext) {
			sonext = sp->so_list.tqe_next;
			(void) soabort(sp);
		}
		for (sp = so->so_comp.tqh_first; sp != NULL; sp = sonext) {
			sonext = sp->so_list.tqe_next;
			/* Dequeue from so_comp since sofree() won't do it */
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			sp->so_state &= ~SS_COMP;
			(void) soabort(sp);
		}
	}
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep((caddr_t)&so->so_timeo,
				    PSOCK | PCATCH, "soclos",
				    so->so_linger * hz);
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2 = (*so->so_proto->pr_usrreqs->pru_detach)(so);

		if (error == 0)
			error = error2;
	}
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;

/*
 * Must be called at splnet...
 */
	return (*so->so_proto->pr_usrreqs->pru_abort)(so);

	register struct socket *so;
	struct sockaddr **nam;

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;
	if ((so->so_state & SS_ISDISCONNECTED) == 0)
		error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);

soconnect(so, nam, p)
	register struct socket *so;
	struct sockaddr *nam;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
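
	/*
	 * Illustrative user-level sketch (hypothetical code, not part of
	 * this file): a connected datagram socket can be disconnected by
	 * connecting to a null address, which takes the sodisconnect()
	 * path below:
	 *
	 *	struct sockaddr sa;
	 *
	 *	bzero(&sa, sizeof sa);
	 *	sa.sa_len = sizeof sa;
	 *	sa.sa_family = AF_UNSPEC;
	 *	(void) connect(s, &sa, sizeof sa);
	 */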
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, p);

	register struct socket *so1;

	error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2);

	register struct socket *so;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
	} else if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
	} else
		error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)

/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
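
/*
 * Hedged sketch (assumed caller, not from this file) of honoring the
 * short-count contract above: the write(2) path typically converts an
 * interrupted transfer that already moved data into a short count,
 * along these lines:
 *
 *	error = sosend(so, 0, &auio, 0, 0, 0, p);
 *	if (error && auio.uio_resid != cnt &&
 *	    (error == ERESTART || error == EINTR || error == EWOULDBLOCK))
 *		error = 0;
 *	cnt -= auio.uio_resid;
 */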

sosend(so, addr, uio, top, control, flags, p)
	register struct socket *so;
	struct sockaddr *addr;
	struct mbuf *control;

	register struct mbuf *m;
	register long space, len, resid;
	int clen = 0, error, s, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid.  On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
		error = EINVAL;
		goto out;
	}
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (p)
		p->p_stats->p_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	snderr(errno)	{ error = errno; splx(s); goto release; }
restart:
	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (so->so_state & SS_CANTSENDMORE)
		snderr(EPIPE);
	error = so->so_error;
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		/*
		 * `sendto' and `sendmsg' are allowed on a connection-
		 * based socket if it supports implied connect.
		 * Return ENOTCONN if not connected and no address is
		 * supplied.
		 */
		if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
		    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
			if ((so->so_state & SS_ISCONFIRMING) == 0 &&
			    !(resid == 0 && clen != 0))
				snderr(ENOTCONN);
		} else if (addr == 0)
			snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
			    ENOTCONN : EDESTADDRREQ);
	}
	space = sbspace(&so->so_snd);
	if ((atomic && resid > so->so_snd.sb_hiwat) ||
	    clen > so->so_snd.sb_hiwat)
		snderr(EMSGSIZE);
	if (space < resid + clen && uio &&
	    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
		if (so->so_state & SS_NBIO)
			snderr(EWOULDBLOCK);
		sbunlock(&so->so_snd);
		error = sbwait(&so->so_snd);
		if (error)
			goto out;
		goto restart;
	}
			/*
			 * Data is prepackaged in "top".
			 */
			if (flags & MSG_EOR)
				top->m_flags |= M_EOR;

			if (top == 0) {
				MGETHDR(m, M_WAIT, MT_DATA);
				m->m_pkthdr.rcvif = (struct ifnet *)0;
			} else {
				MGET(m, M_WAIT, MT_DATA);
			}
			if (resid >= MINCLSIZE) {
				MCLGET(m, M_WAIT);
				if ((m->m_flags & M_EXT) == 0)
					goto nopages;
				len = min(min(mlen, resid), space);
			} else {
nopages:
				len = min(min(mlen, resid), space);
				/*
				 * For datagram protocols, leave room
				 * for protocol headers in first mbuf.
				 */
				if (atomic && top == 0 && len < mlen)
					MH_ALIGN(m, len);
			}
			error = uiomove(mtod(m, caddr_t), (int)len, uio);
			resid = uio->uio_resid;
			top->m_pkthdr.len += len;
			if (flags & MSG_EOR)
				top->m_flags |= M_EOR;
		} while (space > 0 && atomic);
		so->so_options |= SO_DONTROUTE;
		s = splnet();				/* XXX */
		/*
		 * XXX all the SS_CANTSENDMORE checks previously
		 * done could be out of date.  We could have received
		 * a reset packet in an interrupt or maybe we slept
		 * while doing page faults in uiomove() etc.  We could
		 * probably recheck again inside the splnet() protection
		 * here, but there are probably other places that this
		 * also happens.  We must rethink this.
		 */
		error = (*so->so_proto->pr_usrreqs->pru_send)(so,
		    (flags & MSG_OOB) ? PRUS_OOB :
		    /*
		     * If the user set MSG_EOF, the protocol
		     * understands this flag, and there is nothing
		     * left to send, then use PRU_SEND_EOF instead
		     * of PRU_SEND.
		     */
		    ((flags & MSG_EOF) &&
		     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
		     (resid <= 0)) ?
			PRUS_EOF :
		    /* If there is more to send set PRUS_MORETOCOME */
		    (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
		    top, addr, control, p);
		so->so_options &= ~SO_DONTROUTE;
	} while (resid && space > 0);

release:
	sbunlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
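
/*
 * Sketch of one sockbuf record under the layout described above
 * (illustration only, not code):
 *
 *	sb_mb -> MT_SONAME -> MT_CONTROL -> MT_DATA -> MT_DATA
 *	    |          (record linked through m_next)
 *	    m_nextpkt
 *	    |
 *	    v
 *	first mbuf of the next record
 *
 * The MT_SONAME mbuf appears only for protocols that set PR_ADDR, and
 * the MT_CONTROL mbufs only when ancillary data was appended.
 */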

soreceive(so, psa, uio, mp0, controlp, flagsp)
	register struct socket *so;
	struct sockaddr **psa;
	struct mbuf **controlp;

	register struct mbuf *m, **mp;
	register int flags, len, error, s, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int orig_resid = uio->uio_resid;

	flags = *flagsp &~ MSG_EOR;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
		do {
			error = uiomove(mtod(m, caddr_t),
			    (int) min(uio->uio_resid, m->m_len), uio);
		} while (uio->uio_resid && error == 0 && m);
	}
	if (mp)
		*mp = (struct mbuf *)0;
	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
		(*pr->pr_usrreqs->pru_rcvd)(so, 0);

restart:
	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == 0 || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != 0 || !so->so_rcv.sb_cc, ("receive 1"));
		error = so->so_error;
		if ((flags & MSG_PEEK) == 0)
			so->so_error = 0;
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		if (error)
			return (error);
		goto restart;
	}
dontblock:
	if (uio->uio_procp)
		uio->uio_procp->p_stats->p_ru.ru_msgrcv++;
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *),
			    mp0 == 0);
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			MFREE(m, so->so_rcv.sb_mb);
			m = so->so_rcv.sb_mb;
		}
	}
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (controlp) {
				if (pr->pr_domain->dom_externalize &&
				    mtod(m, struct cmsghdr *)->cmsg_type ==
				    SCM_RIGHTS)
					error = (*pr->pr_domain->dom_externalize)(m);
				so->so_rcv.sb_mb = m->m_next;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
		if (controlp)
			controlp = &(*controlp)->m_next;
	}
	if ((flags & MSG_PEEK) == 0)
		m->m_nextpkt = nextrecord;
	if (type == MT_OOBDATA)
		flags |= MSG_OOB;
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
			KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
			    ("receive 3"));
		so->so_state &= ~SS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * Sockbuf must be consistent here (it points to the current
		 * mbuf and to the next record) when we drop priority;
		 * we must note any additions to the sockbuf when we
		 * block interrupts again.
		 */
		error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
		uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = (struct mbuf *)0;
				} else {
					MFREE(m, so->so_rcv.sb_mb);
					m = so->so_rcv.sb_mb;
				}
				if (m)
					m->m_nextpkt = nextrecord;
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp)
					*mp = m_copym(m, 0, len, M_WAIT);
				so->so_rcv.sb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
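
		/*
		 * Hypothetical user-level illustration (not part of this
		 * file):
		 *
		 *	n = recv(s, buf, sizeof buf, MSG_WAITALL);
		 *
		 * may still return fewer than sizeof buf bytes if a
		 * signal or timeout arrives after some data has been
		 * copied out, exactly as the loop below implements.
		 */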
		while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			error = sbwait(&so->so_rcv);
			if (error) {
				sbunlock(&so->so_rcv);
				return (0);
			}
			m = so->so_rcv.sb_mb;
			if (m)
				nextrecord = m->m_nextpkt;
		}
	if (m && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == 0)
			so->so_rcv.sb_mb = nextrecord;
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreqs->pru_rcvd)(so, flags);
	}
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		goto restart;
	}
release:
	sbunlock(&so->so_rcv);
	return (error);

	register struct socket *so;

	register struct protosw *pr = so->so_proto;

	return (*so->so_proto->pr_usrreqs->pru_shutdown)(so);

	register struct socket *so;

	register struct sockbuf *sb = &so->so_rcv;
	register struct protosw *pr = so->so_proto;
	struct sockbuf asb;

	sb->sb_flags |= SB_NOINTR;
	(void) sblock(sb, M_WAITOK);
	asb = *sb;
	bzero((caddr_t)sb, sizeof (*sb));
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
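
/*
 * Hedged sketch (assumed consumer, not from this file) of how a
 * protocol-level pr_ctloutput() routine typically uses these helpers:
 * pull a fixed-size value in with sooptcopyin() and push the result
 * back with sooptcopyout():
 *
 *	int optval, error;
 *
 *	error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
 *	if (error)
 *		return (error);
 *	...
 *	return (sooptcopyout(sopt, &optval, sizeof optval));
 */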

sooptcopyin(sopt, buf, len, minlen)
	struct sockopt *sopt;

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return EINVAL;
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	if (sopt->sopt_p != 0)
		return (copyin(sopt->sopt_val, buf, valsize));

	bcopy(sopt->sopt_val, buf, valsize);
	return 0;

	struct sockopt *sopt;

	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
				(so, sopt));
	}
	switch (sopt->sopt_name) {
	case SO_LINGER:
		error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
		so->so_linger = l.l_linger;
		if (l.l_onoff)
			so->so_options |= SO_LINGER;
		else
			so->so_options &= ~SO_LINGER;
		break;

		error = sooptcopyin(sopt, &optval, sizeof optval,
		    sizeof optval);
		if (optval)
			so->so_options |= sopt->sopt_name;
		else
			so->so_options &= ~sopt->sopt_name;
		break;

	case SO_SNDBUF:
	case SO_RCVBUF:
	case SO_SNDLOWAT:
	case SO_RCVLOWAT:
		error = sooptcopyin(sopt, &optval, sizeof optval,
		    sizeof optval);
		/*
		 * Values < 1 make no sense for any of these
		 * options, so disallow them.
		 */
		switch (sopt->sopt_name) {
		case SO_SNDBUF:
		case SO_RCVBUF:
			if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
			    &so->so_snd : &so->so_rcv,
			    (u_long) optval) == 0) {
				error = ENOBUFS;
				goto bad;
			}
			break;

		/*
		 * Make sure the low-water is never greater than
		 * the high-water.
		 */
		case SO_SNDLOWAT:
			so->so_snd.sb_lowat =
			    (optval > so->so_snd.sb_hiwat) ?
			    so->so_snd.sb_hiwat : optval;
			break;
		case SO_RCVLOWAT:
			so->so_rcv.sb_lowat =
			    (optval > so->so_rcv.sb_hiwat) ?
			    so->so_rcv.sb_hiwat : optval;
			break;
		}
		break;

	case SO_SNDTIMEO:
	case SO_RCVTIMEO:
		error = sooptcopyin(sopt, &tv, sizeof tv,
		    sizeof tv);
		/* assert(hz > 0); */
		if (tv.tv_sec < 0 || tv.tv_sec > SHRT_MAX / hz ||
		    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
			error = EDOM;
			goto bad;
		}
		/* assert(tick > 0); */
		/* assert(ULONG_MAX - SHRT_MAX >= 1000000); */
		val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
		if (val > SHRT_MAX) {
			error = EDOM;
			goto bad;
		}
		switch (sopt->sopt_name) {
		case SO_SNDTIMEO:
			so->so_snd.sb_timeo = val;
			break;
		case SO_RCVTIMEO:
			so->so_rcv.sb_timeo = val;
			break;
		}
		break;
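
		/*
		 * Worked example of the conversion above, assuming the
		 * common hz = 100 and tick = 10000 (microseconds per
		 * tick): a timeout of { tv_sec = 2, tv_usec = 500000 }
		 * becomes 2 * 100 + 500000 / 10000 = 250 ticks.
		 */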

	default:
		error = ENOPROTOOPT;
		break;
	}
	if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
		(void) ((*so->so_proto->pr_ctloutput)
			(so, sopt));
	}
bad:
	return (error);

/* Helper routine for getsockopt */
sooptcopyout(sopt, buf, len)
	struct sockopt *sopt;

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire
	 * answer must be generated ahead of time.
	 */
	valsize = min(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != 0) {
		if (sopt->sopt_p != 0)
			error = copyout(buf, sopt->sopt_val, valsize);
		else
			bcopy(buf, sopt->sopt_val, valsize);
	}
	return error;

	struct sockopt *sopt;

	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
				(so, sopt));
		} else
			return (ENOPROTOOPT);
	}
	switch (sopt->sopt_name) {
	case SO_LINGER:
		l.l_onoff = so->so_options & SO_LINGER;
		l.l_linger = so->so_linger;
		error = sooptcopyout(sopt, &l, sizeof l);
		break;

	case SO_USELOOPBACK:
		optval = so->so_options & sopt->sopt_name;
integer:
		error = sooptcopyout(sopt, &optval, sizeof optval);
		break;

	case SO_TYPE:
		optval = so->so_type;
		goto integer;
	case SO_ERROR:
		optval = so->so_error;
		goto integer;
	case SO_SNDBUF:
		optval = so->so_snd.sb_hiwat;
		goto integer;
	case SO_RCVBUF:
		optval = so->so_rcv.sb_hiwat;
		goto integer;
	case SO_SNDLOWAT:
		optval = so->so_snd.sb_lowat;
		goto integer;
	case SO_RCVLOWAT:
		optval = so->so_rcv.sb_lowat;
		goto integer;

	case SO_SNDTIMEO:
	case SO_RCVTIMEO:
		optval = (sopt->sopt_name == SO_SNDTIMEO ?
		    so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

		tv.tv_sec = optval / hz;
		tv.tv_usec = (optval % hz) * tick;
		error = sooptcopyout(sopt, &tv, sizeof tv);
		break;

	default:
		error = ENOPROTOOPT;
		break;
	}
	return (error);

void
sohasoutofband(so)
	register struct socket *so;
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);
	selwakeup(&so->so_rcv.sb_sel);
}

int
sopoll(struct socket *so, int events, struct ucred *cred, struct proc *p)
{
	int revents = 0;
	int s = splnet();

	if (events & (POLLIN | POLLRDNORM))
		if (soreadable(so))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (sowriteable(so))
			revents |= events & (POLLOUT | POLLWRNORM);

	if (events & (POLLPRI | POLLRDBAND))
		if (so->so_oobmark || (so->so_state & SS_RCVATMARK))
			revents |= events & (POLLPRI | POLLRDBAND);

	if (revents == 0) {
		if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
			selrecord(p, &so->so_rcv.sb_sel);
			so->so_rcv.sb_flags |= SB_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(p, &so->so_snd.sb_sel);
			so->so_snd.sb_flags |= SB_SEL;
		}
	}

	splx(s);
	return (revents);
}