/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2004 The FreeBSD Foundation
 * Copyright (c) 2004-2008 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 */
/*
 * Comments on the socket life cycle:
 *
 * soalloc() sets up socket layer state for a socket, called only by
 * socreate() and sonewconn().  Socket layer private.
 *
 * sodealloc() tears down socket layer state for a socket, called only by
 * sofree() and sonewconn().  Socket layer private.
 *
 * pru_attach() associates protocol layer state with an allocated socket;
 * called only once, may fail, aborting socket allocation.  This is called
 * from socreate() and sonewconn().  Socket layer private.
 *
 * pru_detach() disassociates protocol layer state from an attached socket,
 * and will be called exactly once for sockets in which pru_attach() has
 * been successfully called.  If pru_attach() returned an error,
 * pru_detach() will not be called.  Socket layer private.
 *
 * pru_abort() and pru_close() notify the protocol layer that the last
 * consumer of a socket is starting to tear down the socket, and that the
 * protocol should terminate the connection.  Historically, pru_abort() also
 * detached protocol state from the socket state, but this is no longer the
 * case.
 *
 * socreate() creates a socket and attaches protocol state.  This is a public
 * interface that may be used by socket layer consumers to create new
 * sockets.
 *
 * sonewconn() creates a socket and attaches protocol state.  This is a
 * public interface that may be used by protocols to create new sockets when
 * a new connection is received and will be available for accept() on a
 * listen socket.
 *
 * soclose() destroys a socket after possibly waiting for it to disconnect.
 * This is a public interface that socket consumers should use to close and
 * release a socket when done with it.
 *
 * soabort() destroys a socket without waiting for it to disconnect (used
 * only for incoming connections that are already partially or fully
 * connected).  This is used internally by the socket layer when clearing
 * listen socket queues (due to overflow or close on the listen socket), but
 * is also a public interface protocols may use to abort connections in
 * their incomplete listen queues should they no longer be required.  Sockets
 * placed in completed connection listen queues should not be aborted for
 * reasons described in the comment above the soclose() implementation.  This
 * is not a general purpose close routine, and except in the specific
 * circumstances described here, should not be used.
 *
 * sofree() will free a socket and its protocol state if all references on
 * the socket have been released, and is the public interface to attempt to
 * free a socket when a reference is removed.  This is a socket layer private
 * interface.
 *
 * NOTE: In addition to socreate() and soclose(), which provide a single
 * socket reference to the consumer to be managed as required, there are two
 * calls to explicitly manage socket references, soref(), and sorele().
 * Currently, these are generally required only when transitioning a socket
 * from a listen queue to a file descriptor, in order to prevent garbage
 * collection of the socket at an untimely moment.  For a number of reasons,
 * these interfaces are not preferred, and should be avoided.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_zero.h"
#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <net/route.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>

#include <net/vnet.h>

#include <security/mac/mac_framework.h>

#include <vm/uma.h>

#ifdef COMPAT_FREEBSD32
#include <sys/mount.h>
#include <sys/sysent.h>
#include <compat/freebsd32/freebsd32.h>
#endif
static int	soreceive_rcvoob(struct socket *so, struct uio *uio,
		    int flags);

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static struct filterops solisten_filtops =
	{ 1, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ 1, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ 1, NULL, filt_sowdetach, filt_sowrite };
uma_zone_t socket_zone;
so_gen_t so_gencnt;	/* generation count for sockets */

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

static int somaxconn = SOMAXCONN;
static int sysctl_somaxconn(SYSCTL_HANDLER_ARGS);
/* XXX: we don't have SYSCTL_USHORT */
SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLTYPE_UINT | CTLFLAG_RW,
    0, sizeof(int), sysctl_somaxconn, "I", "Maximum pending socket connection "
    "queue size");
static int numopensockets;
SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
    &numopensockets, 0, "Number of open sockets");
#ifdef ZERO_COPY_SOCKETS
/* These aren't static because they're used in other files. */
int so_zero_copy_send = 1;
int so_zero_copy_receive = 1;
SYSCTL_NODE(_kern_ipc, OID_AUTO, zero_copy, CTLFLAG_RD, 0,
    "Zero copy controls");
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, receive, CTLFLAG_RW,
    &so_zero_copy_receive, 0, "Enable zero copy receive");
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, send, CTLFLAG_RW,
    &so_zero_copy_send, 0, "Enable zero copy send");
#endif /* ZERO_COPY_SOCKETS */
/*
 * accept_mtx locks down per-socket fields relating to accept queues.  See
 * socketvar.h for an annotation of the protected fields of struct socket.
 */
struct mtx accept_mtx;
MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);

/*
 * so_global_mtx protects so_gencnt, numopensockets, and the per-socket
 * so_gencnt field.
 */
static struct mtx so_global_mtx;
MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_global", MTX_DEF);
/*
 * General IPC sysctl name space, used by sockets and a variety of other IPC
 * types.
 */
SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");
/*
 * Sysctl to get and set the maximum global sockets limit.  Notify protocols
 * of the change so that they can update their dependent limits as required.
 */
static int
sysctl_maxsockets(SYSCTL_HANDLER_ARGS)
{
	int error, newmaxsockets;

	newmaxsockets = maxsockets;
	error = sysctl_handle_int(oidp, &newmaxsockets, 0, req);
	if (error == 0 && req->newptr) {
		if (newmaxsockets > maxsockets) {
			maxsockets = newmaxsockets;
			if (maxsockets > ((maxfiles / 4) * 3)) {
				maxfiles = (maxsockets * 5) / 4;
				maxfilesperproc = (maxfiles * 9) / 10;
			}
			EVENTHANDLER_INVOKE(maxsockets_change);
		} else
			error = EINVAL;
	}
	return (error);
}

SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets, CTLTYPE_INT|CTLFLAG_RW,
    &maxsockets, 0, sysctl_maxsockets, "IU",
    "Maximum number of sockets available");
/*
 * Initialise maxsockets.  This SYSINIT must be run after
 * tunable_mbinit().
 */
static void
init_maxsockets(void *ignored)
{

	TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
	maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters));
}
SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);
/*
 * Socket operation routines.  These routines are called by the routines in
 * sys_socket.c or from a system process, and implement the semantics of
 * socket operations by switching out to the protocol specific routines.
 */

/*
 * Get a socket structure from our zone, and initialize it.  Note that it
 * would probably be better to allocate socket and PCB at the same time, but
 * I'm not convinced that all the protocols can be easily modified to do
 * this.
 *
 * soalloc() returns a socket with a ref count of 0.
 */
static struct socket *
soalloc(struct vnet *vnet)
{
	struct socket *so;

	so = uma_zalloc(socket_zone, M_NOWAIT | M_ZERO);
	if (so == NULL)
		return (NULL);
#ifdef MAC
	if (mac_socket_init(so, M_NOWAIT) != 0) {
		uma_zfree(socket_zone, so);
		return (NULL);
	}
#endif
	SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
	SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
	sx_init(&so->so_snd.sb_sx, "so_snd_sx");
	sx_init(&so->so_rcv.sb_sx, "so_rcv_sx");
	TAILQ_INIT(&so->so_aiojobq);
	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	++numopensockets;
#ifdef VIMAGE
	vnet->vnet_sockcnt++;
	so->so_vnet = vnet;
#endif
	mtx_unlock(&so_global_mtx);
	return (so);
}
/*
 * Free the storage associated with a socket at the socket layer, tear down
 * locks, labels, etc.  All protocol state is assumed already to have been
 * torn down (and possibly never set up) by the caller.
 */
static void
sodealloc(struct socket *so)
{

	KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
	KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL"));

	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	--numopensockets;	/* Could be below, but faster here. */
#ifdef VIMAGE
	so->so_vnet->vnet_sockcnt--;
#endif
	mtx_unlock(&so_global_mtx);
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
	/* Remove accept filter if one is present. */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
#endif
#ifdef MAC
	mac_socket_destroy(so);
#endif
	crfree(so->so_cred);
	sx_destroy(&so->so_snd.sb_sx);
	sx_destroy(&so->so_rcv.sb_sx);
	SOCKBUF_LOCK_DESTROY(&so->so_snd);
	SOCKBUF_LOCK_DESTROY(&so->so_rcv);
	uma_zfree(socket_zone, so);
}
/*
 * socreate returns a socket with a ref count of 1.  The socket should be
 * closed with soclose().
 */
int
socreate(int dom, struct socket **aso, int type, int proto,
    struct ucred *cred, struct thread *td)
{
	struct protosw *prp;
	struct socket *so;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL || prp->pr_usrreqs->pru_attach == NULL ||
	    prp->pr_usrreqs->pru_attach == pru_attach_notsupp)
		return (EPROTONOSUPPORT);

	if (prison_check_af(cred, prp->pr_domain->dom_family) != 0)
		return (EPROTONOSUPPORT);

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(CRED_TO_VNET(cred));
	if (so == NULL)
		return (ENOBUFS);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(cred);
	if ((prp->pr_domain->dom_family == PF_INET) ||
	    (prp->pr_domain->dom_family == PF_ROUTE))
		so->so_fibnum = td->td_proc->p_fibnum;
	else
		so->so_fibnum = 0;
	so->so_proto = prp;
#ifdef MAC
	mac_socket_create(cred, so);
#endif
	knlist_init_mtx(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
	knlist_init_mtx(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
	so->so_count = 1;
	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	CURVNET_SET(so->so_vnet);
	error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
	CURVNET_RESTORE();
	if (error) {
		KASSERT(so->so_count == 1, ("socreate: so_count %d",
		    so->so_count));
		so->so_count = 0;
		sodealloc(so);
		return (error);
	}
	*aso = so;
	return (0);
}
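/*
 * Example (editor's sketch, not compiled in): how an in-kernel consumer
 * might use the public life cycle interfaces.  The function name is
 * hypothetical and error handling is abbreviated.
 */
#if 0
static int
example_kernel_udp_socket(struct thread *td)
{
	struct socket *so;
	int error;

	/* socreate() hands back a socket with a ref count of 1. */
	error = socreate(AF_INET, &so, SOCK_DGRAM, IPPROTO_UDP,
	    td->td_ucred, td);
	if (error != 0)
		return (error);
	/* ... sobind()/sosend()/soreceive() as required ... */

	/* soclose() releases the consumer's single reference. */
	return (soclose(so));
}
#endif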
#ifdef REGRESSION
static int regression_sonewconn_earlytest = 1;
SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW,
    &regression_sonewconn_earlytest, 0, "Perform early sonewconn limit test");
#endif
/*
 * When an attempt at a new connection is noted on a socket which accepts
 * connections, sonewconn is called.  If the connection is possible (subject
 * to space constraints, etc.) then we allocate a new structure, properly
 * linked into the data structure of the original socket, and return this.
 * Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED.
 *
 * Note: the ref count on the socket is 0 on return.
 */
struct socket *
sonewconn(struct socket *head, int connstatus)
{
	struct socket *so;
	int over;

	ACCEPT_LOCK();
	over = (head->so_qlen > 3 * head->so_qlimit / 2);
	ACCEPT_UNLOCK();
#ifdef REGRESSION
	if (regression_sonewconn_earlytest && over)
		return (NULL);
#else
	if (over)
		return (NULL);
#endif
	VNET_ASSERT(head->so_vnet);
	so = soalloc(head->so_vnet);
	if (so == NULL)
		return (NULL);
	if ((head->so_options & SO_ACCEPTFILTER) != 0)
		connstatus = 0;
	so->so_head = head;
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state | SS_NOFDREF;
	so->so_fibnum = head->so_fibnum;
	so->so_proto = head->so_proto;
	so->so_cred = crhold(head->so_cred);
#ifdef MAC
	mac_socket_newconn(head, so);
#endif
	knlist_init_mtx(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
	knlist_init_mtx(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
	if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat) ||
	    (*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
		sodealloc(so);
		return (NULL);
	}
	so->so_rcv.sb_lowat = head->so_rcv.sb_lowat;
	so->so_snd.sb_lowat = head->so_snd.sb_lowat;
	so->so_rcv.sb_timeo = head->so_rcv.sb_timeo;
	so->so_snd.sb_timeo = head->so_snd.sb_timeo;
	so->so_rcv.sb_flags |= head->so_rcv.sb_flags & SB_AUTOSIZE;
	so->so_snd.sb_flags |= head->so_snd.sb_flags & SB_AUTOSIZE;
	so->so_state |= connstatus;
	ACCEPT_LOCK();
	if (connstatus) {
		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
		so->so_qstate |= SQ_COMP;
		head->so_qlen++;
	} else {
		/*
		 * Keep removing sockets from the head until there's room for
		 * us to insert on the tail.  In pre-locking revisions, this
		 * was a simple if(), but as we could be racing with other
		 * threads and soabort() requires dropping locks, we must
		 * loop waiting for the condition to be true.
		 */
		while (head->so_incqlen > head->so_qlimit) {
			struct socket *sp;

			sp = TAILQ_FIRST(&head->so_incomp);
			TAILQ_REMOVE(&head->so_incomp, sp, so_list);
			head->so_incqlen--;
			sp->so_qstate &= ~SQ_INCOMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
		so->so_qstate |= SQ_INCOMP;
		head->so_incqlen++;
	}
	ACCEPT_UNLOCK();
	if (connstatus) {
		sorwakeup(head);
		wakeup_one(&head->so_timeo);
	}
	return (so);
}
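/*
 * Example (editor's sketch, not compiled in): how a protocol might admit
 * an inbound connection on a listening socket 'head', loosely modeled on
 * TCP's use of sonewconn().  The function name is hypothetical.
 */
#if 0
static struct socket *
example_admit_connection(struct socket *head)
{
	struct socket *so;

	/* Returns NULL when the listen queue is over its limit. */
	so = sonewconn(head, 0);
	if (so == NULL)
		return (NULL);
	/* ... protocol completes its handshake on 'so' ... */
	soisconnected(so);	/* moves 'so' to the completed queue */
	return (so);
}
#endif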
int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	CURVNET_SET(so->so_vnet);
	error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td);
	CURVNET_RESTORE();
	return (error);
}
/*
 * solisten() transitions a socket from a non-listening state to a listening
 * state, but can also be used to update the listen queue depth on an
 * existing listen socket.  The protocol will call back into the sockets
 * layer using solisten_proto_check() and solisten_proto() to check and set
 * socket-layer listen state.  Call backs are used so that the protocol can
 * acquire both protocol and socket layer locks in whatever order is required
 * by the protocol.
 *
 * Protocol implementors are advised to hold the socket lock across the
 * socket-layer test and set to avoid races at the socket layer.
 */
int
solisten(struct socket *so, int backlog, struct thread *td)
{

	return ((*so->so_proto->pr_usrreqs->pru_listen)(so, backlog, td));
}

int
solisten_proto_check(struct socket *so)
{

	SOCK_LOCK_ASSERT(so);

	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
	    SS_ISDISCONNECTING))
		return (EINVAL);
	return (0);
}

void
solisten_proto(struct socket *so, int backlog)
{

	SOCK_LOCK_ASSERT(so);

	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	so->so_options |= SO_ACCEPTCONN;
}
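/*
 * Example (editor's sketch, not compiled in): the callback pattern
 * described above, as a protocol's pru_listen method might use it.  The
 * protocol lock names are hypothetical placeholders.
 */
#if 0
static int
example_pru_listen(struct socket *so, int backlog, struct thread *td)
{
	int error;

	EXAMPLE_PROTO_LOCK();		/* hypothetical protocol lock */
	SOCK_LOCK(so);
	error = solisten_proto_check(so);
	if (error == 0) {
		/* ... protocol-specific listen-state setup ... */
		solisten_proto(so, backlog);
	}
	SOCK_UNLOCK(so);
	EXAMPLE_PROTO_UNLOCK();
	return (error);
}
#endif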
/*
 * Attempt to free a socket.  This should really be sotryfree().
 *
 * sofree() will succeed if:
 *
 * - There are no outstanding file descriptor references or related consumers
 *
 * - The socket has been closed by user space, if ever open (SS_NOFDREF).
 *
 * - The protocol does not have an outstanding strong reference on the socket
 *   (SS_PROTOREF).
 *
 * - The socket is not in a completed connection queue, so a process has been
 *   notified that it is present.  If it is removed, the user process may
 *   block in accept() despite select() saying the socket was ready.
 *
 * Otherwise, it will quietly abort so that a future call to sofree(), when
 * conditions are right, can succeed.
 */
void
sofree(struct socket *so)
{
	struct protosw *pr = so->so_proto;
	struct socket *head;

	ACCEPT_LOCK_ASSERT();
	SOCK_LOCK_ASSERT(so);

	if ((so->so_state & SS_NOFDREF) == 0 || so->so_count != 0 ||
	    (so->so_state & SS_PROTOREF) || (so->so_qstate & SQ_COMP)) {
		SOCK_UNLOCK(so);
		ACCEPT_UNLOCK();
		return;
	}

	head = so->so_head;
	if (head != NULL) {
		KASSERT((so->so_qstate & SQ_COMP) != 0 ||
		    (so->so_qstate & SQ_INCOMP) != 0,
		    ("sofree: so_head != NULL, but neither SQ_COMP nor "
		    "SQ_INCOMP"));
		KASSERT((so->so_qstate & SQ_COMP) == 0 ||
		    (so->so_qstate & SQ_INCOMP) == 0,
		    ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP"));
		TAILQ_REMOVE(&head->so_incomp, so, so_list);
		head->so_incqlen--;
		so->so_qstate &= ~SQ_INCOMP;
		so->so_head = NULL;
	}
	KASSERT((so->so_qstate & SQ_COMP) == 0 &&
	    (so->so_qstate & SQ_INCOMP) == 0,
	    ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)",
	    so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP));
	if (so->so_options & SO_ACCEPTCONN) {
		KASSERT((TAILQ_EMPTY(&so->so_comp)), ("sofree: so_comp populated"));
		KASSERT((TAILQ_EMPTY(&so->so_incomp)), ("sofree: so_incomp populated"));
	}
	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();

	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
		(*pr->pr_domain->dom_dispose)(so->so_rcv.sb_mb);
	if (pr->pr_usrreqs->pru_detach != NULL)
		(*pr->pr_usrreqs->pru_detach)(so);

	/*
	 * From this point on, we assume that no other references to this
	 * socket exist anywhere else in the stack.  Therefore, no locks need
	 * to be acquired or held.
	 *
	 * We used to do a lot of socket buffer and socket locking here, as
	 * well as invoke sorflush() and perform wakeups.  The direct call to
	 * dom_dispose() and sbrelease_internal() are an inlining of what was
	 * necessary from sorflush().
	 *
	 * Notice that the socket buffer and kqueue state are torn down
	 * before calling pru_detach.  This means that protocols should not
	 * assume they can perform socket wakeups, etc, in their detach code.
	 */
	sbdestroy(&so->so_snd, so);
	sbdestroy(&so->so_rcv, so);
	knlist_destroy(&so->so_rcv.sb_sel.si_note);
	knlist_destroy(&so->so_snd.sb_sel.si_note);
	sodealloc(so);
}
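/*
 * Example (editor's sketch, not compiled in): the explicit reference
 * management mentioned in the life cycle comments.  soref() pins a socket
 * being moved from a listen queue to a file descriptor; sorele() drops
 * the reference and lets sofree() run when it was the last one.  This is
 * a code fragment, not a complete function.
 */
#if 0
	SOCK_LOCK(so);
	soref(so);		/* prevent untimely garbage collection */
	SOCK_UNLOCK(so);
	/* ... install 'so' in a file descriptor ... */
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	sorele(so);		/* drops both locks; may free the socket */
#endif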
/*
 * Close a socket on last file table reference removal.  Initiate disconnect
 * if connected.  Free socket when disconnect complete.
 *
 * This function will sorele() the socket.  Note that soclose() may be called
 * prior to the ref count reaching zero.  The actual socket structure will
 * not be freed until the ref count reaches zero.
 */
int
soclose(struct socket *so)
{
	int error = 0;

	KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter"));

	CURVNET_SET(so->so_vnet);
	funsetown(&so->so_sigio);
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error) {
				if (error == ENOTCONN)
					error = 0;
				goto drop;
			}
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo,
				    PSOCK | PCATCH, "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}

drop:
	if (so->so_proto->pr_usrreqs->pru_close != NULL)
		(*so->so_proto->pr_usrreqs->pru_close)(so);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;

		ACCEPT_LOCK();
		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			so->so_incqlen--;
			sp->so_qstate &= ~SQ_INCOMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			sp->so_qstate &= ~SQ_COMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		ACCEPT_UNLOCK();
	}
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF"));
	so->so_state |= SS_NOFDREF;
	sorele(so);
	CURVNET_RESTORE();
	return (error);
}
/*
 * soabort() is used to abruptly tear down a connection, such as when a
 * resource limit is reached (listen queue depth exceeded), or if a listen
 * socket is closed while there are sockets waiting to be accepted.
 *
 * This interface is tricky, because it is called on an unreferenced socket,
 * and must be called only by a thread that has actually removed the socket
 * from the listen queue it was on, or races with other threads are risked.
 *
 * This interface will call into the protocol code, so must not be called
 * with any socket locks held.  Protocols do call it while holding their own
 * recursible protocol mutexes, but this is something that should be subject
 * to review in the future.
 */
void
soabort(struct socket *so)
{

	/*
	 * In as much as is possible, assert that no references to this
	 * socket are held.  This is not quite the same as asserting that the
	 * current thread is responsible for arranging for no references, but
	 * is as close as we can get for now.
	 */
	KASSERT(so->so_count == 0, ("soabort: so_count"));
	KASSERT((so->so_state & SS_PROTOREF) == 0, ("soabort: SS_PROTOREF"));
	KASSERT(so->so_state & SS_NOFDREF, ("soabort: !SS_NOFDREF"));
	KASSERT((so->so_qstate & SQ_COMP) == 0, ("soabort: SQ_COMP"));
	KASSERT((so->so_qstate & SQ_INCOMP) == 0, ("soabort: SQ_INCOMP"));

	if (so->so_proto->pr_usrreqs->pru_abort != NULL)
		(*so->so_proto->pr_usrreqs->pru_abort)(so);
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	sofree(so);
}
int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int error;

	SOCK_LOCK(so);
	KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF"));
	so->so_state &= ~SS_NOFDREF;
	SOCK_UNLOCK(so);
	error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
	return (error);
}
int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);

	CURVNET_SET(so->so_vnet);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.  This allows
	 * user to disconnect by connecting to, e.g., a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from previous connection from
		 * biting us.
		 */
		so->so_error = 0;
		error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, td);
	}
	CURVNET_RESTORE();

	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{

	return ((*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2));
}
int
sodisconnect(struct socket *so)
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);
	if (so->so_state & SS_ISDISCONNECTING)
		return (EALREADY);
	error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
	return (error);
}
#ifdef ZERO_COPY_SOCKETS
struct so_zerocopy_stats{
	int size_ok;
	int align_ok;
	int found_ifp;
};
struct so_zerocopy_stats so_zerocp_stats = {0,0,0};
#include <netinet/in.h>
#include <net/route.h>
#include <netinet/in_pcb.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>

/*
 * sosend_copyin() is only used if zero copy sockets are enabled.  Otherwise
 * sosend_dgram() and sosend_generic() use m_uiotombuf().
 *
 * sosend_copyin() accepts a uio and prepares an mbuf chain holding part or
 * all of the data referenced by the uio.  If desired, it uses zero-copy.
 * *space will be updated to reflect data copied in.
 *
 * NB: If atomic I/O is requested, the caller must already have checked that
 * space can hold resid bytes.
 *
 * NB: In the event of an error, the caller may need to free the partial
 * chain pointed to by *mpp.  The contents of both *uio and *space may be
 * modified even in the case of an error.
 */
static int
sosend_copyin(struct uio *uio, struct mbuf **retmp, int atomic, long *space,
    int flags)
{
	struct mbuf *m, **mp, *top;
	long len, resid;
	int error;
#ifdef ZERO_COPY_SOCKETS
	int cow_send;
#endif

	*retmp = top = NULL;
	mp = &top;
	len = 0;
	resid = uio->uio_resid;
	error = 0;
	do {
#ifdef ZERO_COPY_SOCKETS
		cow_send = 0;
#endif /* ZERO_COPY_SOCKETS */
		if (resid >= MINCLSIZE) {
#ifdef ZERO_COPY_SOCKETS
			if (top == NULL) {
				m = m_gethdr(M_WAITOK, MT_DATA);
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;
			} else
				m = m_get(M_WAITOK, MT_DATA);
			if (so_zero_copy_send &&
			    resid >= PAGE_SIZE &&
			    *space >= PAGE_SIZE &&
			    uio->uio_iov->iov_len >= PAGE_SIZE) {
				so_zerocp_stats.size_ok++;
				so_zerocp_stats.align_ok++;
				cow_send = socow_setup(m, uio);
				len = cow_send;
			}
			if (!cow_send) {
				m_clget(m, M_WAITOK);
				len = min(min(MCLBYTES, resid), *space);
			}
#else /* ZERO_COPY_SOCKETS */
			if (top == NULL) {
				m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;
			} else
				m = m_getcl(M_WAIT, MT_DATA, 0);
			len = min(min(MCLBYTES, resid), *space);
#endif /* ZERO_COPY_SOCKETS */
		} else {
			if (top == NULL) {
				m = m_gethdr(M_WAIT, MT_DATA);
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;

				len = min(min(MHLEN, resid), *space);
				/*
				 * For datagram protocols, leave room
				 * for protocol headers in first mbuf.
				 */
				if (atomic && m && len < MHLEN)
					MH_ALIGN(m, len);
			} else {
				m = m_get(M_WAIT, MT_DATA);
				len = min(min(MLEN, resid), *space);
			}
		}
		if (m == NULL) {
			error = ENOBUFS;
			goto out;
		}

		*space -= len;
#ifdef ZERO_COPY_SOCKETS
		if (cow_send)
			error = 0;
		else
#endif /* ZERO_COPY_SOCKETS */
		error = uiomove(mtod(m, void *), (int)len, uio);
		resid = uio->uio_resid;
		m->m_len = len;
		*mp = m;
		top->m_pkthdr.len += len;
		if (error)
			goto out;
		mp = &m->m_next;
		if (resid <= 0) {
			if (flags & MSG_EOR)
				top->m_flags |= M_EOR;
			break;
		}
	} while (*space > 0 && atomic);
out:
	*retmp = top;
	return (error);
}
#endif /*ZERO_COPY_SOCKETS*/
#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)
int
sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	long space, resid;
	int clen = 0, error, dontroute;
#ifdef ZERO_COPY_SOCKETS
	int atomic = sosendallatonce(so) || top;
#endif

	KASSERT(so->so_type == SOCK_DGRAM, ("sosend_dgram: !SOCK_DGRAM"));
	KASSERT(so->so_proto->pr_flags & PR_ATOMIC,
	    ("sosend_dgram: !PR_ATOMIC"));

	if (uio != NULL)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.  However, space must be
	 * signed, as it might be less than 0 if we over-committed, and we
	 * must use a signed comparison of space and resid.  On the other
	 * hand, a negative resid causes us to loop sending 0-length
	 * segments to the protocol.
	 */
	if (resid < 0) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0;
	if (td != NULL)
		td->td_ru.ru_msgsnd++;
	if (control != NULL)
		clen = control->m_len;

	SOCKBUF_LOCK(&so->so_snd);
	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		SOCKBUF_UNLOCK(&so->so_snd);
		error = EPIPE;
		goto out;
	}
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		SOCKBUF_UNLOCK(&so->so_snd);
		goto out;
	}
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		/*
		 * `sendto' and `sendmsg' are allowed on a connection-based
		 * socket if it supports implied connect.  Return ENOTCONN if
		 * not connected and no address is supplied.
		 */
		if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
		    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
			if ((so->so_state & SS_ISCONFIRMING) == 0 &&
			    !(resid == 0 && clen != 0)) {
				SOCKBUF_UNLOCK(&so->so_snd);
				error = ENOTCONN;
				goto out;
			}
		} else if (addr == NULL) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED)
				error = ENOTCONN;
			else
				error = EDESTADDRREQ;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto out;
		}
	}

	/*
	 * Do we need MSG_OOB support in SOCK_DGRAM?  Signs here may be a
	 * problem and need fixing.
	 */
	space = sbspace(&so->so_snd);
	if (flags & MSG_OOB)
		space += 1024;
	space -= clen;
	SOCKBUF_UNLOCK(&so->so_snd);
	if (resid > space) {
		error = EMSGSIZE;
		goto out;
	}
	if (uio == NULL) {
		resid = 0;
		if (flags & MSG_EOR)
			top->m_flags |= M_EOR;
	} else {
#ifdef ZERO_COPY_SOCKETS
		error = sosend_copyin(uio, &top, atomic, &space, flags);
		if (error)
			goto out;
#else
		/*
		 * Copy the data from userland into a mbuf chain.
		 * If no data is to be copied in, a single empty mbuf
		 * is returned.
		 */
		top = m_uiotombuf(uio, M_WAITOK, space, max_hdr,
		    (M_PKTHDR | ((flags & MSG_EOR) ? M_EOR : 0)));
		if (top == NULL) {
			error = EFAULT;	/* only possible error */
			goto out;
		}
		space -= resid - uio->uio_resid;
#endif
		resid = uio->uio_resid;
	}
	KASSERT(resid == 0, ("sosend_dgram: resid != 0"));
	/*
	 * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock
	 * than with.
	 */
	if (dontroute) {
		SOCK_LOCK(so);
		so->so_options |= SO_DONTROUTE;
		SOCK_UNLOCK(so);
	}
	/*
	 * XXX all the SBS_CANTSENDMORE checks previously done could be out
	 * of date.  We could have received a reset packet in an interrupt or
	 * maybe we slept while doing page faults in uiomove() etc.  We could
	 * probably recheck again inside the locking protection here, but
	 * there are probably other places that this also happens.  We must
	 * rethink this.
	 */
	error = (*so->so_proto->pr_usrreqs->pru_send)(so,
	    (flags & MSG_OOB) ? PRUS_OOB :
	/*
	 * If the user set MSG_EOF, the protocol understands this flag and
	 * nothing left to send then use PRU_SEND_EOF instead of PRU_SEND.
	 */
	    ((flags & MSG_EOF) &&
	     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
	     (resid <= 0)) ?
		PRUS_EOF :
		/* If there is more to send set PRUS_MORETOCOME */
		(resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
	    top, addr, control, td);
	if (dontroute) {
		SOCK_LOCK(so);
		so->so_options &= ~SO_DONTROUTE;
		SOCK_UNLOCK(so);
	}
	clen = 0;
	control = NULL;
	top = NULL;
out:
	if (top != NULL)
		m_freem(top);
	if (control != NULL)
		m_freem(control);
	return (error);
}
/*
 * Send on a socket.  If send must go all at once and message is larger than
 * send buffering, then hard error.  Lock against other senders.  If must go
 * all at once and not enough room now, then inform user that this would
 * block and do nothing.  Otherwise, if nonblocking, send as much as
 * possible.  The data to be sent is described by "uio" if nonzero, otherwise
 * by the mbuf chain "top" (which must be null if uio is not).  Data provided
 * in mbuf chain must be small enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers must check for short
 * counts if EINTR/ERESTART are returned.  Data and control buffers are freed
 * on return.
 */
int
sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	long space, resid;
	int clen = 0, error, dontroute;
	int atomic = sosendallatonce(so) || top;

	if (uio != NULL)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.  However, space must be
	 * signed, as it might be less than 0 if we over-committed, and we
	 * must use a signed comparison of space and resid.  On the other
	 * hand, a negative resid causes us to loop sending 0-length
	 * segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td != NULL)
		td->td_ru.ru_msgsnd++;
	if (control != NULL)
		clen = control->m_len;

	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

restart:
	do {
		SOCKBUF_LOCK(&so->so_snd);
		if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = EPIPE;
			goto release;
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0)) {
					SOCKBUF_UNLOCK(&so->so_snd);
					error = ENOTCONN;
					goto release;
				}
			} else if (addr == NULL) {
				SOCKBUF_UNLOCK(&so->so_snd);
				if (so->so_proto->pr_flags & PR_CONNREQUIRED)
					error = ENOTCONN;
				else
					error = EDESTADDRREQ;
				goto release;
			}
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = EMSGSIZE;
			goto release;
		}
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO)) {
				SOCKBUF_UNLOCK(&so->so_snd);
				error = EWOULDBLOCK;
				goto release;
			}
			error = sbwait(&so->so_snd);
			SOCKBUF_UNLOCK(&so->so_snd);
			if (error)
				goto release;
			goto restart;
		}
		SOCKBUF_UNLOCK(&so->so_snd);
		space -= clen;
		do {
			if (uio == NULL) {
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else {
#ifdef ZERO_COPY_SOCKETS
				error = sosend_copyin(uio, &top, atomic,
				    &space, flags);
				if (error != 0)
					goto release;
#else
				/*
				 * Copy the data from userland into a mbuf
				 * chain.  If no data is to be copied in,
				 * a single empty mbuf is returned.
				 */
				top = m_uiotombuf(uio, M_WAITOK, space,
				    (atomic ? max_hdr : 0),
				    (atomic ? M_PKTHDR : 0) |
				    ((flags & MSG_EOR) ? M_EOR : 0));
				if (top == NULL) {
					error = EFAULT;	/* only possible error */
					goto release;
				}
				space -= resid - uio->uio_resid;
#endif
				resid = uio->uio_resid;
			}
			if (dontroute) {
				SOCK_LOCK(so);
				so->so_options |= SO_DONTROUTE;
				SOCK_UNLOCK(so);
			}
			/*
			 * XXX all the SBS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We
			 * could probably recheck again inside the locking
			 * protection here, but there are probably other
			 * places that this also happens.  We must rethink
			 * this.
			 */
			error = (*so->so_proto->pr_usrreqs->pru_send)(so,
			    (flags & MSG_OOB) ? PRUS_OOB :
			/*
			 * If the user set MSG_EOF, the protocol understands
			 * this flag and nothing left to send then use
			 * PRU_SEND_EOF instead of PRU_SEND.
			 */
			    ((flags & MSG_EOF) &&
			     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			     (resid <= 0)) ?
				PRUS_EOF :
			/* If there is more to send set PRUS_MORETOCOME. */
			    (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
			    top, addr, control, td);
			if (dontroute) {
				SOCK_LOCK(so);
				so->so_options &= ~SO_DONTROUTE;
				SOCK_UNLOCK(so);
			}
			clen = 0;
			control = NULL;
			top = NULL;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	sbunlock(&so->so_snd);
out:
	if (top != NULL)
		m_freem(top);
	if (control != NULL)
		m_freem(control);
	return (error);
}

int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	int error;

	CURVNET_SET(so->so_vnet);
	error = so->so_proto->pr_usrreqs->pru_sosend(so, addr, uio, top,
	    control, flags, td);
	CURVNET_RESTORE();
	return (error);
}
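/*
 * Example (editor's sketch, not compiled in): an in-kernel send through
 * sosend().  Passing a NULL uio with a 'top' mbuf chain is a common kernel
 * pattern; per the contract above, data and control buffers are freed on
 * return.  The function name is hypothetical.
 */
#if 0
static int
example_sosend_chain(struct socket *so, struct mbuf *top, struct thread *td)
{

	return (sosend(so, NULL, NULL, top, NULL, 0, td));
}
#endif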
/*
 * The part of soreceive() that implements reading non-inline out-of-band
 * data from a socket.  For more complete comments, see soreceive(), from
 * which this code originated.
 *
 * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is
 * unable to return an mbuf chain to the caller.
 */
static int
soreceive_rcvoob(struct socket *so, struct uio *uio, int flags)
{
	struct protosw *pr = so->so_proto;
	struct mbuf *m;
	int error;

	KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0"));

	m = m_get(M_WAIT, MT_DATA);
	error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
	if (error)
		goto bad;
	do {
#ifdef ZERO_COPY_SOCKETS
		if (so_zero_copy_receive) {
			int disposable;

			if ((m->m_flags & M_EXT)
			    && (m->m_ext.ext_type == EXT_DISPOSABLE))
				disposable = 1;
			else
				disposable = 0;

			error = uiomoveco(mtod(m, void *),
			    min(uio->uio_resid, m->m_len),
			    uio, disposable);
		} else
#endif /* ZERO_COPY_SOCKETS */
		error = uiomove(mtod(m, void *),
		    (int) min(uio->uio_resid, m->m_len), uio);
		m = m_free(m);
	} while (uio->uio_resid && error == 0 && m);
bad:
	if (m != NULL)
		m_freem(m);
	return (error);
}
/*
 * Following replacement or removal of the first mbuf on the first mbuf chain
 * of a socket buffer, push necessary state changes back into the socket
 * buffer so that other consumers see the values consistently.  'nextrecord'
 * is the caller's locally stored value of the original value of
 * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes.
 * NOTE: 'nextrecord' may be NULL.
 */
static __inline void
sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord)
{

	SOCKBUF_LOCK_ASSERT(sb);
	/*
	 * First, update for the new value of nextrecord.  If necessary, make
	 * it the first record.
	 */
	if (sb->sb_mb != NULL)
		sb->sb_mb->m_nextpkt = nextrecord;
	else
		sb->sb_mb = nextrecord;

	/*
	 * Now update any dependent socket buffer fields to reflect the new
	 * state.  This is an expanded inline of SB_EMPTY_FIXUP(), with the
	 * addition of a second clause that takes care of the case where
	 * sb_mb has been updated, but remains the last record.
	 */
	if (sb->sb_mb == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (sb->sb_mb->m_nextpkt == NULL)
		sb->sb_lastrecord = sb->sb_mb;
}
/*
 * Implement receive operations on a socket.  We depend on the way that
 * records are added to the sockbuf by sbappend.  In particular, each record
 * (mbufs linked through m_next) must begin with an address if the protocol
 * so specifies, followed by an optional mbuf or mbufs containing ancillary
 * data, and then zero or more mbufs of data.  In order to allow parallelism
 * between network receive and copying to user space, as well as avoid
 * sleeping with a mutex held, we release the socket buffer mutex during the
 * user space copy.  Although the sockbuf is locked, new data may still be
 * appended, and thus we must maintain consistency of the sockbuf during that
 * time.
 *
 * The caller may receive the data as a single mbuf chain by supplying an
 * mbuf **mp0 for use in returning the chain.  The uio is then used only for
 * the count in uio_resid.
 */
int
soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, **mp;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff, type = 0;
	int orig_resid = uio->uio_resid;

	mp = mp0;
	if (psa != NULL)
		*psa = NULL;
	if (controlp != NULL)
		*controlp = NULL;
	if (flagsp != NULL)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB)
		return (soreceive_rcvoob(so, uio, flags));
	if (mp != NULL)
		*mp = NULL;
	if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING)
	    && uio->uio_resid)
		(*pr->pr_usrreqs->pru_rcvd)(so, 0);

	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		return (error);

restart:
	SOCKBUF_LOCK(&so->so_rcv);
	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more (subject
	 * to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning a
	 * short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.sb_cc,
		    ("receive: m == %p so->so_rcv.sb_cc == %u",
		    m, so->so_rcv.sb_cc));
		if (so->so_error) {
			if (m != NULL)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			SOCKBUF_UNLOCK(&so->so_rcv);
			goto release;
		}
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			if (m == NULL) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				goto release;
			} else
				goto dontblock;
		}
		for (; m != NULL; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			goto release;
		}
		if ((so->so_state & SS_NBIO) ||
		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			error = EWOULDBLOCK;
			goto release;
		}
		SBLASTRECORDCHK(&so->so_rcv);
		SBLASTMBUFCHK(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		SOCKBUF_UNLOCK(&so->so_rcv);
		if (error)
			goto release;
		goto restart;
	}
dontblock:
	/*
	 * From this point onward, we maintain 'nextrecord' as a cache of the
	 * pointer to the next record in the socket buffer.  We must keep the
	 * various socket buffer pointers and local stack versions of the
	 * pointers in sync, pushing out modifications before dropping the
	 * socket buffer mutex, and re-reading them when picking it up.
	 *
	 * Otherwise, we will race with the network stack appending new data
	 * or records onto the socket buffer by using inconsistent/stale
	 * versions of the field, possibly resulting in socket buffer
	 * corruption.
	 *
	 * By holding the high-level sblock(), we prevent simultaneous
	 * readers from pulling off the front of the socket buffer.
	 */
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (uio->uio_td)
		uio->uio_td->td_ru.ru_msgrcv++;
	KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME,
		    ("m->m_type == %d", m->m_type));
		orig_resid = 0;
		if (psa != NULL)
			*psa = sodupsockaddr(mtod(m, struct sockaddr *),
			    M_NOWAIT);
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			so->so_rcv.sb_mb = m_free(m);
			m = so->so_rcv.sb_mb;
			sockbuf_pushsync(&so->so_rcv, nextrecord);
		}
	}

	/*
	 * Process one or more MT_CONTROL mbufs present before any data mbufs
	 * in the first mbuf chain on the socket buffer.  If MSG_PEEK, we
	 * just copy the data; if !MSG_PEEK, we call into the protocol to
	 * perform externalization (or freeing if controlp == NULL).
	 */
	if (m != NULL && m->m_type == MT_CONTROL) {
		struct mbuf *cm = NULL, *cmn;
		struct mbuf **cme = &cm;

		do {
			if (flags & MSG_PEEK) {
				if (controlp != NULL) {
					*controlp = m_copy(m, 0, m->m_len);
					controlp = &(*controlp)->m_next;
				}
				m = m->m_next;
			} else {
				sbfree(&so->so_rcv, m);
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = NULL;
				*cme = m;
				cme = &(*cme)->m_next;
				m = so->so_rcv.sb_mb;
			}
		} while (m != NULL && m->m_type == MT_CONTROL);
		if ((flags & MSG_PEEK) == 0)
			sockbuf_pushsync(&so->so_rcv, nextrecord);
		while (cm != NULL) {
			cmn = cm->m_next;
			cm->m_next = NULL;
			if (pr->pr_domain->dom_externalize != NULL) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				error = (*pr->pr_domain->dom_externalize)
				    (cm, controlp);
				SOCKBUF_LOCK(&so->so_rcv);
			} else if (controlp != NULL)
				*controlp = cm;
			else
				m_freem(cm);
			if (controlp != NULL) {
				orig_resid = 0;
				while (*controlp != NULL)
					controlp = &(*controlp)->m_next;
			}
			cm = cmn;
		}
		if (m != NULL)
			nextrecord = so->so_rcv.sb_mb->m_nextpkt;
		else
			nextrecord = so->so_rcv.sb_mb;
		orig_resid = 0;
	}
	if (m != NULL) {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(m->m_nextpkt == nextrecord,
			    ("soreceive: post-control, nextrecord !sync"));
			if (nextrecord == NULL) {
				KASSERT(so->so_rcv.sb_mb == m,
				    ("soreceive: post-control, sb_mb!=m"));
				KASSERT(so->so_rcv.sb_lastrecord == m,
				    ("soreceive: post-control, lastrecord!=m"));
			}
		}
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	} else {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(so->so_rcv.sb_mb == nextrecord,
			    ("soreceive: sb_mb != nextrecord"));
			if (so->so_rcv.sb_mb == NULL) {
				KASSERT(so->so_rcv.sb_lastrecord == NULL,
				    ("soreceive: sb_lastrecord != NULL"));
			}
		}
	}
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);

	/*
	 * Now continue to read any data mbufs off of the head of the socket
	 * buffer until the read request is satisfied.  Note that 'type' is
	 * used to store the type of any mbuf reads that have happened so far
	 * such that soreceive() can stop reading if the type changes, which
	 * causes soreceive() to return only one of regular data and inline
	 * out-of-band data in a single socket receive operation.
	 */
	moff = 0;
	offset = 0;
	while (m != NULL && uio->uio_resid > 0 && error == 0) {
		/*
		 * If the type of mbuf has changed since the last mbuf
		 * examined ('type'), end the receive operation.
		 */
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
			KASSERT(m->m_type == MT_DATA,
			    ("m->m_type == %d", m->m_type));
		so->so_rcv.sb_state &= ~SBS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.  Otherwise copy
		 * them out via the uio, then free.  Sockbuf must be
		 * consistent here (points to current mbuf, it points to next
		 * record) when we drop priority; we must note any additions
		 * to the sockbuf when we block interrupts again.
		 */
		if (mp == NULL) {
			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
			SBLASTRECORDCHK(&so->so_rcv);
			SBLASTMBUFCHK(&so->so_rcv);
			SOCKBUF_UNLOCK(&so->so_rcv);
#ifdef ZERO_COPY_SOCKETS
			if (so_zero_copy_receive) {
				int disposable;

				if ((m->m_flags & M_EXT)
				    && (m->m_ext.ext_type == EXT_DISPOSABLE))
					disposable = 1;
				else
					disposable = 0;

				error = uiomoveco(mtod(m, char *) + moff,
				    (int)len, uio, disposable);
			} else
#endif /* ZERO_COPY_SOCKETS */
			error = uiomove(mtod(m, char *) + moff, (int)len, uio);
			SOCKBUF_LOCK(&so->so_rcv);
			if (error) {
				/*
				 * The MT_SONAME mbuf has already been removed
				 * from the record, so it is necessary to
				 * remove the data mbufs, if any, to preserve
				 * the invariant in the case of PR_ADDR that
				 * requires MT_SONAME mbufs at the head of
				 * each record.
				 */
				if (m && pr->pr_flags & PR_ATOMIC &&
				    ((flags & MSG_PEEK) == 0))
					(void)sbdroprecord_locked(&so->so_rcv);
				SOCKBUF_UNLOCK(&so->so_rcv);
				goto release;
			}
		} else
			uio->uio_resid -= len;
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp != NULL) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					so->so_rcv.sb_mb = m_free(m);
					m = so->so_rcv.sb_mb;
				}
				sockbuf_pushsync(&so->so_rcv, nextrecord);
				SBLASTRECORDCHK(&so->so_rcv);
				SBLASTMBUFCHK(&so->so_rcv);
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp != NULL) {
					int copy_flag;

					if (flags & MSG_DONTWAIT)
						copy_flag = M_DONTWAIT;
					else
						copy_flag = M_WAIT;
					if (copy_flag == M_WAIT)
						SOCKBUF_UNLOCK(&so->so_rcv);
					*mp = m_copym(m, 0, len, copy_flag);
					if (copy_flag == M_WAIT)
						SOCKBUF_LOCK(&so->so_rcv);
					if (*mp == NULL) {
						/*
						 * m_copym() couldn't
						 * allocate an mbuf.  Adjust
						 * uio_resid back (it was
						 * adjusted down by len
						 * bytes, which we didn't end
						 * up "copying" over).
						 */
						uio->uio_resid += len;
						break;
					}
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_rcv.sb_state |= SBS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket), we
		 * must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return with a
		 * short count but without error.  Keep sockbuf locked
		 * against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && nextrecord == NULL) {
			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
			if (so->so_error || so->so_rcv.sb_state & SBS_CANTRCVMORE)
				break;
			/*
			 * Notify the protocol that some data has been
			 * drained before blocking.
			 */
			if (pr->pr_flags & PR_WANTRCVD) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				(*pr->pr_usrreqs->pru_rcvd)(so, flags);
				SOCKBUF_LOCK(&so->so_rcv);
			}
			SBLASTRECORDCHK(&so->so_rcv);
			SBLASTMBUFCHK(&so->so_rcv);
			error = sbwait(&so->so_rcv);
			if (error) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				goto release;
			}
			m = so->so_rcv.sb_mb;
			if (m != NULL)
				nextrecord = m->m_nextpkt;
		}
	}

	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (m != NULL && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord_locked(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == NULL) {
			/*
			 * First part is an inline SB_EMPTY_FIXUP().  Second
			 * part makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv);
		SBLASTMBUFCHK(&so->so_rcv);
		/*
		 * If soreceive() is being done from the socket callback,
		 * then don't need to generate ACK to peer to update window,
		 * since ACK will be generated on return to TCP.
		 */
		if (!(flags & MSG_SOCALLBCK) &&
		    (pr->pr_flags & PR_WANTRCVD)) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			(*pr->pr_usrreqs->pru_rcvd)(so, flags);
			SOCKBUF_LOCK(&so->so_rcv);
		}
	}
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		goto restart;
	}
	SOCKBUF_UNLOCK(&so->so_rcv);

	if (flagsp != NULL)
		*flagsp |= flags;
release:
	sbunlock(&so->so_rcv);
	return (error);
}
/*
 * Optimized version of soreceive() for stream (TCP) sockets.
 */
int
soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
	int len = 0, error = 0, flags, oresid;
	struct sockbuf *sb;
	struct mbuf *m, *n = NULL;

	/* We only do stream sockets. */
	if (so->so_type != SOCK_STREAM)
		return (EINVAL);
	if (psa != NULL)
		*psa = NULL;
	if (controlp != NULL)
		return (EINVAL);
	if (flagsp != NULL)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB)
		return (soreceive_rcvoob(so, uio, flags));
	if (mp0 != NULL)
		*mp0 = NULL;

	sb = &so->so_rcv;

	/* Prevent other readers from entering the socket. */
	error = sblock(sb, SBLOCKWAIT(flags));
	if (error)
		goto out;
	SOCKBUF_LOCK(sb);

	/* Easy one, no space to copyout anything. */
	if (uio->uio_resid == 0) {
		error = EINVAL;
		goto out;
	}
	oresid = uio->uio_resid;

	/* We will never ever get anything unless we are connected. */
	if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {
		/* When disconnecting there may still be some data left. */
		if (sb->sb_cc > 0)
			goto deliver;
		if (!(so->so_state & SS_ISDISCONNECTED))
			error = ENOTCONN;
		goto out;
	}

	/* Socket buffer is empty and we shall not block. */
	if (sb->sb_cc == 0 &&
	    ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) {
		error = EAGAIN;
		goto out;
	}

restart:
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);

	/* Abort if socket has reported problems. */
	if (so->so_error) {
		if (sb->sb_cc > 0)
			goto deliver;
		if (oresid > uio->uio_resid)
			goto out;
		error = so->so_error;
		if (!(flags & MSG_PEEK))
			so->so_error = 0;
		goto out;
	}

	/* Door is closed.  Deliver what is left, if any. */
	if (sb->sb_state & SBS_CANTRCVMORE) {
		if (sb->sb_cc > 0)
			goto deliver;
		else
			goto out;
	}

	/* Socket buffer got some data that we shall deliver now. */
	if (sb->sb_cc > 0 && !(flags & MSG_WAITALL) &&
	    ((so->so_state & SS_NBIO) ||
	     (flags & (MSG_DONTWAIT|MSG_NBIO)) ||
	     sb->sb_cc >= sb->sb_lowat ||
	     sb->sb_cc >= uio->uio_resid ||
	     sb->sb_cc >= sb->sb_hiwat) ) {
		goto deliver;
	}

	/* On MSG_WAITALL we must wait until all data or error arrives. */
	if ((flags & MSG_WAITALL) &&
	    (sb->sb_cc >= uio->uio_resid || sb->sb_cc >= sb->sb_lowat))
		goto deliver;

	/*
	 * Wait and block until (more) data comes in.
	 * NB: Drops the sockbuf lock during wait.
	 */
	error = sbwait(sb);
	if (error)
		goto out;
	goto restart;

deliver:
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	KASSERT(sb->sb_cc > 0, ("%s: sockbuf empty", __func__));
	KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__));

	/* Statistics. */
	if (uio->uio_td)
		uio->uio_td->td_ru.ru_msgrcv++;

	/* Fill uio until full or current end of socket buffer is reached. */
	len = min(uio->uio_resid, sb->sb_cc);
	if (mp0 != NULL) {
		/* Dequeue as many mbufs as possible. */
		if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) {
			for (*mp0 = m = sb->sb_mb;
			     m != NULL && m->m_len <= len;
			     m = m->m_next) {
				len -= m->m_len;
				uio->uio_resid -= m->m_len;
				sbfree(sb, m);
				n = m;
			}
			sb->sb_mb = m;
			if (sb->sb_mb == NULL)
				SB_EMPTY_FIXUP(sb);
			n->m_next = NULL;
		}
		/* Copy the remainder. */
		if (len > 0) {
			KASSERT(sb->sb_mb != NULL,
			    ("%s: len > 0 && sb->sb_mb empty", __func__));

			m = m_copym(sb->sb_mb, 0, len, M_DONTWAIT);
			if (m == NULL)
				len = 0;	/* Don't flush data from sockbuf. */
			else
				uio->uio_resid -= m->m_len;
			if (*mp0 != NULL)
				n->m_next = m;
			else
				*mp0 = m;
			if (*mp0 == NULL) {
				error = ENOBUFS;
				goto out;
			}
		}
	} else {
		/* NB: Must unlock socket buffer as uiomove may sleep. */
		SOCKBUF_UNLOCK(sb);
		error = m_mbuftouio(uio, sb->sb_mb, len);
		SOCKBUF_LOCK(sb);
		if (error)
			goto out;
	}
	SBLASTRECORDCHK(sb);
	SBLASTMBUFCHK(sb);

	/*
	 * Remove the delivered data from the socket buffer unless we
	 * were only peeking.
	 */
	if (!(flags & MSG_PEEK)) {
		if (len > 0)
			sbdrop_locked(sb, len);

		/* Notify protocol that we drained some data. */
		if ((so->so_proto->pr_flags & PR_WANTRCVD) &&
		    (((flags & MSG_WAITALL) && uio->uio_resid > 0) ||
		     !(flags & MSG_SOCALLBCK))) {
			SOCKBUF_UNLOCK(sb);
			(*so->so_proto->pr_usrreqs->pru_rcvd)(so, flags);
			SOCKBUF_LOCK(sb);
		}
	}

	/*
	 * For MSG_WAITALL we may have to loop again and wait for
	 * more data to come in.
	 */
	if ((flags & MSG_WAITALL) && uio->uio_resid > 0)
		goto restart;
out:
	SOCKBUF_LOCK_ASSERT(sb);
	SBLASTRECORDCHK(sb);
	SBLASTMBUFCHK(sb);
	SOCKBUF_UNLOCK(sb);
	sbunlock(sb);
	return (error);
}
/*
 * Optimized version of soreceive() for simple datagram cases from userspace.
 * Unlike in the stream case, we're able to drop a datagram if copyout()
 * fails, and because we handle datagrams atomically, we don't need to use a
 * sleep lock to prevent I/O interlacing.
 */
int
soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *m2;
	int flags, len, error;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;

	if (psa != NULL)
		*psa = NULL;
	if (controlp != NULL)
		*controlp = NULL;
	if (flagsp != NULL)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;

	/*
	 * For any complicated cases, fall back to the full
	 * soreceive_generic().
	 */
	if (mp0 != NULL || (flags & MSG_PEEK) || (flags & MSG_OOB))
		return (soreceive_generic(so, psa, uio, mp0, controlp,
		    flagsp));

	/*
	 * Enforce restrictions on use.
	 */
	KASSERT((pr->pr_flags & PR_WANTRCVD) == 0,
	    ("soreceive_dgram: wantrcvd"));
	KASSERT(pr->pr_flags & PR_ATOMIC, ("soreceive_dgram: !atomic"));
	KASSERT((so->so_rcv.sb_state & SBS_RCVATMARK) == 0,
	    ("soreceive_dgram: SBS_RCVATMARK"));
	KASSERT((so->so_proto->pr_flags & PR_CONNREQUIRED) == 0,
	    ("soreceive_dgram: PR_CONNREQUIRED"));

	/*
	 * Loop blocking while waiting for a datagram.
	 */
	SOCKBUF_LOCK(&so->so_rcv);
	while ((m = so->so_rcv.sb_mb) == NULL) {
		KASSERT(so->so_rcv.sb_cc == 0,
		    ("soreceive_dgram: sb_mb NULL but sb_cc %u",
		    so->so_rcv.sb_cc));
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			SOCKBUF_UNLOCK(&so->so_rcv);
			return (error);
		}
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE ||
		    uio->uio_resid == 0) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			return (0);
		}
		if ((so->so_state & SS_NBIO) ||
		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			return (EWOULDBLOCK);
		}
		SBLASTRECORDCHK(&so->so_rcv);
		SBLASTMBUFCHK(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		if (error) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			return (error);
		}
	}
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);

	if (uio->uio_td)
		uio->uio_td->td_ru.ru_msgrcv++;
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);
	nextrecord = m->m_nextpkt;
	if (nextrecord == NULL) {
		KASSERT(so->so_rcv.sb_lastrecord == m,
		    ("soreceive_dgram: lastrecord != m"));
	}

	KASSERT(so->so_rcv.sb_mb->m_nextpkt == nextrecord,
	    ("soreceive_dgram: m_nextpkt != nextrecord"));

	/*
	 * Pull 'm' and its chain off the front of the packet queue.
	 */
	so->so_rcv.sb_mb = NULL;
	sockbuf_pushsync(&so->so_rcv, nextrecord);

	/*
	 * Walk 'm's chain and free that many bytes from the socket buffer.
	 */
	for (m2 = m; m2 != NULL; m2 = m2->m_next)
		sbfree(&so->so_rcv, m2);

	/*
	 * Do a few last checks before we let go of the lock.
	 */
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);
	SOCKBUF_UNLOCK(&so->so_rcv);

	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME,
		    ("m->m_type == %d", m->m_type));
		if (psa != NULL)
			*psa = sodupsockaddr(mtod(m, struct sockaddr *),
			    M_NOWAIT);
		m = m_free(m);
	}
	if (m == NULL) {
		/* XXXRW: Can this happen? */
		return (0);
	}

	/*
	 * Packet to copyout() is now in 'm' and it is disconnected from the
	 * queue.
	 *
	 * Process one or more MT_CONTROL mbufs present before any data mbufs
	 * in the first mbuf chain on the socket buffer.  We call into the
	 * protocol to perform externalization (or freeing if controlp ==
	 * NULL).
	 */
	if (m->m_type == MT_CONTROL) {
		struct mbuf *cm = NULL, *cmn;
		struct mbuf **cme = &cm;

		do {
			m2 = m->m_next;
			m->m_next = NULL;
			*cme = m;
			cme = &(*cme)->m_next;
			m = m2;
		} while (m != NULL && m->m_type == MT_CONTROL);
		while (cm != NULL) {
			cmn = cm->m_next;
			cm->m_next = NULL;
			if (pr->pr_domain->dom_externalize != NULL) {
				error = (*pr->pr_domain->dom_externalize)
				    (cm, controlp);
			} else if (controlp != NULL)
				*controlp = cm;
			else
				m_freem(cm);
			if (controlp != NULL) {
				while (*controlp != NULL)
					controlp = &(*controlp)->m_next;
			}
			cm = cmn;
		}
	}
	KASSERT(m->m_type == MT_DATA, ("soreceive_dgram: !data"));

	while (m != NULL && uio->uio_resid > 0) {
		len = uio->uio_resid;
		if (len > m->m_len)
			len = m->m_len;
		error = uiomove(mtod(m, char *), (int)len, uio);
		if (error) {
			m_freem(m);
			return (error);
		}
		if (len == m->m_len)
			m = m_free(m);
		else {
			m->m_data += len;
			m->m_len -= len;
		}
	}
	if (m != NULL)
		flags |= MSG_TRUNC;
	m_freem(m);
	if (flagsp != NULL)
		*flagsp |= flags;
	return (0);
}
int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{

	return (so->so_proto->pr_usrreqs->pru_soreceive(so, psa, uio, mp0,
	    controlp, flagsp));
}
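/*
 * Example (editor's sketch, not compiled in): a blocking in-kernel receive
 * into a kernel buffer, showing the uio setup soreceive() expects.  The
 * function name is hypothetical.
 */
#if 0
static int
example_soreceive_buf(struct socket *so, void *buf, size_t len,
    struct thread *td)
{
	struct uio auio;
	struct iovec aiov;

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = td;
	return (soreceive(so, NULL, &auio, NULL, NULL, NULL));
}
#endif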
int
soshutdown(struct socket *so, int how)
{
	struct protosw *pr = so->so_proto;
	int error;

	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);
	if (pr->pr_usrreqs->pru_flush != NULL) {
		(*pr->pr_usrreqs->pru_flush)(so, how);
	}
	if (how != SHUT_WR)
		sorflush(so);
	if (how != SHUT_RD) {
		CURVNET_SET(so->so_vnet);
		error = (*pr->pr_usrreqs->pru_shutdown)(so);
		CURVNET_RESTORE();
		return (error);
	}
	return (0);
}
void
sorflush(struct socket *so)
{
	struct sockbuf *sb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	struct sockbuf asb;

	/*
	 * In order to avoid calling dom_dispose with the socket buffer mutex
	 * held, and in order to generally avoid holding the lock for a long
	 * time, we make a copy of the socket buffer and clear the original
	 * (except locks, state).  The new socket buffer copy won't have
	 * initialized locks so we can only call routines that won't use or
	 * assert those locks.
	 *
	 * Dislodge threads currently blocked in receive and wait to acquire
	 * a lock against other simultaneous readers before clearing the
	 * socket buffer.  Don't let our acquire be interrupted by a signal
	 * despite any existing socket disposition on interruptible waiting.
	 */
	CURVNET_SET(so->so_vnet);
	socantrcvmore(so);
	(void) sblock(sb, SBL_WAIT | SBL_NOINTR);

	/*
	 * Invalidate/clear most of the sockbuf structure, but leave selinfo
	 * and mutex data unchanged.
	 */
	SOCKBUF_LOCK(sb);
	bzero(&asb, offsetof(struct sockbuf, sb_startzero));
	bcopy(&sb->sb_startzero, &asb.sb_startzero,
	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
	bzero(&sb->sb_startzero,
	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
	SOCKBUF_UNLOCK(sb);
	sbunlock(sb);

	/*
	 * Dispose of special rights and flush the socket buffer.  Don't call
	 * any unsafe routines (that rely on locks being initialized) on asb.
	 */
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease_internal(&asb, so);
	CURVNET_RESTORE();
}
/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in an
 * additional variant to handle the case where the option value needs to be
 * some kind of integer, but not a specific size.  In addition to their use
 * here, these functions are also called by the protocol-level pr_ctloutput()
 * routines.
 */
int
sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	size_t	valsize;

	/*
	 * If the user gives us more than we wanted, we ignore it, but if we
	 * don't get the minimum length the caller wants, we return EINVAL.
	 * On success, sopt->sopt_valsize is set to however much we actually
	 * retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return (EINVAL);
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	if (sopt->sopt_td != NULL)
		return (copyin(sopt->sopt_val, buf, valsize));

	bcopy(sopt->sopt_val, buf, valsize);
	return (0);
}
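/*
 * Editor's illustrative sketch, not part of the original file: a
 * protocol-level pr_ctloutput() handler consuming a fixed-size integer
 * option via sooptcopyin(); the option semantics are hypothetical.
 *
 *	int optval, error;
 *
 *	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
 *	if (error != 0)
 *		return (error);
 *	(use optval; sopt->sopt_valsize has been clamped to sizeof(optval))
 */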
/*
 * Kernel version of setsockopt(2).
 *
 * XXX: optlen is size_t, not socklen_t
 */
int
so_setsockopt(struct socket *so, int level, int optname, void *optval,
    size_t optlen)
{
	struct sockopt sopt;

	sopt.sopt_level = level;
	sopt.sopt_name = optname;
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_val = optval;
	sopt.sopt_valsize = optlen;
	sopt.sopt_td = NULL;
	return (sosetopt(so, &sopt));
}
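/*
 * Editor's illustrative sketch, not part of the original file: enabling
 * SO_REUSEADDR on a kernel-owned socket with the wrapper above.
 *
 *	int on = 1, error;
 *
 *	error = so_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
 */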
int
sosetopt(struct socket *so, struct sockopt *sopt)
{
	int	error, optval;
	struct	linger l;
	struct	timeval tv;
	u_long	val;
#ifdef MAC
	struct mac extmac;
#endif

	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)(so, sopt));
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;
		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;
			SOCK_LOCK(so);
			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			SOCK_UNLOCK(so);
			break;
		case SO_DEBUG: case SO_KEEPALIVE: case SO_DONTROUTE:
		case SO_USELOOPBACK: case SO_BROADCAST: case SO_REUSEADDR:
		case SO_REUSEPORT: case SO_OOBINLINE: case SO_TIMESTAMP:
		case SO_BINTIME: case SO_NOSIGPIPE:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				goto bad;
			SOCK_LOCK(so);
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			SOCK_UNLOCK(so);
			break;
		case SO_SETFIB:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (optval < 1 || optval > rt_numfibs) {
				error = EINVAL;
				goto bad;
			}
			if ((so->so_proto->pr_domain->dom_family == PF_INET) ||
			    (so->so_proto->pr_domain->dom_family == PF_ROUTE)) {
				so->so_fibnum = optval;
				/* Note: ignore error */
				if (so->so_proto && so->so_proto->pr_ctloutput)
					(*so->so_proto->pr_ctloutput)(so, sopt);
			} else
				so->so_fibnum = 0;
			break;
		case SO_SNDBUF: case SO_RCVBUF:
		case SO_SNDLOWAT: case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these options,
			 * so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}
			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so, curthread) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				(sopt->sopt_name == SO_SNDBUF ? &so->so_snd :
				    &so->so_rcv)->sb_flags &= ~SB_AUTOSIZE;
				break;

			/*
			 * Make sure the low-water is never greater than the
			 * high-water.
			 */
			case SO_SNDLOWAT:
				SOCKBUF_LOCK(&so->so_snd);
				so->so_snd.sb_lowat =
				    (optval > so->so_snd.sb_hiwat) ?
				    so->so_snd.sb_hiwat : optval;
				SOCKBUF_UNLOCK(&so->so_snd);
				break;
			case SO_RCVLOWAT:
				SOCKBUF_LOCK(&so->so_rcv);
				so->so_rcv.sb_lowat =
				    (optval > so->so_rcv.sb_hiwat) ?
				    so->so_rcv.sb_hiwat : optval;
				SOCKBUF_UNLOCK(&so->so_rcv);
				break;
			}
			break;
		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
#ifdef COMPAT_FREEBSD32
			if (SV_CURPROC_FLAG(SV_ILP32)) {
				struct timeval32 tv32;

				error = sooptcopyin(sopt, &tv32, sizeof tv32,
				    sizeof tv32);
				CP(tv32, tv, tv_sec);
				CP(tv32, tv, tv_usec);
			} else
#endif
				error = sooptcopyin(sopt, &tv, sizeof tv,
				    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
			if (val > INT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;
		case SO_LABEL:
#ifdef MAC
			error = sooptcopyin(sopt, &extmac, sizeof extmac,
			    sizeof extmac);
			if (error)
				goto bad;
			error = mac_setsockopt_label(sopt->sopt_td->td_ucred,
			    so, &extmac);
#else
			error = EOPNOTSUPP;
#endif
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto != NULL &&
		    so->so_proto->pr_ctloutput != NULL) {
			(void)((*so->so_proto->pr_ctloutput)(so, sopt));
		}
	}
bad:
	return (error);
}
/*
 * Helper routine for getsockopt.
 */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	int	error;
	size_t	valsize;

	error = 0;

	/*
	 * Documented get behavior is that we always return a value, possibly
	 * truncated to fit in the user's buffer.  Traditional behavior is
	 * that we always tell the user precisely how much we copied, rather
	 * than something useful like the total amount we had available for
	 * her.  Note that this interface is not idempotent; the entire
	 * answer must be generated ahead of time.
	 */
	valsize = min(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != NULL) {
		if (sopt->sopt_td != NULL)
			error = copyout(buf, sopt->sopt_val, valsize);
		else
			bcopy(buf, sopt->sopt_val, valsize);
	}
	return (error);
}
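/*
 * Editor's illustrative sketch, not part of the original file: the "get"
 * side of a pr_ctloutput() handler.  Per the non-idempotence note above,
 * the whole value is computed before the single sooptcopyout() call;
 * "compute_option_value" is a hypothetical helper.
 *
 *	int optval;
 *
 *	optval = compute_option_value(so);
 *	return (sooptcopyout(sopt, &optval, sizeof(optval)));
 */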
int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int	error, optval;
	struct	linger l;
	struct	timeval tv;
#ifdef MAC
	struct mac extmac;
#endif

	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)(so, sopt));
		} else
			return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {
		case SO_ACCEPTFILTER:
			error = do_getopt_accept_filter(so, sopt);
			break;
		case SO_LINGER:
			SOCK_LOCK(so);
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			SOCK_UNLOCK(so);
			error = sooptcopyout(sopt, &l, sizeof l);
			break;
		case SO_USELOOPBACK: case SO_DONTROUTE: case SO_DEBUG:
		case SO_KEEPALIVE: case SO_REUSEADDR: case SO_REUSEPORT:
		case SO_BROADCAST: case SO_OOBINLINE: case SO_ACCEPTCONN:
		case SO_TIMESTAMP: case SO_BINTIME: case SO_NOSIGPIPE:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;
		case SO_TYPE:
			optval = so->so_type;
			goto integer;
		case SO_ERROR:
			SOCK_LOCK(so);
			optval = so->so_error;
			so->so_error = 0;
			SOCK_UNLOCK(so);
			goto integer;
		case SO_SNDBUF:
			optval = so->so_snd.sb_hiwat;
			goto integer;
		case SO_RCVBUF:
			optval = so->so_rcv.sb_hiwat;
			goto integer;
		case SO_SNDLOWAT:
			optval = so->so_snd.sb_lowat;
			goto integer;
		case SO_RCVLOWAT:
			optval = so->so_rcv.sb_lowat;
			goto integer;
		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
			    so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * tick;
#ifdef COMPAT_FREEBSD32
			if (SV_CURPROC_FLAG(SV_ILP32)) {
				struct timeval32 tv32;

				CP(tv, tv32, tv_sec);
				CP(tv, tv32, tv_usec);
				error = sooptcopyout(sopt, &tv32, sizeof tv32);
			} else
#endif
				error = sooptcopyout(sopt, &tv, sizeof tv);
			break;
		case SO_LABEL:
#ifdef MAC
			error = sooptcopyin(sopt, &extmac, sizeof(extmac),
			    sizeof(extmac));
			if (error)
				return (error);
			error = mac_getsockopt_label(sopt->sopt_td->td_ucred,
			    so, &extmac);
			if (error)
				return (error);
			error = sooptcopyout(sopt, &extmac, sizeof extmac);
#else
			error = EOPNOTSUPP;
#endif
			break;
		case SO_PEERLABEL:
#ifdef MAC
			error = sooptcopyin(sopt, &extmac, sizeof(extmac),
			    sizeof(extmac));
			if (error)
				return (error);
			error = mac_getsockopt_peerlabel(
			    sopt->sopt_td->td_ucred, so, &extmac);
			if (error)
				return (error);
			error = sooptcopyout(sopt, &extmac, sizeof extmac);
#else
			error = EOPNOTSUPP;
#endif
			break;
		case SO_LISTENQLIMIT:
			optval = so->so_qlimit;
			goto integer;
		case SO_LISTENQLEN:
			optval = so->so_qlen;
			goto integer;
		case SO_LISTENINCQLEN:
			optval = so->so_incqlen;
			goto integer;
		default:
			error = ENOPROTOOPT;
			break;
		}
		return (error);
	}
}
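/*
 * Editor's illustrative sketch, not part of the original file: reading
 * SO_ERROR from kernel code by building the struct sockopt by hand, the
 * "get" analogue of so_setsockopt() above.
 *
 *	struct sockopt sopt;
 *	int optval, error;
 *
 *	sopt.sopt_dir = SOPT_GET;
 *	sopt.sopt_level = SOL_SOCKET;
 *	sopt.sopt_name = SO_ERROR;
 *	sopt.sopt_val = &optval;
 *	sopt.sopt_valsize = sizeof(optval);
 *	sopt.sopt_td = NULL;	(kernel buffer: bcopy, not copyout)
 *	error = sogetopt(so, &sopt);
 */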
/* XXX: prepare mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize;

	MGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;
	if (sopt_size > MLEN) {
		MCLGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return ENOBUFS;
		}
		m->m_len = min(MCLBYTES, sopt_size);
	} else
		m->m_len = min(MLEN, sopt_size);
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size) {
		MGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			m_freem(*mp);
			return ENOBUFS;
		}
		if (sopt_size > MLEN) {
			MCLGET(m, sopt->sopt_td != NULL ? M_WAIT :
			    M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				m_freem(*mp);
				return ENOBUFS;
			}
			m->m_len = min(MCLBYTES, sopt_size);
		} else
			m->m_len = min(MLEN, sopt_size);
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}
/* XXX: copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;

	if (sopt->sopt_val == NULL)
		return (0);
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			int error;

			error = copyin(sopt->sopt_val, mtod(m, char *),
			    m->m_len);
			if (error != 0) {
				m_freem(m0);
				return (error);
			}
		} else
			bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
		sopt->sopt_valsize -= m->m_len;
		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
		m = m->m_next;
	}
	if (m != NULL) /* ip6_sooptmcopyin() should have allocated enough */
		panic("ip6_sooptmcopyin");
	return (0);
}
/* XXX: copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;

	if (sopt->sopt_val == NULL)
		return (0);
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			int error;

			error = copyout(mtod(m, char *), sopt->sopt_val,
			    m->m_len);
			if (error != 0) {
				m_freem(m0);
				return (error);
			}
		} else
			bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
		sopt->sopt_valsize -= m->m_len;
		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* the caller should have supplied a large enough buffer */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return (0);
}
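/*
 * Editor's illustrative sketch, not part of the original file: how the
 * three shims above compose in a pr_ctloutput() path such as IPv6's.
 * Chain ownership on success is left to the caller, as in the real users.
 *
 *	struct mbuf *m = NULL;
 *	int error;
 *
 *	error = soopt_getm(sopt, &m);		(chain sized to sopt_valsize)
 *	if (error == 0)
 *		error = soopt_mcopyin(sopt, m);	(user buffer -> mbuf chain)
 *	if (error == 0) {
 *		(... protocol inspects or rewrites 'm' here ...)
 *		error = soopt_mcopyout(sopt, m);	(mbuf chain -> user buffer)
 *	}
 */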
/*
 * sohasoutofband(): protocol notifies socket layer of the arrival of new
 * out-of-band data, which will then notify socket consumers.
 */
void
sohasoutofband(struct socket *so)
{

	if (so->so_sigio != NULL)
		pgsigio(&so->so_sigio, SIGURG, 0);
	selwakeuppri(&so->so_rcv.sb_sel, PSOCK);
}
int
sopoll(struct socket *so, int events, struct ucred *active_cred,
    struct thread *td)
{

	return (so->so_proto->pr_usrreqs->pru_sopoll(so, events, active_cred,
	    td));
}
int
sopoll_generic(struct socket *so, int events, struct ucred *active_cred,
    struct thread *td)
{
	int revents = 0;

	SOCKBUF_LOCK(&so->so_snd);
	SOCKBUF_LOCK(&so->so_rcv);
	if (events & (POLLIN | POLLRDNORM))
		if (soreadabledata(so))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (sowriteable(so))
			revents |= events & (POLLOUT | POLLWRNORM);

	if (events & (POLLPRI | POLLRDBAND))
		if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK))
			revents |= events & (POLLPRI | POLLRDBAND);

	if ((events & POLLINIGNEOF) == 0) {
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			revents |= events & (POLLIN | POLLRDNORM);
			if (so->so_snd.sb_state & SBS_CANTSENDMORE)
				revents |= POLLHUP;
		}
	}

	if (revents == 0) {
		if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
			selrecord(td, &so->so_rcv.sb_sel);
			so->so_rcv.sb_flags |= SB_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(td, &so->so_snd.sb_sel);
			so->so_snd.sb_flags |= SB_SEL;
		}
	}

	SOCKBUF_UNLOCK(&so->so_rcv);
	SOCKBUF_UNLOCK(&so->so_snd);
	return (revents);
}
int
soo_kqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;
	struct sockbuf *sb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		sb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		sb = &so->so_snd;
		break;
	default:
		return (EINVAL);
	}

	SOCKBUF_LOCK(sb);
	knlist_add(&sb->sb_sel.si_note, kn, 1);
	sb->sb_flags |= SB_KNOTE;
	SOCKBUF_UNLOCK(sb);
	return (0);
}
/*
 * Some routines that return EOPNOTSUPP for entry points that are not
 * supported by a protocol.  Fill in as needed.
 */
int
pru_accept_notsupp(struct socket *so, struct sockaddr **nam)
{
	return EOPNOTSUPP;
}

int
pru_attach_notsupp(struct socket *so, int proto, struct thread *td)
{
	return EOPNOTSUPP;
}

int
pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	return EOPNOTSUPP;
}

int
pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	return EOPNOTSUPP;
}

int
pru_connect2_notsupp(struct socket *so1, struct socket *so2)
{
	return EOPNOTSUPP;
}

int
pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct thread *td)
{
	return EOPNOTSUPP;
}

int
pru_disconnect_notsupp(struct socket *so)
{
	return EOPNOTSUPP;
}

int
pru_listen_notsupp(struct socket *so, int backlog, struct thread *td)
{
	return EOPNOTSUPP;
}

int
pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam)
{
	return EOPNOTSUPP;
}

int
pru_rcvd_notsupp(struct socket *so, int flags)
{
	return EOPNOTSUPP;
}

int
pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags)
{
	return EOPNOTSUPP;
}

int
pru_send_notsupp(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr, struct mbuf *control, struct thread *td)
{
	return EOPNOTSUPP;
}
/*
 * This isn't really a ``null'' operation, but it's the default one and
 * doesn't do anything destructive.
 */
int
pru_sense_null(struct socket *so, struct stat *sb)
{

	sb->st_blksize = so->so_snd.sb_hiwat;
	return 0;
}
int
pru_shutdown_notsupp(struct socket *so)
{
	return EOPNOTSUPP;
}

int
pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam)
{
	return EOPNOTSUPP;
}

int
pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	return EOPNOTSUPP;
}

int
pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
	return EOPNOTSUPP;
}

int
pru_sopoll_notsupp(struct socket *so, int events, struct ucred *cred,
    struct thread *td)
{
	return EOPNOTSUPP;
}
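/*
 * Editor's illustrative sketch, not part of the original file: a minimal
 * protocol can point unimplemented entry points at the stubs above; the
 * "foo_" names are hypothetical.
 *
 *	struct pr_usrreqs foo_usrreqs = {
 *		.pru_attach =	foo_attach,
 *		.pru_detach =	foo_detach,
 *		.pru_send =	foo_send,
 *		.pru_accept =	pru_accept_notsupp,
 *		.pru_connect =	pru_connect_notsupp,
 *		.pru_listen =	pru_listen_notsupp,
 *		.pru_sense =	pru_sense_null,
 *	};
 */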
static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	SOCKBUF_LOCK(&so->so_rcv);
	knlist_remove(&so->so_rcv.sb_sel.si_note, kn, 1);
	if (knlist_empty(&so->so_rcv.sb_sel.si_note))
		so->so_rcv.sb_flags &= ~SB_KNOTE;
	SOCKBUF_UNLOCK(&so->so_rcv);
}
/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so;

	so = kn->kn_fp->f_data;
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);

	kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	} else if (so->so_error)	/* temporary udp error */
		return (1);
	else if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	else
		return (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat);
}
static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	SOCKBUF_LOCK(&so->so_snd);
	knlist_remove(&so->so_snd.sb_sel.si_note, kn, 1);
	if (knlist_empty(&so->so_snd.sb_sel.si_note))
		so->so_snd.sb_flags &= ~SB_KNOTE;
	SOCKBUF_UNLOCK(&so->so_snd);
}
/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so;

	so = kn->kn_fp->f_data;
	SOCKBUF_LOCK_ASSERT(&so->so_snd);
	kn->kn_data = sbspace(&so->so_snd);
	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	} else if (so->so_error)	/* temporary udp error */
		return (1);
	else if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	else if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	else
		return (kn->kn_data >= so->so_snd.sb_lowat);
}
/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;
	return (! TAILQ_EMPTY(&so->so_comp));
}
int
socheckuid(struct socket *so, uid_t uid)
{

	if (so == NULL)
		return (EPERM);
	if (so->so_cred->cr_uid != uid)
		return (EPERM);
	return (0);
}
static int
sysctl_somaxconn(SYSCTL_HANDLER_ARGS)
{
	int error;
	int val;

	val = somaxconn;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	if (val < 1 || val > USHRT_MAX)
		return (EINVAL);

	somaxconn = val;
	return (0);
}
/*
 * These functions are used by protocols to notify the socket layer (and its
 * consumers) of state changes in the sockets driven by protocol-side events.
 */

/*
 * Procedures to manipulate state flags of socket and do appropriate wakeups.
 *
 * Normal sequence from the active (originating) side is that
 * soisconnecting() is called during processing of a connect() call, resulting
 * in an eventual call to soisconnected() if/when the connection is
 * established.  When the connection is torn down, soisdisconnecting() is
 * called during processing of a disconnect() call, and soisdisconnected() is
 * called when the connection to the peer is totally severed.  The semantics
 * of these routines are such that connectionless protocols can call
 * soisconnected() and soisdisconnected() only, bypassing the in-progress
 * calls when setting up a ``connection'' takes no time.
 *
 * From the passive side, a socket is created with two queues of sockets:
 * so_incomp for connections in progress and so_comp for connections already
 * made and awaiting user acceptance.  As a protocol is preparing incoming
 * connections, it creates a socket structure queued on so_incomp by calling
 * sonewconn().  When the connection is established, soisconnected() is
 * called, and transfers the socket structure to so_comp, making it available
 * to accept().
 *
 * If a socket is closed with sockets on either so_incomp or so_comp, these
 * sockets are dropped.
 *
 * If higher-level protocols are implemented in the kernel, the wakeups done
 * here will sometimes cause software-interrupt process scheduling.
 *
 * A sketch of the active-side call sequence follows this comment.
 */
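/*
 * Editor's illustrative sketch, not part of the original file: the
 * active-side sequence described above, as a connection-oriented protocol
 * would drive it (error handling and protocol specifics elided).
 *
 *	(connect() request reaches the protocol:)
 *	soisconnecting(so);
 *	(... handshake with the peer completes ...)
 *	soisconnected(so);
 *	(disconnect() request reaches the protocol:)
 *	soisdisconnecting(so);
 *	(... peer acknowledges the teardown ...)
 *	soisdisconnected(so);
 *
 * A connectionless protocol would call only soisconnected() and
 * soisdisconnected().
 */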
void
soisconnecting(struct socket *so)
{

	SOCK_LOCK(so);
	so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= SS_ISCONNECTING;
	SOCK_UNLOCK(so);
}
void
soisconnected(struct socket *so)
{
	struct socket *head;
	int ret;

restart:
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
	so->so_state |= SS_ISCONNECTED;
	head = so->so_head;
	if (head != NULL && (so->so_qstate & SQ_INCOMP)) {
		if ((so->so_options & SO_ACCEPTFILTER) == 0) {
			SOCK_UNLOCK(so);
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
			so->so_qstate &= ~SQ_INCOMP;
			TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
			head->so_qlen++;
			so->so_qstate |= SQ_COMP;
			ACCEPT_UNLOCK();
			sorwakeup(head);
			wakeup_one(&head->so_timeo);
		} else {
			ACCEPT_UNLOCK();
			soupcall_set(so, SO_RCV,
			    head->so_accf->so_accept_filter->accf_callback,
			    head->so_accf->so_accept_filter_arg);
			so->so_options &= ~SO_ACCEPTFILTER;
			ret = head->so_accf->so_accept_filter->accf_callback(so,
			    head->so_accf->so_accept_filter_arg, M_DONTWAIT);
			if (ret == SU_ISCONNECTED)
				soupcall_clear(so, SO_RCV);
			SOCK_UNLOCK(so);
			if (ret == SU_ISCONNECTED)
				goto restart;
		}
		return;
	}
	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();
	wakeup(&so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
}
void
soisdisconnecting(struct socket *so)
{

	/*
	 * Note: This code assumes that SOCK_LOCK(so) and
	 * SOCKBUF_LOCK(&so->so_rcv) are the same.
	 */
	SOCKBUF_LOCK(&so->so_rcv);
	so->so_state &= ~SS_ISCONNECTING;
	so->so_state |= SS_ISDISCONNECTING;
	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
	sorwakeup_locked(so);
	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_state |= SBS_CANTSENDMORE;
	sowwakeup_locked(so);
	wakeup(&so->so_timeo);
}
void
soisdisconnected(struct socket *so)
{

	/*
	 * Note: This code assumes that SOCK_LOCK(so) and
	 * SOCKBUF_LOCK(&so->so_rcv) are the same.
	 */
	SOCKBUF_LOCK(&so->so_rcv);
	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= SS_ISDISCONNECTED;
	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
	sorwakeup_locked(so);
	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_state |= SBS_CANTSENDMORE;
	sbdrop_locked(&so->so_snd, so->so_snd.sb_cc);
	sowwakeup_locked(so);
	wakeup(&so->so_timeo);
}
/*
 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.
 */
struct sockaddr *
sodupsockaddr(const struct sockaddr *sa, int mflags)
{
	struct sockaddr *sa2;

	sa2 = malloc(sa->sa_len, M_SONAME, mflags);
	if (sa2)
		bcopy(sa, sa2, sa->sa_len);
	return (sa2);
}
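/*
 * Editor's illustrative sketch, not part of the original file: duplicating
 * a peer address for return to a caller, who later frees it.
 *
 *	struct sockaddr *sa2;
 *
 *	sa2 = sodupsockaddr(sa, M_NOWAIT);
 *	if (sa2 == NULL)
 *		return (ENOMEM);
 *	(... hand sa2 to the caller, which eventually does:)
 *	free(sa2, M_SONAME);
 */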
/*
 * Register per-socket buffer upcalls.
 */
void
soupcall_set(struct socket *so, int which,
    int (*func)(struct socket *, void *, int), void *arg)
{
	struct sockbuf *sb;

	switch (which) {
	case SO_RCV:
		sb = &so->so_rcv;
		break;
	case SO_SND:
		sb = &so->so_snd;
		break;
	default:
		panic("soupcall_set: bad which");
	}
	SOCKBUF_LOCK_ASSERT(sb);
#if 0
	/* XXX: accf_http actually wants to do this on purpose. */
	KASSERT(sb->sb_upcall == NULL, ("soupcall_set: overwriting upcall"));
#endif
	sb->sb_upcall = func;
	sb->sb_upcallarg = arg;
	sb->sb_flags |= SB_UPCALL;
}
void
soupcall_clear(struct socket *so, int which)
{
	struct sockbuf *sb;

	switch (which) {
	case SO_RCV:
		sb = &so->so_rcv;
		break;
	case SO_SND:
		sb = &so->so_snd;
		break;
	default:
		panic("soupcall_clear: bad which");
	}
	SOCKBUF_LOCK_ASSERT(sb);
	KASSERT(sb->sb_upcall != NULL, ("soupcall_clear: no upcall to clear"));
	sb->sb_upcall = NULL;
	sb->sb_upcallarg = NULL;
	sb->sb_flags &= ~SB_UPCALL;
}
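/*
 * Editor's illustrative sketch, not part of the original file: registering
 * and clearing a receive upcall.  "foo_rcv_upcall" is hypothetical; it runs
 * with the sockbuf lock held, must not sleep, and returns SU_OK (or
 * SU_ISCONNECTED, as the accept filter hand-off above does).
 *
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	soupcall_set(so, SO_RCV, foo_rcv_upcall, arg);
 *	SOCKBUF_UNLOCK(&so->so_rcv);
 *	...
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	soupcall_clear(so, SO_RCV);
 *	SOCKBUF_UNLOCK(&so->so_rcv);
 */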
/*
 * Create an external-format (``xsocket'') structure using the information in
 * the kernel-format socket structure pointed to by so.  This is done to
 * reduce the spew of irrelevant information over this interface, to isolate
 * user code from changes in the kernel structure, and potentially to provide
 * information-hiding if we decide that some of this information should be
 * hidden from users.
 */
void
sotoxsocket(struct socket *so, struct xsocket *xso)
{

	xso->xso_len = sizeof *xso;
	xso->xso_so = so;
	xso->so_type = so->so_type;
	xso->so_options = so->so_options;
	xso->so_linger = so->so_linger;
	xso->so_state = so->so_state;
	xso->so_pcb = so->so_pcb;
	xso->xso_protocol = so->so_proto->pr_protocol;
	xso->xso_family = so->so_proto->pr_domain->dom_family;
	xso->so_qlen = so->so_qlen;
	xso->so_incqlen = so->so_incqlen;
	xso->so_qlimit = so->so_qlimit;
	xso->so_timeo = so->so_timeo;
	xso->so_error = so->so_error;
	xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
	xso->so_oobmark = so->so_oobmark;
	sbtoxsockbuf(&so->so_snd, &xso->so_snd);
	sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
	xso->so_uid = so->so_cred->cr_uid;
}
/*
 * Socket accessor functions to provide external consumers with
 * a safe interface to socket state.
 */

void
so_listeners_apply_all(struct socket *so, void (*func)(struct socket *, void *),
    void *arg)
{

	TAILQ_FOREACH(so, &so->so_comp, so_list)
		func(so, arg);
}
struct sockbuf *
so_sockbuf_rcv(struct socket *so)
{

	return (&so->so_rcv);
}

struct sockbuf *
so_sockbuf_snd(struct socket *so)
{

	return (&so->so_snd);
}

int
so_state_get(const struct socket *so)
{

	return (so->so_state);
}

void
so_state_set(struct socket *so, int val)
{

	so->so_state = val;
}

int
so_options_get(const struct socket *so)
{

	return (so->so_options);
}

void
so_options_set(struct socket *so, int val)
{

	so->so_options = val;
}

int
so_error_get(const struct socket *so)
{

	return (so->so_error);
}

void
so_error_set(struct socket *so, int val)
{

	so->so_error = val;
}

int
so_linger_get(const struct socket *so)
{

	return (so->so_linger);
}

void
so_linger_set(struct socket *so, int val)
{

	so->so_linger = val;
}

struct protosw *
so_protosw_get(const struct socket *so)
{

	return (so->so_proto);
}

void
so_protosw_set(struct socket *so, struct protosw *val)
{

	so->so_proto = val;
}

void
so_sorwakeup(struct socket *so)
{

	sorwakeup(so);
}

void
so_sowwakeup(struct socket *so)
{

	sowwakeup(so);
}

void
so_sorwakeup_locked(struct socket *so)
{

	sorwakeup_locked(so);
}

void
so_sowwakeup_locked(struct socket *so)
{

	sowwakeup_locked(so);
}

void
so_lock(struct socket *so)
{

	SOCK_LOCK(so);
}

void
so_unlock(struct socket *so)
{

	SOCK_UNLOCK(so);
}
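/*
 * Editor's illustrative sketch, not part of the original file: an external
 * consumer (e.g. an offload driver) using the accessors above instead of
 * reaching into struct socket directly.
 *
 *	if (so_state_get(so) & SS_ISCONNECTED) {
 *		struct sockbuf *sb = so_sockbuf_rcv(so);
 *
 *		(... append data to sb under its lock, then:)
 *		so_sorwakeup(so);
 *	}
 */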