 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2004 The FreeBSD Foundation
 * Copyright (c) 2004-2008 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 */

/*
 * Comments on the socket life cycle:
 *
 * soalloc() sets up socket layer state for a socket, called only by
 * socreate() and sonewconn().  Socket layer private.
 *
 * sodealloc() tears down socket layer state for a socket, called only by
 * sofree() and sonewconn().  Socket layer private.
 *
 * pru_attach() associates protocol layer state with an allocated socket;
 * called only once, may fail, aborting socket allocation.  This is called
 * from socreate() and sonewconn().  Socket layer private.
 *
 * pru_detach() disassociates protocol layer state from an attached socket,
 * and will be called exactly once for sockets in which pru_attach() has
 * been successfully called.  If pru_attach() returned an error,
 * pru_detach() will not be called.  Socket layer private.
 *
 * pru_abort() and pru_close() notify the protocol layer that the last
 * consumer of a socket is starting to tear down the socket, and that the
 * protocol should terminate the connection.  Historically, pru_abort() also
 * detached protocol state from the socket state, but this is no longer the
 * case.
 *
 * socreate() creates a socket and attaches protocol state.  This is a public
 * interface that may be used by socket layer consumers to create new
 * sockets.
 *
 * sonewconn() creates a socket and attaches protocol state.  This is a
 * public interface that may be used by protocols to create new sockets when
 * a new connection is received and will be available for accept() on a
 * listen socket.
 *
 * soclose() destroys a socket after possibly waiting for it to disconnect.
 * This is a public interface that socket consumers should use to close and
 * release a socket when done with it.
 *
 * soabort() destroys a socket without waiting for it to disconnect (used
 * only for incoming connections that are already partially or fully
 * connected).  This is used internally by the socket layer when clearing
 * listen socket queues (due to overflow or close on the listen socket), but
 * is also a public interface protocols may use to abort connections in
 * their incomplete listen queues should they no longer be required.  Sockets
 * placed in completed connection listen queues should not be aborted for
 * reasons described in the comment above the soclose() implementation.  This
 * is not a general purpose close routine, and except in the specific
 * circumstances described here, should not be used.
 *
 * sofree() will free a socket and its protocol state if all references on
 * the socket have been released, and is the public interface to attempt to
 * free a socket when a reference is removed.  This is a socket layer private
 * interface.
 *
 * NOTE: In addition to socreate() and soclose(), which provide a single
 * socket reference to the consumer to be managed as required, there are two
 * calls to explicitly manage socket references, soref() and sorele().
 * Currently, these are generally required only when transitioning a socket
 * from a listen queue to a file descriptor, in order to prevent garbage
 * collection of the socket at an untimely moment.  For a number of reasons,
 * these interfaces are not preferred, and should be avoided.
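 *
 * As an illustrative sketch (not part of the original comments), the
 * listen-queue-to-file-descriptor transition in accept(2) uses the
 * reference calls roughly as follows, with error handling elided:
 *
 *	ACCEPT_LOCK();
 *	SOCK_LOCK(so);
 *	TAILQ_REMOVE(&head->so_comp, so, so_list);
 *	head->so_qlen--;
 *	so->so_qstate &= ~SQ_COMP;
 *	so->so_head = NULL;
 *	soref(so);		(the file descriptor now holds a reference)
 *	SOCK_UNLOCK(so);
 *	ACCEPT_UNLOCK();
 *	...when that descriptor is later closed...
 *	ACCEPT_LOCK();
 *	SOCK_LOCK(so);		(sorele() expects both locks held)
 *	sorele(so);		(drops the reference; may free the socket)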
 * NOTE: With regard to VNETs the general rule is that callers do not set
 * curvnet.  Exceptions to this rule include soabort(), sodisconnect(),
 * sofree() (and with that sorele(), sotryfree()), as well as sonewconn()
 * and sorflush(), which are usually called from a pre-set VNET context.
 * sopoll() currently does not need a VNET context to be set.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_zero.h"
#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <net/route.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>
#include <sys/syslog.h>

#include <net/vnet.h>

#include <security/mac/mac_framework.h>

#include <vm/uma.h>

#ifdef COMPAT_FREEBSD32
#include <sys/mount.h>
#include <sys/sysent.h>
#include <compat/freebsd32/freebsd32.h>
#endif

static int	soreceive_rcvoob(struct socket *so, struct uio *uio,
		    int flags);

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static struct filterops solisten_filtops = {
	.f_detach = filt_sordetach,
	.f_event = filt_solisten,
};
static struct filterops soread_filtops = {
	.f_detach = filt_sordetach,
	.f_event = filt_soread,
};
static struct filterops sowrite_filtops = {
	.f_detach = filt_sowdetach,
	.f_event = filt_sowrite,
};

so_gen_t	so_gencnt;	/* generation count for sockets */

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

#define	VNET_SO_ASSERT(so)						\
	VNET_ASSERT(curvnet != NULL,					\
	    ("%s:%d curvnet is NULL, so=%p", __func__, __LINE__, (so)));

/*
 * Limit on the number of connections in the listen queue waiting
 * for accept(2).
 * NB: The original sysctl somaxconn is still available but hidden
 * to prevent confusion about the actual purpose of this number.
 */
static int somaxconn = SOMAXCONN;

static int
sysctl_somaxconn(SYSCTL_HANDLER_ARGS)
{
	int error;
	int val;

	val = somaxconn;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	if (val < 1 || val > USHRT_MAX)
		return (EINVAL);

	somaxconn = val;
	return (0);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, soacceptqueue, CTLTYPE_UINT | CTLFLAG_RW,
    0, sizeof(int), sysctl_somaxconn, "I",
    "Maximum listen socket pending connection accept queue size");
SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_SKIP,
    0, sizeof(int), sysctl_somaxconn, "I",
    "Maximum listen socket pending connection accept queue size (compat)");

static int numopensockets;
SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
    &numopensockets, 0, "Number of open sockets");

#if defined(SOCKET_SEND_COW) || defined(SOCKET_RECV_PFLIP)
SYSCTL_NODE(_kern_ipc, OID_AUTO, zero_copy, CTLFLAG_RD, 0,
    "Zero copy controls");
#ifdef SOCKET_RECV_PFLIP
int so_zero_copy_receive = 1;
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, receive, CTLFLAG_RW,
    &so_zero_copy_receive, 0, "Enable zero copy receive");
#endif
#ifdef SOCKET_SEND_COW
int so_zero_copy_send = 1;
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, send, CTLFLAG_RW,
    &so_zero_copy_send, 0, "Enable zero copy send");
#endif /* SOCKET_SEND_COW */
#endif /* SOCKET_SEND_COW || SOCKET_RECV_PFLIP */

/*
 * accept_mtx locks down per-socket fields relating to accept queues.  See
 * socketvar.h for an annotation of the protected fields of struct socket.
 */
struct mtx accept_mtx;
MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);

/*
 * so_global_mtx protects so_gencnt, numopensockets, and the per-socket
 * so_gencnt field.
 */
static struct mtx so_global_mtx;
MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_global", MTX_DEF);

/*
 * General IPC sysctl name space, used by sockets and a variety of other IPC
 * mechanisms.
 */
SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");

/*
 * Initialize the socket subsystem and set up the socket
 * memory allocator.
 */
static uma_zone_t socket_zone;
int	maxsockets;

static void
socket_zone_change(void *tag)
{

	maxsockets = uma_zone_set_max(socket_zone, maxsockets);
}

static void
socket_init(void *tag)
{

	socket_zone = uma_zcreate("socket", sizeof(struct socket), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	maxsockets = uma_zone_set_max(socket_zone, maxsockets);
	uma_zone_set_warning(socket_zone, "kern.ipc.maxsockets limit reached");
	EVENTHANDLER_REGISTER(maxsockets_change, socket_zone_change, NULL,
	    EVENTHANDLER_PRI_FIRST);
}
SYSINIT(socket, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY, socket_init, NULL);

/*
 * Initialize maxsockets.  This SYSINIT must be run after
 * tunable_mbinit().
 */
static void
init_maxsockets(void *ignored)
{

	TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
	maxsockets = imax(maxsockets, maxfiles);
}
SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);

/*
 * Sysctl to get and set the maximum global sockets limit.  Notify protocols
 * of the change so that they can update their dependent limits as required.
 */
static int
sysctl_maxsockets(SYSCTL_HANDLER_ARGS)
{
	int error, newmaxsockets;

	newmaxsockets = maxsockets;
	error = sysctl_handle_int(oidp, &newmaxsockets, 0, req);
	if (error == 0 && req->newptr) {
		if (newmaxsockets > maxsockets &&
		    newmaxsockets <= maxfiles) {
			maxsockets = newmaxsockets;
			EVENTHANDLER_INVOKE(maxsockets_change);
		} else
			error = EINVAL;
	}

	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets, CTLTYPE_INT|CTLFLAG_RW,
    &maxsockets, 0, sysctl_maxsockets, "IU",
    "Maximum number of sockets available");

/*
 * Socket operation routines.  These routines are called by the routines in
 * sys_socket.c or from a system process, and implement the semantics of
 * socket operations by switching out to the protocol specific routines.
 */

/*
 * Get a socket structure from our zone, and initialize it.  Note that it
 * would probably be better to allocate socket and PCB at the same time, but
 * I'm not convinced that all the protocols can be easily modified to do
 * this.
 *
 * soalloc() returns a socket with a ref count of 0.
 */
static struct socket *
soalloc(struct vnet *vnet)
{
	struct socket *so;

	so = uma_zalloc(socket_zone, M_NOWAIT | M_ZERO);
	if (so == NULL)
		return (NULL);
#ifdef MAC
	if (mac_socket_init(so, M_NOWAIT) != 0) {
		uma_zfree(socket_zone, so);
		return (NULL);
	}
#endif
	SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
	SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
	sx_init(&so->so_snd.sb_sx, "so_snd_sx");
	sx_init(&so->so_rcv.sb_sx, "so_rcv_sx");
	TAILQ_INIT(&so->so_aiojobq);
	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	++numopensockets;
#ifdef VIMAGE
	VNET_ASSERT(vnet != NULL, ("%s:%d vnet is NULL, so=%p",
	    __func__, __LINE__, so));
	vnet->vnet_sockcnt++;
	so->so_vnet = vnet;
#endif
	mtx_unlock(&so_global_mtx);
	return (so);
}

/*
 * Free the storage associated with a socket at the socket layer, tear down
 * locks, labels, etc.  All protocol state is assumed already to have been
 * torn down (and possibly never set up) by the caller.
 */
static void
sodealloc(struct socket *so)
{

	KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
	KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL"));

	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	--numopensockets;	/* Could be below, but faster here. */
#ifdef VIMAGE
	VNET_ASSERT(so->so_vnet != NULL, ("%s:%d so_vnet is NULL, so=%p",
	    __func__, __LINE__, so));
	so->so_vnet->vnet_sockcnt--;
#endif
	mtx_unlock(&so_global_mtx);
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
	/* Remove accept filter if one is present. */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
#ifdef MAC
	mac_socket_destroy(so);
#endif
	sx_destroy(&so->so_snd.sb_sx);
	sx_destroy(&so->so_rcv.sb_sx);
	SOCKBUF_LOCK_DESTROY(&so->so_snd);
	SOCKBUF_LOCK_DESTROY(&so->so_rcv);
	uma_zfree(socket_zone, so);
}

/*
 * socreate returns a socket with a ref count of 1.  The socket should be
 * closed with soclose().
 */
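
/*
 * For example (an illustrative sketch, not part of the original file), an
 * in-kernel consumer might create and later release a UDP socket as
 * follows, with error handling abbreviated:
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_DGRAM, IPPROTO_UDP,
 *	    curthread->td_ucred, curthread);
 *	if (error != 0)
 *		return (error);
 *	...use sosend() and soreceive() on 'so'...
 *	soclose(so);	(releases the single reference from socreate())
 */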
int
socreate(int dom, struct socket **aso, int type, int proto,
    struct ucred *cred, struct thread *td)
{
	struct protosw *prp;
	struct socket *so;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL) {
		/* No support for domain. */
		if (pffinddomain(dom) == NULL)
			return (EAFNOSUPPORT);
		/* No support for socket type. */
		if (proto == 0 && type != 0)
			return (EPROTOTYPE);
		return (EPROTONOSUPPORT);
	}
	if (prp->pr_usrreqs->pru_attach == NULL ||
	    prp->pr_usrreqs->pru_attach == pru_attach_notsupp)
		return (EPROTONOSUPPORT);

	if (prison_check_af(cred, prp->pr_domain->dom_family) != 0)
		return (EPROTONOSUPPORT);

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(CRED_TO_VNET(cred));
	if (so == NULL)
		return (ENOBUFS);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(cred);
	if ((prp->pr_domain->dom_family == PF_INET) ||
	    (prp->pr_domain->dom_family == PF_INET6) ||
	    (prp->pr_domain->dom_family == PF_ROUTE))
		so->so_fibnum = td->td_proc->p_fibnum;
	else
		so->so_fibnum = 0;
	so->so_proto = prp;
#ifdef MAC
	mac_socket_create(cred, so);
#endif
	knlist_init_mtx(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
	knlist_init_mtx(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
	so->so_count = 1;
	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	CURVNET_SET(so->so_vnet);
	error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
	CURVNET_RESTORE();
	if (error) {
		KASSERT(so->so_count == 1, ("socreate: so_count %d",
		    so->so_count));
		so->so_count = 0;
		sodealloc(so);
		return (error);
	}
	*aso = so;
	return (0);
}

static int regression_sonewconn_earlytest = 1;
SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW,
    &regression_sonewconn_earlytest, 0, "Perform early sonewconn limit test");

/*
 * When an attempt at a new connection is noted on a socket which accepts
 * connections, sonewconn is called.  If the connection is possible (subject
 * to space constraints, etc.) then we allocate a new structure, properly
 * linked into the data structure of the original socket, and return this.
 * connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED.
 *
 * Note: the ref count on the socket is 0 on return.
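 *
 * As an illustrative sketch (not part of the original comments), a protocol
 * typically calls sonewconn() from its input path when a connection request
 * arrives on a listening socket 'head':
 *
 *	struct socket *so;
 *
 *	so = sonewconn(head, SS_ISCONNECTED);
 *	if (so == NULL)
 *		(drop the request: queue limits reached or allocation failed)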
 */
struct socket *
sonewconn(struct socket *head, int connstatus)
{
	struct socket *so;
	int over;

	ACCEPT_LOCK();
	over = (head->so_qlen > 3 * head->so_qlimit / 2);
	ACCEPT_UNLOCK();
#ifdef REGRESSION
	if (regression_sonewconn_earlytest && over) {
#else
	if (over) {
#endif
		log(LOG_DEBUG, "%s: pcb %p: Listen queue overflow: "
		    "%i already in queue awaiting acceptance\n",
		    __func__, head->so_pcb, over);
		return (NULL);
	}
	VNET_ASSERT(head->so_vnet != NULL, ("%s:%d so_vnet is NULL, head=%p",
	    __func__, __LINE__, head));
	so = soalloc(head->so_vnet);
	if (so == NULL) {
		log(LOG_DEBUG, "%s: pcb %p: New socket allocation failure: "
		    "limit reached or out of memory\n",
		    __func__, head->so_pcb);
		return (NULL);
	}
	if ((head->so_options & SO_ACCEPTFILTER) != 0)
		connstatus = 0;
	so->so_head = head;
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state | SS_NOFDREF;
	so->so_fibnum = head->so_fibnum;
	so->so_proto = head->so_proto;
	so->so_cred = crhold(head->so_cred);
#ifdef MAC
	mac_socket_newconn(head, so);
#endif
	knlist_init_mtx(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
	knlist_init_mtx(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
	VNET_SO_ASSERT(head);
	if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat)) {
		sodealloc(so);
		log(LOG_DEBUG, "%s: pcb %p: soreserve() failed\n",
		    __func__, head->so_pcb);
		return (NULL);
	}
	if ((*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
		sodealloc(so);
		log(LOG_DEBUG, "%s: pcb %p: pru_attach() failed\n",
		    __func__, head->so_pcb);
		return (NULL);
	}
	so->so_rcv.sb_lowat = head->so_rcv.sb_lowat;
	so->so_snd.sb_lowat = head->so_snd.sb_lowat;
	so->so_rcv.sb_timeo = head->so_rcv.sb_timeo;
	so->so_snd.sb_timeo = head->so_snd.sb_timeo;
	so->so_rcv.sb_flags |= head->so_rcv.sb_flags & SB_AUTOSIZE;
	so->so_snd.sb_flags |= head->so_snd.sb_flags & SB_AUTOSIZE;
	so->so_state |= connstatus;
	ACCEPT_LOCK();
	/*
	 * The accept socket may be tearing down but we just
	 * won a race on the ACCEPT_LOCK.
	 */
	if (!(head->so_options & SO_ACCEPTCONN)) {
		SOCK_LOCK(so);
		so->so_head = NULL;
		sofree(so);		/* NB: returns ACCEPT_UNLOCK'ed. */
		return (NULL);
	}
	if (connstatus) {
		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
		so->so_qstate |= SQ_COMP;
		head->so_qlen++;
	} else {
		/*
		 * Keep removing sockets from the head until there's room for
		 * us to insert on the tail.  In pre-locking revisions, this
		 * was a simple if(), but as we could be racing with other
		 * threads and soabort() requires dropping locks, we must
		 * loop waiting for the condition to be true.
		 */
		while (head->so_incqlen > head->so_qlimit) {
			struct socket *sp;

			sp = TAILQ_FIRST(&head->so_incomp);
			TAILQ_REMOVE(&head->so_incomp, sp, so_list);
			head->so_incqlen--;
			sp->so_qstate &= ~SQ_INCOMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
		so->so_qstate |= SQ_INCOMP;
		head->so_incqlen++;
	}
	ACCEPT_UNLOCK();
	if (connstatus) {
		sorwakeup(head);
		wakeup_one(&head->so_timeo);
	}
	return (so);
}

int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	CURVNET_SET(so->so_vnet);
	error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td);
	CURVNET_RESTORE();
	return (error);
}

/*
 * solisten() transitions a socket from a non-listening state to a listening
 * state, but can also be used to update the listen queue depth on an
 * existing listen socket.  The protocol will call back into the sockets
 * layer using solisten_proto_check() and solisten_proto() to check and set
 * socket-layer listen state.  Call backs are used so that the protocol can
 * acquire both protocol and socket layer locks in whatever order is required
 * by the protocol.
 *
 * Protocol implementors are advised to hold the socket lock across the
 * socket-layer test and set to avoid races at the socket layer.
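 *
 * An illustrative sketch (not part of the original comments) of a
 * protocol's pru_listen method using these call backs:
 *
 *	SOCK_LOCK(so);
 *	error = solisten_proto_check(so);
 *	if (error == 0)
 *		solisten_proto(so, backlog);
 *	SOCK_UNLOCK(so);
 *	return (error);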
 */
int
solisten(struct socket *so, int backlog, struct thread *td)
{
	int error;

	CURVNET_SET(so->so_vnet);
	error = (*so->so_proto->pr_usrreqs->pru_listen)(so, backlog, td);
	CURVNET_RESTORE();
	return (error);
}

int
solisten_proto_check(struct socket *so)
{

	SOCK_LOCK_ASSERT(so);

	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
	    SS_ISDISCONNECTING))
		return (EINVAL);
	return (0);
}

void
solisten_proto(struct socket *so, int backlog)
{

	SOCK_LOCK_ASSERT(so);

	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	so->so_options |= SO_ACCEPTCONN;
}

/*
 * Evaluate the reference count and named references on a socket; if no
 * references remain, free it.  This should be called whenever a reference is
 * released, such as in sorele(), but also when named reference flags are
 * cleared in socket or protocol code.
 *
 * sofree() will free the socket if:
 *
 * - There are no outstanding file descriptor references or related consumers
 *   (so_count == 0).
 *
 * - The socket has been closed by user space, if ever open (SS_NOFDREF).
 *
 * - The protocol does not have an outstanding strong reference on the socket
 *   (SS_PROTOREF).
 *
 * - The socket is not in a completed connection queue, so a process has been
 *   notified that it is present.  If it is removed, the user process may
 *   block in accept() despite select() saying the socket was ready.
 */
void
sofree(struct socket *so)
{
	struct protosw *pr = so->so_proto;
	struct socket *head;

	ACCEPT_LOCK_ASSERT();
	SOCK_LOCK_ASSERT(so);

	if ((so->so_state & SS_NOFDREF) == 0 || so->so_count != 0 ||
	    (so->so_state & SS_PROTOREF) || (so->so_qstate & SQ_COMP)) {
		SOCK_UNLOCK(so);
		ACCEPT_UNLOCK();
		return;
	}

	head = so->so_head;
	if (head != NULL) {
		KASSERT((so->so_qstate & SQ_COMP) != 0 ||
		    (so->so_qstate & SQ_INCOMP) != 0,
		    ("sofree: so_head != NULL, but neither SQ_COMP nor "
		    "SQ_INCOMP"));
		KASSERT((so->so_qstate & SQ_COMP) == 0 ||
		    (so->so_qstate & SQ_INCOMP) == 0,
		    ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP"));
		TAILQ_REMOVE(&head->so_incomp, so, so_list);
		head->so_incqlen--;
		so->so_qstate &= ~SQ_INCOMP;
		so->so_head = NULL;
	}
	KASSERT((so->so_qstate & SQ_COMP) == 0 &&
	    (so->so_qstate & SQ_INCOMP) == 0,
	    ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)",
	    so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP));
	if (so->so_options & SO_ACCEPTCONN) {
		KASSERT((TAILQ_EMPTY(&so->so_comp)),
		    ("sofree: so_comp populated"));
		KASSERT((TAILQ_EMPTY(&so->so_incomp)),
		    ("sofree: so_incomp populated"));
	}
	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();

	VNET_SO_ASSERT(so);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
		(*pr->pr_domain->dom_dispose)(so->so_rcv.sb_mb);
	if (pr->pr_usrreqs->pru_detach != NULL)
		(*pr->pr_usrreqs->pru_detach)(so);

	/*
	 * From this point on, we assume that no other references to this
	 * socket exist anywhere else in the stack.  Therefore, no locks need
	 * to be acquired or held.
	 *
	 * We used to do a lot of socket buffer and socket locking here, as
	 * well as invoke sorflush() and perform wakeups.  The direct calls to
	 * dom_dispose() and sbrelease_internal() are an inlining of what was
	 * necessary from sorflush().
	 *
	 * Notice that the socket buffer and kqueue state are torn down
	 * before calling pru_detach.  This means that protocols should not
	 * assume they can perform socket wakeups, etc, in their detach code.
	 */
	sbdestroy(&so->so_snd, so);
	sbdestroy(&so->so_rcv, so);
	seldrain(&so->so_snd.sb_sel);
	seldrain(&so->so_rcv.sb_sel);
	knlist_destroy(&so->so_rcv.sb_sel.si_note);
	knlist_destroy(&so->so_snd.sb_sel.si_note);
	sodealloc(so);
}

/*
 * Close a socket on last file table reference removal.  Initiate disconnect
 * if connected.  Free socket when disconnect complete.
 *
 * This function will sorele() the socket.  Note that soclose() may be called
 * prior to the ref count reaching zero.  The actual socket structure will
 * not be freed until the ref count reaches zero.
 */
int
soclose(struct socket *so)
{
	int error = 0;

	KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter"));

	CURVNET_SET(so->so_vnet);
	funsetown(&so->so_sigio);
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error) {
				if (error == ENOTCONN)
					error = 0;
				goto drop;
			}
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo,
				    PSOCK | PCATCH, "soclos",
				    so->so_linger * hz);
				if (error)
					break;
			}
		}
	}

drop:
	if (so->so_proto->pr_usrreqs->pru_close != NULL)
		(*so->so_proto->pr_usrreqs->pru_close)(so);
	ACCEPT_LOCK();
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;
		/*
		 * Prevent new additions to the accept queues due
		 * to ACCEPT_LOCK races while we are draining them.
		 */
		so->so_options &= ~SO_ACCEPTCONN;
		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			so->so_incqlen--;
			sp->so_qstate &= ~SQ_INCOMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			sp->so_qstate &= ~SQ_COMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		KASSERT((TAILQ_EMPTY(&so->so_comp)),
		    ("%s: so_comp populated", __func__));
		KASSERT((TAILQ_EMPTY(&so->so_incomp)),
		    ("%s: so_incomp populated", __func__));
	}
	SOCK_LOCK(so);
	KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF"));
	so->so_state |= SS_NOFDREF;
	sorele(so);			/* NB: Returns with ACCEPT_UNLOCK(). */
	CURVNET_RESTORE();
	return (error);
}

/*
 * soabort() is used to abruptly tear down a connection, such as when a
 * resource limit is reached (listen queue depth exceeded), or if a listen
 * socket is closed while there are sockets waiting to be accepted.
 *
 * This interface is tricky, because it is called on an unreferenced socket,
 * and must be called only by a thread that has actually removed the socket
 * from the listen queue it was on, or races with other threads are risked.
 *
 * This interface will call into the protocol code, so must not be called
 * with any socket locks held.  Protocols do call it while holding their own
 * recursible protocol mutexes, but this is something that should be subject
 * to review in the future.
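 *
 * An illustrative sketch (not part of the original comments) of the only
 * safe calling pattern, mirroring the listen queue draining in soclose():
 *
 *	ACCEPT_LOCK();
 *	sp = TAILQ_FIRST(&head->so_incomp);
 *	TAILQ_REMOVE(&head->so_incomp, sp, so_list);
 *	head->so_incqlen--;
 *	sp->so_qstate &= ~SQ_INCOMP;
 *	sp->so_head = NULL;
 *	ACCEPT_UNLOCK();
 *	soabort(sp);		(no socket locks may be held at this point)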
 */
void
soabort(struct socket *so)
{

	/*
	 * In as much as is possible, assert that no references to this
	 * socket are held.  This is not quite the same as asserting that the
	 * current thread is responsible for arranging for no references, but
	 * is as close as we can get for now.
	 */
	KASSERT(so->so_count == 0, ("soabort: so_count"));
	KASSERT((so->so_state & SS_PROTOREF) == 0, ("soabort: SS_PROTOREF"));
	KASSERT(so->so_state & SS_NOFDREF, ("soabort: !SS_NOFDREF"));
	KASSERT((so->so_qstate & SQ_COMP) == 0, ("soabort: SQ_COMP"));
	KASSERT((so->so_qstate & SQ_INCOMP) == 0, ("soabort: SQ_INCOMP"));

	VNET_SO_ASSERT(so);
	if (so->so_proto->pr_usrreqs->pru_abort != NULL)
		(*so->so_proto->pr_usrreqs->pru_abort)(so);
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	sofree(so);
}

int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int error;

	SOCK_LOCK(so);
	KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF"));
	so->so_state &= ~SS_NOFDREF;
	SOCK_UNLOCK(so);

	CURVNET_SET(so->so_vnet);
	error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
	CURVNET_RESTORE();
	return (error);
}

int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);

	CURVNET_SET(so->so_vnet);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.  This allows
	 * user to disconnect by connecting to, e.g., a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from previous connection from
		 * biting us.
		 */
		so->so_error = 0;
		error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, td);
	}
	CURVNET_RESTORE();

	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	int error;

	CURVNET_SET(so1->so_vnet);
	error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2);
	CURVNET_RESTORE();
	return (error);
}

int
sodisconnect(struct socket *so)
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);
	if (so->so_state & SS_ISDISCONNECTING)
		return (EALREADY);
	VNET_SO_ASSERT(so);
	error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
	return (error);
}

#ifdef SOCKET_SEND_COW
struct so_zerocopy_stats {
	int size_ok;
	int align_ok;
	int found_ifp;
};
struct so_zerocopy_stats so_zerocp_stats = {0, 0, 0};

/*
 * sosend_copyin() is only used if zero copy sockets are enabled.  Otherwise
 * sosend_dgram() and sosend_generic() use m_uiotombuf().
 *
 * sosend_copyin() accepts a uio and prepares an mbuf chain holding part or
 * all of the data referenced by the uio.  If desired, it uses zero-copy.
 * *space will be updated to reflect data copied in.
 *
 * NB: If atomic I/O is requested, the caller must already have checked that
 * space can hold resid bytes.
 *
 * NB: In the event of an error, the caller may need to free the partial
 * chain pointed to by *mpp.  The contents of both *uio and *space may be
 * modified even in the case of an error.
 */
static int
sosend_copyin(struct uio *uio, struct mbuf **retmp, int atomic, long *space,
    int flags)
{
	struct mbuf *m, **mp, *top;
	long len;
	ssize_t resid;
	int error;
	int cow_send;

	*retmp = top = NULL;
	mp = &top;
	len = 0;
	resid = uio->uio_resid;
	error = 0;
	do {
		cow_send = 0;
		if (resid >= MINCLSIZE) {
			if (top == NULL) {
				m = m_gethdr(M_WAITOK, MT_DATA);
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;
			} else
				m = m_get(M_WAITOK, MT_DATA);
			if (so_zero_copy_send &&
			    resid >= PAGE_SIZE &&
			    *space >= PAGE_SIZE &&
			    uio->uio_iov->iov_len >= PAGE_SIZE) {
				so_zerocp_stats.size_ok++;
				so_zerocp_stats.align_ok++;
				cow_send = socow_setup(m, uio);
				len = cow_send;
			}
			if (!cow_send) {
				m_clget(m, M_WAITOK);
				len = min(min(MCLBYTES, resid), *space);
			}
		} else {
			if (top == NULL) {
				m = m_gethdr(M_WAITOK, MT_DATA);
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;

				len = min(min(MHLEN, resid), *space);
				/*
				 * For datagram protocols, leave room
				 * for protocol headers in first mbuf.
				 */
				if (atomic && m && len < MHLEN)
					MH_ALIGN(m, len);
			} else {
				m = m_get(M_WAITOK, MT_DATA);
				len = min(min(MLEN, resid), *space);
			}
		}
		if (m == NULL) {
			error = ENOBUFS;
			goto out;
		}

		*space -= len;
		if (cow_send)
			error = 0;
		else
			error = uiomove(mtod(m, void *), (int)len, uio);
		resid = uio->uio_resid;
		m->m_len = len;
		*mp = m;
		top->m_pkthdr.len += len;
		if (error)
			goto out;
		mp = &m->m_next;
		if (resid <= 0) {
			if (flags & MSG_EOR)
				top->m_flags |= M_EOR;
			break;
		}
	} while (*space > 0 && atomic);
out:
	*retmp = top;
	return (error);
}
#endif /* SOCKET_SEND_COW */

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)

int
sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	long space;
	ssize_t resid;
	int clen = 0, error, dontroute;
#ifdef SOCKET_SEND_COW
	int atomic = sosendallatonce(so) || top;
#endif

	KASSERT(so->so_type == SOCK_DGRAM, ("sosend_dgram: !SOCK_DGRAM"));
	KASSERT(so->so_proto->pr_flags & PR_ATOMIC,
	    ("sosend_dgram: !PR_ATOMIC"));

	if (uio != NULL)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.  However, space must be
	 * signed, as it might be less than 0 if we over-committed, and we
	 * must use a signed comparison of space and resid.  On the other
	 * hand, a negative resid causes us to loop sending 0-length
	 * segments to the protocol.
	 */
	if (resid < 0) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0;
	if (td != NULL)
		td->td_ru.ru_msgsnd++;
	if (control != NULL)
		clen = control->m_len;

	SOCKBUF_LOCK(&so->so_snd);
	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		SOCKBUF_UNLOCK(&so->so_snd);
		error = EPIPE;
		goto out;
	}
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		SOCKBUF_UNLOCK(&so->so_snd);
		goto out;
	}
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		/*
		 * `sendto' and `sendmsg' are allowed on a connection-based
		 * socket if it supports implied connect.  Return ENOTCONN if
		 * not connected and no address is supplied.
		 */
		if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
		    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
			if ((so->so_state & SS_ISCONFIRMING) == 0 &&
			    !(resid == 0 && clen != 0)) {
				SOCKBUF_UNLOCK(&so->so_snd);
				error = ENOTCONN;
				goto out;
			}
		} else if (addr == NULL) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED)
				error = ENOTCONN;
			else
				error = EDESTADDRREQ;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto out;
		}
	}

	/*
	 * Do we need MSG_OOB support in SOCK_DGRAM?  Signs here may be a
	 * problem and need fixing.
	 */
	space = sbspace(&so->so_snd);
	if (flags & MSG_OOB)
		space += 1024;
	space -= clen;
	SOCKBUF_UNLOCK(&so->so_snd);
	if (resid > space) {
		error = EMSGSIZE;
		goto out;
	}
	if (uio == NULL) {
		resid = 0;
		if (flags & MSG_EOR)
			top->m_flags |= M_EOR;
	} else {
#ifdef SOCKET_SEND_COW
		error = sosend_copyin(uio, &top, atomic, &space, flags);
		if (error)
			goto out;
#else
		/*
		 * Copy the data from userland into a mbuf chain.
		 * If no data is to be copied in, a single empty mbuf
		 * is returned.
		 */
		top = m_uiotombuf(uio, M_WAITOK, space, max_hdr,
		    (M_PKTHDR | ((flags & MSG_EOR) ? M_EOR : 0)));
		if (top == NULL) {
			error = EFAULT;	/* only possible error */
			goto out;
		}
		space -= resid - uio->uio_resid;
#endif /* SOCKET_SEND_COW */
		resid = uio->uio_resid;
	}
	KASSERT(resid == 0, ("sosend_dgram: resid != 0"));
	/*
	 * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock
	 * than with.
	 */
	if (dontroute) {
		SOCK_LOCK(so);
		so->so_options |= SO_DONTROUTE;
		SOCK_UNLOCK(so);
	}
	/*
	 * XXX all the SBS_CANTSENDMORE checks previously done could be out
	 * of date.  We could have received a reset packet in an interrupt or
	 * maybe we slept while doing page faults in uiomove() etc.  We could
	 * probably recheck again inside the locking protection here, but
	 * there are probably other places that this also happens.  We must
	 * rethink this.
	 */
	VNET_SO_ASSERT(so);
	error = (*so->so_proto->pr_usrreqs->pru_send)(so,
	    (flags & MSG_OOB) ? PRUS_OOB :
	/*
	 * If the user set MSG_EOF, the protocol understands this flag and
	 * nothing left to send then use PRU_SEND_EOF instead of PRU_SEND.
	 */
	    ((flags & MSG_EOF) &&
	     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
	     (resid <= 0)) ?
		PRUS_EOF :
		/* If there is more to send set PRUS_MORETOCOME */
		(resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
		top, addr, control, td);
	if (dontroute) {
		SOCK_LOCK(so);
		so->so_options &= ~SO_DONTROUTE;
		SOCK_UNLOCK(so);
	}
	clen = 0;
	control = NULL;
	top = NULL;
out:
	if (top != NULL)
		m_freem(top);
	if (control != NULL)
		m_freem(control);
	return (error);
}

/*
 * Send on a socket.  If send must go all at once and message is larger than
 * send buffering, then hard error.  Lock against other senders.  If must go
 * all at once and not enough room now, then inform user that this would
 * block and do nothing.  Otherwise, if nonblocking, send as much as
 * possible.  The data to be sent is described by "uio" if nonzero, otherwise
 * by the mbuf chain "top" (which must be null if uio is not).  Data provided
 * in mbuf chain must be small enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers must check for short
 * counts if EINTR/ERESTART are returned.  Data and control buffers are freed
 * on return.
 */
int
sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	long space;
	ssize_t resid;
	int clen = 0, error, dontroute;
	int atomic = sosendallatonce(so) || top;

	if (uio != NULL)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.  However, space must be
	 * signed, as it might be less than 0 if we over-committed, and we
	 * must use a signed comparison of space and resid.  On the other
	 * hand, a negative resid causes us to loop sending 0-length
	 * segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td != NULL)
		td->td_ru.ru_msgsnd++;
	if (control != NULL)
		clen = control->m_len;

	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

restart:
	do {
		SOCKBUF_LOCK(&so->so_snd);
		if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = EPIPE;
			goto release;
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0)) {
					SOCKBUF_UNLOCK(&so->so_snd);
					error = ENOTCONN;
					goto release;
				}
			} else if (addr == NULL) {
				SOCKBUF_UNLOCK(&so->so_snd);
				if (so->so_proto->pr_flags & PR_CONNREQUIRED)
					error = ENOTCONN;
				else
					error = EDESTADDRREQ;
				goto release;
			}
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = EMSGSIZE;
			goto release;
		}
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO)) {
				SOCKBUF_UNLOCK(&so->so_snd);
				error = EWOULDBLOCK;
				goto release;
			}
			error = sbwait(&so->so_snd);
			SOCKBUF_UNLOCK(&so->so_snd);
			if (error)
				goto release;
			goto restart;
		}
		SOCKBUF_UNLOCK(&so->so_snd);
		space -= clen;
		do {
			if (uio == NULL) {
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else {
#ifdef SOCKET_SEND_COW
				error = sosend_copyin(uio, &top, atomic,
				    &space, flags);
				if (error != 0)
					goto release;
#else
				/*
				 * Copy the data from userland into a mbuf
				 * chain.  If no data is to be copied in,
				 * a single empty mbuf is returned.
				 */
				top = m_uiotombuf(uio, M_WAITOK, space,
				    (atomic ? max_hdr : 0),
				    (atomic ? M_PKTHDR : 0) |
				    ((flags & MSG_EOR) ? M_EOR : 0));
				if (top == NULL) {
					error = EFAULT; /* only possible error */
					goto release;
				}
				space -= resid - uio->uio_resid;
#endif /* SOCKET_SEND_COW */
				resid = uio->uio_resid;
			}
			if (dontroute) {
				SOCK_LOCK(so);
				so->so_options |= SO_DONTROUTE;
				SOCK_UNLOCK(so);
			}
			/*
			 * XXX all the SBS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We
			 * could probably recheck again inside the locking
			 * protection here, but there are probably other
			 * places that this also happens.  We must rethink
			 * this.
			 */
			VNET_SO_ASSERT(so);
			error = (*so->so_proto->pr_usrreqs->pru_send)(so,
			    (flags & MSG_OOB) ? PRUS_OOB :
			/*
			 * If the user set MSG_EOF, the protocol understands
			 * this flag and nothing left to send then use
			 * PRU_SEND_EOF instead of PRU_SEND.
			 */
			    ((flags & MSG_EOF) &&
			     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			     (resid <= 0)) ?
				PRUS_EOF :
			/* If there is more to send set PRUS_MORETOCOME. */
			    (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
			    top, addr, control, td);
			if (dontroute) {
				SOCK_LOCK(so);
				so->so_options &= ~SO_DONTROUTE;
				SOCK_UNLOCK(so);
			}
			clen = 0;
			control = NULL;
			top = NULL;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	sbunlock(&so->so_snd);
out:
	if (top != NULL)
		m_freem(top);
	if (control != NULL)
		m_freem(control);
	return (error);
}

int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	int error;

	CURVNET_SET(so->so_vnet);
	error = so->so_proto->pr_usrreqs->pru_sosend(so, addr, uio, top,
	    control, flags, td);
	CURVNET_RESTORE();
	return (error);
}

/*
 * The part of soreceive() that implements reading non-inline out-of-band
 * data from a socket.  For more complete comments, see soreceive(), from
 * which this code originated.
 *
 * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is
 * unable to return an mbuf chain to the caller.
 */
static int
soreceive_rcvoob(struct socket *so, struct uio *uio, int flags)
{
	struct protosw *pr = so->so_proto;
	struct mbuf *m;
	int error;

	KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0"));
	VNET_SO_ASSERT(so);

	m = m_get(M_WAITOK, MT_DATA);
	error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
	if (error)
		goto bad;
	do {
#ifdef SOCKET_RECV_PFLIP
		if (so_zero_copy_receive) {
			int disposable;

			if ((m->m_flags & M_EXT)
			    && (m->m_ext.ext_type == EXT_DISPOSABLE))
				disposable = 1;
			else
				disposable = 0;

			error = uiomoveco(mtod(m, void *),
			    min(uio->uio_resid, m->m_len), uio, disposable);
		} else
#endif /* SOCKET_RECV_PFLIP */
		error = uiomove(mtod(m, void *),
		    (int) min(uio->uio_resid, m->m_len), uio);
		m = m_free(m);
	} while (uio->uio_resid && error == 0 && m);
bad:
	if (m != NULL)
		m_freem(m);
	return (error);
}

/*
 * Following replacement or removal of the first mbuf on the first mbuf chain
 * of a socket buffer, push necessary state changes back into the socket
 * buffer so that other consumers see the values consistently.  'nextrecord'
 * is the caller's locally stored value of the original value of
 * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes.
 * NOTE: 'nextrecord' may be NULL.
 */
static __inline void
sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord)
{

	SOCKBUF_LOCK_ASSERT(sb);
	/*
	 * First, update for the new value of nextrecord.  If necessary, make
	 * it the first record.
	 */
	if (sb->sb_mb != NULL)
		sb->sb_mb->m_nextpkt = nextrecord;
	else
		sb->sb_mb = nextrecord;

	/*
	 * Now update any dependent socket buffer fields to reflect the new
	 * state.  This is an expanded inline of SB_EMPTY_FIXUP(), with the
	 * addition of a second clause that takes care of the case where
	 * sb_mb has been updated, but remains the last record.
	 */
	if (sb->sb_mb == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (sb->sb_mb->m_nextpkt == NULL)
		sb->sb_lastrecord = sb->sb_mb;
}

/*
 * Implement receive operations on a socket.  We depend on the way that
 * records are added to the sockbuf by sbappend.  In particular, each record
 * (mbufs linked through m_next) must begin with an address if the protocol
 * so specifies, followed by an optional mbuf or mbufs containing ancillary
 * data, and then zero or more mbufs of data.  In order to allow parallelism
 * between network receive and copying to user space, as well as avoid
 * sleeping with a mutex held, we release the socket buffer mutex during the
 * user space copy.  Although the sockbuf is locked, new data may still be
 * appended, and thus we must maintain consistency of the sockbuf during that
 * time.
 *
 * The caller may receive the data as a single mbuf chain by supplying an
 * mbuf **mp0 for use in returning the chain.  The uio is then used only for
 * the count in uio_resid.
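 *
 * An illustrative sketch (not part of the original comments) of receiving
 * into an mbuf chain via mp0; 'so' and full uio setup are assumed:
 *
 *	struct uio auio;
 *	struct mbuf *m = NULL;
 *	int error;
 *
 *	bzero(&auio, sizeof(auio));
 *	auio.uio_resid = 4096;		(upper bound on bytes to receive)
 *	error = soreceive(so, NULL, &auio, &m, NULL, NULL);
 *	(on success the data is returned as the chain 'm', not copied out)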
1530 soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio,
1531 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
1533 struct mbuf *m, **mp;
1534 int flags, error, offset;
1536 struct protosw *pr = so->so_proto;
1537 struct mbuf *nextrecord;
1539 ssize_t orig_resid = uio->uio_resid;
1544 if (controlp != NULL)
1547 flags = *flagsp &~ MSG_EOR;
1550 if (flags & MSG_OOB)
1551 return (soreceive_rcvoob(so, uio, flags));
1554 if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING)
1555 && uio->uio_resid) {
1557 (*pr->pr_usrreqs->pru_rcvd)(so, 0);
1560 error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
1565 SOCKBUF_LOCK(&so->so_rcv);
1566 m = so->so_rcv.sb_mb;
1568 * If we have less data than requested, block awaiting more (subject
1569 * to any timeout) if:
1570 * 1. the current count is less than the low water mark, or
1571 * 2. MSG_DONTWAIT is not set
1573 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
1574 so->so_rcv.sb_cc < uio->uio_resid) &&
1575 so->so_rcv.sb_cc < so->so_rcv.sb_lowat &&
1576 m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
1577 KASSERT(m != NULL || !so->so_rcv.sb_cc,
1578 ("receive: m == %p so->so_rcv.sb_cc == %u",
1579 m, so->so_rcv.sb_cc));
1583 error = so->so_error;
1584 if ((flags & MSG_PEEK) == 0)
1586 SOCKBUF_UNLOCK(&so->so_rcv);
1589 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1590 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1592 SOCKBUF_UNLOCK(&so->so_rcv);
1597 for (; m != NULL; m = m->m_next)
1598 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
1599 m = so->so_rcv.sb_mb;
1602 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
1603 (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
1604 SOCKBUF_UNLOCK(&so->so_rcv);
1608 if (uio->uio_resid == 0) {
1609 SOCKBUF_UNLOCK(&so->so_rcv);
1612 if ((so->so_state & SS_NBIO) ||
1613 (flags & (MSG_DONTWAIT|MSG_NBIO))) {
1614 SOCKBUF_UNLOCK(&so->so_rcv);
1615 error = EWOULDBLOCK;
1618 SBLASTRECORDCHK(&so->so_rcv);
1619 SBLASTMBUFCHK(&so->so_rcv);
1620 error = sbwait(&so->so_rcv);
1621 SOCKBUF_UNLOCK(&so->so_rcv);
1628 * From this point onward, we maintain 'nextrecord' as a cache of the
1629 * pointer to the next record in the socket buffer. We must keep the
1630 * various socket buffer pointers and local stack versions of the
1631 * pointers in sync, pushing out modifications before dropping the
1632 * socket buffer mutex, and re-reading them when picking it up.
1634 * Otherwise, we will race with the network stack appending new data
1635 * or records onto the socket buffer by using inconsistent/stale
1636 * versions of the field, possibly resulting in socket buffer
1639 * By holding the high-level sblock(), we prevent simultaneous
1640 * readers from pulling off the front of the socket buffer.
1642 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1644 uio->uio_td->td_ru.ru_msgrcv++;
1645 KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
1646 SBLASTRECORDCHK(&so->so_rcv);
1647 SBLASTMBUFCHK(&so->so_rcv);
1648 nextrecord = m->m_nextpkt;
1649 if (pr->pr_flags & PR_ADDR) {
1650 KASSERT(m->m_type == MT_SONAME,
1651 ("m->m_type == %d", m->m_type));
1654 *psa = sodupsockaddr(mtod(m, struct sockaddr *),
1656 if (flags & MSG_PEEK) {
1659 sbfree(&so->so_rcv, m);
1660 so->so_rcv.sb_mb = m_free(m);
1661 m = so->so_rcv.sb_mb;
1662 sockbuf_pushsync(&so->so_rcv, nextrecord);
1667 * Process one or more MT_CONTROL mbufs present before any data mbufs
1668 * in the first mbuf chain on the socket buffer. If MSG_PEEK, we
1669 * just copy the data; if !MSG_PEEK, we call into the protocol to
1670 * perform externalization (or freeing if controlp == NULL).
1672 if (m != NULL && m->m_type == MT_CONTROL) {
1673 struct mbuf *cm = NULL, *cmn;
1674 struct mbuf **cme = &cm;
1677 if (flags & MSG_PEEK) {
1678 if (controlp != NULL) {
1679 *controlp = m_copy(m, 0, m->m_len);
1680 controlp = &(*controlp)->m_next;
1684 sbfree(&so->so_rcv, m);
1685 so->so_rcv.sb_mb = m->m_next;
1688 cme = &(*cme)->m_next;
1689 m = so->so_rcv.sb_mb;
1691 } while (m != NULL && m->m_type == MT_CONTROL);
1692 if ((flags & MSG_PEEK) == 0)
1693 sockbuf_pushsync(&so->so_rcv, nextrecord);
1694 while (cm != NULL) {
1697 if (pr->pr_domain->dom_externalize != NULL) {
1698 SOCKBUF_UNLOCK(&so->so_rcv);
1700 error = (*pr->pr_domain->dom_externalize)
1702 SOCKBUF_LOCK(&so->so_rcv);
1703 } else if (controlp != NULL)
1707 if (controlp != NULL) {
1709 while (*controlp != NULL)
1710 controlp = &(*controlp)->m_next;
1715 nextrecord = so->so_rcv.sb_mb->m_nextpkt;
1717 nextrecord = so->so_rcv.sb_mb;
1721 if ((flags & MSG_PEEK) == 0) {
1722 KASSERT(m->m_nextpkt == nextrecord,
1723 ("soreceive: post-control, nextrecord !sync"));
1724 if (nextrecord == NULL) {
1725 KASSERT(so->so_rcv.sb_mb == m,
1726 ("soreceive: post-control, sb_mb!=m"));
1727 KASSERT(so->so_rcv.sb_lastrecord == m,
1728 ("soreceive: post-control, lastrecord!=m"));
1732 if (type == MT_OOBDATA)
1735 if ((flags & MSG_PEEK) == 0) {
1736 KASSERT(so->so_rcv.sb_mb == nextrecord,
1737 ("soreceive: sb_mb != nextrecord"));
1738 if (so->so_rcv.sb_mb == NULL) {
1739 KASSERT(so->so_rcv.sb_lastrecord == NULL,
1740 ("soreceive: sb_lastercord != NULL"));
1744 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1745 SBLASTRECORDCHK(&so->so_rcv);
1746 SBLASTMBUFCHK(&so->so_rcv);
1749 * Now continue to read any data mbufs off of the head of the socket
1750 * buffer until the read request is satisfied. Note that 'type' is
1751 * used to store the type of any mbuf reads that have happened so far
1752 * such that soreceive() can stop reading if the type changes, which
1753 * causes soreceive() to return only one of regular data and inline
1754 * out-of-band data in a single socket receive operation.
1758 while (m != NULL && uio->uio_resid > 0 && error == 0) {
1760 * If the type of mbuf has changed since the last mbuf
1761 * examined ('type'), end the receive operation.
1763 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1764 if (m->m_type == MT_OOBDATA || m->m_type == MT_CONTROL) {
1765 if (type != m->m_type)
1767 } else if (type == MT_OOBDATA)
1770 KASSERT(m->m_type == MT_DATA,
1771 ("m->m_type == %d", m->m_type));
1772 so->so_rcv.sb_state &= ~SBS_RCVATMARK;
1773 len = uio->uio_resid;
1774 if (so->so_oobmark && len > so->so_oobmark - offset)
1775 len = so->so_oobmark - offset;
1776 if (len > m->m_len - moff)
1777 len = m->m_len - moff;
1779 * If mp is set, just pass back the mbufs. Otherwise copy
1780 * them out via the uio, then free. Sockbuf must be
1781 * consistent here (points to current mbuf, it points to next
1782 * record) when we drop priority; we must note any additions
1783 * to the sockbuf when we block interrupts again.
1786 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1787 SBLASTRECORDCHK(&so->so_rcv);
1788 SBLASTMBUFCHK(&so->so_rcv);
1789 SOCKBUF_UNLOCK(&so->so_rcv);
1790 #ifdef SOCKET_RECV_PFLIP
1791 if (so_zero_copy_receive) {
1794 if ((m->m_flags & M_EXT)
1795 && (m->m_ext.ext_type == EXT_DISPOSABLE))
1800 error = uiomoveco(mtod(m, char *) + moff,
1801 (int)len, uio, disposable);
1803 #endif /* SOCKET_RECV_PFLIP */
1804 error = uiomove(mtod(m, char *) + moff, (int)len, uio);
1805 SOCKBUF_LOCK(&so->so_rcv);
1808 * The MT_SONAME mbuf has already been removed
1809 * from the record, so it is necessary to
1810 * remove the data mbufs, if any, to preserve
1811 * the invariant in the case of PR_ADDR that
1812 * requires MT_SONAME mbufs at the head of
1815 if (m && pr->pr_flags & PR_ATOMIC &&
1816 ((flags & MSG_PEEK) == 0))
1817 (void)sbdroprecord_locked(&so->so_rcv);
1818 SOCKBUF_UNLOCK(&so->so_rcv);
1822 uio->uio_resid -= len;
1823 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1824 if (len == m->m_len - moff) {
1825 if (m->m_flags & M_EOR)
1827 if (flags & MSG_PEEK) {
1831 nextrecord = m->m_nextpkt;
1832 sbfree(&so->so_rcv, m);
1836 so->so_rcv.sb_mb = m = m->m_next;
1839 so->so_rcv.sb_mb = m_free(m);
1840 m = so->so_rcv.sb_mb;
1842 sockbuf_pushsync(&so->so_rcv, nextrecord);
1843 SBLASTRECORDCHK(&so->so_rcv);
1844 SBLASTMBUFCHK(&so->so_rcv);
1847 if (flags & MSG_PEEK)
1853 if (flags & MSG_DONTWAIT)
1854 copy_flag = M_NOWAIT;
1857 if (copy_flag == M_WAITOK)
1858 SOCKBUF_UNLOCK(&so->so_rcv);
1859 *mp = m_copym(m, 0, len, copy_flag);
1860 if (copy_flag == M_WAITOK)
1861 SOCKBUF_LOCK(&so->so_rcv);
1864 * m_copym() couldn't
1865 * allocate an mbuf. Adjust
1866 * uio_resid back (it was
1867 * adjusted down by len
1868 * bytes, which we didn't end
1869 * up "copying" over).
1871 uio->uio_resid += len;
1877 so->so_rcv.sb_cc -= len;
1880 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1881 if (so->so_oobmark) {
1882 if ((flags & MSG_PEEK) == 0) {
1883 so->so_oobmark -= len;
1884 if (so->so_oobmark == 0) {
1885 so->so_rcv.sb_state |= SBS_RCVATMARK;
1890 if (offset == so->so_oobmark)
1894 if (flags & MSG_EOR)
1897 * If the MSG_WAITALL flag is set (for non-atomic socket), we
1898 * must not quit until "uio->uio_resid == 0" or an error
1899 * termination. If a signal/timeout occurs, return with a
1900 * short count but without error. Keep sockbuf locked
1901 * against other readers.
1903 while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
1904 !sosendallatonce(so) && nextrecord == NULL) {
1905 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1907 so->so_rcv.sb_state & SBS_CANTRCVMORE)
1910 * Notify the protocol that some data has been
1911 * drained before blocking.
1913 if (pr->pr_flags & PR_WANTRCVD) {
1914 SOCKBUF_UNLOCK(&so->so_rcv);
1916 (*pr->pr_usrreqs->pru_rcvd)(so, flags);
1917 SOCKBUF_LOCK(&so->so_rcv);
1919 SBLASTRECORDCHK(&so->so_rcv);
1920 SBLASTMBUFCHK(&so->so_rcv);
1922 * We could receive some data while was notifying
1923 * the protocol. Skip blocking in this case.
1925 if (so->so_rcv.sb_mb == NULL) {
1926 error = sbwait(&so->so_rcv);
1928 SOCKBUF_UNLOCK(&so->so_rcv);
1932 m = so->so_rcv.sb_mb;
1934 nextrecord = m->m_nextpkt;
1938 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1939 if (m != NULL && pr->pr_flags & PR_ATOMIC) {
1941 if ((flags & MSG_PEEK) == 0)
1942 (void) sbdroprecord_locked(&so->so_rcv);
1944 if ((flags & MSG_PEEK) == 0) {
1947 * First part is an inline SB_EMPTY_FIXUP(). Second
1948 * part makes sure sb_lastrecord is up-to-date if
1949 * there is still data in the socket buffer.
1951 so->so_rcv.sb_mb = nextrecord;
1952 if (so->so_rcv.sb_mb == NULL) {
1953 so->so_rcv.sb_mbtail = NULL;
1954 so->so_rcv.sb_lastrecord = NULL;
1955 } else if (nextrecord->m_nextpkt == NULL)
1956 so->so_rcv.sb_lastrecord = nextrecord;
1958 SBLASTRECORDCHK(&so->so_rcv);
1959 SBLASTMBUFCHK(&so->so_rcv);
1961 * If soreceive() is being done from the socket callback,
1962 * then don't need to generate ACK to peer to update window,
1963 * since ACK will be generated on return to TCP.
1965 if (!(flags & MSG_SOCALLBCK) &&
1966 (pr->pr_flags & PR_WANTRCVD)) {
1967 SOCKBUF_UNLOCK(&so->so_rcv);
1969 (*pr->pr_usrreqs->pru_rcvd)(so, flags);
1970 SOCKBUF_LOCK(&so->so_rcv);
1973 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1974 if (orig_resid == uio->uio_resid && orig_resid &&
1975 (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
1976 SOCKBUF_UNLOCK(&so->so_rcv);
1979 SOCKBUF_UNLOCK(&so->so_rcv);
1984 sbunlock(&so->so_rcv);
1989 * Optimized version of soreceive() for stream (TCP) sockets.
1990 * XXXAO: (MSG_WAITALL | MSG_PEEK) isn't properly handled.
1993 soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio,
1994 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
1996 int len = 0, error = 0, flags, oresid;
1998 struct mbuf *m, *n = NULL;
2000 /* We only do stream sockets. */
2001 if (so->so_type != SOCK_STREAM)
2005 if (controlp != NULL)
2008 flags = *flagsp &~ MSG_EOR;
2011 if (flags & MSG_OOB)
2012 return (soreceive_rcvoob(so, uio, flags));
2018 /* Prevent other readers from entering the socket. */
2019 error = sblock(sb, SBLOCKWAIT(flags));
2024 /* Easy one, no space to copyout anything. */
2025 if (uio->uio_resid == 0) {
2029 oresid = uio->uio_resid;
2031 /* We will never ever get anything unless we are or were connected. */
2032 if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {
2038 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2040 /* Abort if socket has reported problems. */
2044 if (oresid > uio->uio_resid)
2046 error = so->so_error;
2047 if (!(flags & MSG_PEEK))
2052 /* Door is closed. Deliver what is left, if any. */
2053 if (sb->sb_state & SBS_CANTRCVMORE) {
2060 /* Socket buffer is empty and we shall not block. */
2061 if (sb->sb_cc == 0 &&
2062 ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) {
2067 /* Socket buffer got some data that we shall deliver now. */
2068 if (sb->sb_cc > 0 && !(flags & MSG_WAITALL) &&
2069 ((sb->sb_flags & SS_NBIO) ||
2070 (flags & (MSG_DONTWAIT|MSG_NBIO)) ||
2071 sb->sb_cc >= sb->sb_lowat ||
2072 sb->sb_cc >= uio->uio_resid ||
2073 sb->sb_cc >= sb->sb_hiwat) ) {
2077 /* On MSG_WAITALL we must wait until all data or error arrives. */
2078 if ((flags & MSG_WAITALL) &&
2079 (sb->sb_cc >= uio->uio_resid || sb->sb_cc >= sb->sb_hiwat))
2083 * Wait and block until (more) data comes in.
2084 * NB: Drops the sockbuf lock during wait.
2092 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2093 KASSERT(sb->sb_cc > 0, ("%s: sockbuf empty", __func__));
2094 KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__));
2098 uio->uio_td->td_ru.ru_msgrcv++;
2100 /* Fill uio until full or current end of socket buffer is reached. */
2101 len = min(uio->uio_resid, sb->sb_cc);
2103 /* Dequeue as many mbufs as possible. */
2104 if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) {
2108 m_cat(*mp0, sb->sb_mb);
2110 m != NULL && m->m_len <= len;
2113 uio->uio_resid -= m->m_len;
2119 sb->sb_lastrecord = sb->sb_mb;
2120 if (sb->sb_mb == NULL)
2123 /* Copy the remainder. */
2125 KASSERT(sb->sb_mb != NULL,
2126 ("%s: len > 0 && sb->sb_mb empty", __func__));
2128 m = m_copym(sb->sb_mb, 0, len, M_NOWAIT);
2130 len = 0; /* Don't flush data from sockbuf. */
2132 uio->uio_resid -= len;
2143 /* NB: Must unlock socket buffer as uiomove may sleep. */
2145 error = m_mbuftouio(uio, sb->sb_mb, len);
2150 SBLASTRECORDCHK(sb);
2154 * Remove the delivered data from the socket buffer unless we
2155 * were only peeking.
2157 if (!(flags & MSG_PEEK)) {
2159 sbdrop_locked(sb, len);
2161 /* Notify protocol that we drained some data. */
2162 if ((so->so_proto->pr_flags & PR_WANTRCVD) &&
2163 (((flags & MSG_WAITALL) && uio->uio_resid > 0) ||
2164 !(flags & MSG_SOCALLBCK))) {
2167 (*so->so_proto->pr_usrreqs->pru_rcvd)(so, flags);
2173 * For MSG_WAITALL we may have to loop again and wait for
2174 * more data to come in.
2176 if ((flags & MSG_WAITALL) && uio->uio_resid > 0)
2179 SOCKBUF_LOCK_ASSERT(sb);
2180 SBLASTRECORDCHK(sb);
2188 * Optimized version of soreceive() for simple datagram cases from userspace.
2189 * Unlike in the stream case, we're able to drop a datagram if copyout()
2190 * fails, and because we handle datagrams atomically, we don't need to use a
2191 * sleep lock to prevent I/O interlacing.
2194 soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio,
2195 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2197 struct mbuf *m, *m2;
2200 struct protosw *pr = so->so_proto;
2201 struct mbuf *nextrecord;
2205 if (controlp != NULL)
2208 flags = *flagsp &~ MSG_EOR;
2213 * For any complicated cases, fall back to the full
2214 * soreceive_generic().
2216 if (mp0 != NULL || (flags & MSG_PEEK) || (flags & MSG_OOB))
2217 return (soreceive_generic(so, psa, uio, mp0, controlp,
2221 * Enforce restrictions on use.
2223 KASSERT((pr->pr_flags & PR_WANTRCVD) == 0,
2224 ("soreceive_dgram: wantrcvd"));
2225 KASSERT(pr->pr_flags & PR_ATOMIC, ("soreceive_dgram: !atomic"));
2226 KASSERT((so->so_rcv.sb_state & SBS_RCVATMARK) == 0,
2227 ("soreceive_dgram: SBS_RCVATMARK"));
2228 KASSERT((so->so_proto->pr_flags & PR_CONNREQUIRED) == 0,
2229 ("soreceive_dgram: P_CONNREQUIRED"));
2232 * Loop blocking while waiting for a datagram.
2234 SOCKBUF_LOCK(&so->so_rcv);
2235 while ((m = so->so_rcv.sb_mb) == NULL) {
2236 KASSERT(so->so_rcv.sb_cc == 0,
2237 ("soreceive_dgram: sb_mb NULL but sb_cc %u",
2240 error = so->so_error;
2242 SOCKBUF_UNLOCK(&so->so_rcv);
2245 if (so->so_rcv.sb_state & SBS_CANTRCVMORE ||
2246 uio->uio_resid == 0) {
2247 SOCKBUF_UNLOCK(&so->so_rcv);
2250 if ((so->so_state & SS_NBIO) ||
2251 (flags & (MSG_DONTWAIT|MSG_NBIO))) {
2252 SOCKBUF_UNLOCK(&so->so_rcv);
2253 return (EWOULDBLOCK);
2255 SBLASTRECORDCHK(&so->so_rcv);
2256 SBLASTMBUFCHK(&so->so_rcv);
2257 error = sbwait(&so->so_rcv);
2259 SOCKBUF_UNLOCK(&so->so_rcv);
2263 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2266 uio->uio_td->td_ru.ru_msgrcv++;
2267 SBLASTRECORDCHK(&so->so_rcv);
2268 SBLASTMBUFCHK(&so->so_rcv);
2269 nextrecord = m->m_nextpkt;
2270 if (nextrecord == NULL) {
2271 KASSERT(so->so_rcv.sb_lastrecord == m,
2272 ("soreceive_dgram: lastrecord != m"));
2275 KASSERT(so->so_rcv.sb_mb->m_nextpkt == nextrecord,
2276 ("soreceive_dgram: m_nextpkt != nextrecord"));
2279 * Pull 'm' and its chain off the front of the packet queue.
2281 so->so_rcv.sb_mb = NULL;
2282 sockbuf_pushsync(&so->so_rcv, nextrecord);
2285 * Walk 'm's chain and free that many bytes from the socket buffer.
2287 for (m2 = m; m2 != NULL; m2 = m2->m_next)
2288 sbfree(&so->so_rcv, m2);
2291 * Do a few last checks before we let go of the lock.
2293 SBLASTRECORDCHK(&so->so_rcv);
2294 SBLASTMBUFCHK(&so->so_rcv);
2295 SOCKBUF_UNLOCK(&so->so_rcv);
2297 if (pr->pr_flags & PR_ADDR) {
2298 KASSERT(m->m_type == MT_SONAME,
2299 ("m->m_type == %d", m->m_type));
2301 *psa = sodupsockaddr(mtod(m, struct sockaddr *),
2306 /* XXXRW: Can this happen? */
2311 	 * Packet to copyout() is now in 'm' and it is disconnected from the socket buffer.
2314 * Process one or more MT_CONTROL mbufs present before any data mbufs
2315 * in the first mbuf chain on the socket buffer. We call into the
2316 	 * protocol to perform externalization (or freeing if controlp == NULL).
2319 if (m->m_type == MT_CONTROL) {
2320 struct mbuf *cm = NULL, *cmn;
2321 struct mbuf **cme = &cm;
2327 cme = &(*cme)->m_next;
2329 } while (m != NULL && m->m_type == MT_CONTROL);
2330 while (cm != NULL) {
2333 if (pr->pr_domain->dom_externalize != NULL) {
2334 error = (*pr->pr_domain->dom_externalize)
2336 } else if (controlp != NULL)
2340 if (controlp != NULL) {
2341 while (*controlp != NULL)
2342 controlp = &(*controlp)->m_next;
2347 KASSERT(m->m_type == MT_DATA, ("soreceive_dgram: !data"));
2349 while (m != NULL && uio->uio_resid > 0) {
2350 len = uio->uio_resid;
2353 error = uiomove(mtod(m, char *), (int)len, uio);
2358 if (len == m->m_len)
2374 soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
2375 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2379 CURVNET_SET(so->so_vnet);
2380 error = (so->so_proto->pr_usrreqs->pru_soreceive(so, psa, uio, mp0,
2387 soshutdown(struct socket *so, int how)
2389 struct protosw *pr = so->so_proto;
2392 if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
2395 CURVNET_SET(so->so_vnet);
2396 if (pr->pr_usrreqs->pru_flush != NULL)
2397 (*pr->pr_usrreqs->pru_flush)(so, how);
2400 if (how != SHUT_RD) {
2401 error = (*pr->pr_usrreqs->pru_shutdown)(so);
2410 sorflush(struct socket *so)
2412 struct sockbuf *sb = &so->so_rcv;
2413 struct protosw *pr = so->so_proto;
2419 * In order to avoid calling dom_dispose with the socket buffer mutex
2420 * held, and in order to generally avoid holding the lock for a long
2421 * time, we make a copy of the socket buffer and clear the original
2422 * (except locks, state). The new socket buffer copy won't have
2423 * initialized locks so we can only call routines that won't use or
2424 * assert those locks.
2426 * Dislodge threads currently blocked in receive and wait to acquire
2427 * a lock against other simultaneous readers before clearing the
2428 * socket buffer. Don't let our acquire be interrupted by a signal
2429 	 * despite any existing socket disposition on interruptible waiting.
2432 (void) sblock(sb, SBL_WAIT | SBL_NOINTR);
2435 * Invalidate/clear most of the sockbuf structure, but leave selinfo
2436 * and mutex data unchanged.
2439 bzero(&asb, offsetof(struct sockbuf, sb_startzero));
2440 bcopy(&sb->sb_startzero, &asb.sb_startzero,
2441 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
2442 bzero(&sb->sb_startzero,
2443 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
2448 * Dispose of special rights and flush the socket buffer. Don't call
2449 * any unsafe routines (that rely on locks being initialized) on asb.
2451 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
2452 (*pr->pr_domain->dom_dispose)(asb.sb_mb);
2453 sbrelease_internal(&asb, so);
2457 * Perhaps this routine, and sooptcopyout(), below, ought to come in an
2458 * additional variant to handle the case where the option value needs to be
2459 * some kind of integer, but not a specific size. In addition to their use
2460  * here, these functions are also called by the protocol-level pr_ctloutput() routines.
2464 sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
2469 * If the user gives us more than we wanted, we ignore it, but if we
2470 * don't get the minimum length the caller wants, we return EINVAL.
2471 * On success, sopt->sopt_valsize is set to however much we actually
2474 if ((valsize = sopt->sopt_valsize) < minlen)
2477 sopt->sopt_valsize = valsize = len;
2479 if (sopt->sopt_td != NULL)
2480 return (copyin(sopt->sopt_val, buf, valsize));
2482 bcopy(sopt->sopt_val, buf, valsize);
2487 * Kernel version of setsockopt(2).
2489 * XXX: optlen is size_t, not socklen_t
2492 so_setsockopt(struct socket *so, int level, int optname, void *optval,
2495 struct sockopt sopt;
2497 sopt.sopt_level = level;
2498 sopt.sopt_name = optname;
2499 sopt.sopt_dir = SOPT_SET;
2500 sopt.sopt_val = optval;
2501 sopt.sopt_valsize = optlen;
2502 sopt.sopt_td = NULL;
2503 return (sosetopt(so, &sopt));
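/*
 * A minimal usage sketch, assuming "so" was obtained from socreate();
 * example_enable_reuseaddr() is hypothetical.  In-kernel consumers can set
 * an option this way without building a struct sockopt by hand.
 */
static int
example_enable_reuseaddr(struct socket *so)
{
	int on = 1;

	return (so_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)));
}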
2507 sosetopt(struct socket *so, struct sockopt *sopt)
2518 CURVNET_SET(so->so_vnet);
2520 if (sopt->sopt_level != SOL_SOCKET) {
2521 if (so->so_proto->pr_ctloutput != NULL) {
2522 error = (*so->so_proto->pr_ctloutput)(so, sopt);
2526 error = ENOPROTOOPT;
2528 switch (sopt->sopt_name) {
2530 case SO_ACCEPTFILTER:
2531 error = do_setopt_accept_filter(so, sopt);
2537 error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
2542 so->so_linger = l.l_linger;
2544 so->so_options |= SO_LINGER;
2546 so->so_options &= ~SO_LINGER;
2553 case SO_USELOOPBACK:
2563 error = sooptcopyin(sopt, &optval, sizeof optval,
2569 so->so_options |= sopt->sopt_name;
2571 so->so_options &= ~sopt->sopt_name;
2576 error = sooptcopyin(sopt, &optval, sizeof optval,
2581 if (optval < 0 || optval >= rt_numfibs) {
2585 if (((so->so_proto->pr_domain->dom_family == PF_INET) ||
2586 (so->so_proto->pr_domain->dom_family == PF_INET6) ||
2587 (so->so_proto->pr_domain->dom_family == PF_ROUTE)))
2588 so->so_fibnum = optval;
2593 case SO_USER_COOKIE:
2594 error = sooptcopyin(sopt, &val32, sizeof val32,
2598 so->so_user_cookie = val32;
2605 error = sooptcopyin(sopt, &optval, sizeof optval,
2611 * Values < 1 make no sense for any of these options,
2619 switch (sopt->sopt_name) {
2622 if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
2623 &so->so_snd : &so->so_rcv, (u_long)optval,
2624 so, curthread) == 0) {
2628 (sopt->sopt_name == SO_SNDBUF ? &so->so_snd :
2629 &so->so_rcv)->sb_flags &= ~SB_AUTOSIZE;
2633 			 * Make sure the low-water is never greater than the high-water.
2637 SOCKBUF_LOCK(&so->so_snd);
2638 so->so_snd.sb_lowat =
2639 (optval > so->so_snd.sb_hiwat) ?
2640 so->so_snd.sb_hiwat : optval;
2641 SOCKBUF_UNLOCK(&so->so_snd);
2644 SOCKBUF_LOCK(&so->so_rcv);
2645 so->so_rcv.sb_lowat =
2646 (optval > so->so_rcv.sb_hiwat) ?
2647 so->so_rcv.sb_hiwat : optval;
2648 SOCKBUF_UNLOCK(&so->so_rcv);
2655 #ifdef COMPAT_FREEBSD32
2656 if (SV_CURPROC_FLAG(SV_ILP32)) {
2657 struct timeval32 tv32;
2659 error = sooptcopyin(sopt, &tv32, sizeof tv32,
2661 CP(tv32, tv, tv_sec);
2662 CP(tv32, tv, tv_usec);
2665 error = sooptcopyin(sopt, &tv, sizeof tv,
2670 /* assert(hz > 0); */
2671 if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
2672 tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
2676 /* assert(tick > 0); */
2677 /* assert(ULONG_MAX - INT_MAX >= 1000000); */
2678 val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
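			/*
			 * For example, with hz = 1000 (so tick = 1000), a
			 * 2.5 second timeout becomes
			 * 2 * 1000 + 500000 / 1000 = 2500 ticks.
			 */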
2679 if (val > INT_MAX) {
2683 if (val == 0 && tv.tv_usec != 0)
2686 switch (sopt->sopt_name) {
2688 so->so_snd.sb_timeo = val;
2691 so->so_rcv.sb_timeo = val;
2698 error = sooptcopyin(sopt, &extmac, sizeof extmac,
2702 error = mac_setsockopt_label(sopt->sopt_td->td_ucred,
2710 error = ENOPROTOOPT;
2713 if (error == 0 && so->so_proto->pr_ctloutput != NULL)
2714 (void)(*so->so_proto->pr_ctloutput)(so, sopt);
2722 * Helper routine for getsockopt.
2725 sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
2733 * Documented get behavior is that we always return a value, possibly
2734 * truncated to fit in the user's buffer. Traditional behavior is
2735 * that we always tell the user precisely how much we copied, rather
2736 * than something useful like the total amount we had available for
2737 * her. Note that this interface is not idempotent; the entire
2738 	 * answer must be generated ahead of time.
2740 valsize = min(len, sopt->sopt_valsize);
2741 sopt->sopt_valsize = valsize;
2742 if (sopt->sopt_val != NULL) {
2743 if (sopt->sopt_td != NULL)
2744 error = copyout(buf, sopt->sopt_val, valsize);
2746 bcopy(buf, sopt->sopt_val, valsize);
2752 sogetopt(struct socket *so, struct sockopt *sopt)
2761 CURVNET_SET(so->so_vnet);
2763 if (sopt->sopt_level != SOL_SOCKET) {
2764 if (so->so_proto->pr_ctloutput != NULL)
2765 error = (*so->so_proto->pr_ctloutput)(so, sopt);
2767 error = ENOPROTOOPT;
2771 switch (sopt->sopt_name) {
2773 case SO_ACCEPTFILTER:
2774 error = do_getopt_accept_filter(so, sopt);
2779 l.l_onoff = so->so_options & SO_LINGER;
2780 l.l_linger = so->so_linger;
2782 error = sooptcopyout(sopt, &l, sizeof l);
2785 case SO_USELOOPBACK:
2797 optval = so->so_options & sopt->sopt_name;
2799 error = sooptcopyout(sopt, &optval, sizeof optval);
2803 optval = so->so_type;
2807 optval = so->so_proto->pr_protocol;
2812 optval = so->so_error;
2818 optval = so->so_snd.sb_hiwat;
2822 optval = so->so_rcv.sb_hiwat;
2826 optval = so->so_snd.sb_lowat;
2830 optval = so->so_rcv.sb_lowat;
2835 optval = (sopt->sopt_name == SO_SNDTIMEO ?
2836 so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
2838 tv.tv_sec = optval / hz;
2839 tv.tv_usec = (optval % hz) * tick;
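		/*
		 * For example, with hz = 1000 a stored value of 2500 ticks
		 * converts back to tv_sec = 2 and
		 * tv_usec = 500 * 1000 = 500000.
		 */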
2840 #ifdef COMPAT_FREEBSD32
2841 if (SV_CURPROC_FLAG(SV_ILP32)) {
2842 struct timeval32 tv32;
2844 CP(tv, tv32, tv_sec);
2845 CP(tv, tv32, tv_usec);
2846 error = sooptcopyout(sopt, &tv32, sizeof tv32);
2849 error = sooptcopyout(sopt, &tv, sizeof tv);
2854 error = sooptcopyin(sopt, &extmac, sizeof(extmac),
2858 error = mac_getsockopt_label(sopt->sopt_td->td_ucred,
2862 error = sooptcopyout(sopt, &extmac, sizeof extmac);
2870 error = sooptcopyin(sopt, &extmac, sizeof(extmac),
2874 error = mac_getsockopt_peerlabel(
2875 sopt->sopt_td->td_ucred, so, &extmac);
2878 error = sooptcopyout(sopt, &extmac, sizeof extmac);
2884 case SO_LISTENQLIMIT:
2885 optval = so->so_qlimit;
2889 optval = so->so_qlen;
2892 case SO_LISTENINCQLEN:
2893 optval = so->so_incqlen;
2897 error = ENOPROTOOPT;
2909 soopt_getm(struct sockopt *sopt, struct mbuf **mp)
2911 struct mbuf *m, *m_prev;
2912 int sopt_size = sopt->sopt_valsize;
2914 MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA);
2917 if (sopt_size > MLEN) {
2918 MCLGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT);
2919 if ((m->m_flags & M_EXT) == 0) {
2923 m->m_len = min(MCLBYTES, sopt_size);
2925 m->m_len = min(MLEN, sopt_size);
2927 sopt_size -= m->m_len;
2932 MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA);
2937 if (sopt_size > MLEN) {
2938 MCLGET(m, sopt->sopt_td != NULL ? M_WAITOK :
2940 if ((m->m_flags & M_EXT) == 0) {
2945 m->m_len = min(MCLBYTES, sopt_size);
2947 m->m_len = min(MLEN, sopt_size);
2949 sopt_size -= m->m_len;
2957 soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
2959 struct mbuf *m0 = m;
2961 if (sopt->sopt_val == NULL)
2963 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
2964 if (sopt->sopt_td != NULL) {
2967 error = copyin(sopt->sopt_val, mtod(m, char *),
2974 bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
2975 sopt->sopt_valsize -= m->m_len;
2976 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
2979 	if (m != NULL) /* the chain should have been allocated with enough space by the caller */
2980 panic("ip6_sooptmcopyin");
2985 soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
2987 struct mbuf *m0 = m;
2990 if (sopt->sopt_val == NULL)
2992 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
2993 if (sopt->sopt_td != NULL) {
2996 error = copyout(mtod(m, char *), sopt->sopt_val,
3003 bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
3004 sopt->sopt_valsize -= m->m_len;
3005 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
3006 valsize += m->m_len;
3010 		/* the caller should supply a sufficiently large option buffer */
3014 sopt->sopt_valsize = valsize;
3019 * sohasoutofband(): protocol notifies socket layer of the arrival of new
3020 * out-of-band data, which will then notify socket consumers.
3023 sohasoutofband(struct socket *so)
3026 if (so->so_sigio != NULL)
3027 pgsigio(&so->so_sigio, SIGURG, 0);
3028 selwakeuppri(&so->so_rcv.sb_sel, PSOCK);
3032 sopoll(struct socket *so, int events, struct ucred *active_cred,
3037 	 * We do not need to set or assert curvnet as long as everyone uses sopoll_generic().
3040 return (so->so_proto->pr_usrreqs->pru_sopoll(so, events, active_cred,
3045 sopoll_generic(struct socket *so, int events, struct ucred *active_cred,
3050 SOCKBUF_LOCK(&so->so_snd);
3051 SOCKBUF_LOCK(&so->so_rcv);
3052 if (events & (POLLIN | POLLRDNORM))
3053 if (soreadabledata(so))
3054 revents |= events & (POLLIN | POLLRDNORM);
3056 if (events & (POLLOUT | POLLWRNORM))
3057 if (sowriteable(so))
3058 revents |= events & (POLLOUT | POLLWRNORM);
3060 if (events & (POLLPRI | POLLRDBAND))
3061 if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK))
3062 revents |= events & (POLLPRI | POLLRDBAND);
3064 if ((events & POLLINIGNEOF) == 0) {
3065 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
3066 revents |= events & (POLLIN | POLLRDNORM);
3067 if (so->so_snd.sb_state & SBS_CANTSENDMORE)
3073 if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
3074 selrecord(td, &so->so_rcv.sb_sel);
3075 so->so_rcv.sb_flags |= SB_SEL;
3078 if (events & (POLLOUT | POLLWRNORM)) {
3079 selrecord(td, &so->so_snd.sb_sel);
3080 so->so_snd.sb_flags |= SB_SEL;
3084 SOCKBUF_UNLOCK(&so->so_rcv);
3085 SOCKBUF_UNLOCK(&so->so_snd);
3090 soo_kqfilter(struct file *fp, struct knote *kn)
3092 struct socket *so = kn->kn_fp->f_data;
3095 switch (kn->kn_filter) {
3097 if (so->so_options & SO_ACCEPTCONN)
3098 kn->kn_fop = &solisten_filtops;
3100 kn->kn_fop = &soread_filtops;
3104 kn->kn_fop = &sowrite_filtops;
3112 knlist_add(&sb->sb_sel.si_note, kn, 1);
3113 sb->sb_flags |= SB_KNOTE;
3119 * Some routines that return EOPNOTSUPP for entry points that are not
3120 * supported by a protocol. Fill in as needed.
3123 pru_accept_notsupp(struct socket *so, struct sockaddr **nam)
3130 pru_attach_notsupp(struct socket *so, int proto, struct thread *td)
3137 pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
3144 pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
3151 pru_connect2_notsupp(struct socket *so1, struct socket *so2)
3158 pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data,
3159 struct ifnet *ifp, struct thread *td)
3166 pru_disconnect_notsupp(struct socket *so)
3173 pru_listen_notsupp(struct socket *so, int backlog, struct thread *td)
3180 pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam)
3187 pru_rcvd_notsupp(struct socket *so, int flags)
3194 pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags)
3201 pru_send_notsupp(struct socket *so, int flags, struct mbuf *m,
3202 struct sockaddr *addr, struct mbuf *control, struct thread *td)
3209 * This isn't really a ``null'' operation, but it's the default one and
3210 * doesn't do anything destructive.
3213 pru_sense_null(struct socket *so, struct stat *sb)
3216 sb->st_blksize = so->so_snd.sb_hiwat;
3221 pru_shutdown_notsupp(struct socket *so)
3228 pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam)
3235 pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
3236 struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
3243 pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
3244 struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
3251 pru_sopoll_notsupp(struct socket *so, int events, struct ucred *cred,
3259 filt_sordetach(struct knote *kn)
3261 struct socket *so = kn->kn_fp->f_data;
3263 SOCKBUF_LOCK(&so->so_rcv);
3264 knlist_remove(&so->so_rcv.sb_sel.si_note, kn, 1);
3265 if (knlist_empty(&so->so_rcv.sb_sel.si_note))
3266 so->so_rcv.sb_flags &= ~SB_KNOTE;
3267 SOCKBUF_UNLOCK(&so->so_rcv);
3272 filt_soread(struct knote *kn, long hint)
3276 so = kn->kn_fp->f_data;
3277 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
3279 kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
3280 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
3281 kn->kn_flags |= EV_EOF;
3282 kn->kn_fflags = so->so_error;
3284 } else if (so->so_error) /* temporary udp error */
3286 else if (kn->kn_sfflags & NOTE_LOWAT)
3287 return (kn->kn_data >= kn->kn_sdata);
3289 return (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat);
3293 filt_sowdetach(struct knote *kn)
3295 struct socket *so = kn->kn_fp->f_data;
3297 SOCKBUF_LOCK(&so->so_snd);
3298 knlist_remove(&so->so_snd.sb_sel.si_note, kn, 1);
3299 if (knlist_empty(&so->so_snd.sb_sel.si_note))
3300 so->so_snd.sb_flags &= ~SB_KNOTE;
3301 SOCKBUF_UNLOCK(&so->so_snd);
3306 filt_sowrite(struct knote *kn, long hint)
3310 so = kn->kn_fp->f_data;
3311 SOCKBUF_LOCK_ASSERT(&so->so_snd);
3312 kn->kn_data = sbspace(&so->so_snd);
3313 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
3314 kn->kn_flags |= EV_EOF;
3315 kn->kn_fflags = so->so_error;
3317 } else if (so->so_error) /* temporary udp error */
3319 else if (((so->so_state & SS_ISCONNECTED) == 0) &&
3320 (so->so_proto->pr_flags & PR_CONNREQUIRED))
3322 else if (kn->kn_sfflags & NOTE_LOWAT)
3323 return (kn->kn_data >= kn->kn_sdata);
3325 return (kn->kn_data >= so->so_snd.sb_lowat);
3330 filt_solisten(struct knote *kn, long hint)
3332 struct socket *so = kn->kn_fp->f_data;
3334 kn->kn_data = so->so_qlen;
3335 return (!TAILQ_EMPTY(&so->so_comp));
3339 socheckuid(struct socket *so, uid_t uid)
3344 if (so->so_cred->cr_uid != uid)
3350 * These functions are used by protocols to notify the socket layer (and its
3351 * consumers) of state changes in the sockets driven by protocol-side events.
3355 * Procedures to manipulate state flags of socket and do appropriate wakeups.
3357 * Normal sequence from the active (originating) side is that
3358 * soisconnecting() is called during processing of connect() call, resulting
3359 * in an eventual call to soisconnected() if/when the connection is
3360 * established. When the connection is torn down soisdisconnecting() is
3361 * called during processing of disconnect() call, and soisdisconnected() is
3362 * called when the connection to the peer is totally severed. The semantics
3363 * of these routines are such that connectionless protocols can call
3364 * soisconnected() and soisdisconnected() only, bypassing the in-progress
3365 * calls when setting up a ``connection'' takes no time.
3367 * From the passive side, a socket is created with two queues of sockets:
3368 * so_incomp for connections in progress and so_comp for connections already
3369 * made and awaiting user acceptance. As a protocol is preparing incoming
3370 * connections, it creates a socket structure queued on so_incomp by calling
3371 * sonewconn(). When the connection is established, soisconnected() is
3372  * called, and transfers the socket structure to so_comp, making it available for accept().
3375 * If a socket is closed with sockets on either so_incomp or so_comp, these
3376 * sockets are dropped.
3378 * If higher-level protocols are implemented in the kernel, the wakeups done
3379 * here will sometimes cause software-interrupt process scheduling.
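/*
 * A minimal sketch of the active-side sequence described above; the name
 * example_connect_path() is hypothetical and the protocol handshake itself
 * is elided.
 */
static void
example_connect_path(struct socket *so)
{

	soisconnecting(so);	/* connect() processing has begun */
	/* ... the protocol performs its handshake here ... */
	soisconnected(so);	/* connection fully established */
}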
3382 soisconnecting(struct socket *so)
3386 so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
3387 so->so_state |= SS_ISCONNECTING;
3392 soisconnected(struct socket *so)
3394 struct socket *head;
3400 so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
3401 so->so_state |= SS_ISCONNECTED;
3403 if (head != NULL && (so->so_qstate & SQ_INCOMP)) {
3404 if ((so->so_options & SO_ACCEPTFILTER) == 0) {
3406 TAILQ_REMOVE(&head->so_incomp, so, so_list);
3408 so->so_qstate &= ~SQ_INCOMP;
3409 TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
3411 so->so_qstate |= SQ_COMP;
3414 wakeup_one(&head->so_timeo);
3417 soupcall_set(so, SO_RCV,
3418 head->so_accf->so_accept_filter->accf_callback,
3419 head->so_accf->so_accept_filter_arg);
3420 so->so_options &= ~SO_ACCEPTFILTER;
3421 ret = head->so_accf->so_accept_filter->accf_callback(so,
3422 head->so_accf->so_accept_filter_arg, M_NOWAIT);
3423 if (ret == SU_ISCONNECTED)
3424 soupcall_clear(so, SO_RCV);
3426 if (ret == SU_ISCONNECTED)
3433 wakeup(&so->so_timeo);
3439 soisdisconnecting(struct socket *so)
3443 * Note: This code assumes that SOCK_LOCK(so) and
3444 * SOCKBUF_LOCK(&so->so_rcv) are the same.
3446 SOCKBUF_LOCK(&so->so_rcv);
3447 so->so_state &= ~SS_ISCONNECTING;
3448 so->so_state |= SS_ISDISCONNECTING;
3449 so->so_rcv.sb_state |= SBS_CANTRCVMORE;
3450 sorwakeup_locked(so);
3451 SOCKBUF_LOCK(&so->so_snd);
3452 so->so_snd.sb_state |= SBS_CANTSENDMORE;
3453 sowwakeup_locked(so);
3454 wakeup(&so->so_timeo);
3458 soisdisconnected(struct socket *so)
3462 * Note: This code assumes that SOCK_LOCK(so) and
3463 * SOCKBUF_LOCK(&so->so_rcv) are the same.
3465 SOCKBUF_LOCK(&so->so_rcv);
3466 so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
3467 so->so_state |= SS_ISDISCONNECTED;
3468 so->so_rcv.sb_state |= SBS_CANTRCVMORE;
3469 sorwakeup_locked(so);
3470 SOCKBUF_LOCK(&so->so_snd);
3471 so->so_snd.sb_state |= SBS_CANTSENDMORE;
3472 sbdrop_locked(&so->so_snd, so->so_snd.sb_cc);
3473 sowwakeup_locked(so);
3474 wakeup(&so->so_timeo);
3478 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.
3481 sodupsockaddr(const struct sockaddr *sa, int mflags)
3483 struct sockaddr *sa2;
3485 sa2 = malloc(sa->sa_len, M_SONAME, mflags);
3487 bcopy(sa, sa2, sa->sa_len);
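/*
 * A minimal usage sketch, assuming "sa" points at a protocol-owned address:
 * hand the socket layer a private M_SONAME copy rather than exposing
 * internal state.  example_peeraddr() is hypothetical.
 */
static int
example_peeraddr(struct sockaddr *sa, struct sockaddr **nam)
{

	*nam = sodupsockaddr(sa, M_NOWAIT);
	return (*nam != NULL ? 0 : ENOMEM);
}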
3492 * Register per-socket buffer upcalls.
3495 soupcall_set(struct socket *so, int which,
3496 int (*func)(struct socket *, void *, int), void *arg)
3508 panic("soupcall_set: bad which");
3510 SOCKBUF_LOCK_ASSERT(sb);
3512 /* XXX: accf_http actually wants to do this on purpose. */
3513 KASSERT(sb->sb_upcall == NULL, ("soupcall_set: overwriting upcall"));
3515 sb->sb_upcall = func;
3516 sb->sb_upcallarg = arg;
3517 sb->sb_flags |= SB_UPCALL;
3521 soupcall_clear(struct socket *so, int which)
3533 panic("soupcall_clear: bad which");
3535 SOCKBUF_LOCK_ASSERT(sb);
3536 KASSERT(sb->sb_upcall != NULL, ("soupcall_clear: no upcall to clear"));
3537 sb->sb_upcall = NULL;
3538 sb->sb_upcallarg = NULL;
3539 sb->sb_flags &= ~SB_UPCALL;
3543 * Create an external-format (``xsocket'') structure using the information in
3544 * the kernel-format socket structure pointed to by so. This is done to
3545 * reduce the spew of irrelevant information over this interface, to isolate
3546 * user code from changes in the kernel structure, and potentially to provide
3547 * information-hiding if we decide that some of this information should be
3548 * hidden from users.
3551 sotoxsocket(struct socket *so, struct xsocket *xso)
3554 xso->xso_len = sizeof *xso;
3556 xso->so_type = so->so_type;
3557 xso->so_options = so->so_options;
3558 xso->so_linger = so->so_linger;
3559 xso->so_state = so->so_state;
3560 xso->so_pcb = so->so_pcb;
3561 xso->xso_protocol = so->so_proto->pr_protocol;
3562 xso->xso_family = so->so_proto->pr_domain->dom_family;
3563 xso->so_qlen = so->so_qlen;
3564 xso->so_incqlen = so->so_incqlen;
3565 xso->so_qlimit = so->so_qlimit;
3566 xso->so_timeo = so->so_timeo;
3567 xso->so_error = so->so_error;
3568 xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
3569 xso->so_oobmark = so->so_oobmark;
3570 sbtoxsockbuf(&so->so_snd, &xso->so_snd);
3571 sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
3572 xso->so_uid = so->so_cred->cr_uid;
3577 * Socket accessor functions to provide external consumers with
3578  * a safe interface to socket state maintained by the socket layer.
3583 so_listeners_apply_all(struct socket *so, void (*func)(struct socket *, void *),
3587 TAILQ_FOREACH(so, &so->so_comp, so_list)
3592 so_sockbuf_rcv(struct socket *so)
3595 return (&so->so_rcv);
3599 so_sockbuf_snd(struct socket *so)
3602 return (&so->so_snd);
3606 so_state_get(const struct socket *so)
3609 return (so->so_state);
3613 so_state_set(struct socket *so, int val)
3620 so_options_get(const struct socket *so)
3623 return (so->so_options);
3627 so_options_set(struct socket *so, int val)
3630 so->so_options = val;
3634 so_error_get(const struct socket *so)
3637 return (so->so_error);
3641 so_error_set(struct socket *so, int val)
3648 so_linger_get(const struct socket *so)
3651 return (so->so_linger);
3655 so_linger_set(struct socket *so, int val)
3658 so->so_linger = val;
3662 so_protosw_get(const struct socket *so)
3665 return (so->so_proto);
3669 so_protosw_set(struct socket *so, struct protosw *val)
3676 so_sorwakeup(struct socket *so)
3683 so_sowwakeup(struct socket *so)
3690 so_sorwakeup_locked(struct socket *so)
3693 sorwakeup_locked(so);
3697 so_sowwakeup_locked(struct socket *so)
3700 sowwakeup_locked(so);
3704 so_lock(struct socket *so)
3711 so_unlock(struct socket *so)