2 * Copyright (c) 1982, 1986, 1988, 1990, 1993
3 * The Regents of the University of California.
4 * Copyright (c) 2004 The FreeBSD Foundation
5 * Copyright (c) 2004-2008 Robert N. M. Watson
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * @(#)uipc_socket.c 8.3 (Berkeley) 4/15/94
36 * Comments on the socket life cycle:
38 * soalloc() sets up socket layer state for a socket, called only by
39 * socreate() and sonewconn(). Socket layer private.
41 * sodealloc() tears down socket layer state for a socket, called only by
42 * sofree() and sonewconn(). Socket layer private.
44 * pru_attach() associates protocol layer state with an allocated socket;
45 * called only once, may fail, aborting socket allocation. This is called
46 * from socreate() and sonewconn(). Socket layer private.
48 * pru_detach() disassociates protocol layer state from an attached socket,
49 * and will be called exactly once for sockets in which pru_attach() has
50 * been successfully called. If pru_attach() returned an error,
51 * pru_detach() will not be called. Socket layer private.
53 * pru_abort() and pru_close() notify the protocol layer that the last
54 * consumer of a socket is starting to tear down the socket, and that the
55 * protocol should terminate the connection. Historically, pru_abort() also
56 * detached protocol state from the socket state, but this is no longer the
57 * case.
59 * socreate() creates a socket and attaches protocol state. This is a public
60 * interface that may be used by socket layer consumers to create new
61 * sockets.
63 * sonewconn() creates a socket and attaches protocol state. This is a
64 * public interface that may be used by protocols to create new sockets when
65 * a new connection is received and will be available for accept() on a
66 * listen socket.
68 * soclose() destroys a socket after possibly waiting for it to disconnect.
69 * This is a public interface that socket consumers should use to close and
70 * release a socket when done with it.
72 * soabort() destroys a socket without waiting for it to disconnect (used
73 * only for incoming connections that are already partially or fully
74 * connected). This is used internally by the socket layer when clearing
75 * listen socket queues (due to overflow or close on the listen socket), but
76 * is also a public interface protocols may use to abort connections in
77 * their incomplete listen queues should they no longer be required. Sockets
78 * placed in completed connection listen queues should not be aborted for
79 * reasons described in the comment above the soclose() implementation. This
80 * is not a general purpose close routine, and except in the specific
81 * circumstances described here, should not be used.
83 * sofree() will free a socket and its protocol state if all references on
84 * the socket have been released, and is the public interface to attempt to
85 * free a socket when a reference is removed. This is a socket layer private
86 * interface.
88 * NOTE: In addition to socreate() and soclose(), which provide a single
89 * socket reference to the consumer to be managed as required, there are two
90 * calls to explicitly manage socket references, soref(), and sorele().
91 * Currently, these are generally required only when transitioning a socket
92 * from a listen queue to a file descriptor, in order to prevent garbage
93 * collection of the socket at an untimely moment. For a number of reasons,
94 * these interfaces are not preferred, and should be avoided.
96 * NOTE: With regard to VNETs the general rule is that callers do not set
97 * curvnet. Exceptions to this rule include soabort(), sodisconnect(),
98 * sofree() (and with that sorele(), sotryfree()), as well as sonewconn()
99 * and sorflush(), which are usually called from a pre-set VNET context.
100 * sopoll() currently does not need a VNET context to be set.
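/*
 * Illustrative sketch (not compiled in): a minimal kernel consumer of the
 * public life cycle interfaces described above. Error handling is
 * abbreviated and the function name is hypothetical; the single socket
 * reference returned by socreate() is released by soclose().
 */
#if 0
static int
example_socket_lifecycle(struct thread *td)
{
	struct socket *so;
	int error;

	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
	    td->td_ucred, td);
	if (error != 0)
		return (error);
	/* ... sobind(), soconnect(), sosend(), soreceive() as needed ... */
	return (soclose(so));
}
#endif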
103 #include <sys/cdefs.h>
104 __FBSDID("$FreeBSD$");
106 #include "opt_inet.h"
107 #include "opt_inet6.h"
108 #include "opt_compat.h"
109 #include "opt_sctp.h"
111 #include <sys/param.h>
112 #include <sys/systm.h>
113 #include <sys/fcntl.h>
114 #include <sys/limits.h>
115 #include <sys/lock.h>
117 #include <sys/malloc.h>
118 #include <sys/mbuf.h>
119 #include <sys/mutex.h>
120 #include <sys/domain.h>
121 #include <sys/file.h> /* for struct knote */
122 #include <sys/hhook.h>
123 #include <sys/kernel.h>
124 #include <sys/khelp.h>
125 #include <sys/event.h>
126 #include <sys/eventhandler.h>
127 #include <sys/poll.h>
128 #include <sys/proc.h>
129 #include <sys/protosw.h>
130 #include <sys/socket.h>
131 #include <sys/socketvar.h>
132 #include <sys/resourcevar.h>
133 #include <net/route.h>
134 #include <sys/signalvar.h>
135 #include <sys/stat.h>
137 #include <sys/sysctl.h>
138 #include <sys/taskqueue.h>
140 #include <sys/jail.h>
141 #include <sys/syslog.h>
142 #include <netinet/in.h>
144 #include <net/vnet.h>
146 #include <security/mac/mac_framework.h>
150 #ifdef COMPAT_FREEBSD32
151 #include <sys/mount.h>
152 #include <sys/sysent.h>
153 #include <compat/freebsd32/freebsd32.h>
156 static int soreceive_rcvoob(struct socket *so, struct uio *uio,
157     int flags);
158 static void so_rdknl_lock(void *);
159 static void so_rdknl_unlock(void *);
160 static void so_rdknl_assert_locked(void *);
161 static void so_rdknl_assert_unlocked(void *);
162 static void so_wrknl_lock(void *);
163 static void so_wrknl_unlock(void *);
164 static void so_wrknl_assert_locked(void *);
165 static void so_wrknl_assert_unlocked(void *);
167 static void filt_sordetach(struct knote *kn);
168 static int filt_soread(struct knote *kn, long hint);
169 static void filt_sowdetach(struct knote *kn);
170 static int filt_sowrite(struct knote *kn, long hint);
171 static int filt_soempty(struct knote *kn, long hint);
172 static inline int hhook_run_socket(struct socket *so, void *hctx, int32_t h_id);
173 fo_kqfilter_t soo_kqfilter;
175 static struct filterops soread_filtops = {
177 .f_detach = filt_sordetach,
178 .f_event = filt_soread,
180 static struct filterops sowrite_filtops = {
182 .f_detach = filt_sowdetach,
183 .f_event = filt_sowrite,
185 static struct filterops soempty_filtops = {
187 .f_detach = filt_sowdetach,
188 .f_event = filt_soempty,
191 so_gen_t so_gencnt; /* generation count for sockets */
193 MALLOC_DEFINE(M_SONAME, "soname", "socket name");
194 MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
196 #define VNET_SO_ASSERT(so) \
197 VNET_ASSERT(curvnet != NULL, \
198 ("%s:%d curvnet is NULL, so=%p", __func__, __LINE__, (so)));
200 VNET_DEFINE(struct hhook_head *, socket_hhh[HHOOK_SOCKET_LAST + 1]);
201 #define V_socket_hhh VNET(socket_hhh)
204 * Limit on the number of connections in the listen queue waiting
205 * for accept(2).
206 * NB: The original sysctl somaxconn is still available but hidden
207 * to prevent confusion about the actual purpose of this number.
209 static u_int somaxconn = SOMAXCONN;
212 sysctl_somaxconn(SYSCTL_HANDLER_ARGS)
218 error = sysctl_handle_int(oidp, &val, 0, req);
219 if (error || !req->newptr)
223 * The purpose of the UINT_MAX / 3 limit is so that the formula
224 * 3 * so_qlimit / 2
225 * below will not overflow an unsigned int.
228 if (val < 1 || val > UINT_MAX / 3)
234 SYSCTL_PROC(_kern_ipc, OID_AUTO, soacceptqueue, CTLTYPE_UINT | CTLFLAG_RW,
235 0, sizeof(int), sysctl_somaxconn, "I",
236 "Maximum listen socket pending connection accept queue size");
237 SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn,
238 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_SKIP,
239 0, sizeof(int), sysctl_somaxconn, "I",
240 "Maximum listen socket pending connection accept queue size (compat)");
242 static int numopensockets;
243 SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
244 &numopensockets, 0, "Number of open sockets");
247 * accept_mtx locks down per-socket fields relating to accept queues. See
248 * socketvar.h for an annotation of the protected fields of struct socket.
250 struct mtx accept_mtx;
251 MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);
254 * so_global_mtx protects so_gencnt, numopensockets, and the per-socket
257 static struct mtx so_global_mtx;
258 MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_global", MTX_DEF);
261 * General IPC sysctl name space, used by sockets and a variety of other IPC
264 SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");
267 * Initialize the socket subsystem and set up the socket
270 static uma_zone_t socket_zone;
274 socket_zone_change(void *tag)
277 maxsockets = uma_zone_set_max(socket_zone, maxsockets);
281 socket_hhook_register(int subtype)
284 if (hhook_head_register(HHOOK_TYPE_SOCKET, subtype,
285 &V_socket_hhh[subtype],
286 HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
287 printf("%s: WARNING: unable to register hook\n", __func__);
291 socket_hhook_deregister(int subtype)
294 if (hhook_head_deregister(V_socket_hhh[subtype]) != 0)
295 printf("%s: WARNING: unable to deregister hook\n", __func__);
299 socket_init(void *tag)
302 socket_zone = uma_zcreate("socket", sizeof(struct socket), NULL, NULL,
303 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
304 maxsockets = uma_zone_set_max(socket_zone, maxsockets);
305 uma_zone_set_warning(socket_zone, "kern.ipc.maxsockets limit reached");
306 EVENTHANDLER_REGISTER(maxsockets_change, socket_zone_change, NULL,
307 EVENTHANDLER_PRI_FIRST);
309 SYSINIT(socket, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY, socket_init, NULL);
312 socket_vnet_init(const void *unused __unused)
316 /* We expect a contiguous range */
317 for (i = 0; i <= HHOOK_SOCKET_LAST; i++)
318 socket_hhook_register(i);
320 VNET_SYSINIT(socket_vnet_init, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY,
321 socket_vnet_init, NULL);
324 socket_vnet_uninit(const void *unused __unused)
328 for (i = 0; i <= HHOOK_SOCKET_LAST; i++)
329 socket_hhook_deregister(i);
331 VNET_SYSUNINIT(socket_vnet_uninit, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY,
332 socket_vnet_uninit, NULL);
335 * Initialise maxsockets. This SYSINIT must be run after
336 * tunable_mbinit().
339 init_maxsockets(void *ignored)
342 TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
343 maxsockets = imax(maxsockets, maxfiles);
345 SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);
348 * Sysctl to get and set the maximum global sockets limit. Notify protocols
349 * of the change so that they can update their dependent limits as required.
352 sysctl_maxsockets(SYSCTL_HANDLER_ARGS)
354 int error, newmaxsockets;
356 newmaxsockets = maxsockets;
357 error = sysctl_handle_int(oidp, &newmaxsockets, 0, req);
358 if (error == 0 && req->newptr) {
359 if (newmaxsockets > maxsockets &&
360 newmaxsockets <= maxfiles) {
361 maxsockets = newmaxsockets;
362 EVENTHANDLER_INVOKE(maxsockets_change);
368 SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets, CTLTYPE_INT|CTLFLAG_RW,
369 &maxsockets, 0, sysctl_maxsockets, "IU",
370 "Maximum number of sockets available");
373 * Socket operation routines. These routines are called by the routines in
374 * sys_socket.c or from a system process, and implement the semantics of
375 * socket operations by switching out to the protocol specific routines.
379 * Get a socket structure from our zone, and initialize it. Note that it
380 * would probably be better to allocate socket and PCB at the same time, but
381 * I'm not convinced that all the protocols can be easily modified to do
382 * this.
384 * soalloc() returns a socket with a ref count of 0.
386 static struct socket *
387 soalloc(struct vnet *vnet)
391 so = uma_zalloc(socket_zone, M_NOWAIT | M_ZERO);
395 if (mac_socket_init(so, M_NOWAIT) != 0) {
396 uma_zfree(socket_zone, so);
400 if (khelp_init_osd(HELPER_CLASS_SOCKET, &so->osd)) {
401 uma_zfree(socket_zone, so);
406 * The socket locking protocol allows locking 2 sockets at a time,
407 * however, the first one must be a listening socket. WITNESS lacks
408 * a feature to change class of an existing lock, so we use DUPOK.
410 mtx_init(&so->so_lock, "socket", NULL, MTX_DEF | MTX_DUPOK);
411 SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
412 SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
413 so->so_rcv.sb_sel = &so->so_rdsel;
414 so->so_snd.sb_sel = &so->so_wrsel;
415 sx_init(&so->so_snd.sb_sx, "so_snd_sx");
416 sx_init(&so->so_rcv.sb_sx, "so_rcv_sx");
417 TAILQ_INIT(&so->so_snd.sb_aiojobq);
418 TAILQ_INIT(&so->so_rcv.sb_aiojobq);
419 TASK_INIT(&so->so_snd.sb_aiotask, 0, soaio_snd, so);
420 TASK_INIT(&so->so_rcv.sb_aiotask, 0, soaio_rcv, so);
422 VNET_ASSERT(vnet != NULL, ("%s:%d vnet is NULL, so=%p",
423 __func__, __LINE__, so));
426 /* We shouldn't need the so_global_mtx */
427 if (hhook_run_socket(so, NULL, HHOOK_SOCKET_CREATE)) {
428 /* Do we need more comprehensive error returns? */
429 uma_zfree(socket_zone, so);
432 mtx_lock(&so_global_mtx);
433 so->so_gencnt = ++so_gencnt;
436 vnet->vnet_sockcnt++;
438 mtx_unlock(&so_global_mtx);
444 * Free the storage associated with a socket at the socket layer, tear down
445 * locks, labels, etc. All protocol state is assumed already to have been
446 * torn down (and possibly never set up) by the caller.
449 sodealloc(struct socket *so)
452 KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
453 KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL"));
455 mtx_lock(&so_global_mtx);
456 so->so_gencnt = ++so_gencnt;
457 --numopensockets; /* Could be below, but faster here. */
459 VNET_ASSERT(so->so_vnet != NULL, ("%s:%d so_vnet is NULL, so=%p",
460 __func__, __LINE__, so));
461 so->so_vnet->vnet_sockcnt--;
463 mtx_unlock(&so_global_mtx);
465 mac_socket_destroy(so);
467 hhook_run_socket(so, NULL, HHOOK_SOCKET_CLOSE);
470 khelp_destroy_osd(&so->osd);
471 if (SOLISTENING(so)) {
472 if (so->sol_accept_filter != NULL)
473 accept_filt_setopt(so, NULL);
475 if (so->so_rcv.sb_hiwat)
476 (void)chgsbsize(so->so_cred->cr_uidinfo,
477 &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
478 if (so->so_snd.sb_hiwat)
479 (void)chgsbsize(so->so_cred->cr_uidinfo,
480 &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
481 sx_destroy(&so->so_snd.sb_sx);
482 sx_destroy(&so->so_rcv.sb_sx);
483 SOCKBUF_LOCK_DESTROY(&so->so_snd);
484 SOCKBUF_LOCK_DESTROY(&so->so_rcv);
486 mtx_destroy(&so->so_lock);
487 uma_zfree(socket_zone, so);
491 * socreate returns a socket with a ref count of 1. The socket should be
492 * closed with soclose().
495 socreate(int dom, struct socket **aso, int type, int proto,
496 struct ucred *cred, struct thread *td)
503 prp = pffindproto(dom, proto, type);
505 prp = pffindtype(dom, type);
508 /* No support for domain. */
509 if (pffinddomain(dom) == NULL)
510 return (EAFNOSUPPORT);
511 /* No support for socket type. */
512 if (proto == 0 && type != 0)
513 return (EPROTOTYPE);
514 return (EPROTONOSUPPORT);
516 if (prp->pr_usrreqs->pru_attach == NULL ||
517 prp->pr_usrreqs->pru_attach == pru_attach_notsupp)
518 return (EPROTONOSUPPORT);
520 if (prison_check_af(cred, prp->pr_domain->dom_family) != 0)
521 return (EPROTONOSUPPORT);
523 if (prp->pr_type != type)
524 return (EPROTOTYPE);
525 so = soalloc(CRED_TO_VNET(cred));
530 so->so_cred = crhold(cred);
531 if ((prp->pr_domain->dom_family == PF_INET) ||
532 (prp->pr_domain->dom_family == PF_INET6) ||
533 (prp->pr_domain->dom_family == PF_ROUTE))
534 so->so_fibnum = td->td_proc->p_fibnum;
539 mac_socket_create(cred, so);
541 knlist_init(&so->so_rdsel.si_note, so, so_rdknl_lock, so_rdknl_unlock,
542 so_rdknl_assert_locked, so_rdknl_assert_unlocked);
543 knlist_init(&so->so_wrsel.si_note, so, so_wrknl_lock, so_wrknl_unlock,
544 so_wrknl_assert_locked, so_wrknl_assert_unlocked);
546 * Auto-sizing of socket buffers is managed by the protocols and
547 * the appropriate flags must be set in the pru_attach function.
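/*
 * (For example, a pru_attach implementation that wants auto-sizing
 * sets so->so_rcv.sb_flags |= SB_AUTOSIZE, and likewise for so_snd;
 * compare the SB_AUTOSIZE propagation in sonewconn() below.)
 */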
549 CURVNET_SET(so->so_vnet);
550 error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
562 static int regression_sonewconn_earlytest = 1;
563 SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW,
564 &regression_sonewconn_earlytest, 0, "Perform early sonewconn limit test");
568 * When an attempt at a new connection is noted on a socket which accepts
569 * connections, sonewconn is called. If the connection is possible (subject
570 * to space constraints, etc.) then we allocate a new structure, properly
571 * linked into the data structure of the original socket, and return this.
572 * Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED.
574 * Note: the ref count on the socket is 0 on return.
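/*
 * Illustrative sketch (not compiled in): a protocol input path creating
 * a socket for a newly received connection request; the 'drop' label is
 * hypothetical.
 */
#if 0
	struct socket *so;

	so = sonewconn(head, 0);
	if (so == NULL)
		goto drop;	/* queue overflow or allocation failure */
	/* Protocol state for the new connection hangs off so->so_pcb. */
#endif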
577 sonewconn(struct socket *head, int connstatus)
579 static struct timeval lastover;
580 static struct timeval overinterval = { 60, 0 };
581 static int overcount;
587 over = (head->sol_qlen > 3 * head->sol_qlimit / 2);
588 SOLISTEN_UNLOCK(head);
590 if (regression_sonewconn_earlytest && over) {
596 if (ratecheck(&lastover, &overinterval)) {
597 log(LOG_DEBUG, "%s: pcb %p: Listen queue overflow: "
598 "%i already in queue awaiting acceptance "
599 "(%d occurrences)\n",
600 __func__, head->so_pcb, head->sol_qlen, overcount);
607 VNET_ASSERT(head->so_vnet != NULL, ("%s: so %p vnet is NULL",
609 so = soalloc(head->so_vnet);
611 log(LOG_DEBUG, "%s: pcb %p: New socket allocation failure: "
612 "limit reached or out of memory\n",
613 __func__, head->so_pcb);
616 so->so_listen = head;
617 so->so_type = head->so_type;
618 so->so_linger = head->so_linger;
619 so->so_state = head->so_state | SS_NOFDREF;
620 so->so_fibnum = head->so_fibnum;
621 so->so_proto = head->so_proto;
622 so->so_cred = crhold(head->so_cred);
624 mac_socket_newconn(head, so);
626 knlist_init(&so->so_rdsel.si_note, so, so_rdknl_lock, so_rdknl_unlock,
627 so_rdknl_assert_locked, so_rdknl_assert_unlocked);
628 knlist_init(&so->so_wrsel.si_note, so, so_wrknl_lock, so_wrknl_unlock,
629 so_wrknl_assert_locked, so_wrknl_assert_unlocked);
630 VNET_SO_ASSERT(head);
631 if (soreserve(so, head->sol_sbsnd_hiwat, head->sol_sbrcv_hiwat)) {
633 log(LOG_DEBUG, "%s: pcb %p: soreserve() failed\n",
634 __func__, head->so_pcb);
637 if ((*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
639 log(LOG_DEBUG, "%s: pcb %p: pru_attach() failed\n",
640 __func__, head->so_pcb);
643 so->so_rcv.sb_lowat = head->sol_sbrcv_lowat;
644 so->so_snd.sb_lowat = head->sol_sbsnd_lowat;
645 so->so_rcv.sb_timeo = head->sol_sbrcv_timeo;
646 so->so_snd.sb_timeo = head->sol_sbsnd_timeo;
647 so->so_rcv.sb_flags |= head->sol_sbrcv_flags & SB_AUTOSIZE;
648 so->so_snd.sb_flags |= head->sol_sbsnd_flags & SB_AUTOSIZE;
651 if (head->sol_accept_filter != NULL)
653 so->so_state |= connstatus;
654 so->so_options = head->so_options & ~SO_ACCEPTCONN;
655 soref(head); /* A socket on (in)complete queue refs head. */
657 TAILQ_INSERT_TAIL(&head->sol_comp, so, so_list);
658 so->so_qstate = SQ_COMP;
660 solisten_wakeup(head); /* unlocks */
663 * Keep removing sockets from the head until there's room for
664 * us to insert on the tail. In pre-locking revisions, this
665 * was a simple if(), but as we could be racing with other
666 * threads and soabort() requires dropping locks, we must
667 * loop waiting for the condition to be true.
669 while (head->sol_incqlen > head->sol_qlimit) {
672 sp = TAILQ_FIRST(&head->sol_incomp);
673 TAILQ_REMOVE(&head->sol_incomp, sp, so_list);
676 sp->so_qstate = SQ_NONE;
677 sp->so_listen = NULL;
679 sorele(head); /* does SOLISTEN_UNLOCK, head stays */
683 TAILQ_INSERT_TAIL(&head->sol_incomp, so, so_list);
684 so->so_qstate = SQ_INCOMP;
686 SOLISTEN_UNLOCK(head);
693 * Socket part of sctp_peeloff(). Detach a new socket from an
694 * association. The new socket is returned with a reference.
697 sopeeloff(struct socket *head)
701 VNET_ASSERT(head->so_vnet != NULL, ("%s:%d so_vnet is NULL, head=%p",
702 __func__, __LINE__, head));
703 so = soalloc(head->so_vnet);
705 log(LOG_DEBUG, "%s: pcb %p: New socket allocation failure: "
706 "limit reached or out of memory\n",
707 __func__, head->so_pcb);
710 so->so_type = head->so_type;
711 so->so_options = head->so_options;
712 so->so_linger = head->so_linger;
713 so->so_state = (head->so_state & SS_NBIO) | SS_ISCONNECTED;
714 so->so_fibnum = head->so_fibnum;
715 so->so_proto = head->so_proto;
716 so->so_cred = crhold(head->so_cred);
718 mac_socket_newconn(head, so);
720 knlist_init(&so->so_rdsel.si_note, so, so_rdknl_lock, so_rdknl_unlock,
721 so_rdknl_assert_locked, so_rdknl_assert_unlocked);
722 knlist_init(&so->so_wrsel.si_note, so, so_wrknl_lock, so_wrknl_unlock,
723 so_wrknl_assert_locked, so_wrknl_assert_unlocked);
724 VNET_SO_ASSERT(head);
725 if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat)) {
727 log(LOG_DEBUG, "%s: pcb %p: soreserve() failed\n",
728 __func__, head->so_pcb);
731 if ((*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
733 log(LOG_DEBUG, "%s: pcb %p: pru_attach() failed\n",
734 __func__, head->so_pcb);
737 so->so_rcv.sb_lowat = head->so_rcv.sb_lowat;
738 so->so_snd.sb_lowat = head->so_snd.sb_lowat;
739 so->so_rcv.sb_timeo = head->so_rcv.sb_timeo;
740 so->so_snd.sb_timeo = head->so_snd.sb_timeo;
741 so->so_rcv.sb_flags |= head->so_rcv.sb_flags & SB_AUTOSIZE;
742 so->so_snd.sb_flags |= head->so_snd.sb_flags & SB_AUTOSIZE;
751 sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
755 CURVNET_SET(so->so_vnet);
756 error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td);
762 sobindat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
766 CURVNET_SET(so->so_vnet);
767 error = (*so->so_proto->pr_usrreqs->pru_bindat)(fd, so, nam, td);
773 * solisten() transitions a socket from a non-listening state to a listening
774 * state, but can also be used to update the listen queue depth on an
775 * existing listen socket. The protocol will call back into the sockets
776 * layer using solisten_proto_check() and solisten_proto() to check and set
777 * socket-layer listen state. Callbacks are used so that the protocol can
778 * acquire both protocol and socket layer locks in whatever order is required
781 * Protocol implementors are advised to hold the socket lock across the
782 * socket-layer test and set to avoid races at the socket layer.
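/*
 * Illustrative sketch (not compiled in): the callback pattern a
 * protocol's pru_listen method typically follows; the pcb type and its
 * lock macros are hypothetical.
 */
#if 0
static int
example_pru_listen(struct socket *so, int backlog, struct thread *td)
{
	struct example_pcb *pcb = so->so_pcb;
	int error;

	EXAMPLE_PCB_LOCK(pcb);			/* protocol lock first... */
	SOCK_LOCK(so);				/* ...then the socket lock */
	error = solisten_proto_check(so);
	if (error == 0)
		solisten_proto(so, backlog);
	SOCK_UNLOCK(so);
	EXAMPLE_PCB_UNLOCK(pcb);
	return (error);
}
#endif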
785 solisten(struct socket *so, int backlog, struct thread *td)
789 CURVNET_SET(so->so_vnet);
790 error = (*so->so_proto->pr_usrreqs->pru_listen)(so, backlog, td);
796 solisten_proto_check(struct socket *so)
799 SOCK_LOCK_ASSERT(so);
801 if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
808 solisten_proto(struct socket *so, int backlog)
810 int sbrcv_lowat, sbsnd_lowat;
811 u_int sbrcv_hiwat, sbsnd_hiwat;
812 short sbrcv_flags, sbsnd_flags;
813 sbintime_t sbrcv_timeo, sbsnd_timeo;
815 SOCK_LOCK_ASSERT(so);
821 * Change this socket to listening state.
823 sbrcv_lowat = so->so_rcv.sb_lowat;
824 sbsnd_lowat = so->so_snd.sb_lowat;
825 sbrcv_hiwat = so->so_rcv.sb_hiwat;
826 sbsnd_hiwat = so->so_snd.sb_hiwat;
827 sbrcv_flags = so->so_rcv.sb_flags;
828 sbsnd_flags = so->so_snd.sb_flags;
829 sbrcv_timeo = so->so_rcv.sb_timeo;
830 sbsnd_timeo = so->so_snd.sb_timeo;
832 sbdestroy(&so->so_snd, so);
833 sbdestroy(&so->so_rcv, so);
834 sx_destroy(&so->so_snd.sb_sx);
835 sx_destroy(&so->so_rcv.sb_sx);
836 SOCKBUF_LOCK_DESTROY(&so->so_snd);
837 SOCKBUF_LOCK_DESTROY(&so->so_rcv);
841 sizeof(struct socket) - offsetof(struct socket, so_rcv));
844 so->sol_sbrcv_lowat = sbrcv_lowat;
845 so->sol_sbsnd_lowat = sbsnd_lowat;
846 so->sol_sbrcv_hiwat = sbrcv_hiwat;
847 so->sol_sbsnd_hiwat = sbsnd_hiwat;
848 so->sol_sbrcv_flags = sbrcv_flags;
849 so->sol_sbsnd_flags = sbsnd_flags;
850 so->sol_sbrcv_timeo = sbrcv_timeo;
851 so->sol_sbsnd_timeo = sbsnd_timeo;
853 so->sol_qlen = so->sol_incqlen = 0;
854 TAILQ_INIT(&so->sol_incomp);
855 TAILQ_INIT(&so->sol_comp);
857 so->sol_accept_filter = NULL;
858 so->sol_accept_filter_arg = NULL;
859 so->sol_accept_filter_str = NULL;
861 so->sol_upcall = NULL;
862 so->sol_upcallarg = NULL;
864 so->so_options |= SO_ACCEPTCONN;
867 if (backlog < 0 || backlog > somaxconn)
869 so->sol_qlimit = backlog;
873 * Wakeup listeners/subsystems once we have a complete connection.
874 * Enters with lock, returns unlocked.
877 solisten_wakeup(struct socket *sol)
880 if (sol->sol_upcall != NULL)
881 (void)sol->sol_upcall(sol, sol->sol_upcallarg, M_NOWAIT);
883 selwakeuppri(&sol->so_rdsel, PSOCK);
884 KNOTE_LOCKED(&sol->so_rdsel.si_note, 0);
886 SOLISTEN_UNLOCK(sol);
887 wakeup_one(&sol->sol_comp);
891 * Return a single connection off a listening socket queue. The main
892 * consumer of the function is kern_accept4(). Some modules that do their
893 * own accept management also use the function.
895 * The listening socket must be locked on entry and is returned unlocked on
896 * return.
897 * The flags argument is a set of accept4(2) flags and ACCEPT4_INHERIT.
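/*
 * Illustrative sketch (not compiled in): accepting one connection the
 * way kern_accept4() does; declarations abbreviated.
 */
#if 0
	struct sockaddr *sa = NULL;
	struct socket *so;
	int error;

	SOLISTEN_LOCK(head);
	error = solisten_dequeue(head, &so, 0);	/* returns unlocked */
	if (error == 0) {
		error = soaccept(so, &sa);
		free(sa, M_SONAME);
	}
#endif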
900 solisten_dequeue(struct socket *head, struct socket **ret, int flags)
905 SOLISTEN_LOCK_ASSERT(head);
907 while (!(head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->sol_comp) &&
908 head->so_error == 0) {
909 error = msleep(&head->sol_comp, &head->so_lock, PSOCK | PCATCH,
912 SOLISTEN_UNLOCK(head);
916 if (head->so_error) {
917 error = head->so_error;
919 SOLISTEN_UNLOCK(head);
922 if ((head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->sol_comp)) {
923 SOLISTEN_UNLOCK(head);
924 return (EWOULDBLOCK);
926 so = TAILQ_FIRST(&head->sol_comp);
928 KASSERT(so->so_qstate == SQ_COMP,
929 ("%s: so %p not SQ_COMP", __func__, so));
932 so->so_qstate = SQ_NONE;
933 so->so_listen = NULL;
934 TAILQ_REMOVE(&head->sol_comp, so, so_list);
935 if (flags & ACCEPT4_INHERIT)
936 so->so_state |= (head->so_state & SS_NBIO);
938 so->so_state |= (flags & SOCK_NONBLOCK) ? SS_NBIO : 0;
947 * Evaluate the reference count and named references on a socket; if no
948 * references remain, free it. This should be called whenever a reference is
949 * released, such as in sorele(), but also when named reference flags are
950 * cleared in socket or protocol code.
952 * sofree() will free the socket if:
954 * - There are no outstanding file descriptor references or related consumers
955 *   (so_count == 0).
957 * - The socket has been closed by user space, if ever open (SS_NOFDREF).
959 * - The protocol does not have an outstanding strong reference on the socket
960 *   (SS_PROTOREF).
962 * - The socket is not in a completed connection queue, so a process has been
963 * notified that it is present. If it is removed, the user process may
964 * block in accept() despite select() saying the socket was ready.
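/*
 * Illustrative sketch (not compiled in): the soref()/sorele() pairing
 * mentioned in the NOTE near the top of this file. sorele() expects the
 * socket lock to be held; it drops the lock and ends up in sofree() when
 * the last reference goes away.
 */
#if 0
	soref(so);		/* take an explicit reference */
	/* ... hand the socket from a listen queue to a file descriptor ... */
	SOCK_LOCK(so);
	sorele(so);		/* drops the lock; may free so */
#endif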
967 sofree(struct socket *so)
969 struct protosw *pr = so->so_proto;
971 SOCK_LOCK_ASSERT(so);
973 if ((so->so_state & SS_NOFDREF) == 0 || so->so_count != 0 ||
974 (so->so_state & SS_PROTOREF) || (so->so_qstate == SQ_COMP)) {
979 if (!SOLISTENING(so) && so->so_qstate == SQ_INCOMP) {
983 KASSERT(sol, ("%s: so %p on incomp of NULL", __func__, so));
986 * To solve race between close of a listening socket and
987 * a socket on its incomplete queue, we need to lock both.
988 * The order is first listening socket, then regular.
989 * Since we have neither SS_NOFDREF nor SS_PROTOREF, this
990 * function and the listening socket are the only pointers
991 * to so. To preserve so and sol, we reference both and then
992 * relock.
993 * After relock the socket may not move to so_comp since it
994 * doesn't have a PCB already, but it may be removed from
995 * so_incomp. If that happens, we share responsibility for
996 * freeing the socket, but soclose() has already removed
997 * it from the queue.
1004 if (so->so_qstate == SQ_INCOMP) {
1005 KASSERT(so->so_listen == sol,
1006 ("%s: so %p migrated out of sol %p",
1007 __func__, so, sol));
1008 TAILQ_REMOVE(&sol->sol_incomp, so, so_list);
1010 /* This is guaranteed not to be the last. */
1011 refcount_release(&sol->so_count);
1012 so->so_qstate = SQ_NONE;
1013 so->so_listen = NULL;
1015 KASSERT(so->so_listen == NULL,
1016 ("%s: so %p not on (in)comp with so_listen",
1019 KASSERT(so->so_count == 1,
1020 ("%s: so %p count %u", __func__, so, so->so_count));
1023 if (SOLISTENING(so))
1024 so->so_error = ECONNABORTED;
1028 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
1029 (*pr->pr_domain->dom_dispose)(so);
1030 if (pr->pr_usrreqs->pru_detach != NULL)
1031 (*pr->pr_usrreqs->pru_detach)(so);
1034 * From this point on, we assume that no other references to this
1035 * socket exist anywhere else in the stack. Therefore, no locks need
1036 * to be acquired or held.
1038 * We used to do a lot of socket buffer and socket locking here, as
1039 * well as invoke sorflush() and perform wakeups. The direct calls to
1040 * dom_dispose() and sbrelease_internal() are an inlining of what was
1041 * necessary from sorflush().
1043 * Notice that the socket buffer and kqueue state are torn down
1044 * before calling pru_detach. This means that protocols should not
1045 * assume they can perform socket wakeups, etc, in their detach code.
1047 if (!SOLISTENING(so)) {
1048 sbdestroy(&so->so_snd, so);
1049 sbdestroy(&so->so_rcv, so);
1051 seldrain(&so->so_rdsel);
1052 seldrain(&so->so_wrsel);
1053 knlist_destroy(&so->so_rdsel.si_note);
1054 knlist_destroy(&so->so_wrsel.si_note);
1059 * Close a socket on last file table reference removal. Initiate disconnect
1060 * if connected. Free socket when disconnect complete.
1062 * This function will sorele() the socket. Note that soclose() may be called
1063 * prior to the ref count reaching zero. The actual socket structure will
1064 * not be freed until the ref count reaches zero.
1067 soclose(struct socket *so)
1069 struct accept_queue lqueue;
1073 KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter"));
1075 CURVNET_SET(so->so_vnet);
1076 funsetown(&so->so_sigio);
1077 if (so->so_state & SS_ISCONNECTED) {
1078 if ((so->so_state & SS_ISDISCONNECTING) == 0) {
1079 error = sodisconnect(so);
1081 if (error == ENOTCONN)
1086 if (so->so_options & SO_LINGER) {
1087 if ((so->so_state & SS_ISDISCONNECTING) &&
1088 (so->so_state & SS_NBIO))
1090 while (so->so_state & SS_ISCONNECTED) {
1091 error = tsleep(&so->so_timeo,
1092 PSOCK | PCATCH, "soclos",
1093 so->so_linger * hz);
1101 if (so->so_proto->pr_usrreqs->pru_close != NULL)
1102 (*so->so_proto->pr_usrreqs->pru_close)(so);
1105 if ((listening = (so->so_options & SO_ACCEPTCONN))) {
1108 TAILQ_INIT(&lqueue);
1109 TAILQ_SWAP(&lqueue, &so->sol_incomp, socket, so_list);
1110 TAILQ_CONCAT(&lqueue, &so->sol_comp, so_list);
1112 so->sol_qlen = so->sol_incqlen = 0;
1114 TAILQ_FOREACH(sp, &lqueue, so_list) {
1116 sp->so_qstate = SQ_NONE;
1117 sp->so_listen = NULL;
1119 /* Guaranteed not to be the last. */
1120 refcount_release(&so->so_count);
1123 KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF"));
1124 so->so_state |= SS_NOFDREF;
1129 TAILQ_FOREACH(sp, &lqueue, so_list) {
1131 if (sp->so_count == 0) {
1135 /* sp is now in sofree() */
1144 * soabort() is used to abruptly tear down a connection, such as when a
1145 * resource limit is reached (listen queue depth exceeded), or if a listen
1146 * socket is closed while there are sockets waiting to be accepted.
1148 * This interface is tricky, because it is called on an unreferenced socket,
1149 * and must be called only by a thread that has actually removed the socket
1150 * from the listen queue it was on, or races with other threads are risked.
1152 * This interface will call into the protocol code, so must not be called
1153 * with any socket locks held. Protocols do call it while holding their own
1154 * recursible protocol mutexes, but this is something that should be subject
1155 * to review in the future.
1158 soabort(struct socket *so)
1162 * In as much as is possible, assert that no references to this
1163 * socket are held. This is not quite the same as asserting that the
1164 * current thread is responsible for arranging for no references, but
1165 * is as close as we can get for now.
1167 KASSERT(so->so_count == 0, ("soabort: so_count"));
1168 KASSERT((so->so_state & SS_PROTOREF) == 0, ("soabort: SS_PROTOREF"));
1169 KASSERT(so->so_state & SS_NOFDREF, ("soabort: !SS_NOFDREF"));
1170 KASSERT(so->so_qstate == SQ_NONE, ("soabort: !SQ_NONE"));
1173 if (so->so_proto->pr_usrreqs->pru_abort != NULL)
1174 (*so->so_proto->pr_usrreqs->pru_abort)(so);
1180 soaccept(struct socket *so, struct sockaddr **nam)
1185 KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF"));
1186 so->so_state &= ~SS_NOFDREF;
1189 CURVNET_SET(so->so_vnet);
1190 error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
1196 soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
1199 return (soconnectat(AT_FDCWD, so, nam, td));
1203 soconnectat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
1207 if (so->so_options & SO_ACCEPTCONN)
1208 return (EOPNOTSUPP);
1210 CURVNET_SET(so->so_vnet);
1212 * If protocol is connection-based, can only connect once.
1213 * Otherwise, if connected, try to disconnect first. This allows
1214 * user to disconnect by connecting to, e.g., a null address.
1216 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
1217 ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
1218 (error = sodisconnect(so)))) {
1222 * Prevent accumulated error from previous connection from
1223 * biting us.
1226 if (fd == AT_FDCWD) {
1227 error = (*so->so_proto->pr_usrreqs->pru_connect)(so,
1230 error = (*so->so_proto->pr_usrreqs->pru_connectat)(fd,
1240 soconnect2(struct socket *so1, struct socket *so2)
1244 CURVNET_SET(so1->so_vnet);
1245 error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2);
1251 sodisconnect(struct socket *so)
1255 if ((so->so_state & SS_ISCONNECTED) == 0)
1257 if (so->so_state & SS_ISDISCONNECTING)
1260 error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
1264 #define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)
1267 sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
1268 struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
1272 int clen = 0, error, dontroute;
1274 KASSERT(so->so_type == SOCK_DGRAM, ("sosend_dgram: !SOCK_DGRAM"));
1275 KASSERT(so->so_proto->pr_flags & PR_ATOMIC,
1276 ("sosend_dgram: !PR_ATOMIC"));
1279 resid = uio->uio_resid;
1281 resid = top->m_pkthdr.len;
1283 * In theory resid should be unsigned. However, space must be
1284 * signed, as it might be less than 0 if we over-committed, and we
1285 * must use a signed comparison of space and resid. On the other
1286 * hand, a negative resid causes us to loop sending 0-length
1287 * segments to the protocol.
1295 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0;
1297 td->td_ru.ru_msgsnd++;
1298 if (control != NULL)
1299 clen = control->m_len;
1301 SOCKBUF_LOCK(&so->so_snd);
1302 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
1303 SOCKBUF_UNLOCK(&so->so_snd);
1308 error = so->so_error;
1310 SOCKBUF_UNLOCK(&so->so_snd);
1313 if ((so->so_state & SS_ISCONNECTED) == 0) {
1315 * `sendto' and `sendmsg' are allowed on a connection-based
1316 * socket if it supports implied connect. Return ENOTCONN if
1317 * not connected and no address is supplied.
1319 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
1320 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
1321 if ((so->so_state & SS_ISCONFIRMING) == 0 &&
1322 !(resid == 0 && clen != 0)) {
1323 SOCKBUF_UNLOCK(&so->so_snd);
1327 } else if (addr == NULL) {
1328 if (so->so_proto->pr_flags & PR_CONNREQUIRED)
1331 error = EDESTADDRREQ;
1332 SOCKBUF_UNLOCK(&so->so_snd);
1338 * Do we need MSG_OOB support in SOCK_DGRAM? Signs here may be a
1339 * problem and need fixing.
1341 space = sbspace(&so->so_snd);
1342 if (flags & MSG_OOB)
1345 SOCKBUF_UNLOCK(&so->so_snd);
1346 if (resid > space) {
1352 if (flags & MSG_EOR)
1353 top->m_flags |= M_EOR;
1356 * Copy the data from userland into an mbuf chain.
1357 * If no data is to be copied in, a single empty mbuf
1358 * is returned.
1360 top = m_uiotombuf(uio, M_WAITOK, space, max_hdr,
1361 (M_PKTHDR | ((flags & MSG_EOR) ? M_EOR : 0)));
1363 error = EFAULT; /* only possible error */
1366 space -= resid - uio->uio_resid;
1367 resid = uio->uio_resid;
1369 KASSERT(resid == 0, ("sosend_dgram: resid != 0"));
1371 * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock
1376 so->so_options |= SO_DONTROUTE;
1380 * XXX all the SBS_CANTSENDMORE checks previously done could be out
1381 * of date. We could have received a reset packet in an interrupt or
1382 * maybe we slept while doing page faults in uiomove() etc. We could
1383 * probably recheck again inside the locking protection here, but
1384 * there are probably other places that this also happens. We must
1385 * rethink this.
1388 error = (*so->so_proto->pr_usrreqs->pru_send)(so,
1389 (flags & MSG_OOB) ? PRUS_OOB :
1391 * If the user set MSG_EOF, the protocol understands this flag, and
1392 * there is nothing left to send, then use PRU_SEND_EOF instead of PRU_SEND.
1394 ((flags & MSG_EOF) &&
1395 (so->so_proto->pr_flags & PR_IMPLOPCL) &&
1398 /* If there is more to send set PRUS_MORETOCOME */
1399 (flags & MSG_MORETOCOME) ||
1400 (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
1401 top, addr, control, td);
1404 so->so_options &= ~SO_DONTROUTE;
1413 if (control != NULL)
1419 * Send on a socket. If send must go all at once and message is larger than
1420 * send buffering, then hard error. Lock against other senders. If must go
1421 * all at once and not enough room now, then inform user that this would
1422 * block and do nothing. Otherwise, if nonblocking, send as much as
1423 * possible. The data to be sent is described by "uio" if nonzero, otherwise
1424 * by the mbuf chain "top" (which must be null if uio is not). Data provided
1425 * in mbuf chain must be small enough to send all at once.
1427 * Returns nonzero on error, timeout or signal; callers must check for short
1428 * counts if EINTR/ERESTART are returned. Data and control buffers are freed
1429 * on return.
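/*
 * Illustrative sketch (not compiled in): a kernel consumer driving the
 * sosend() wrapper (defined below) with a uio describing a kernel
 * buffer; 'buf', 'len', 'so' and 'td' are hypothetical.
 */
#if 0
	struct uio auio;
	struct iovec aiov;
	int error;

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = td;
	error = sosend(so, NULL, &auio, NULL, NULL, 0, td);
#endif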
1432 sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio,
1433 struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
1437 int clen = 0, error, dontroute;
1438 int atomic = sosendallatonce(so) || top;
1441 resid = uio->uio_resid;
1443 resid = top->m_pkthdr.len;
1445 * In theory resid should be unsigned. However, space must be
1446 * signed, as it might be less than 0 if we over-committed, and we
1447 * must use a signed comparison of space and resid. On the other
1448 * hand, a negative resid causes us to loop sending 0-length
1449 * segments to the protocol.
1451 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
1452 * type sockets since that's an error.
1454 if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
1460 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
1461 (so->so_proto->pr_flags & PR_ATOMIC);
1463 td->td_ru.ru_msgsnd++;
1464 if (control != NULL)
1465 clen = control->m_len;
1467 error = sblock(&so->so_snd, SBLOCKWAIT(flags));
1473 SOCKBUF_LOCK(&so->so_snd);
1474 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
1475 SOCKBUF_UNLOCK(&so->so_snd);
1480 error = so->so_error;
1482 SOCKBUF_UNLOCK(&so->so_snd);
1485 if ((so->so_state & SS_ISCONNECTED) == 0) {
1487 * `sendto' and `sendmsg' are allowed on a connection-
1488 * based socket if it supports implied connect.
1489 * Return ENOTCONN if not connected and no address is
1492 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
1493 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
1494 if ((so->so_state & SS_ISCONFIRMING) == 0 &&
1495 !(resid == 0 && clen != 0)) {
1496 SOCKBUF_UNLOCK(&so->so_snd);
1500 } else if (addr == NULL) {
1501 SOCKBUF_UNLOCK(&so->so_snd);
1502 if (so->so_proto->pr_flags & PR_CONNREQUIRED)
1505 error = EDESTADDRREQ;
1509 space = sbspace(&so->so_snd);
1510 if (flags & MSG_OOB)
1512 if ((atomic && resid > so->so_snd.sb_hiwat) ||
1513 clen > so->so_snd.sb_hiwat) {
1514 SOCKBUF_UNLOCK(&so->so_snd);
1518 if (space < resid + clen &&
1519 (atomic || space < so->so_snd.sb_lowat || space < clen)) {
1520 if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO)) {
1521 SOCKBUF_UNLOCK(&so->so_snd);
1522 error = EWOULDBLOCK;
1525 error = sbwait(&so->so_snd);
1526 SOCKBUF_UNLOCK(&so->so_snd);
1531 SOCKBUF_UNLOCK(&so->so_snd);
1536 if (flags & MSG_EOR)
1537 top->m_flags |= M_EOR;
1540 * Copy the data from userland into an mbuf
1541 * chain. If resid is 0, which can happen
1542 * only if we have control to send, then
1543 * a single empty mbuf is returned. This
1544 * is a workaround to prevent protocol send
1545 * functions from exiting with a NULL mbuf chain.
1547 top = m_uiotombuf(uio, M_WAITOK, space,
1548 (atomic ? max_hdr : 0),
1549 (atomic ? M_PKTHDR : 0) |
1550 ((flags & MSG_EOR) ? M_EOR : 0));
1552 error = EFAULT; /* only possible error */
1555 space -= resid - uio->uio_resid;
1556 resid = uio->uio_resid;
1560 so->so_options |= SO_DONTROUTE;
1564 * XXX all the SBS_CANTSENDMORE checks previously
1565 * done could be out of date. We could have received
1566 * a reset packet in an interrupt or maybe we slept
1567 * while doing page faults in uiomove() etc. We
1568 * could probably recheck again inside the locking
1569 * protection here, but there are probably other
1570 * places that this also happens. We must rethink
1571 * this.
1574 error = (*so->so_proto->pr_usrreqs->pru_send)(so,
1575 (flags & MSG_OOB) ? PRUS_OOB :
1577 * If the user set MSG_EOF, the protocol understands
1578 * this flag, and there is nothing left to send, then use
1579 * PRU_SEND_EOF instead of PRU_SEND.
1581 ((flags & MSG_EOF) &&
1582 (so->so_proto->pr_flags & PR_IMPLOPCL) &&
1585 /* If there is more to send set PRUS_MORETOCOME. */
1586 (flags & MSG_MORETOCOME) ||
1587 (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
1588 top, addr, control, td);
1591 so->so_options &= ~SO_DONTROUTE;
1599 } while (resid && space > 0);
1603 sbunlock(&so->so_snd);
1607 if (control != NULL)
1613 sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
1614 struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
1618 CURVNET_SET(so->so_vnet);
1619 if (!SOLISTENING(so))
1620 error = so->so_proto->pr_usrreqs->pru_sosend(so, addr, uio,
1621 top, control, flags, td);
1632 * The part of soreceive() that implements reading non-inline out-of-band
1633 * data from a socket. For more complete comments, see soreceive(), from
1634 * which this code originated.
1636 * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is
1637 * unable to return an mbuf chain to the caller.
1640 soreceive_rcvoob(struct socket *so, struct uio *uio, int flags)
1642 struct protosw *pr = so->so_proto;
1646 KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0"));
1649 m = m_get(M_WAITOK, MT_DATA);
1650 error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
1654 error = uiomove(mtod(m, void *),
1655 (int) min(uio->uio_resid, m->m_len), uio);
1657 } while (uio->uio_resid && error == 0 && m);
1665 * Following replacement or removal of the first mbuf on the first mbuf chain
1666 * of a socket buffer, push necessary state changes back into the socket
1667 * buffer so that other consumers see the values consistently. 'nextrecord'
1668 * is the caller's locally stored copy of the original value of
1669 * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes.
1670 * NOTE: 'nextrecord' may be NULL.
1672 static __inline void
1673 sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord)
1676 SOCKBUF_LOCK_ASSERT(sb);
1678 * First, update for the new value of nextrecord. If necessary, make
1679 * it the first record.
1681 if (sb->sb_mb != NULL)
1682 sb->sb_mb->m_nextpkt = nextrecord;
1684 sb->sb_mb = nextrecord;
1687 * Now update any dependent socket buffer fields to reflect the new
1688 * state. This is an expanded inline of SB_EMPTY_FIXUP(), with the
1689 * addition of a second clause that takes care of the case where
1690 * sb_mb has been updated, but remains the last record.
1692 if (sb->sb_mb == NULL) {
1693 sb->sb_mbtail = NULL;
1694 sb->sb_lastrecord = NULL;
1695 } else if (sb->sb_mb->m_nextpkt == NULL)
1696 sb->sb_lastrecord = sb->sb_mb;
1700 * Implement receive operations on a socket. We depend on the way that
1701 * records are added to the sockbuf by sbappend. In particular, each record
1702 * (mbufs linked through m_next) must begin with an address if the protocol
1703 * so specifies, followed by an optional mbuf or mbufs containing ancillary
1704 * data, and then zero or more mbufs of data. In order to allow parallelism
1705 * between network receive and copying to user space, as well as avoid
1706 * sleeping with a mutex held, we release the socket buffer mutex during the
1707 * user space copy. Although the sockbuf is locked, new data may still be
1708 * appended, and thus we must maintain consistency of the sockbuf during that
1709 * time.
1711 * The caller may receive the data as a single mbuf chain by supplying an
1712 * mbuf **mp0 for use in returning the chain. The uio is then used only for
1713 * the count in uio_resid.
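/*
 * Illustrative sketch of one record in the receive buffer (see the
 * sbappend*() family for the authoritative construction):
 *
 *	sb_mb -> [MT_SONAME] --m_next--> [MT_CONTROL] --m_next--> [MT_DATA] ...
 *	             |
 *	         m_nextpkt
 *	             |
 *	             v
 *	         [first mbuf of the next record]
 */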
1716 soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio,
1717 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
1719 struct mbuf *m, **mp;
1720 int flags, error, offset;
1722 struct protosw *pr = so->so_proto;
1723 struct mbuf *nextrecord;
1725 ssize_t orig_resid = uio->uio_resid;
1730 if (controlp != NULL)
1733 flags = *flagsp &~ MSG_EOR;
1736 if (flags & MSG_OOB)
1737 return (soreceive_rcvoob(so, uio, flags));
1740 if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING)
1741 && uio->uio_resid) {
1743 (*pr->pr_usrreqs->pru_rcvd)(so, 0);
1746 error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
1751 SOCKBUF_LOCK(&so->so_rcv);
1752 m = so->so_rcv.sb_mb;
1754 * If we have less data than requested, block awaiting more (subject
1755 * to any timeout) if:
1756 * 1. the current count is less than the low water mark, or
1757 * 2. MSG_DONTWAIT is not set
1759 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
1760 sbavail(&so->so_rcv) < uio->uio_resid) &&
1761 sbavail(&so->so_rcv) < so->so_rcv.sb_lowat &&
1762 m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
1763 KASSERT(m != NULL || !sbavail(&so->so_rcv),
1764 ("receive: m == %p sbavail == %u",
1765 m, sbavail(&so->so_rcv)));
1769 error = so->so_error;
1770 if ((flags & MSG_PEEK) == 0)
1772 SOCKBUF_UNLOCK(&so->so_rcv);
1775 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1776 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1778 SOCKBUF_UNLOCK(&so->so_rcv);
1783 for (; m != NULL; m = m->m_next)
1784 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
1785 m = so->so_rcv.sb_mb;
1788 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
1789 (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
1790 SOCKBUF_UNLOCK(&so->so_rcv);
1794 if (uio->uio_resid == 0) {
1795 SOCKBUF_UNLOCK(&so->so_rcv);
1798 if ((so->so_state & SS_NBIO) ||
1799 (flags & (MSG_DONTWAIT|MSG_NBIO))) {
1800 SOCKBUF_UNLOCK(&so->so_rcv);
1801 error = EWOULDBLOCK;
1804 SBLASTRECORDCHK(&so->so_rcv);
1805 SBLASTMBUFCHK(&so->so_rcv);
1806 error = sbwait(&so->so_rcv);
1807 SOCKBUF_UNLOCK(&so->so_rcv);
1814 * From this point onward, we maintain 'nextrecord' as a cache of the
1815 * pointer to the next record in the socket buffer. We must keep the
1816 * various socket buffer pointers and local stack versions of the
1817 * pointers in sync, pushing out modifications before dropping the
1818 * socket buffer mutex, and re-reading them when picking it up.
1820 * Otherwise, we will race with the network stack appending new data
1821 * or records onto the socket buffer by using inconsistent/stale
1822 * versions of the field, possibly resulting in socket buffer
1825 * By holding the high-level sblock(), we prevent simultaneous
1826 * readers from pulling off the front of the socket buffer.
1828 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1830 uio->uio_td->td_ru.ru_msgrcv++;
1831 KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
1832 SBLASTRECORDCHK(&so->so_rcv);
1833 SBLASTMBUFCHK(&so->so_rcv);
1834 nextrecord = m->m_nextpkt;
1835 if (pr->pr_flags & PR_ADDR) {
1836 KASSERT(m->m_type == MT_SONAME,
1837 ("m->m_type == %d", m->m_type));
1840 *psa = sodupsockaddr(mtod(m, struct sockaddr *),
1842 if (flags & MSG_PEEK) {
1845 sbfree(&so->so_rcv, m);
1846 so->so_rcv.sb_mb = m_free(m);
1847 m = so->so_rcv.sb_mb;
1848 sockbuf_pushsync(&so->so_rcv, nextrecord);
1853 * Process one or more MT_CONTROL mbufs present before any data mbufs
1854 * in the first mbuf chain on the socket buffer. If MSG_PEEK, we
1855 * just copy the data; if !MSG_PEEK, we call into the protocol to
1856 * perform externalization (or freeing if controlp == NULL).
1858 if (m != NULL && m->m_type == MT_CONTROL) {
1859 struct mbuf *cm = NULL, *cmn;
1860 struct mbuf **cme = &cm;
1863 if (flags & MSG_PEEK) {
1864 if (controlp != NULL) {
1865 *controlp = m_copym(m, 0, m->m_len,
1867 controlp = &(*controlp)->m_next;
1871 sbfree(&so->so_rcv, m);
1872 so->so_rcv.sb_mb = m->m_next;
1875 cme = &(*cme)->m_next;
1876 m = so->so_rcv.sb_mb;
1878 } while (m != NULL && m->m_type == MT_CONTROL);
1879 if ((flags & MSG_PEEK) == 0)
1880 sockbuf_pushsync(&so->so_rcv, nextrecord);
1881 while (cm != NULL) {
1884 if (pr->pr_domain->dom_externalize != NULL) {
1885 SOCKBUF_UNLOCK(&so->so_rcv);
1887 error = (*pr->pr_domain->dom_externalize)
1888 (cm, controlp, flags);
1889 SOCKBUF_LOCK(&so->so_rcv);
1890 } else if (controlp != NULL)
1894 if (controlp != NULL) {
1896 while (*controlp != NULL)
1897 controlp = &(*controlp)->m_next;
1902 nextrecord = so->so_rcv.sb_mb->m_nextpkt;
1904 nextrecord = so->so_rcv.sb_mb;
1908 if ((flags & MSG_PEEK) == 0) {
1909 KASSERT(m->m_nextpkt == nextrecord,
1910 ("soreceive: post-control, nextrecord !sync"));
1911 if (nextrecord == NULL) {
1912 KASSERT(so->so_rcv.sb_mb == m,
1913 ("soreceive: post-control, sb_mb!=m"));
1914 KASSERT(so->so_rcv.sb_lastrecord == m,
1915 ("soreceive: post-control, lastrecord!=m"));
1919 if (type == MT_OOBDATA)
1922 if ((flags & MSG_PEEK) == 0) {
1923 KASSERT(so->so_rcv.sb_mb == nextrecord,
1924 ("soreceive: sb_mb != nextrecord"));
1925 if (so->so_rcv.sb_mb == NULL) {
1926 KASSERT(so->so_rcv.sb_lastrecord == NULL,
1927 ("soreceive: sb_lastrecord != NULL"));
1931 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1932 SBLASTRECORDCHK(&so->so_rcv);
1933 SBLASTMBUFCHK(&so->so_rcv);
1936 * Now continue to read any data mbufs off of the head of the socket
1937 * buffer until the read request is satisfied. Note that 'type' is
1938 * used to store the type of any mbuf reads that have happened so far
1939 * such that soreceive() can stop reading if the type changes, which
1940 * causes soreceive() to return only one of regular data and inline
1941 * out-of-band data in a single socket receive operation.
1945 while (m != NULL && !(m->m_flags & M_NOTAVAIL) && uio->uio_resid > 0
1948 * If the type of mbuf has changed since the last mbuf
1949 * examined ('type'), end the receive operation.
1951 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1952 if (m->m_type == MT_OOBDATA || m->m_type == MT_CONTROL) {
1953 if (type != m->m_type)
1955 } else if (type == MT_OOBDATA)
1958 KASSERT(m->m_type == MT_DATA,
1959 ("m->m_type == %d", m->m_type));
1960 so->so_rcv.sb_state &= ~SBS_RCVATMARK;
1961 len = uio->uio_resid;
1962 if (so->so_oobmark && len > so->so_oobmark - offset)
1963 len = so->so_oobmark - offset;
1964 if (len > m->m_len - moff)
1965 len = m->m_len - moff;
1967 * If mp is set, just pass back the mbufs. Otherwise copy
1968 * them out via the uio, then free. Sockbuf must be
1969 * consistent here (it points to the current mbuf, which points to the
1970 * next record) when we drop priority; we must note any additions
1971 * to the sockbuf when we block interrupts again.
1974 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1975 SBLASTRECORDCHK(&so->so_rcv);
1976 SBLASTMBUFCHK(&so->so_rcv);
1977 SOCKBUF_UNLOCK(&so->so_rcv);
1978 error = uiomove(mtod(m, char *) + moff, (int)len, uio);
1979 SOCKBUF_LOCK(&so->so_rcv);
1982 * The MT_SONAME mbuf has already been removed
1983 * from the record, so it is necessary to
1984 * remove the data mbufs, if any, to preserve
1985 * the invariant in the case of PR_ADDR that
1986 * requires MT_SONAME mbufs at the head of
1987 * each record.
1989 if (pr->pr_flags & PR_ATOMIC &&
1990 ((flags & MSG_PEEK) == 0))
1991 (void)sbdroprecord_locked(&so->so_rcv);
1992 SOCKBUF_UNLOCK(&so->so_rcv);
1996 uio->uio_resid -= len;
1997 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1998 if (len == m->m_len - moff) {
1999 if (m->m_flags & M_EOR)
2001 if (flags & MSG_PEEK) {
2005 nextrecord = m->m_nextpkt;
2006 sbfree(&so->so_rcv, m);
2008 m->m_nextpkt = NULL;
2011 so->so_rcv.sb_mb = m = m->m_next;
2014 so->so_rcv.sb_mb = m_free(m);
2015 m = so->so_rcv.sb_mb;
2017 sockbuf_pushsync(&so->so_rcv, nextrecord);
2018 SBLASTRECORDCHK(&so->so_rcv);
2019 SBLASTMBUFCHK(&so->so_rcv);
2022 if (flags & MSG_PEEK)
2026 if (flags & MSG_DONTWAIT) {
2027 *mp = m_copym(m, 0, len,
2031 * m_copym() couldn't
2032 * allocate an mbuf.
2033 * Adjust uio_resid back
2034 * (it was adjusted
2035 * down by len bytes,
2036 * which we didn't end
2037 * up "copying" over).
2039 uio->uio_resid += len;
2043 SOCKBUF_UNLOCK(&so->so_rcv);
2044 *mp = m_copym(m, 0, len,
2046 SOCKBUF_LOCK(&so->so_rcv);
2049 sbcut_locked(&so->so_rcv, len);
2052 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2053 if (so->so_oobmark) {
2054 if ((flags & MSG_PEEK) == 0) {
2055 so->so_oobmark -= len;
2056 if (so->so_oobmark == 0) {
2057 so->so_rcv.sb_state |= SBS_RCVATMARK;
2062 if (offset == so->so_oobmark)
2066 if (flags & MSG_EOR)
2069 * If the MSG_WAITALL flag is set (for non-atomic socket), we
2070 * must not quit until "uio->uio_resid == 0" or an error
2071 * termination. If a signal/timeout occurs, return with a
2072 * short count but without error. Keep sockbuf locked
2073 * against other readers.
2075 while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
2076 !sosendallatonce(so) && nextrecord == NULL) {
2077 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2079 so->so_rcv.sb_state & SBS_CANTRCVMORE)
2082 * Notify the protocol that some data has been
2083 * drained before blocking.
2085 if (pr->pr_flags & PR_WANTRCVD) {
2086 SOCKBUF_UNLOCK(&so->so_rcv);
2088 (*pr->pr_usrreqs->pru_rcvd)(so, flags);
2089 SOCKBUF_LOCK(&so->so_rcv);
2091 SBLASTRECORDCHK(&so->so_rcv);
2092 SBLASTMBUFCHK(&so->so_rcv);
2094 * We could have received some data while we were notifying
2095 * the protocol. Skip blocking in this case.
2097 if (so->so_rcv.sb_mb == NULL) {
2098 error = sbwait(&so->so_rcv);
2100 SOCKBUF_UNLOCK(&so->so_rcv);
2104 m = so->so_rcv.sb_mb;
2106 nextrecord = m->m_nextpkt;
2110 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2111 if (m != NULL && pr->pr_flags & PR_ATOMIC) {
2113 if ((flags & MSG_PEEK) == 0)
2114 (void) sbdroprecord_locked(&so->so_rcv);
2116 if ((flags & MSG_PEEK) == 0) {
2119 * First part is an inline SB_EMPTY_FIXUP(). Second
2120 * part makes sure sb_lastrecord is up-to-date if
2121 * there is still data in the socket buffer.
2123 so->so_rcv.sb_mb = nextrecord;
2124 if (so->so_rcv.sb_mb == NULL) {
2125 so->so_rcv.sb_mbtail = NULL;
2126 so->so_rcv.sb_lastrecord = NULL;
2127 } else if (nextrecord->m_nextpkt == NULL)
2128 so->so_rcv.sb_lastrecord = nextrecord;
2130 SBLASTRECORDCHK(&so->so_rcv);
2131 SBLASTMBUFCHK(&so->so_rcv);
2133 * If soreceive() is being done from the socket callback,
2134 * then we don't need to generate an ACK to the peer to update the
2135 * window, since the ACK will be generated on return to TCP.
2137 if (!(flags & MSG_SOCALLBCK) &&
2138 (pr->pr_flags & PR_WANTRCVD)) {
2139 SOCKBUF_UNLOCK(&so->so_rcv);
2141 (*pr->pr_usrreqs->pru_rcvd)(so, flags);
2142 SOCKBUF_LOCK(&so->so_rcv);
2145 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2146 if (orig_resid == uio->uio_resid && orig_resid &&
2147 (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
2148 SOCKBUF_UNLOCK(&so->so_rcv);
2151 SOCKBUF_UNLOCK(&so->so_rcv);
2156 sbunlock(&so->so_rcv);
2161 * Optimized version of soreceive() for stream (TCP) sockets.
2162 * XXXAO: (MSG_WAITALL | MSG_PEEK) isn't properly handled.
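/*
 * Protocols opt in by pointing pru_soreceive at this function; for
 * example, TCP can select it at boot time via the
 * net.inet.tcp.soreceive_stream loader tunable.
 */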
2165 soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio,
2166 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2168 int len = 0, error = 0, flags, oresid;
2170 struct mbuf *m, *n = NULL;
2172 /* We only do stream sockets. */
2173 if (so->so_type != SOCK_STREAM)
2177 if (controlp != NULL)
2180 flags = *flagsp &~ MSG_EOR;
2183 if (flags & MSG_OOB)
2184 return (soreceive_rcvoob(so, uio, flags));
2190 /* Prevent other readers from entering the socket. */
2191 error = sblock(sb, SBLOCKWAIT(flags));
2196 /* Easy one, no space to copyout anything. */
2197 if (uio->uio_resid == 0) {
2201 oresid = uio->uio_resid;
2203 /* We will never ever get anything unless we are or were connected. */
2204 if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {
2205 error = ENOTCONN;
2206 goto out;
2207 }
2208 restart:
2209 SOCKBUF_LOCK(sb);
2210 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2212 /* Abort if socket has reported problems. */
2213 if (so->so_error) {
2214 if (sbavail(sb) > 0)
2215 goto deliver;
2216 if (oresid > uio->uio_resid)
2217 goto out;
2218 error = so->so_error;
2219 if (!(flags & MSG_PEEK))
2220 so->so_error = 0;
2221 goto out;
2222 }
2224 /* Door is closed. Deliver what is left, if any. */
2225 if (sb->sb_state & SBS_CANTRCVMORE) {
2226 if (sbavail(sb) > 0)
2227 goto deliver;
2228 else
2229 goto out;
2230 }
2232 /* Socket buffer is empty and we shall not block. */
2233 if (sbavail(sb) == 0 &&
2234 ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) {
2235 error = EAGAIN;
2236 goto out;
2237 }
2239 /* Socket buffer got some data that we shall deliver now. */
2240 if (sbavail(sb) > 0 && !(flags & MSG_WAITALL) &&
2241 ((so->so_state & SS_NBIO) ||
2242 (flags & (MSG_DONTWAIT|MSG_NBIO)) ||
2243 sbavail(sb) >= sb->sb_lowat ||
2244 sbavail(sb) >= uio->uio_resid ||
2245 sbavail(sb) >= sb->sb_hiwat)) {
2246 goto deliver;
2247 }
2249 /* On MSG_WAITALL we must wait until all data or error arrives. */
2250 if ((flags & MSG_WAITALL) &&
2251 (sbavail(sb) >= uio->uio_resid || sbavail(sb) >= sb->sb_hiwat))
2252 goto deliver;
2255 * Wait and block until (more) data comes in.
2256 * NB: Drops the sockbuf lock during wait.
2257 */
2258 error = sbwait(sb);
2259 if (error)
2260 goto out;
2261 goto restart;
2263 deliver:
2264 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2265 KASSERT(sbavail(sb) > 0, ("%s: sockbuf empty", __func__));
2266 KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__));
2269 if (uio->uio_td)
2270 uio->uio_td->td_ru.ru_msgrcv++;
2272 /* Fill uio until full or current end of socket buffer is reached. */
2273 len = min(uio->uio_resid, sbavail(sb));
2275 /* Dequeue as many mbufs as possible. */
2276 if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) {
2280 m_cat(*mp0, sb->sb_mb);
2282 m != NULL && m->m_len <= len;
2284 KASSERT(!(m->m_flags & M_NOTAVAIL),
2285 ("%s: m %p not available", __func__, m));
2287 uio->uio_resid -= m->m_len;
2293 sb->sb_lastrecord = sb->sb_mb;
2294 if (sb->sb_mb == NULL)
2297 /* Copy the remainder. */
2299 KASSERT(sb->sb_mb != NULL,
2300 ("%s: len > 0 && sb->sb_mb empty", __func__));
2302 m = m_copym(sb->sb_mb, 0, len, M_NOWAIT);
2304 len = 0; /* Don't flush data from sockbuf. */
2306 uio->uio_resid -= len;
2317 /* NB: Must unlock socket buffer as uiomove may sleep. */
2318 SOCKBUF_UNLOCK(sb);
2319 error = m_mbuftouio(uio, sb->sb_mb, len);
2320 SOCKBUF_LOCK(sb);
2324 SBLASTRECORDCHK(sb);
2328 * Remove the delivered data from the socket buffer unless we
2329 * were only peeking.
2331 if (!(flags & MSG_PEEK)) {
2333 sbdrop_locked(sb, len);
2335 /* Notify protocol that we drained some data. */
2336 if ((so->so_proto->pr_flags & PR_WANTRCVD) &&
2337 (((flags & MSG_WAITALL) && uio->uio_resid > 0) ||
2338 !(flags & MSG_SOCALLBCK))) {
2339 SOCKBUF_UNLOCK(sb);
2340 VNET_SO_ASSERT(so);
2341 (*so->so_proto->pr_usrreqs->pru_rcvd)(so, flags);
2342 SOCKBUF_LOCK(sb);
2343 }
2347 * For MSG_WAITALL we may have to loop again and wait for
2348 * more data to come in.
2350 if ((flags & MSG_WAITALL) && uio->uio_resid > 0)
2352 out:
2353 SOCKBUF_LOCK_ASSERT(sb);
2354 SBLASTRECORDCHK(sb);
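/*
 * [Editor's example, not part of the original file.]  A stream protocol
 * opts into soreceive_stream() by pointing pru_soreceive at it in its
 * pr_usrreqs; TCP can be switched to it at boot via the
 * net.inet.tcp.soreceive_stream loader tunable, if memory serves.  A
 * minimal sketch, assuming the file's usual kernel headers and a
 * hypothetical protocol "foo":
 */
static struct pr_usrreqs foo_stream_usrreqs_sketch = {
	.pru_soreceive =	soreceive_stream,
	/* ... remaining entries as for any stream protocol ... */
};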
2362 * Optimized version of soreceive() for simple datagram cases from userspace.
2363 * Unlike in the stream case, we're able to drop a datagram if copyout()
2364 * fails, and because we handle datagrams atomically, we don't need to use a
2365 * sleep lock to prevent I/O interlacing.
2368 soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio,
2369 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2371 struct mbuf *m, *m2;
2374 struct protosw *pr = so->so_proto;
2375 struct mbuf *nextrecord;
2379 if (controlp != NULL)
2382 flags = *flagsp &~ MSG_EOR;
2387 * For any complicated cases, fall back to the full
2388 * soreceive_generic().
2390 if (mp0 != NULL || (flags & MSG_PEEK) || (flags & MSG_OOB))
2391 return (soreceive_generic(so, psa, uio, mp0, controlp,
2395 * Enforce restrictions on use.
2397 KASSERT((pr->pr_flags & PR_WANTRCVD) == 0,
2398 ("soreceive_dgram: wantrcvd"));
2399 KASSERT(pr->pr_flags & PR_ATOMIC, ("soreceive_dgram: !atomic"));
2400 KASSERT((so->so_rcv.sb_state & SBS_RCVATMARK) == 0,
2401 ("soreceive_dgram: SBS_RCVATMARK"));
2402 KASSERT((so->so_proto->pr_flags & PR_CONNREQUIRED) == 0,
2403 ("soreceive_dgram: P_CONNREQUIRED"));
2406 * Loop blocking while waiting for a datagram.
2408 SOCKBUF_LOCK(&so->so_rcv);
2409 while ((m = so->so_rcv.sb_mb) == NULL) {
2410 KASSERT(sbavail(&so->so_rcv) == 0,
2411 ("soreceive_dgram: sb_mb NULL but sbavail %u",
2412 sbavail(&so->so_rcv)));
2413 if (so->so_error) {
2414 error = so->so_error;
2415 so->so_error = 0;
2416 SOCKBUF_UNLOCK(&so->so_rcv);
2417 return (error);
2418 }
2419 if (so->so_rcv.sb_state & SBS_CANTRCVMORE ||
2420 uio->uio_resid == 0) {
2421 SOCKBUF_UNLOCK(&so->so_rcv);
2422 return (0);
2423 }
2424 if ((so->so_state & SS_NBIO) ||
2425 (flags & (MSG_DONTWAIT|MSG_NBIO))) {
2426 SOCKBUF_UNLOCK(&so->so_rcv);
2427 return (EWOULDBLOCK);
2429 SBLASTRECORDCHK(&so->so_rcv);
2430 SBLASTMBUFCHK(&so->so_rcv);
2431 error = sbwait(&so->so_rcv);
2432 if (error) {
2433 SOCKBUF_UNLOCK(&so->so_rcv);
2434 return (error);
2435 }
2437 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2439 if (uio->uio_td)
2440 uio->uio_td->td_ru.ru_msgrcv++;
2441 SBLASTRECORDCHK(&so->so_rcv);
2442 SBLASTMBUFCHK(&so->so_rcv);
2443 nextrecord = m->m_nextpkt;
2444 if (nextrecord == NULL) {
2445 KASSERT(so->so_rcv.sb_lastrecord == m,
2446 ("soreceive_dgram: lastrecord != m"));
2449 KASSERT(so->so_rcv.sb_mb->m_nextpkt == nextrecord,
2450 ("soreceive_dgram: m_nextpkt != nextrecord"));
2453 * Pull 'm' and its chain off the front of the packet queue.
2455 so->so_rcv.sb_mb = NULL;
2456 sockbuf_pushsync(&so->so_rcv, nextrecord);
2459 * Walk 'm's chain and free that many bytes from the socket buffer.
2461 for (m2 = m; m2 != NULL; m2 = m2->m_next)
2462 sbfree(&so->so_rcv, m2);
2465 * Do a few last checks before we let go of the lock.
2467 SBLASTRECORDCHK(&so->so_rcv);
2468 SBLASTMBUFCHK(&so->so_rcv);
2469 SOCKBUF_UNLOCK(&so->so_rcv);
2471 if (pr->pr_flags & PR_ADDR) {
2472 KASSERT(m->m_type == MT_SONAME,
2473 ("m->m_type == %d", m->m_type));
2474 if (psa != NULL)
2475 *psa = sodupsockaddr(mtod(m, struct sockaddr *),
2476 M_NOWAIT);
2480 /* XXXRW: Can this happen? */
2485 * Packet to copyout() is now in 'm' and it is disconnected from the
2486 * queue.
2488 * Process one or more MT_CONTROL mbufs present before any data mbufs
2489 * in the first mbuf chain on the socket buffer. We call into the
2490 * protocol to perform externalization (or freeing if controlp ==
2491 * NULL). In some cases there can be only MT_CONTROL mbufs without
2492 * MT_DATA mbufs.
2494 if (m->m_type == MT_CONTROL) {
2495 struct mbuf *cm = NULL, *cmn;
2496 struct mbuf **cme = &cm;
2502 cme = &(*cme)->m_next;
2504 } while (m != NULL && m->m_type == MT_CONTROL);
2505 while (cm != NULL) {
2508 if (pr->pr_domain->dom_externalize != NULL) {
2509 error = (*pr->pr_domain->dom_externalize)
2510 (cm, controlp, flags);
2511 } else if (controlp != NULL)
2515 if (controlp != NULL) {
2516 while (*controlp != NULL)
2517 controlp = &(*controlp)->m_next;
2522 KASSERT(m == NULL || m->m_type == MT_DATA,
2523 ("soreceive_dgram: !data"));
2524 while (m != NULL && uio->uio_resid > 0) {
2525 len = uio->uio_resid;
2526 if (len > m->m_len)
2527 len = m->m_len;
2528 error = uiomove(mtod(m, char *), (int)len, uio);
2533 if (len == m->m_len)
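/*
 * [Editor's example, not part of the original file.]  A datagram protocol
 * can plug soreceive_dgram() into its pr_usrreqs the same way, provided
 * its pr_flags honor the KASSERTs above: PR_ATOMIC set, PR_WANTRCVD and
 * PR_CONNREQUIRED clear.  UDP wires this up in essentially this form; the
 * name below is hypothetical.
 */
static struct pr_usrreqs foo_dgram_usrreqs_sketch = {
	.pru_soreceive =	soreceive_dgram,
	/* ... other entries as for any datagram protocol ... */
};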
2550 soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
2551 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2555 CURVNET_SET(so->so_vnet);
2556 if (!SOLISTENING(so))
2557 error = (so->so_proto->pr_usrreqs->pru_soreceive(so, psa, uio,
2558 mp0, controlp, flagsp));
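/*
 * [Editor's example, not part of the original file.]  soreceive() is the
 * public entry point for in-kernel consumers as well: a kernel caller
 * builds a UIO_SYSSPACE uio and receives into its own buffer.  A sketch
 * with error handling abbreviated; "kread_sketch" is hypothetical.
 */
static int
kread_sketch(struct socket *so, void *buf, size_t len, struct thread *td)
{
	struct uio auio;
	struct iovec aiov;
	int flags = 0;

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = td;
	/* NULL psa/mp0/controlp: we want plain data only. */
	return (soreceive(so, NULL, &auio, NULL, NULL, &flags));
}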
2566 soshutdown(struct socket *so, int how)
2568 struct protosw *pr = so->so_proto;
2569 int error, soerror_enotconn;
2571 if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
2572 return (EINVAL);
2574 soerror_enotconn = 0;
2575 if ((so->so_state &
2576 (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) {
2578 * POSIX mandates that we return ENOTCONN when shutdown(2) is
2579 * invoked on a datagram socket; historically, however, we would
2580 * actually tear the socket down. This is known to be leveraged by
2581 * some applications to unblock a process waiting in recvXXX(2)
2582 * by another process that shares the socket with it. Try to meet
2583 * both backward-compatibility and POSIX requirements by forcing
2584 * ENOTCONN but still asking the protocol to perform pru_shutdown().
2586 if (so->so_type != SOCK_DGRAM)
2587 return (ENOTCONN);
2588 soerror_enotconn = 1;
2591 CURVNET_SET(so->so_vnet);
2592 if (pr->pr_usrreqs->pru_flush != NULL)
2593 (*pr->pr_usrreqs->pru_flush)(so, how);
2594 if (how != SHUT_WR)
2595 sorflush(so);
2596 if (how != SHUT_RD) {
2597 error = (*pr->pr_usrreqs->pru_shutdown)(so);
2598 wakeup(&so->so_timeo);
2599 CURVNET_RESTORE();
2600 return ((error == 0 && soerror_enotconn) ? ENOTCONN : error);
2601 }
2602 wakeup(&so->so_timeo);
2603 CURVNET_RESTORE();
2605 return (soerror_enotconn ? ENOTCONN : 0);
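/*
 * [Editor's example, not part of the original file.]  A standalone
 * userland illustration of the compromise above: on FreeBSD, shutdown(2)
 * on an unconnected datagram socket fails with ENOTCONN, yet the receive
 * side is still torn down, so a thread blocked in recvfrom(2) wakes up
 * and reads EOF (return value 0).  Error checks abbreviated.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *
reader(void *arg)
{
	char buf[64];

	/* Expected to return 0 once shutdown(SHUT_RD) has run. */
	printf("recvfrom: %zd\n",
	    recvfrom(*(int *)arg, buf, sizeof(buf), 0, NULL, NULL));
	return (NULL);
}

int
main(void)
{
	int s = socket(AF_INET, SOCK_DGRAM, 0);
	pthread_t t;

	pthread_create(&t, NULL, reader, &s);
	sleep(1);			/* crude: let the reader block */
	if (shutdown(s, SHUT_RD) == -1 && errno == ENOTCONN)
		printf("shutdown: ENOTCONN, as the comment predicts\n");
	pthread_join(t, NULL);
	return (0);
}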
2609 sorflush(struct socket *so)
2611 struct sockbuf *sb = &so->so_rcv;
2612 struct protosw *pr = so->so_proto;
2618 * In order to avoid calling dom_dispose with the socket buffer mutex
2619 * held, and in order to generally avoid holding the lock for a long
2620 * time, we make a copy of the socket buffer and clear the original
2621 * (except locks, state). The new socket buffer copy won't have
2622 * initialized locks so we can only call routines that won't use or
2623 * assert those locks.
2625 * Dislodge threads currently blocked in receive and wait to acquire
2626 * a lock against other simultaneous readers before clearing the
2627 * socket buffer. Don't let our acquire be interrupted by a signal
2628 * despite any existing socket disposition on interruptible waiting.
2631 (void) sblock(sb, SBL_WAIT | SBL_NOINTR);
2634 * Invalidate/clear most of the sockbuf structure, but leave selinfo
2635 * and mutex data unchanged.
2638 bzero(&aso, sizeof(aso));
2639 aso.so_pcb = so->so_pcb;
2640 bcopy(&sb->sb_startzero, &aso.so_rcv.sb_startzero,
2641 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
2642 bzero(&sb->sb_startzero,
2643 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
2648 * Dispose of special rights and flush the copied socket. Don't call
2649 * any unsafe routines (that rely on locks being initialized) on aso.
2651 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
2652 (*pr->pr_domain->dom_dispose)(&aso);
2653 sbrelease_internal(&aso.so_rcv, so);
2657 * Wrapper for Socket established helper hook.
2658 * Parameters: socket, context of the hook point, hook id.
2661 hhook_run_socket(struct socket *so, void *hctx, int32_t h_id)
2663 struct socket_hhook_data hhook_data = {
2670 CURVNET_SET(so->so_vnet);
2671 HHOOKS_RUN_IF(V_socket_hhh[h_id], &hhook_data, &so->osd);
2674 /* Ugly but needed, since hhooks return void for now */
2675 return (hhook_data.status);
2679 * Perhaps this routine, and sooptcopyout(), below, ought to come in an
2680 * additional variant to handle the case where the option value needs to be
2681 * some kind of integer, but not a specific size. In addition to their use
2682 * here, these functions are also called by the protocol-level pr_ctloutput()
2686 sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
2691 * If the user gives us more than we wanted, we ignore it, but if we
2692 * don't get the minimum length the caller wants, we return EINVAL.
2693 * On success, sopt->sopt_valsize is set to however much we actually
2694 * retrieved.
2696 if ((valsize = sopt->sopt_valsize) < minlen)
2697 return (EINVAL);
2698 if (valsize > len)
2699 sopt->sopt_valsize = valsize = len;
2701 if (sopt->sopt_td != NULL)
2702 return (copyin(sopt->sopt_val, buf, valsize));
2704 bcopy(sopt->sopt_val, buf, valsize);
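/*
 * [Editor's example, not part of the original file.]  Typical use of
 * sooptcopyin() from a protocol's pr_ctloutput handler on the SOPT_SET
 * side; FOO_OPTION and the handler name are hypothetical.
 */
#define	FOO_OPTION	1		/* hypothetical option name */

static int
foo_ctloutput_set_sketch(struct socket *so, struct sockopt *sopt)
{
	int error, optval;

	switch (sopt->sopt_name) {
	case FOO_OPTION:
		/* Require at least an int; extra bytes are ignored. */
		error = sooptcopyin(sopt, &optval, sizeof(optval),
		    sizeof(optval));
		if (error != 0)
			return (error);
		if (optval < 0)
			return (EINVAL);
		/* ... store optval in the protocol's pcb ... */
		return (0);
	default:
		return (ENOPROTOOPT);
	}
}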
2709 * Kernel version of setsockopt(2).
2711 * XXX: optlen is size_t, not socklen_t
2714 so_setsockopt(struct socket *so, int level, int optname, void *optval,
2717 struct sockopt sopt;
2719 sopt.sopt_level = level;
2720 sopt.sopt_name = optname;
2721 sopt.sopt_dir = SOPT_SET;
2722 sopt.sopt_val = optval;
2723 sopt.sopt_valsize = optlen;
2724 sopt.sopt_td = NULL;
2725 return (sosetopt(so, &sopt));
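/*
 * [Editor's example, not part of the original file.]  An in-kernel
 * consumer enabling SO_KEEPALIVE through so_setsockopt(); because sopt_td
 * is NULL, sooptcopyin() will bcopy() from the kernel buffer rather than
 * copyin() from userspace.
 */
static int
enable_keepalive_sketch(struct socket *so)
{
	int one = 1;

	return (so_setsockopt(so, SOL_SOCKET, SO_KEEPALIVE, &one,
	    sizeof(one)));
}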
2729 sosetopt(struct socket *so, struct sockopt *sopt)
2740 CURVNET_SET(so->so_vnet);
2742 if (sopt->sopt_level != SOL_SOCKET) {
2743 if (so->so_proto->pr_ctloutput != NULL) {
2744 error = (*so->so_proto->pr_ctloutput)(so, sopt);
2748 error = ENOPROTOOPT;
2750 switch (sopt->sopt_name) {
2751 case SO_ACCEPTFILTER:
2752 error = accept_filt_setopt(so, sopt);
2758 error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
2763 so->so_linger = l.l_linger;
2765 so->so_options |= SO_LINGER;
2767 so->so_options &= ~SO_LINGER;
2774 case SO_USELOOPBACK:
2784 error = sooptcopyin(sopt, &optval, sizeof optval,
2790 so->so_options |= sopt->sopt_name;
2792 so->so_options &= ~sopt->sopt_name;
2797 error = sooptcopyin(sopt, &optval, sizeof optval,
2802 if (optval < 0 || optval >= rt_numfibs) {
2806 if (((so->so_proto->pr_domain->dom_family == PF_INET) ||
2807 (so->so_proto->pr_domain->dom_family == PF_INET6) ||
2808 (so->so_proto->pr_domain->dom_family == PF_ROUTE)))
2809 so->so_fibnum = optval;
2814 case SO_USER_COOKIE:
2815 error = sooptcopyin(sopt, &val32, sizeof val32,
2819 so->so_user_cookie = val32;
2826 error = sooptcopyin(sopt, &optval, sizeof optval,
2832 * Values < 1 make no sense for any of these options,
2840 error = sbsetopt(so, sopt->sopt_name, optval);
2845 #ifdef COMPAT_FREEBSD32
2846 if (SV_CURPROC_FLAG(SV_ILP32)) {
2847 struct timeval32 tv32;
2849 error = sooptcopyin(sopt, &tv32, sizeof tv32,
2851 CP(tv32, tv, tv_sec);
2852 CP(tv32, tv, tv_usec);
2855 error = sooptcopyin(sopt, &tv, sizeof tv,
2859 if (tv.tv_sec < 0 || tv.tv_usec < 0 ||
2860 tv.tv_usec >= 1000000) {
2864 if (tv.tv_sec > INT32_MAX)
2868 switch (sopt->sopt_name) {
2870 so->so_snd.sb_timeo = val;
2873 so->so_rcv.sb_timeo = val;
2880 error = sooptcopyin(sopt, &extmac, sizeof extmac,
2884 error = mac_setsockopt_label(sopt->sopt_td->td_ucred,
2892 error = sooptcopyin(sopt, &optval, sizeof optval,
2896 if (optval < 0 || optval > SO_TS_CLOCK_MAX) {
2900 so->so_ts_clock = optval;
2903 case SO_MAX_PACING_RATE:
2904 error = sooptcopyin(sopt, &val32, sizeof(val32),
2908 so->so_max_pacing_rate = val32;
2912 if (V_socket_hhh[HHOOK_SOCKET_OPT]->hhh_nhooks > 0)
2913 error = hhook_run_socket(so, sopt,
2916 error = ENOPROTOOPT;
2919 if (error == 0 && so->so_proto->pr_ctloutput != NULL)
2920 (void)(*so->so_proto->pr_ctloutput)(so, sopt);
2928 * Helper routine for getsockopt.
2931 sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
2939 * Documented get behavior is that we always return a value, possibly
2940 * truncated to fit in the user's buffer. Traditional behavior is
2941 * that we always tell the user precisely how much we copied, rather
2942 * than something useful like the total amount we had available for
2943 * her. Note that this interface is not idempotent; the entire
2944 * answer must be generated ahead of time.
2946 valsize = min(len, sopt->sopt_valsize);
2947 sopt->sopt_valsize = valsize;
2948 if (sopt->sopt_val != NULL) {
2949 if (sopt->sopt_td != NULL)
2950 error = copyout(buf, sopt->sopt_val, valsize);
2952 bcopy(buf, sopt->sopt_val, valsize);
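/*
 * [Editor's example, not part of the original file.]  The matching
 * SOPT_GET side: per the comment above, the whole answer is generated
 * first and pushed out in a single sooptcopyout(), which truncates to the
 * user's buffer as needed.  FOO_OPTION is the hypothetical option from
 * the earlier sketch.
 */
static int
foo_ctloutput_get_sketch(struct socket *so, struct sockopt *sopt)
{
	int optval;

	switch (sopt->sopt_name) {
	case FOO_OPTION:
		optval = 0;	/* ... fetch from the protocol's pcb ... */
		return (sooptcopyout(sopt, &optval, sizeof(optval)));
	default:
		return (ENOPROTOOPT);
	}
}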
2958 sogetopt(struct socket *so, struct sockopt *sopt)
2967 CURVNET_SET(so->so_vnet);
2969 if (sopt->sopt_level != SOL_SOCKET) {
2970 if (so->so_proto->pr_ctloutput != NULL)
2971 error = (*so->so_proto->pr_ctloutput)(so, sopt);
2973 error = ENOPROTOOPT;
2977 switch (sopt->sopt_name) {
2978 case SO_ACCEPTFILTER:
2979 error = accept_filt_getopt(so, sopt);
2984 l.l_onoff = so->so_options & SO_LINGER;
2985 l.l_linger = so->so_linger;
2987 error = sooptcopyout(sopt, &l, sizeof l);
2990 case SO_USELOOPBACK:
3002 optval = so->so_options & sopt->sopt_name;
3004 error = sooptcopyout(sopt, &optval, sizeof optval);
3008 optval = so->so_type;
3012 optval = so->so_proto->pr_protocol;
3017 optval = so->so_error;
3023 optval = SOLISTENING(so) ? so->sol_sbsnd_hiwat :
3024 so->so_snd.sb_hiwat;
3028 optval = SOLISTENING(so) ? so->sol_sbrcv_hiwat :
3029 so->so_rcv.sb_hiwat;
3033 optval = SOLISTENING(so) ? so->sol_sbsnd_lowat :
3034 so->so_snd.sb_lowat;
3038 optval = SOLISTENING(so) ? so->sol_sbrcv_lowat :
3039 so->so_rcv.sb_lowat;
3044 tv = sbttotv(sopt->sopt_name == SO_SNDTIMEO ?
3045 so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
3046 #ifdef COMPAT_FREEBSD32
3047 if (SV_CURPROC_FLAG(SV_ILP32)) {
3048 struct timeval32 tv32;
3050 CP(tv, tv32, tv_sec);
3051 CP(tv, tv32, tv_usec);
3052 error = sooptcopyout(sopt, &tv32, sizeof tv32);
3055 error = sooptcopyout(sopt, &tv, sizeof tv);
3060 error = sooptcopyin(sopt, &extmac, sizeof(extmac),
3064 error = mac_getsockopt_label(sopt->sopt_td->td_ucred,
3068 error = sooptcopyout(sopt, &extmac, sizeof extmac);
3076 error = sooptcopyin(sopt, &extmac, sizeof(extmac),
3080 error = mac_getsockopt_peerlabel(
3081 sopt->sopt_td->td_ucred, so, &extmac);
3084 error = sooptcopyout(sopt, &extmac, sizeof extmac);
3090 case SO_LISTENQLIMIT:
3091 optval = SOLISTENING(so) ? so->sol_qlimit : 0;
3095 optval = SOLISTENING(so) ? so->sol_qlen : 0;
3098 case SO_LISTENINCQLEN:
3099 optval = SOLISTENING(so) ? so->sol_incqlen : 0;
3103 optval = so->so_ts_clock;
3106 case SO_MAX_PACING_RATE:
3107 optval = so->so_max_pacing_rate;
3111 if (V_socket_hhh[HHOOK_SOCKET_OPT]->hhh_nhooks > 0)
3112 error = hhook_run_socket(so, sopt,
3115 error = ENOPROTOOPT;
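/*
 * [Editor's example, not part of the original file.]  A kernel caller can
 * drive sogetopt() directly by building a struct sockopt by hand, the
 * mirror image of what so_setsockopt() does for the set direction.
 */
static int
get_sndbuf_sketch(struct socket *so, int *sizep)
{
	struct sockopt sopt;

	sopt.sopt_dir = SOPT_GET;
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_SNDBUF;
	sopt.sopt_val = sizep;
	sopt.sopt_valsize = sizeof(*sizep);
	sopt.sopt_td = NULL;		/* kernel address space */
	return (sogetopt(so, &sopt));
}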
3127 soopt_getm(struct sockopt *sopt, struct mbuf **mp)
3129 struct mbuf *m, *m_prev;
3130 int sopt_size = sopt->sopt_valsize;
3132 MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA);
3135 if (sopt_size > MLEN) {
3136 MCLGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT);
3137 if ((m->m_flags & M_EXT) == 0) {
3141 m->m_len = min(MCLBYTES, sopt_size);
3143 m->m_len = min(MLEN, sopt_size);
3145 sopt_size -= m->m_len;
3150 MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA);
3155 if (sopt_size > MLEN) {
3156 MCLGET(m, sopt->sopt_td != NULL ? M_WAITOK :
3158 if ((m->m_flags & M_EXT) == 0) {
3163 m->m_len = min(MCLBYTES, sopt_size);
3165 m->m_len = min(MLEN, sopt_size);
3167 sopt_size -= m->m_len;
3175 soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
3177 struct mbuf *m0 = m;
3179 if (sopt->sopt_val == NULL)
3181 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
3182 if (sopt->sopt_td != NULL) {
3185 error = copyin(sopt->sopt_val, mtod(m, char *),
3192 bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
3193 sopt->sopt_valsize -= m->m_len;
3194 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
3197 if (m != NULL) /* should have been allocated large enough at ip6_sooptmcopyin() */
3198 panic("ip6_sooptmcopyin");
3203 soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
3205 struct mbuf *m0 = m;
3208 if (sopt->sopt_val == NULL)
3210 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
3211 if (sopt->sopt_td != NULL) {
3214 error = copyout(mtod(m, char *), sopt->sopt_val,
3221 bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
3222 sopt->sopt_valsize -= m->m_len;
3223 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
3224 valsize += m->m_len;
3228 /* a large enough soopt buffer should be supplied from user-land */
3232 sopt->sopt_valsize = valsize;
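/*
 * [Editor's example, not part of the original file.]  The mbuf-based
 * helpers above are used together, e.g. by IPv6 option processing:
 * soopt_getm() sizes an mbuf chain to sopt->sopt_valsize, and
 * soopt_mcopyin() fills it from sopt_val; soopt_mcopyout() is the reverse
 * direction.  A sketch of the inbound path:
 */
static int
sockopt_to_mbuf_sketch(struct sockopt *sopt, struct mbuf **mp)
{
	int error;

	error = soopt_getm(sopt, mp);		/* allocate the chain */
	if (error != 0)
		return (error);
	return (soopt_mcopyin(sopt, *mp));	/* copy option data in */
}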
3237 * sohasoutofband(): protocol notifies socket layer of the arrival of new
3238 * out-of-band data, which will then notify socket consumers.
3241 sohasoutofband(struct socket *so)
3244 if (so->so_sigio != NULL)
3245 pgsigio(&so->so_sigio, SIGURG, 0);
3246 selwakeuppri(&so->so_rdsel, PSOCK);
3250 sopoll(struct socket *so, int events, struct ucred *active_cred,
3255 * We do not need to set or assert curvnet as long as everyone uses
3258 return (so->so_proto->pr_usrreqs->pru_sopoll(so, events, active_cred,
3263 sopoll_generic(struct socket *so, int events, struct ucred *active_cred,
3269 if (SOLISTENING(so)) {
3270 if (!(events & (POLLIN | POLLRDNORM)))
3272 else if (!TAILQ_EMPTY(&so->sol_comp))
3273 revents = events & (POLLIN | POLLRDNORM);
3275 selrecord(td, &so->so_rdsel);
3280 SOCKBUF_LOCK(&so->so_snd);
3281 SOCKBUF_LOCK(&so->so_rcv);
3282 if (events & (POLLIN | POLLRDNORM))
3283 if (soreadabledata(so))
3284 revents |= events & (POLLIN | POLLRDNORM);
3285 if (events & (POLLOUT | POLLWRNORM))
3286 if (sowriteable(so))
3287 revents |= events & (POLLOUT | POLLWRNORM);
3288 if (events & (POLLPRI | POLLRDBAND))
3289 if (so->so_oobmark ||
3290 (so->so_rcv.sb_state & SBS_RCVATMARK))
3291 revents |= events & (POLLPRI | POLLRDBAND);
3292 if ((events & POLLINIGNEOF) == 0) {
3293 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
3294 revents |= events & (POLLIN | POLLRDNORM);
3295 if (so->so_snd.sb_state & SBS_CANTSENDMORE)
3301 (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
3302 selrecord(td, &so->so_rdsel);
3303 so->so_rcv.sb_flags |= SB_SEL;
3305 if (events & (POLLOUT | POLLWRNORM)) {
3306 selrecord(td, &so->so_wrsel);
3307 so->so_snd.sb_flags |= SB_SEL;
3310 SOCKBUF_UNLOCK(&so->so_rcv);
3311 SOCKBUF_UNLOCK(&so->so_snd);
3318 soo_kqfilter(struct file *fp, struct knote *kn)
3320 struct socket *so = kn->kn_fp->f_data;
3324 switch (kn->kn_filter) {
3326 kn->kn_fop = &soread_filtops;
3327 knl = &so->so_rdsel.si_note;
3331 kn->kn_fop = &sowrite_filtops;
3332 knl = &so->so_wrsel.si_note;
3336 kn->kn_fop = &soempty_filtops;
3337 knl = &so->so_wrsel.si_note;
3345 if (SOLISTENING(so)) {
3346 knlist_add(knl, kn, 1);
3349 knlist_add(knl, kn, 1);
3350 sb->sb_flags |= SB_KNOTE;
3358 * Some routines that return EOPNOTSUPP for entry points that are not
3359 * supported by a protocol. Fill in as needed.
3362 pru_accept_notsupp(struct socket *so, struct sockaddr **nam)
3369 pru_aio_queue_notsupp(struct socket *so, struct kaiocb *job)
3376 pru_attach_notsupp(struct socket *so, int proto, struct thread *td)
3383 pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
3390 pru_bindat_notsupp(int fd, struct socket *so, struct sockaddr *nam,
3398 pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
3405 pru_connectat_notsupp(int fd, struct socket *so, struct sockaddr *nam,
3413 pru_connect2_notsupp(struct socket *so1, struct socket *so2)
3420 pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data,
3421 struct ifnet *ifp, struct thread *td)
3428 pru_disconnect_notsupp(struct socket *so)
3435 pru_listen_notsupp(struct socket *so, int backlog, struct thread *td)
3442 pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam)
3449 pru_rcvd_notsupp(struct socket *so, int flags)
3456 pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags)
3463 pru_send_notsupp(struct socket *so, int flags, struct mbuf *m,
3464 struct sockaddr *addr, struct mbuf *control, struct thread *td)
3471 pru_ready_notsupp(struct socket *so, struct mbuf *m, int count)
3474 return (EOPNOTSUPP);
3478 * This isn't really a ``null'' operation, but it's the default one and
3479 * doesn't do anything destructive.
3482 pru_sense_null(struct socket *so, struct stat *sb)
3485 sb->st_blksize = so->so_snd.sb_hiwat;
3490 pru_shutdown_notsupp(struct socket *so)
3497 pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam)
3504 pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
3505 struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
3512 pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
3513 struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
3520 pru_sopoll_notsupp(struct socket *so, int events, struct ucred *cred,
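/*
 * [Editor's example, not part of the original file.]  Protocols fill the
 * pr_usrreqs entry points they do not implement with the stubs above,
 * e.g. a minimal datagram transport with no listen/accept support:
 */
static struct pr_usrreqs mini_usrreqs_sketch = {
	.pru_accept =	pru_accept_notsupp,
	.pru_connect2 =	pru_connect2_notsupp,
	.pru_listen =	pru_listen_notsupp,
	.pru_rcvoob =	pru_rcvoob_notsupp,
	.pru_sense =	pru_sense_null,		/* harmless default */
	/* ... implemented entries omitted ... */
};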
3528 filt_sordetach(struct knote *kn)
3530 struct socket *so = kn->kn_fp->f_data;
3533 knlist_remove(&so->so_rdsel.si_note, kn, 1);
3534 if (!SOLISTENING(so) && knlist_empty(&so->so_rdsel.si_note))
3535 so->so_rcv.sb_flags &= ~SB_KNOTE;
3536 so_rdknl_unlock(so);
3541 filt_soread(struct knote *kn, long hint)
3545 so = kn->kn_fp->f_data;
3547 if (SOLISTENING(so)) {
3548 SOCK_LOCK_ASSERT(so);
3549 kn->kn_data = so->sol_qlen;
3550 return (!TAILQ_EMPTY(&so->sol_comp));
3553 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
3555 kn->kn_data = sbavail(&so->so_rcv) - so->so_rcv.sb_ctl;
3556 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
3557 kn->kn_flags |= EV_EOF;
3558 kn->kn_fflags = so->so_error;
3560 } else if (so->so_error) /* temporary udp error */
3563 if (kn->kn_sfflags & NOTE_LOWAT) {
3564 if (kn->kn_data >= kn->kn_sdata)
3566 } else if (sbavail(&so->so_rcv) >= so->so_rcv.sb_lowat)
3569 /* This hook returning non-zero indicates an event, not an error */
3570 return (hhook_run_socket(so, NULL, HHOOK_FILT_SOREAD));
3574 filt_sowdetach(struct knote *kn)
3576 struct socket *so = kn->kn_fp->f_data;
3579 knlist_remove(&so->so_wrsel.si_note, kn, 1);
3580 if (!SOLISTENING(so) && knlist_empty(&so->so_wrsel.si_note))
3581 so->so_snd.sb_flags &= ~SB_KNOTE;
3582 so_wrknl_unlock(so);
3587 filt_sowrite(struct knote *kn, long hint)
3591 so = kn->kn_fp->f_data;
3593 if (SOLISTENING(so))
3596 SOCKBUF_LOCK_ASSERT(&so->so_snd);
3597 kn->kn_data = sbspace(&so->so_snd);
3599 hhook_run_socket(so, kn, HHOOK_FILT_SOWRITE);
3601 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
3602 kn->kn_flags |= EV_EOF;
3603 kn->kn_fflags = so->so_error;
3605 } else if (so->so_error) /* temporary udp error */
3607 else if (((so->so_state & SS_ISCONNECTED) == 0) &&
3608 (so->so_proto->pr_flags & PR_CONNREQUIRED))
3610 else if (kn->kn_sfflags & NOTE_LOWAT)
3611 return (kn->kn_data >= kn->kn_sdata);
3613 return (kn->kn_data >= so->so_snd.sb_lowat);
3617 filt_soempty(struct knote *kn, long hint)
3621 so = kn->kn_fp->f_data;
3623 if (SOLISTENING(so))
3626 SOCKBUF_LOCK_ASSERT(&so->so_snd);
3627 kn->kn_data = sbused(&so->so_snd);
3629 if (kn->kn_data == 0)
3636 socheckuid(struct socket *so, uid_t uid)
3641 if (so->so_cred->cr_uid != uid)
3647 * These functions are used by protocols to notify the socket layer (and its
3648 * consumers) of state changes in the sockets driven by protocol-side events.
3652 * Procedures to manipulate the state flags of a socket and do appropriate wakeups.
3654 * The normal sequence from the active (originating) side is that
3655 * soisconnecting() is called during processing of a connect() call, resulting
3656 * in an eventual call to soisconnected() if/when the connection is
3657 * established. When the connection is torn down, soisdisconnecting() is
3658 * called during processing of a disconnect() call, and soisdisconnected() is
3659 * called when the connection to the peer is totally severed. The semantics
3660 * of these routines are such that connectionless protocols can call
3661 * soisconnected() and soisdisconnected() only, bypassing the in-progress
3662 * calls when setting up a ``connection'' takes no time.
3664 * From the passive side, a socket is created with two queues of sockets:
3665 * so_incomp for connections in progress and so_comp for connections already
3666 * made and awaiting user acceptance. As a protocol is preparing incoming
3667 * connections, it creates a socket structure queued on so_incomp by calling
3668 * sonewconn(). When the connection is established, soisconnected() is
3669 * called, and transfers the socket structure to so_comp, making it available
3672 * If a socket is closed with sockets on either so_incomp or so_comp, these
3673 * sockets are dropped.
3675 * If higher-level protocols are implemented in the kernel, the wakeups done
3676 * here will sometimes cause software-interrupt process scheduling.
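/*
 * [Editor's example, not part of the original file.]  The active-side
 * sequence described above, sketched for a hypothetical connection-
 * oriented protocol; a connectionless protocol would skip straight to
 * soisconnected().
 */
static int
foo_connect_sketch(struct socket *so)
{
	soisconnecting(so);	/* mark SS_ISCONNECTING, wake waiters */
	/*
	 * ... transmit the protocol's connection request; when the
	 * handshake completes, the input path calls soisconnected(so),
	 * and teardown uses soisdisconnecting()/soisdisconnected().
	 */
	return (0);
}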
3679 soisconnecting(struct socket *so)
3683 so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
3684 so->so_state |= SS_ISCONNECTING;
3689 soisconnected(struct socket *so)
3693 so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
3694 so->so_state |= SS_ISCONNECTED;
3696 if (so->so_qstate == SQ_INCOMP) {
3697 struct socket *head = so->so_listen;
3700 KASSERT(head, ("%s: so %p on incomp of NULL", __func__, so));
3702 * Promoting a socket from incomplete queue to complete, we
3703 * need to go through reverse order of locking. We first do
3704 * trylock, and if that doesn't succeed, we go the hard way
3705 * leaving a reference and rechecking consistency after proper
3708 if (__predict_false(SOLISTEN_TRYLOCK(head) == 0)) {
3711 SOLISTEN_LOCK(head);
3713 if (__predict_false(head != so->so_listen)) {
3715 * The socket went off the listen queue,
3716 * meaning we lost a race to close(2) on
3717 * the listening socket; it is about to be soabort()ed.
3723 /* Not the last one, as so holds a ref. */
3724 refcount_release(&head->so_count);
3727 if ((so->so_options & SO_ACCEPTFILTER) == 0) {
3728 TAILQ_REMOVE(&head->sol_incomp, so, so_list);
3729 head->sol_incqlen--;
3730 TAILQ_INSERT_TAIL(&head->sol_comp, so, so_list);
3732 so->so_qstate = SQ_COMP;
3734 solisten_wakeup(head); /* unlocks */
3736 SOCKBUF_LOCK(&so->so_rcv);
3737 soupcall_set(so, SO_RCV,
3738 head->sol_accept_filter->accf_callback,
3739 head->sol_accept_filter_arg);
3740 so->so_options &= ~SO_ACCEPTFILTER;
3741 ret = head->sol_accept_filter->accf_callback(so,
3742 head->sol_accept_filter_arg, M_NOWAIT);
3743 if (ret == SU_ISCONNECTED) {
3744 soupcall_clear(so, SO_RCV);
3745 SOCKBUF_UNLOCK(&so->so_rcv);
3748 SOCKBUF_UNLOCK(&so->so_rcv);
3750 SOLISTEN_UNLOCK(head);
3755 wakeup(&so->so_timeo);
3761 soisdisconnecting(struct socket *so)
3765 so->so_state &= ~SS_ISCONNECTING;
3766 so->so_state |= SS_ISDISCONNECTING;
3768 if (!SOLISTENING(so)) {
3769 SOCKBUF_LOCK(&so->so_rcv);
3770 socantrcvmore_locked(so);
3771 SOCKBUF_LOCK(&so->so_snd);
3772 socantsendmore_locked(so);
3775 wakeup(&so->so_timeo);
3779 soisdisconnected(struct socket *so)
3783 so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
3784 so->so_state |= SS_ISDISCONNECTED;
3786 if (!SOLISTENING(so)) {
3788 SOCKBUF_LOCK(&so->so_rcv);
3789 socantrcvmore_locked(so);
3790 SOCKBUF_LOCK(&so->so_snd);
3791 sbdrop_locked(&so->so_snd, sbused(&so->so_snd));
3792 socantsendmore_locked(so);
3795 wakeup(&so->so_timeo);
3799 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.
3802 sodupsockaddr(const struct sockaddr *sa, int mflags)
3804 struct sockaddr *sa2;
3806 sa2 = malloc(sa->sa_len, M_SONAME, mflags);
3808 bcopy(sa, sa2, sa->sa_len);
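/*
 * [Editor's example, not part of the original file.]  Duplicating a peer
 * address out of an MT_SONAME mbuf before the mbuf is released, as
 * soreceive_dgram() does above for PR_ADDR protocols.  With M_NOWAIT the
 * result must be checked for NULL; the copy is later released with
 * free(sa, M_SONAME).
 */
static struct sockaddr *
dup_name_sketch(struct mbuf *m)
{
	return (sodupsockaddr(mtod(m, struct sockaddr *), M_NOWAIT));
}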
3813 * Register per-socket buffer upcalls.
3816 soupcall_set(struct socket *so, int which, so_upcall_t func, void *arg)
3820 KASSERT(!SOLISTENING(so), ("%s: so %p listening", __func__, so));
3830 panic("soupcall_set: bad which");
3832 SOCKBUF_LOCK_ASSERT(sb);
3833 sb->sb_upcall = func;
3834 sb->sb_upcallarg = arg;
3835 sb->sb_flags |= SB_UPCALL;
3839 soupcall_clear(struct socket *so, int which)
3843 KASSERT(!SOLISTENING(so), ("%s: so %p listening", __func__, so));
3853 panic("soupcall_clear: bad which");
3855 SOCKBUF_LOCK_ASSERT(sb);
3856 KASSERT(sb->sb_upcall != NULL,
3857 ("%s: so %p no upcall to clear", __func__, so));
3858 sb->sb_upcall = NULL;
3859 sb->sb_upcallarg = NULL;
3860 sb->sb_flags &= ~SB_UPCALL;
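/*
 * [Editor's example, not part of the original file.]  Registering a
 * receive upcall: the sockbuf lock must be held, per the assertion in
 * soupcall_set(), and the callback runs from the wakeup path with that
 * lock held, returning SU_OK to stay installed.  Names are hypothetical.
 */
static int
foo_rcv_upcall_sketch(struct socket *so, void *arg, int waitflag)
{
	/* ... e.g. schedule a task to drain so->so_rcv ... */
	return (SU_OK);
}

static void
foo_watch_socket_sketch(struct socket *so, void *arg)
{
	SOCKBUF_LOCK(&so->so_rcv);
	soupcall_set(so, SO_RCV, foo_rcv_upcall_sketch, arg);
	SOCKBUF_UNLOCK(&so->so_rcv);
}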
3864 solisten_upcall_set(struct socket *so, so_upcall_t func, void *arg)
3867 SOLISTEN_LOCK_ASSERT(so);
3868 so->sol_upcall = func;
3869 so->sol_upcallarg = arg;
3873 so_rdknl_lock(void *arg)
3875 struct socket *so = arg;
3877 if (SOLISTENING(so))
3880 SOCKBUF_LOCK(&so->so_rcv);
3884 so_rdknl_unlock(void *arg)
3886 struct socket *so = arg;
3888 if (SOLISTENING(so))
3891 SOCKBUF_UNLOCK(&so->so_rcv);
3895 so_rdknl_assert_locked(void *arg)
3897 struct socket *so = arg;
3899 if (SOLISTENING(so))
3900 SOCK_LOCK_ASSERT(so);
3902 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
3906 so_rdknl_assert_unlocked(void *arg)
3908 struct socket *so = arg;
3910 if (SOLISTENING(so))
3911 SOCK_UNLOCK_ASSERT(so);
3913 SOCKBUF_UNLOCK_ASSERT(&so->so_rcv);
3917 so_wrknl_lock(void *arg)
3919 struct socket *so = arg;
3921 if (SOLISTENING(so))
3924 SOCKBUF_LOCK(&so->so_snd);
3928 so_wrknl_unlock(void *arg)
3930 struct socket *so = arg;
3932 if (SOLISTENING(so))
3935 SOCKBUF_UNLOCK(&so->so_snd);
3939 so_wrknl_assert_locked(void *arg)
3941 struct socket *so = arg;
3943 if (SOLISTENING(so))
3944 SOCK_LOCK_ASSERT(so);
3946 SOCKBUF_LOCK_ASSERT(&so->so_snd);
3950 so_wrknl_assert_unlocked(void *arg)
3952 struct socket *so = arg;
3954 if (SOLISTENING(so))
3955 SOCK_UNLOCK_ASSERT(so);
3957 SOCKBUF_UNLOCK_ASSERT(&so->so_snd);
3961 * Create an external-format (``xsocket'') structure using the information in
3962 * the kernel-format socket structure pointed to by so. This is done to
3963 * reduce the spew of irrelevant information over this interface, to isolate
3964 * user code from changes in the kernel structure, and potentially to provide
3965 * information-hiding if we decide that some of this information should be
3966 * hidden from users.
3969 sotoxsocket(struct socket *so, struct xsocket *xso)
3972 xso->xso_len = sizeof *xso;
3974 xso->so_type = so->so_type;
3975 xso->so_options = so->so_options;
3976 xso->so_linger = so->so_linger;
3977 xso->so_state = so->so_state;
3978 xso->so_pcb = so->so_pcb;
3979 xso->xso_protocol = so->so_proto->pr_protocol;
3980 xso->xso_family = so->so_proto->pr_domain->dom_family;
3981 xso->so_timeo = so->so_timeo;
3982 xso->so_error = so->so_error;
3983 xso->so_uid = so->so_cred->cr_uid;
3984 xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
3985 if (SOLISTENING(so)) {
3986 xso->so_qlen = so->sol_qlen;
3987 xso->so_incqlen = so->sol_incqlen;
3988 xso->so_qlimit = so->sol_qlimit;
3989 xso->so_oobmark = 0;
3990 bzero(&xso->so_snd, sizeof(xso->so_snd));
3991 bzero(&xso->so_rcv, sizeof(xso->so_rcv));
3993 xso->so_state |= so->so_qstate;
3994 xso->so_qlen = xso->so_incqlen = xso->so_qlimit = 0;
3995 xso->so_oobmark = so->so_oobmark;
3996 sbtoxsockbuf(&so->so_snd, &xso->so_snd);
3997 sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
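/*
 * [Editor's example, not part of the original file.]  sotoxsocket() is the
 * usual export step in monitoring sysctls, in the style of the kernel's
 * pcblist handlers; "req" is the sysctl request.
 */
static int
export_socket_sketch(struct socket *so, struct sysctl_req *req)
{
	struct xsocket xso;

	sotoxsocket(so, &xso);
	return (SYSCTL_OUT(req, &xso, sizeof(xso)));
}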
4002 so_sockbuf_rcv(struct socket *so)
4005 return (&so->so_rcv);
4009 so_sockbuf_snd(struct socket *so)
4012 return (&so->so_snd);
4016 so_state_get(const struct socket *so)
4019 return (so->so_state);
4023 so_state_set(struct socket *so, int val)
4030 so_options_get(const struct socket *so)
4033 return (so->so_options);
4037 so_options_set(struct socket *so, int val)
4040 so->so_options = val;
4044 so_error_get(const struct socket *so)
4047 return (so->so_error);
4051 so_error_set(struct socket *so, int val)
4058 so_linger_get(const struct socket *so)
4061 return (so->so_linger);
4065 so_linger_set(struct socket *so, int val)
4068 so->so_linger = val;
4072 so_protosw_get(const struct socket *so)
4075 return (so->so_proto);
4079 so_protosw_set(struct socket *so, struct protosw *val)
4086 so_sorwakeup(struct socket *so)
4093 so_sowwakeup(struct socket *so)
4100 so_sorwakeup_locked(struct socket *so)
4103 sorwakeup_locked(so);
4107 so_sowwakeup_locked(struct socket *so)
4110 sowwakeup_locked(so);
4114 so_lock(struct socket *so)
4121 so_unlock(struct socket *so)