/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2004 The FreeBSD Foundation
 * Copyright (c) 2004-2008 Robert N. M. Watson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 */
/*
 * Comments on the socket life cycle:
 *
 * soalloc() sets up socket layer state for a socket, called only by
 * socreate() and sonewconn().  Socket layer private.
 *
 * sodealloc() tears down socket layer state for a socket, called only by
 * sofree() and sonewconn().  Socket layer private.
 *
 * pru_attach() associates protocol layer state with an allocated socket;
 * called only once, may fail, aborting socket allocation.  This is called
 * from socreate() and sonewconn().  Socket layer private.
 *
 * pru_detach() disassociates protocol layer state from an attached socket,
 * and will be called exactly once for sockets in which pru_attach() has
 * been successfully called.  If pru_attach() returned an error,
 * pru_detach() will not be called.  Socket layer private.
 *
 * pru_abort() and pru_close() notify the protocol layer that the last
 * consumer of a socket is starting to tear down the socket, and that the
 * protocol should terminate the connection.  Historically, pru_abort() also
 * detached protocol state from the socket state, but this is no longer the
 * case.
 *
 * socreate() creates a socket and attaches protocol state.  This is a public
 * interface that may be used by socket layer consumers to create new
 * sockets.
 *
 * sonewconn() creates a socket and attaches protocol state.  This is a
 * public interface that may be used by protocols to create new sockets when
 * a new connection is received and will be available for accept() on a
 * listen socket.
 *
 * soclose() destroys a socket after possibly waiting for it to disconnect.
 * This is a public interface that socket consumers should use to close and
 * release a socket when done with it.
 *
 * soabort() destroys a socket without waiting for it to disconnect (used
 * only for incoming connections that are already partially or fully
 * connected).  This is used internally by the socket layer when clearing
 * listen socket queues (due to overflow or close on the listen socket), but
 * is also a public interface protocols may use to abort connections in
 * their incomplete listen queues should they no longer be required.  Sockets
 * placed in completed connection listen queues should not be aborted for
 * reasons described in the comment above the soclose() implementation.  This
 * is not a general purpose close routine, and except in the specific
 * circumstances described here, should not be used.
 *
 * sofree() will free a socket and its protocol state if all references on
 * the socket have been released, and is the public interface to attempt to
 * free a socket when a reference is removed.  This is a socket layer private
 * interface.
 *
 * NOTE: In addition to socreate() and soclose(), which provide a single
 * socket reference to the consumer to be managed as required, there are two
 * calls to explicitly manage socket references, soref(), and sorele().
 * Currently, these are generally required only when transitioning a socket
 * from a listen queue to a file descriptor, in order to prevent garbage
 * collection of the socket at an untimely moment.  For a number of reasons,
 * these interfaces are not preferred, and should be avoided.
 *
 * NOTE: With regard to VNETs the general rule is that callers do not set
 * curvnet.  Exceptions to this rule include soabort(), sodisconnect(),
 * sofree() (and with that sorele(), sotryfree()), as well as sonewconn()
 * and sorflush(), which are usually called from a pre-set VNET context.
 * sopoll() currently does not need a VNET context to be set.
 */
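
/*
 * Illustrative sketch (not part of the original file): how a kernel
 * consumer might drive the life cycle described above, using only the
 * public interfaces documented here.  The surrounding context (a thread
 * with credentials, error handling policy) is assumed for illustration.
 */
#if 0
static int
example_socket_lifecycle(struct thread *td)
{
	struct socket *so;
	int error;

	/* socreate() attaches protocol state and returns one reference. */
	error = socreate(PF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
	    td->td_ucred, td);
	if (error != 0)
		return (error);

	/* ... use the socket: sobind(), solisten(), sosend(), etc. ... */

	/* soclose() releases the reference obtained from socreate(). */
	return (soclose(so));
}
#endif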
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_kern_tls.h"
#include "opt_sctp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/hhook.h>
#include <sys/kernel.h>
#include <sys/khelp.h>
#include <sys/ktls.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/sbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <net/route.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/unpcb.h>
#include <sys/jail.h>
#include <sys/syslog.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp.h>

#include <net/vnet.h>

#include <security/mac/mac_framework.h>

#ifdef COMPAT_FREEBSD32
#include <sys/mount.h>
#include <sys/sysent.h>
#include <compat/freebsd32/freebsd32.h>
#endif
static int	soreceive_rcvoob(struct socket *so, struct uio *uio,
		    int flags);
static void	so_rdknl_lock(void *);
static void	so_rdknl_unlock(void *);
static void	so_rdknl_assert_lock(void *, int);
static void	so_wrknl_lock(void *);
static void	so_wrknl_unlock(void *);
static void	so_wrknl_assert_lock(void *, int);

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_soempty(struct knote *kn, long hint);
static int inline hhook_run_socket(struct socket *so, void *hctx, int32_t h_id);
fo_kqfilter_t	soo_kqfilter;

static struct filterops soread_filtops = {
	.f_detach = filt_sordetach,
	.f_event = filt_soread,
};
static struct filterops sowrite_filtops = {
	.f_detach = filt_sowdetach,
	.f_event = filt_sowrite,
};
static struct filterops soempty_filtops = {
	.f_detach = filt_sowdetach,
	.f_event = filt_soempty,
};
so_gen_t	so_gencnt;	/* generation count for sockets */

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

#define	VNET_SO_ASSERT(so)						\
	VNET_ASSERT(curvnet != NULL,					\
	    ("%s:%d curvnet is NULL, so=%p", __func__, __LINE__, (so)));

VNET_DEFINE(struct hhook_head *, socket_hhh[HHOOK_SOCKET_LAST + 1]);
#define	V_socket_hhh		VNET(socket_hhh)
/*
 * Limit on the number of connections in the listen queue waiting
 * for accept(2).
 * NB: The original sysctl somaxconn is still available but hidden
 * to prevent confusion about the actual purpose of this number.
 */
static u_int somaxconn = SOMAXCONN;

static int
sysctl_somaxconn(SYSCTL_HANDLER_ARGS)
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)

	/*
	 * The purpose of the UINT_MAX / 3 limit is so that the formula
	 * 3 * sol_qlimit / 2 computed in sonewconn() below will not
	 * overflow an unsigned int.
	 */
	if (val < 1 || val > UINT_MAX / 3)

SYSCTL_PROC(_kern_ipc, OID_AUTO, soacceptqueue,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, sizeof(int),
    sysctl_somaxconn, "I",
    "Maximum listen socket pending connection accept queue size");
SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_SKIP | CTLFLAG_NEEDGIANT, 0,
    sizeof(int), sysctl_somaxconn, "I",
    "Maximum listen socket pending connection accept queue size (compat)");
static int numopensockets;
SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
    &numopensockets, 0, "Number of open sockets");

/*
 * accept_mtx locks down per-socket fields relating to accept queues.  See
 * socketvar.h for an annotation of the protected fields of struct socket.
 */
struct mtx accept_mtx;
MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);

/*
 * so_global_mtx protects so_gencnt, numopensockets, and the per-socket
 * so_gencnt field.
 */
static struct mtx so_global_mtx;
MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_glabel", MTX_DEF);

/*
 * General IPC sysctl name space, used by sockets and a variety of other IPC
 * types.
 */
SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "IPC");
/*
 * Initialize the socket subsystem and set up the socket
 * memory allocator.
 */
static uma_zone_t socket_zone;

static void
socket_zone_change(void *tag)
	maxsockets = uma_zone_set_max(socket_zone, maxsockets);

static void
socket_hhook_register(int subtype)
	if (hhook_head_register(HHOOK_TYPE_SOCKET, subtype,
	    &V_socket_hhh[subtype],
	    HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
		printf("%s: WARNING: unable to register hook\n", __func__);

static void
socket_hhook_deregister(int subtype)
	if (hhook_head_deregister(V_socket_hhh[subtype]) != 0)
		printf("%s: WARNING: unable to deregister hook\n", __func__);

static void
socket_init(void *tag)
	socket_zone = uma_zcreate("socket", sizeof(struct socket), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	maxsockets = uma_zone_set_max(socket_zone, maxsockets);
	uma_zone_set_warning(socket_zone, "kern.ipc.maxsockets limit reached");
	EVENTHANDLER_REGISTER(maxsockets_change, socket_zone_change, NULL,
	    EVENTHANDLER_PRI_FIRST);
SYSINIT(socket, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY, socket_init, NULL);

static void
socket_vnet_init(const void *unused __unused)
	/* We expect a contiguous range */
	for (i = 0; i <= HHOOK_SOCKET_LAST; i++)
		socket_hhook_register(i);
VNET_SYSINIT(socket_vnet_init, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY,
    socket_vnet_init, NULL);

static void
socket_vnet_uninit(const void *unused __unused)
	for (i = 0; i <= HHOOK_SOCKET_LAST; i++)
		socket_hhook_deregister(i);
VNET_SYSUNINIT(socket_vnet_uninit, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY,
    socket_vnet_uninit, NULL);
/*
 * Initialise maxsockets.  This SYSINIT must be run after
 * tunable_mbinit().
 */
static void
init_maxsockets(void *ignored)
	TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
	maxsockets = imax(maxsockets, maxfiles);
SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);

/*
 * Sysctl to get and set the maximum global sockets limit.  Notify protocols
 * of the change so that they can update their dependent limits as required.
 */
static int
sysctl_maxsockets(SYSCTL_HANDLER_ARGS)
	int error, newmaxsockets;

	newmaxsockets = maxsockets;
	error = sysctl_handle_int(oidp, &newmaxsockets, 0, req);
	if (error == 0 && req->newptr) {
		if (newmaxsockets > maxsockets &&
		    newmaxsockets <= maxfiles) {
			maxsockets = newmaxsockets;
			EVENTHANDLER_INVOKE(maxsockets_change);

SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &maxsockets, 0,
    sysctl_maxsockets, "IU",
    "Maximum number of sockets available");
/*
 * Socket operation routines.  These routines are called by the routines in
 * sys_socket.c or from a system process, and implement the semantics of
 * socket operations by switching out to the protocol specific routines.
 */

/*
 * Get a socket structure from our zone, and initialize it.  Note that it
 * would probably be better to allocate socket and PCB at the same time, but
 * I'm not convinced that all the protocols can be easily modified to do
 * this.
 *
 * soalloc() returns a socket with a ref count of 0.
 */
static struct socket *
soalloc(struct vnet *vnet)
	so = uma_zalloc(socket_zone, M_NOWAIT | M_ZERO);
	if (mac_socket_init(so, M_NOWAIT) != 0) {
		uma_zfree(socket_zone, so);
	if (khelp_init_osd(HELPER_CLASS_SOCKET, &so->osd)) {
		uma_zfree(socket_zone, so);
	/*
	 * The socket locking protocol allows locking two sockets at a time,
	 * however, the first one must be a listening socket.  WITNESS lacks
	 * a feature to change class of an existing lock, so we use DUPOK.
	 */
	mtx_init(&so->so_lock, "socket", NULL, MTX_DEF | MTX_DUPOK);
	so->so_snd.sb_mtx = &so->so_snd_mtx;
	so->so_rcv.sb_mtx = &so->so_rcv_mtx;
	SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
	SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
	so->so_rcv.sb_sel = &so->so_rdsel;
	so->so_snd.sb_sel = &so->so_wrsel;
	sx_init(&so->so_snd_sx, "so_snd_sx");
	sx_init(&so->so_rcv_sx, "so_rcv_sx");
	TAILQ_INIT(&so->so_snd.sb_aiojobq);
	TAILQ_INIT(&so->so_rcv.sb_aiojobq);
	TASK_INIT(&so->so_snd.sb_aiotask, 0, soaio_snd, so);
	TASK_INIT(&so->so_rcv.sb_aiotask, 0, soaio_rcv, so);
	VNET_ASSERT(vnet != NULL, ("%s:%d vnet is NULL, so=%p",
	    __func__, __LINE__, so));
	/* We shouldn't need the so_global_mtx */
	if (hhook_run_socket(so, NULL, HHOOK_SOCKET_CREATE)) {
		/* Do we need more comprehensive error returns? */
		uma_zfree(socket_zone, so);
	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	vnet->vnet_sockcnt++;
	mtx_unlock(&so_global_mtx);
/*
 * Free the storage associated with a socket at the socket layer, tear down
 * locks, labels, etc.  All protocol state is assumed already to have been
 * torn down (and possibly never set up) by the caller.
 */
static void
sodealloc(struct socket *so)
	KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
	KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL"));

	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	--numopensockets;	/* Could be below, but faster here. */
	VNET_ASSERT(so->so_vnet != NULL, ("%s:%d so_vnet is NULL, so=%p",
	    __func__, __LINE__, so));
	so->so_vnet->vnet_sockcnt--;
	mtx_unlock(&so_global_mtx);

	mac_socket_destroy(so);
	hhook_run_socket(so, NULL, HHOOK_SOCKET_CLOSE);
	khelp_destroy_osd(&so->osd);
	if (SOLISTENING(so)) {
		if (so->sol_accept_filter != NULL)
			accept_filt_setopt(so, NULL);
		if (so->so_rcv.sb_hiwat)
			(void)chgsbsize(so->so_cred->cr_uidinfo,
			    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
		if (so->so_snd.sb_hiwat)
			(void)chgsbsize(so->so_cred->cr_uidinfo,
			    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
		sx_destroy(&so->so_snd_sx);
		sx_destroy(&so->so_rcv_sx);
		SOCKBUF_LOCK_DESTROY(&so->so_snd);
		SOCKBUF_LOCK_DESTROY(&so->so_rcv);
	mtx_destroy(&so->so_lock);
	uma_zfree(socket_zone, so);
/*
 * socreate returns a socket with a ref count of 1.  The socket should be
 * closed with soclose().
 */
int
socreate(int dom, struct socket **aso, int type, int proto,
    struct ucred *cred, struct thread *td)
		prp = pffindproto(dom, proto, type);
		prp = pffindtype(dom, type);
		/* No support for domain. */
		if (pffinddomain(dom) == NULL)
			return (EAFNOSUPPORT);
		/* No support for socket type. */
		if (proto == 0 && type != 0)
			return (EPROTONOSUPPORT);
	if (prp->pr_usrreqs->pru_attach == NULL ||
	    prp->pr_usrreqs->pru_attach == pru_attach_notsupp)
		return (EPROTONOSUPPORT);
	if (IN_CAPABILITY_MODE(td) && (prp->pr_flags & PR_CAPATTACH) == 0)
	if (prison_check_af(cred, prp->pr_domain->dom_family) != 0)
		return (EPROTONOSUPPORT);
	if (prp->pr_type != type)
	so = soalloc(CRED_TO_VNET(cred));
	so->so_cred = crhold(cred);
	if ((prp->pr_domain->dom_family == PF_INET) ||
	    (prp->pr_domain->dom_family == PF_INET6) ||
	    (prp->pr_domain->dom_family == PF_ROUTE))
		so->so_fibnum = td->td_proc->p_fibnum;
	mac_socket_create(cred, so);
	knlist_init(&so->so_rdsel.si_note, so, so_rdknl_lock, so_rdknl_unlock,
	    so_rdknl_assert_lock);
	knlist_init(&so->so_wrsel.si_note, so, so_wrknl_lock, so_wrknl_unlock,
	    so_wrknl_assert_lock);
	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	CURVNET_SET(so->so_vnet);
	error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
static int regression_sonewconn_earlytest = 1;
SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW,
    &regression_sonewconn_earlytest, 0, "Perform early sonewconn limit test");

static struct timeval overinterval = { 60, 0 };
SYSCTL_TIMEVAL_SEC(_kern_ipc, OID_AUTO, sooverinterval, CTLFLAG_RW,
    &overinterval,
    "Delay in seconds between warnings for listen socket overflows");
/*
 * When an attempt at a new connection is noted on a socket which accepts
 * connections, sonewconn is called.  If the connection is possible (subject
 * to space constraints, etc.) then we allocate a new structure, properly
 * linked into the data structure of the original socket, and return this.
 * Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED.
 *
 * Note: the ref count on the socket is 0 on return.
 */
sonewconn(struct socket *head, int connstatus)
	const char localprefix[] = "local:";
	char descrbuf[SUNPATHLEN + sizeof(localprefix)];
#if defined(INET6)
	char addrbuf[INET6_ADDRSTRLEN];
#elif defined(INET)
	char addrbuf[INET_ADDRSTRLEN];
#endif

	over = (head->sol_qlen > 3 * head->sol_qlimit / 2);
	if (regression_sonewconn_earlytest && over) {
		head->sol_overcount++;
		dolog = !!ratecheck(&head->sol_lastover, &overinterval);

		/*
		 * If we're going to log, copy the overflow count and queue
		 * length from the listen socket before dropping the lock.
		 * Also, reset the overflow count.
		 */
		overcount = head->sol_overcount;
		head->sol_overcount = 0;
		qlen = head->sol_qlen;
		SOLISTEN_UNLOCK(head);

			/*
			 * Try to print something descriptive about the
			 * socket for the error message.
			 */
			sbuf_new(&descrsb, descrbuf, sizeof(descrbuf),
			switch (head->so_proto->pr_domain->dom_family) {
#if defined(INET) || defined(INET6)
				if (head->so_proto->pr_domain->dom_family ==
				    (sotoinpcb(head)->inp_inc.inc_flags &
					    &sotoinpcb(head)->inp_inc.inc6_laddr);
					sbuf_printf(&descrsb, "[%s]", addrbuf);
					    sotoinpcb(head)->inp_inc.inc_laddr,
					sbuf_cat(&descrsb, addrbuf);
				sbuf_printf(&descrsb, ":%hu (proto %u)",
				    ntohs(sotoinpcb(head)->inp_inc.inc_lport),
				    head->so_proto->pr_protocol);
#endif /* INET || INET6 */
				sbuf_cat(&descrsb, localprefix);
				if (sotounpcb(head)->unp_addr != NULL)
					    sotounpcb(head)->unp_addr->sun_len -
					    offsetof(struct sockaddr_un,
					    sotounpcb(head)->unp_addr->sun_path,
					sbuf_cat(&descrsb, "(unknown)");

			/*
			 * If we can't print something more specific, at least
			 * print the domain name.
			 */
			if (sbuf_finish(&descrsb) != 0 ||
			    sbuf_len(&descrsb) <= 0) {
				sbuf_clear(&descrsb);
				    head->so_proto->pr_domain->dom_name ?:
				sbuf_finish(&descrsb);
			KASSERT(sbuf_len(&descrsb) > 0,
			    ("%s: sbuf creation failed", __func__));
			    "%s: pcb %p (%s): Listen queue overflow: "
			    "%i already in queue awaiting acceptance "
			    "(%d occurrences)\n",
			    __func__, head->so_pcb, sbuf_data(&descrsb),
			sbuf_delete(&descrsb);
	SOLISTEN_UNLOCK(head);
	VNET_ASSERT(head->so_vnet != NULL, ("%s: so %p vnet is NULL",
	so = soalloc(head->so_vnet);
		log(LOG_DEBUG, "%s: pcb %p: New socket allocation failure: "
		    "limit reached or out of memory\n",
		    __func__, head->so_pcb);
	so->so_listen = head;
	so->so_type = head->so_type;
	so->so_options = head->so_options & ~SO_ACCEPTCONN;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state | SS_NOFDREF;
	so->so_fibnum = head->so_fibnum;
	so->so_proto = head->so_proto;
	so->so_cred = crhold(head->so_cred);
	mac_socket_newconn(head, so);
	knlist_init(&so->so_rdsel.si_note, so, so_rdknl_lock, so_rdknl_unlock,
	    so_rdknl_assert_lock);
	knlist_init(&so->so_wrsel.si_note, so, so_wrknl_lock, so_wrknl_unlock,
	    so_wrknl_assert_lock);
	VNET_SO_ASSERT(head);
	if (soreserve(so, head->sol_sbsnd_hiwat, head->sol_sbrcv_hiwat)) {
		log(LOG_DEBUG, "%s: pcb %p: soreserve() failed\n",
		    __func__, head->so_pcb);
	if ((*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
		log(LOG_DEBUG, "%s: pcb %p: pru_attach() failed\n",
		    __func__, head->so_pcb);
	so->so_rcv.sb_lowat = head->sol_sbrcv_lowat;
	so->so_snd.sb_lowat = head->sol_sbsnd_lowat;
	so->so_rcv.sb_timeo = head->sol_sbrcv_timeo;
	so->so_snd.sb_timeo = head->sol_sbsnd_timeo;
	so->so_rcv.sb_flags |= head->sol_sbrcv_flags & SB_AUTOSIZE;
	so->so_snd.sb_flags |= head->sol_sbsnd_flags & SB_AUTOSIZE;
	if (head->sol_accept_filter != NULL)
	so->so_state |= connstatus;
	soref(head); /* A socket on (in)complete queue refs head. */
		TAILQ_INSERT_TAIL(&head->sol_comp, so, so_list);
		so->so_qstate = SQ_COMP;
		solisten_wakeup(head);	/* unlocks */
		/*
		 * Keep removing sockets from the head until there's room for
		 * us to insert on the tail.  In pre-locking revisions, this
		 * was a simple if(), but as we could be racing with other
		 * threads and soabort() requires dropping locks, we must
		 * loop waiting for the condition to be true.
		 */
		while (head->sol_incqlen > head->sol_qlimit) {
			sp = TAILQ_FIRST(&head->sol_incomp);
			TAILQ_REMOVE(&head->sol_incomp, sp, so_list);
			sp->so_qstate = SQ_NONE;
			sp->so_listen = NULL;
			sorele(head);	/* does SOLISTEN_UNLOCK, head stays */
		TAILQ_INSERT_TAIL(&head->sol_incomp, so, so_list);
		so->so_qstate = SQ_INCOMP;
		SOLISTEN_UNLOCK(head);
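
/*
 * Illustrative sketch (not part of the original file): the typical
 * protocol-side use of sonewconn() when a connection request arrives on a
 * listening socket, as described in the comment above.  Details such as
 * where the pcb comes from are assumed for illustration.
 */
#if 0
static void
example_incoming_connection(struct socket *head)
{
	struct socket *so;

	/*
	 * Passing a connstatus of 0 queues the new socket on the
	 * incomplete queue; the returned socket carries no reference.
	 */
	so = sonewconn(head, 0);
	if (so == NULL)
		return;		/* queue overflowed or out of resources */

	/* ... protocol handshake runs; once it completes: ... */
	soisconnected(so);	/* moves it to the complete queue */
}
#endif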
#if defined(SCTP) || defined(SCTP_SUPPORT)
/*
 * Socket part of sctp_peeloff().  Detach a new socket from an
 * association.  The new socket is returned with a reference.
 */
sopeeloff(struct socket *head)
	VNET_ASSERT(head->so_vnet != NULL, ("%s:%d so_vnet is NULL, head=%p",
	    __func__, __LINE__, head));
	so = soalloc(head->so_vnet);
		log(LOG_DEBUG, "%s: pcb %p: New socket allocation failure: "
		    "limit reached or out of memory\n",
		    __func__, head->so_pcb);
	so->so_type = head->so_type;
	so->so_options = head->so_options;
	so->so_linger = head->so_linger;
	so->so_state = (head->so_state & SS_NBIO) | SS_ISCONNECTED;
	so->so_fibnum = head->so_fibnum;
	so->so_proto = head->so_proto;
	so->so_cred = crhold(head->so_cred);
	mac_socket_newconn(head, so);
	knlist_init(&so->so_rdsel.si_note, so, so_rdknl_lock, so_rdknl_unlock,
	    so_rdknl_assert_lock);
	knlist_init(&so->so_wrsel.si_note, so, so_wrknl_lock, so_wrknl_unlock,
	    so_wrknl_assert_lock);
	VNET_SO_ASSERT(head);
	if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat)) {
		log(LOG_DEBUG, "%s: pcb %p: soreserve() failed\n",
		    __func__, head->so_pcb);
	if ((*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
		log(LOG_DEBUG, "%s: pcb %p: pru_attach() failed\n",
		    __func__, head->so_pcb);
	so->so_rcv.sb_lowat = head->so_rcv.sb_lowat;
	so->so_snd.sb_lowat = head->so_snd.sb_lowat;
	so->so_rcv.sb_timeo = head->so_rcv.sb_timeo;
	so->so_snd.sb_timeo = head->so_snd.sb_timeo;
	so->so_rcv.sb_flags |= head->so_rcv.sb_flags & SB_AUTOSIZE;
	so->so_snd.sb_flags |= head->so_snd.sb_flags & SB_AUTOSIZE;
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
	CURVNET_SET(so->so_vnet);
	error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td);

sobindat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
	CURVNET_SET(so->so_vnet);
	error = (*so->so_proto->pr_usrreqs->pru_bindat)(fd, so, nam, td);

/*
 * solisten() transitions a socket from a non-listening state to a listening
 * state, but can also be used to update the listen queue depth on an
 * existing listen socket.  The protocol will call back into the sockets
 * layer using solisten_proto_check() and solisten_proto() to check and set
 * socket-layer listen state.  Call backs are used so that the protocol can
 * acquire both protocol and socket layer locks in whatever order is required
 * by the protocol.
 *
 * Protocol implementors are advised to hold the socket lock across the
 * socket-layer test and set to avoid races at the socket layer.
 */
solisten(struct socket *so, int backlog, struct thread *td)
	CURVNET_SET(so->so_vnet);
	error = (*so->so_proto->pr_usrreqs->pru_listen)(so, backlog, td);
/*
 * Prepare for a call to solisten_proto().  Acquire all socket buffer locks in
 * order to interlock with socket I/O.
 */
solisten_proto_check(struct socket *so)
	SOCK_LOCK_ASSERT(so);

	if ((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
	    SS_ISDISCONNECTING)) != 0)

	/*
	 * Sleeping is not permitted here, so simply fail if userspace is
	 * attempting to transmit or receive on the socket.  This kind of
	 * transient failure is not ideal, but it should occur only if userspace
	 * is misusing the socket interfaces.
	 */
	if (!sx_try_xlock(&so->so_snd_sx))
	if (!sx_try_xlock(&so->so_rcv_sx)) {
		sx_xunlock(&so->so_snd_sx);
	mtx_lock(&so->so_snd_mtx);
	mtx_lock(&so->so_rcv_mtx);

	/* Interlock with soo_aio_queue(). */
	if ((so->so_snd.sb_flags & (SB_AIO | SB_AIO_RUNNING)) != 0 ||
	    (so->so_rcv.sb_flags & (SB_AIO | SB_AIO_RUNNING)) != 0) {
		solisten_proto_abort(so);

/*
 * Undo the setup done by solisten_proto_check().
 */
solisten_proto_abort(struct socket *so)
	mtx_unlock(&so->so_snd_mtx);
	mtx_unlock(&so->so_rcv_mtx);
	sx_xunlock(&so->so_snd_sx);
	sx_xunlock(&so->so_rcv_sx);
solisten_proto(struct socket *so, int backlog)
	int sbrcv_lowat, sbsnd_lowat;
	u_int sbrcv_hiwat, sbsnd_hiwat;
	short sbrcv_flags, sbsnd_flags;
	sbintime_t sbrcv_timeo, sbsnd_timeo;

	SOCK_LOCK_ASSERT(so);
	KASSERT((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
	    SS_ISDISCONNECTING)) == 0,
	    ("%s: bad socket state %p", __func__, so));

	/*
	 * Change this socket to listening state.
	 */
	sbrcv_lowat = so->so_rcv.sb_lowat;
	sbsnd_lowat = so->so_snd.sb_lowat;
	sbrcv_hiwat = so->so_rcv.sb_hiwat;
	sbsnd_hiwat = so->so_snd.sb_hiwat;
	sbrcv_flags = so->so_rcv.sb_flags;
	sbsnd_flags = so->so_snd.sb_flags;
	sbrcv_timeo = so->so_rcv.sb_timeo;
	sbsnd_timeo = so->so_snd.sb_timeo;

	sbdestroy(&so->so_snd, so);
	sbdestroy(&so->so_rcv, so);
	    sizeof(struct socket) - offsetof(struct socket, so_rcv));

	so->sol_sbrcv_lowat = sbrcv_lowat;
	so->sol_sbsnd_lowat = sbsnd_lowat;
	so->sol_sbrcv_hiwat = sbrcv_hiwat;
	so->sol_sbsnd_hiwat = sbsnd_hiwat;
	so->sol_sbrcv_flags = sbrcv_flags;
	so->sol_sbsnd_flags = sbsnd_flags;
	so->sol_sbrcv_timeo = sbrcv_timeo;
	so->sol_sbsnd_timeo = sbsnd_timeo;

	so->sol_qlen = so->sol_incqlen = 0;
	TAILQ_INIT(&so->sol_incomp);
	TAILQ_INIT(&so->sol_comp);

	so->sol_accept_filter = NULL;
	so->sol_accept_filter_arg = NULL;
	so->sol_accept_filter_str = NULL;

	so->sol_upcall = NULL;
	so->sol_upcallarg = NULL;

	so->so_options |= SO_ACCEPTCONN;

	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->sol_qlimit = backlog;

	mtx_unlock(&so->so_snd_mtx);
	mtx_unlock(&so->so_rcv_mtx);
	sx_xunlock(&so->so_snd_sx);
	sx_xunlock(&so->so_rcv_sx);
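
/*
 * Illustrative sketch (not part of the original file): the calling
 * convention for the solisten_proto_check()/solisten_proto() pair from a
 * protocol's pru_listen method, per the comments above.  Protocol-specific
 * validation is elided.
 */
#if 0
static int
example_pru_listen(struct socket *so, int backlog, struct thread *td)
{
	int error;

	SOCK_LOCK(so);
	error = solisten_proto_check(so);
	if (error != 0) {
		SOCK_UNLOCK(so);
		return (error);
	}
	/*
	 * ... protocol-layer checks run here; on failure, call
	 * solisten_proto_abort(so) before unlocking and returning ...
	 */
	solisten_proto(so, backlog);
	SOCK_UNLOCK(so);
	return (0);
}
#endif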
/*
 * Wakeup listeners/subsystems once we have a complete connection.
 * Enters with lock, returns unlocked.
 */
solisten_wakeup(struct socket *sol)
	if (sol->sol_upcall != NULL)
		(void)sol->sol_upcall(sol, sol->sol_upcallarg, M_NOWAIT);
		selwakeuppri(&sol->so_rdsel, PSOCK);
		KNOTE_LOCKED(&sol->so_rdsel.si_note, 0);
	SOLISTEN_UNLOCK(sol);
	wakeup_one(&sol->sol_comp);
	if ((sol->so_state & SS_ASYNC) && sol->so_sigio != NULL)
		pgsigio(&sol->so_sigio, SIGIO, 0);
/*
 * Return a single connection off a listening socket queue.  The main
 * consumer of the function is kern_accept4().  Some modules that do their
 * own accept management also use it.
 *
 * The listening socket must be locked on entry and is returned unlocked on
 * return.
 * The flags argument is a set of accept4(2) flags and ACCEPT4_INHERIT.
 */
solisten_dequeue(struct socket *head, struct socket **ret, int flags)
	SOLISTEN_LOCK_ASSERT(head);

	while (!(head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->sol_comp) &&
	    head->so_error == 0) {
		error = msleep(&head->sol_comp, SOCK_MTX(head), PSOCK | PCATCH,
			SOLISTEN_UNLOCK(head);
	if (head->so_error) {
		error = head->so_error;
	} else if ((head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->sol_comp))
		error = EWOULDBLOCK;
		SOLISTEN_UNLOCK(head);
	so = TAILQ_FIRST(&head->sol_comp);
	KASSERT(so->so_qstate == SQ_COMP,
	    ("%s: so %p not SQ_COMP", __func__, so));
	so->so_qstate = SQ_NONE;
	so->so_listen = NULL;
	TAILQ_REMOVE(&head->sol_comp, so, so_list);
	if (flags & ACCEPT4_INHERIT)
		so->so_state |= (head->so_state & SS_NBIO);
		so->so_state |= (flags & SOCK_NONBLOCK) ? SS_NBIO : 0;
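
/*
 * Illustrative sketch (not part of the original file): an accept-style
 * consumer of solisten_dequeue(), loosely modeled on kern_accept4() as the
 * comment above suggests.  Name handling and file descriptor plumbing are
 * omitted.
 */
#if 0
static int
example_accept(struct socket *head, struct socket **sop)
{
	int error;

	SOLISTEN_LOCK(head);
	error = solisten_dequeue(head, sop, 0);	/* returns unlocked */
	if (error != 0)
		return (error);

	/* *sop now holds a connected socket removed from the queue. */
	return (0);
}
#endif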
/*
 * Evaluate the reference count and named references on a socket; if no
 * references remain, free it.  This should be called whenever a reference is
 * released, such as in sorele(), but also when named reference flags are
 * cleared in socket or protocol code.
 *
 * sofree() will free the socket if:
 *
 * - There are no outstanding file descriptor references or related consumers
 *
 * - The socket has been closed by user space, if ever open (SS_NOFDREF).
 *
 * - The protocol does not have an outstanding strong reference on the socket
 *   (SS_PROTOREF).
 *
 * - The socket is not in a completed connection queue, so a process has been
 *   notified that it is present.  If it is removed, the user process may
 *   block in accept() despite select() saying the socket was ready.
 */
sofree(struct socket *so)
	struct protosw *pr = so->so_proto;
	bool last __diagused;

	SOCK_LOCK_ASSERT(so);

	if ((so->so_state & (SS_NOFDREF | SS_PROTOREF)) != SS_NOFDREF ||
	    refcount_load(&so->so_count) != 0 || so->so_qstate == SQ_COMP) {
	if (!SOLISTENING(so) && so->so_qstate == SQ_INCOMP) {
		sol = so->so_listen;
		KASSERT(sol, ("%s: so %p on incomp of NULL", __func__, so));

		/*
		 * To solve the race between close of a listening socket and
		 * a socket on its incomplete queue, we need to lock both.
		 * The order is first listening socket, then regular.
		 * Since we have neither SS_NOFDREF nor SS_PROTOREF, this
		 * function and the listening socket are the only pointers
		 * to so.  To preserve so and sol, we reference both and then
		 * relock.
		 * After relock the socket may not move to so_comp since it
		 * doesn't have PCB already, but it may be removed from
		 * so_incomp.  If that happens, we share responsibility on
		 * freeing the socket, but soclose() has already removed
		 * it from the queue.
		 */
		if (so->so_qstate == SQ_INCOMP) {
			KASSERT(so->so_listen == sol,
			    ("%s: so %p migrated out of sol %p",
			    __func__, so, sol));
			TAILQ_REMOVE(&sol->sol_incomp, so, so_list);
			last = refcount_release(&sol->so_count);
			KASSERT(!last, ("%s: released last reference for %p",
			so->so_qstate = SQ_NONE;
			so->so_listen = NULL;
			KASSERT(so->so_listen == NULL,
			    ("%s: so %p not on (in)comp with so_listen",
	KASSERT(refcount_load(&so->so_count) == 1,
	    ("%s: so %p count %u", __func__, so, so->so_count));
	if (SOLISTENING(so))
		so->so_error = ECONNABORTED;
	if (so->so_dtor != NULL)
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
		(*pr->pr_domain->dom_dispose)(so);
	if (pr->pr_usrreqs->pru_detach != NULL)
		(*pr->pr_usrreqs->pru_detach)(so);

	/*
	 * From this point on, we assume that no other references to this
	 * socket exist anywhere else in the stack.  Therefore, no locks need
	 * to be acquired or held.
	 *
	 * We used to do a lot of socket buffer and socket locking here, as
	 * well as invoke sorflush() and perform wakeups.  The direct call to
	 * dom_dispose() and sbdestroy() are an inlining of what was
	 * necessary from sorflush().
	 *
	 * Notice that the socket buffer and kqueue state are torn down
	 * before calling pru_detach.  This means that protocols should not
	 * assume they can perform socket wakeups, etc, in their detach code.
	 */
	if (!SOLISTENING(so)) {
		sbdestroy(&so->so_snd, so);
		sbdestroy(&so->so_rcv, so);
	seldrain(&so->so_rdsel);
	seldrain(&so->so_wrsel);
	knlist_destroy(&so->so_rdsel.si_note);
	knlist_destroy(&so->so_wrsel.si_note);
/*
 * Close a socket on last file table reference removal.  Initiate disconnect
 * if connected.  Free socket when disconnect complete.
 *
 * This function will sorele() the socket.  Note that soclose() may be called
 * prior to the ref count reaching zero.  The actual socket structure will
 * not be freed until the ref count reaches zero.
 */
soclose(struct socket *so)
	struct accept_queue lqueue;
	bool listening, last __diagused;

	KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter"));

	CURVNET_SET(so->so_vnet);
	funsetown(&so->so_sigio);
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
				if (error == ENOTCONN)
		if ((so->so_options & SO_LINGER) != 0 && so->so_linger != 0) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo,
				    PSOCK | PCATCH, "soclos",
				    so->so_linger * hz);
	if (so->so_proto->pr_usrreqs->pru_close != NULL)
		(*so->so_proto->pr_usrreqs->pru_close)(so);
	if ((listening = SOLISTENING(so))) {
		TAILQ_INIT(&lqueue);
		TAILQ_SWAP(&lqueue, &so->sol_incomp, socket, so_list);
		TAILQ_CONCAT(&lqueue, &so->sol_comp, so_list);

		so->sol_qlen = so->sol_incqlen = 0;

		TAILQ_FOREACH(sp, &lqueue, so_list) {
			sp->so_qstate = SQ_NONE;
			sp->so_listen = NULL;
			last = refcount_release(&so->so_count);
			KASSERT(!last, ("%s: released last reference for %p",
	KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF"));
	so->so_state |= SS_NOFDREF;
		struct socket *sp, *tsp;

		TAILQ_FOREACH_SAFE(sp, &lqueue, so_list, tsp) {
			if (refcount_load(&sp->so_count) == 0) {
				/* See the handling of queued sockets
				   in sofree(). */
/*
 * soabort() is used to abruptly tear down a connection, such as when a
 * resource limit is reached (listen queue depth exceeded), or if a listen
 * socket is closed while there are sockets waiting to be accepted.
 *
 * This interface is tricky, because it is called on an unreferenced socket,
 * and must be called only by a thread that has actually removed the socket
 * from the listen queue it was on, or races with other threads are risked.
 *
 * This interface will call into the protocol code, so must not be called
 * with any socket locks held.  Protocols do call it while holding their own
 * recursible protocol mutexes, but this is something that should be subject
 * to review in the future.
 */
soabort(struct socket *so)
	/*
	 * In as much as is possible, assert that no references to this
	 * socket are held.  This is not quite the same as asserting that the
	 * current thread is responsible for arranging for no references, but
	 * is as close as we can get for now.
	 */
	KASSERT(so->so_count == 0, ("soabort: so_count"));
	KASSERT((so->so_state & SS_PROTOREF) == 0, ("soabort: SS_PROTOREF"));
	KASSERT(so->so_state & SS_NOFDREF, ("soabort: !SS_NOFDREF"));

	if (so->so_proto->pr_usrreqs->pru_abort != NULL)
		(*so->so_proto->pr_usrreqs->pru_abort)(so);
soaccept(struct socket *so, struct sockaddr **nam)
	KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF"));
	so->so_state &= ~SS_NOFDREF;
	CURVNET_SET(so->so_vnet);
	error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);

soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
	return (soconnectat(AT_FDCWD, so, nam, td));

soconnectat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
	CURVNET_SET(so->so_vnet);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.  This allows
	 * user to disconnect by connecting to, e.g., a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so)))) {
		/*
		 * Prevent accumulated error from previous connection from
		 * biting us.
		 */
		if (fd == AT_FDCWD) {
			error = (*so->so_proto->pr_usrreqs->pru_connect)(so,
			error = (*so->so_proto->pr_usrreqs->pru_connectat)(fd,

soconnect2(struct socket *so1, struct socket *so2)
	CURVNET_SET(so1->so_vnet);
	error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2);

sodisconnect(struct socket *so)
	if ((so->so_state & SS_ISCONNECTED) == 0)
	if (so->so_state & SS_ISDISCONNECTING)
	error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
	int clen = 0, error, dontroute;

	KASSERT(so->so_type == SOCK_DGRAM, ("sosend_dgram: !SOCK_DGRAM"));
	KASSERT(so->so_proto->pr_flags & PR_ATOMIC,
	    ("sosend_dgram: !PR_ATOMIC"));

		resid = uio->uio_resid;
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.  However, space must be
	 * signed, as it might be less than 0 if we over-committed, and we
	 * must use a signed comparison of space and resid.  On the other
	 * hand, a negative resid causes us to loop sending 0-length
	 * segments to the protocol.
	 */
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0;
		td->td_ru.ru_msgsnd++;
	if (control != NULL)
		clen = control->m_len;

	SOCKBUF_LOCK(&so->so_snd);
	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		SOCKBUF_UNLOCK(&so->so_snd);
		error = so->so_error;
		SOCKBUF_UNLOCK(&so->so_snd);
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		/*
		 * `sendto' and `sendmsg' are allowed on a connection-based
		 * socket if it supports implied connect.  Return ENOTCONN if
		 * not connected and no address is supplied.
		 */
		if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
		    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
			if ((so->so_state & SS_ISCONFIRMING) == 0 &&
			    !(resid == 0 && clen != 0)) {
				SOCKBUF_UNLOCK(&so->so_snd);
		} else if (addr == NULL) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED)
				error = EDESTADDRREQ;
			SOCKBUF_UNLOCK(&so->so_snd);
	/*
	 * Do we need MSG_OOB support in SOCK_DGRAM?  Signs here may be a
	 * problem and need fixing.
	 */
	space = sbspace(&so->so_snd);
	if (flags & MSG_OOB)
	SOCKBUF_UNLOCK(&so->so_snd);
	if (resid > space) {
		if (flags & MSG_EOR)
			top->m_flags |= M_EOR;
		/*
		 * Copy the data from userland into a mbuf chain.
		 * If no data is to be copied in, a single empty mbuf
		 * is returned.
		 */
		top = m_uiotombuf(uio, M_WAITOK, space, max_hdr,
		    (M_PKTHDR | ((flags & MSG_EOR) ? M_EOR : 0)));
			error = EFAULT;	/* only possible error */
		space -= resid - uio->uio_resid;
		resid = uio->uio_resid;
	KASSERT(resid == 0, ("sosend_dgram: resid != 0"));
	/*
	 * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock
	 * than with.
	 */
		so->so_options |= SO_DONTROUTE;
	/*
	 * XXX all the SBS_CANTSENDMORE checks previously done could be out
	 * of date.  We could have received a reset packet in an interrupt or
	 * maybe we slept while doing page faults in uiomove() etc.  We could
	 * probably recheck again inside the locking protection here, but
	 * there are probably other places that this also happens.  We must
	 * rethink this.
	 */
	error = (*so->so_proto->pr_usrreqs->pru_send)(so,
	    (flags & MSG_OOB) ? PRUS_OOB :
	/*
	 * If the user set MSG_EOF, the protocol understands this flag and
	 * nothing left to send then use PRU_SEND_EOF instead of PRU_SEND.
	 */
	    ((flags & MSG_EOF) &&
	     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
	    /* If there is more to send set PRUS_MORETOCOME */
	    (flags & MSG_MORETOCOME) ||
	    (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
	    top, addr, control, td);
		so->so_options &= ~SO_DONTROUTE;
	if (control != NULL)
/*
 * Send on a socket.  If send must go all at once and message is larger than
 * send buffering, then hard error.  Lock against other senders.  If must go
 * all at once and not enough room now, then inform user that this would
 * block and do nothing.  Otherwise, if nonblocking, send as much as
 * possible.  The data to be sent is described by "uio" if nonzero, otherwise
 * by the mbuf chain "top" (which must be null if uio is not).  Data provided
 * in mbuf chain must be small enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers must check for short
 * counts if EINTR/ERESTART are returned.  Data and control buffers are freed
 * on return.
 */
sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
	int clen = 0, error, dontroute;
	int atomic = sosendallatonce(so) || top;
	struct ktls_session *tls;
	int tls_enq_cnt, tls_pruflag;

	tls_rtype = TLS_RLTYPE_APP;
		resid = uio->uio_resid;
	else if ((top->m_flags & M_PKTHDR) != 0)
		resid = top->m_pkthdr.len;
		resid = m_length(top, NULL);
	/*
	 * In theory resid should be unsigned.  However, space must be
	 * signed, as it might be less than 0 if we over-committed, and we
	 * must use a signed comparison of space and resid.  On the other
	 * hand, a negative resid causes us to loop sending 0-length
	 * segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
		td->td_ru.ru_msgsnd++;
	if (control != NULL)
		clen = control->m_len;

	error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags));
	tls = ktls_hold(so->so_snd.sb_tls_info);
		if (tls->mode == TCP_TLS_MODE_SW)
			tls_pruflag = PRUS_NOTREADY;

		if (control != NULL) {
			struct cmsghdr *cm = mtod(control, struct cmsghdr *);

			if (clen >= sizeof(*cm) &&
			    cm->cmsg_type == TLS_SET_RECORD_TYPE) {
				tls_rtype = *((uint8_t *)CMSG_DATA(cm));
		SOCKBUF_LOCK(&so->so_snd);
		if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = so->so_error;
			SOCKBUF_UNLOCK(&so->so_snd);
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0)) {
					SOCKBUF_UNLOCK(&so->so_snd);
			} else if (addr == NULL) {
				SOCKBUF_UNLOCK(&so->so_snd);
				if (so->so_proto->pr_flags & PR_CONNREQUIRED)
					error = EDESTADDRREQ;
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat) {
			SOCKBUF_UNLOCK(&so->so_snd);
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if ((so->so_state & SS_NBIO) ||
			    (flags & (MSG_NBIO | MSG_DONTWAIT)) != 0) {
				SOCKBUF_UNLOCK(&so->so_snd);
				error = EWOULDBLOCK;
			error = sbwait(&so->so_snd);
			SOCKBUF_UNLOCK(&so->so_snd);
		SOCKBUF_UNLOCK(&so->so_snd);
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
					ktls_frame(top, tls, &tls_enq_cnt,
					tls_rtype = TLS_RLTYPE_APP;
				/*
				 * Copy the data from userland into a mbuf
				 * chain.  If resid is 0, which can happen
				 * only if we have control to send, then
				 * a single empty mbuf is returned.  This
				 * is a workaround to prevent protocol send
				 * functions to panic.
				 */
					top = m_uiotombuf(uio, M_WAITOK, space,
					    tls->params.max_frame_len,
					    ((flags & MSG_EOR) ? M_EOR : 0));
						ktls_frame(top, tls,
						    &tls_enq_cnt, tls_rtype);
					tls_rtype = TLS_RLTYPE_APP;
					top = m_uiotombuf(uio, M_WAITOK, space,
					    (atomic ? max_hdr : 0),
					    (atomic ? M_PKTHDR : 0) |
					    ((flags & MSG_EOR) ? M_EOR : 0));
					error = EFAULT;	/* only possible error */
				space -= resid - uio->uio_resid;
				resid = uio->uio_resid;
			so->so_options |= SO_DONTROUTE;
		/*
		 * XXX all the SBS_CANTSENDMORE checks previously
		 * done could be out of date.  We could have received
		 * a reset packet in an interrupt or maybe we slept
		 * while doing page faults in uiomove() etc.  We
		 * could probably recheck again inside the locking
		 * protection here, but there are probably other
		 * places that this also happens.  We must rethink
		 * this.
		 */
		pru_flag = (flags & MSG_OOB) ? PRUS_OOB :
		/*
		 * If the user set MSG_EOF, the protocol understands
		 * this flag and nothing left to send then use
		 * PRU_SEND_EOF instead of PRU_SEND.
		 */
		    ((flags & MSG_EOF) &&
		     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
		    /* If there is more to send set PRUS_MORETOCOME. */
		    (flags & MSG_MORETOCOME) ||
		    (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0;
			pru_flag |= tls_pruflag;
		error = (*so->so_proto->pr_usrreqs->pru_send)(so,
		    pru_flag, top, addr, control, td);
			so->so_options &= ~SO_DONTROUTE;
		if (tls != NULL && tls->mode == TCP_TLS_MODE_SW) {
				ktls_enqueue(top, so, tls_enq_cnt);
	} while (resid && space > 0);
	SOCK_IO_SEND_UNLOCK(so);
	if (control != NULL)
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
	CURVNET_SET(so->so_vnet);
	error = so->so_proto->pr_usrreqs->pru_sosend(so, addr, uio,
	    top, control, flags, td);
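
/*
 * Illustrative sketch (not part of the original file): a kernel caller
 * sending a buffer through sosend() with a stack-built uio, as in-kernel
 * consumers commonly do.  The buffer and thread are assumed.
 */
#if 0
static int
example_sosend(struct socket *so, void *buf, size_t len, struct thread *td)
{
	struct uio auio;
	struct iovec aiov;

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = td;

	/* No destination address or control data; no flags. */
	return (sosend(so, NULL, &auio, NULL, NULL, 0, td));
}
#endif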
/*
 * The part of soreceive() that implements reading non-inline out-of-band
 * data from a socket.  For more complete comments, see soreceive(), from
 * which this code originated.
 *
 * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is
 * unable to return an mbuf chain to the caller.
 */
soreceive_rcvoob(struct socket *so, struct uio *uio, int flags)
	struct protosw *pr = so->so_proto;

	KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0"));

	m = m_get(M_WAITOK, MT_DATA);
	error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
		error = uiomove(mtod(m, void *),
		    (int) min(uio->uio_resid, m->m_len), uio);
	} while (uio->uio_resid && error == 0 && m);

/*
 * Following replacement or removal of the first mbuf on the first mbuf chain
 * of a socket buffer, push necessary state changes back into the socket
 * buffer so that other consumers see the values consistently.  'nextrecord'
 * is the caller's locally stored value of the original value of
 * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes.
 * NOTE: 'nextrecord' may be NULL.
 */
static __inline void
sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord)
	SOCKBUF_LOCK_ASSERT(sb);

	/*
	 * First, update for the new value of nextrecord.  If necessary, make
	 * it the first record.
	 */
	if (sb->sb_mb != NULL)
		sb->sb_mb->m_nextpkt = nextrecord;
		sb->sb_mb = nextrecord;

	/*
	 * Now update any dependent socket buffer fields to reflect the new
	 * state.  This is an expanded inline of SB_EMPTY_FIXUP(), with the
	 * addition of a second clause that takes care of the case where
	 * sb_mb has been updated, but remains the last record.
	 */
	if (sb->sb_mb == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (sb->sb_mb->m_nextpkt == NULL)
		sb->sb_lastrecord = sb->sb_mb;
/*
 * Implement receive operations on a socket.  We depend on the way that
 * records are added to the sockbuf by sbappend.  In particular, each record
 * (mbufs linked through m_next) must begin with an address if the protocol
 * so specifies, followed by an optional mbuf or mbufs containing ancillary
 * data, and then zero or more mbufs of data.  In order to allow parallelism
 * between network receive and copying to user space, as well as avoid
 * sleeping with a mutex held, we release the socket buffer mutex during the
 * user space copy.  Although the sockbuf is locked, new data may still be
 * appended, and thus we must maintain consistency of the sockbuf during that
 * time.
 *
 * The caller may receive the data as a single mbuf chain by supplying an
 * mbuf **mp0 for use in returning the chain.  The uio is then used only for
 * the count in uio_resid.
 */
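/*
 * For reference (added illustration, not from the original file), the
 * record layout the comment above describes looks like this: m_next links
 * run within a record, m_nextpkt links run between records, e.g.:
 *
 *	sb_mb -> MT_SONAME -m_next-> MT_CONTROL -m_next-> MT_DATA
 *	            |
 *	        m_nextpkt
 *	            |
 *	            v
 *	         MT_SONAME -m_next-> MT_DATA -m_next-> MT_DATA ...
 */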
soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
	struct mbuf *m, **mp;
	int flags, error, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	ssize_t orig_resid = uio->uio_resid;

	if (controlp != NULL)
		flags = *flagsp &~ MSG_EOR;
	if (flags & MSG_OOB)
		return (soreceive_rcvoob(so, uio, flags));
	if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING)
	    && uio->uio_resid) {
		(*pr->pr_usrreqs->pru_rcvd)(so, 0);
	error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));

	SOCKBUF_LOCK(&so->so_rcv);
	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more (subject
	 * to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_DONTWAIT is not set
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    sbavail(&so->so_rcv) < uio->uio_resid) &&
	    sbavail(&so->so_rcv) < so->so_rcv.sb_lowat &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !sbavail(&so->so_rcv),
		    ("receive: m == %p sbavail == %u",
		    m, sbavail(&so->so_rcv)));
		if (so->so_error || so->so_rerror) {
				error = so->so_error;
				error = so->so_rerror;
			if ((flags & MSG_PEEK) == 0) {
			SOCKBUF_UNLOCK(&so->so_rcv);
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			else if (so->so_rcv.sb_tlsdcc == 0 &&
			    so->so_rcv.sb_tlscc == 0) {
				SOCKBUF_UNLOCK(&so->so_rcv);
		for (; m != NULL; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
		if ((so->so_state & (SS_ISCONNECTING | SS_ISCONNECTED |
		    SS_ISDISCONNECTING | SS_ISDISCONNECTED)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED) != 0) {
			SOCKBUF_UNLOCK(&so->so_rcv);
		if (uio->uio_resid == 0) {
			SOCKBUF_UNLOCK(&so->so_rcv);
		if ((so->so_state & SS_NBIO) ||
		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			error = EWOULDBLOCK;
		SBLASTRECORDCHK(&so->so_rcv);
		SBLASTMBUFCHK(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		SOCKBUF_UNLOCK(&so->so_rcv);

	/*
	 * From this point onward, we maintain 'nextrecord' as a cache of the
	 * pointer to the next record in the socket buffer.  We must keep the
	 * various socket buffer pointers and local stack versions of the
	 * pointers in sync, pushing out modifications before dropping the
	 * socket buffer mutex, and re-reading them when picking it up.
	 *
	 * Otherwise, we will race with the network stack appending new data
	 * or records onto the socket buffer by using inconsistent/stale
	 * versions of the field, possibly resulting in socket buffer
	 * corruption.
	 *
	 * By holding the high-level sblock(), we prevent simultaneous
	 * readers from pulling off the front of the socket buffer.
	 */
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		uio->uio_td->td_ru.ru_msgrcv++;
	KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME,
		    ("m->m_type == %d", m->m_type));
			*psa = sodupsockaddr(mtod(m, struct sockaddr *),
		if (flags & MSG_PEEK) {
			sbfree(&so->so_rcv, m);
			so->so_rcv.sb_mb = m_free(m);
			m = so->so_rcv.sb_mb;
			sockbuf_pushsync(&so->so_rcv, nextrecord);

	/*
	 * Process one or more MT_CONTROL mbufs present before any data mbufs
	 * in the first mbuf chain on the socket buffer.  If MSG_PEEK, we
	 * just copy the data; if !MSG_PEEK, we call into the protocol to
	 * perform externalization (or freeing if controlp == NULL).
	 */
	if (m != NULL && m->m_type == MT_CONTROL) {
		struct mbuf *cm = NULL, *cmn;
		struct mbuf **cme = &cm;
		struct cmsghdr *cmsg;
		struct tls_get_record tgr;

		/*
		 * For MSG_TLSAPPDATA, check for a non-application data
		 * record.  If found, return ENXIO without removing
		 * it from the receive queue.  This allows a subsequent
		 * call without MSG_TLSAPPDATA to receive it.
		 * Note that, for TLS, there should only be a single
		 * control mbuf with the TLS_GET_RECORD message in it.
		 */
		if (flags & MSG_TLSAPPDATA) {
			cmsg = mtod(m, struct cmsghdr *);
			if (cmsg->cmsg_type == TLS_GET_RECORD &&
			    cmsg->cmsg_len == CMSG_LEN(sizeof(tgr))) {
				memcpy(&tgr, CMSG_DATA(cmsg), sizeof(tgr));
				/* This will need to change for TLS 1.3. */
				if (tgr.tls_type != TLS_RLTYPE_APP) {
					SOCKBUF_UNLOCK(&so->so_rcv);
			if (flags & MSG_PEEK) {
				if (controlp != NULL) {
					*controlp = m_copym(m, 0, m->m_len,
					controlp = &(*controlp)->m_next;
				sbfree(&so->so_rcv, m);
				so->so_rcv.sb_mb = m->m_next;
				cme = &(*cme)->m_next;
				m = so->so_rcv.sb_mb;
		} while (m != NULL && m->m_type == MT_CONTROL);
		if ((flags & MSG_PEEK) == 0)
			sockbuf_pushsync(&so->so_rcv, nextrecord);
		while (cm != NULL) {
			if (pr->pr_domain->dom_externalize != NULL) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				error = (*pr->pr_domain->dom_externalize)
				    (cm, controlp, flags);
				SOCKBUF_LOCK(&so->so_rcv);
			} else if (controlp != NULL)
			if (controlp != NULL) {
				while (*controlp != NULL)
					controlp = &(*controlp)->m_next;
			nextrecord = so->so_rcv.sb_mb->m_nextpkt;
			nextrecord = so->so_rcv.sb_mb;
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(m->m_nextpkt == nextrecord,
			    ("soreceive: post-control, nextrecord !sync"));
			if (nextrecord == NULL) {
				KASSERT(so->so_rcv.sb_mb == m,
				    ("soreceive: post-control, sb_mb!=m"));
				KASSERT(so->so_rcv.sb_lastrecord == m,
				    ("soreceive: post-control, lastrecord!=m"));
		if (type == MT_OOBDATA)
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(so->so_rcv.sb_mb == nextrecord,
			    ("soreceive: sb_mb != nextrecord"));
			if (so->so_rcv.sb_mb == NULL) {
				KASSERT(so->so_rcv.sb_lastrecord == NULL,
				    ("soreceive: sb_lastrecord != NULL"));
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);

	/*
	 * Now continue to read any data mbufs off of the head of the socket
	 * buffer until the read request is satisfied.  Note that 'type' is
	 * used to store the type of any mbuf reads that have happened so far
	 * such that soreceive() can stop reading if the type changes, which
	 * causes soreceive() to return only one of regular data and inline
	 * out-of-band data in a single socket receive operation.
	 */
2211 while (m != NULL && !(m->m_flags & M_NOTAVAIL) && uio->uio_resid > 0
2214 * If the type of mbuf has changed since the last mbuf
2215 * examined ('type'), end the receive operation.
2217 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2218 if (m->m_type == MT_OOBDATA || m->m_type == MT_CONTROL) {
2219 if (type != m->m_type)
2221 } else if (type == MT_OOBDATA)
2224 KASSERT(m->m_type == MT_DATA,
2225 ("m->m_type == %d", m->m_type));
2226 so->so_rcv.sb_state &= ~SBS_RCVATMARK;
2227 len = uio->uio_resid;
2228 if (so->so_oobmark && len > so->so_oobmark - offset)
2229 len = so->so_oobmark - offset;
2230 if (len > m->m_len - moff)
2231 len = m->m_len - moff;
2233 * If mp is set, just pass back the mbufs. Otherwise copy
2234 * them out via the uio, then free. The sockbuf must be
2235 * consistent here (it must point to the current mbuf and to
2236 * the next record) when we drop priority; we must note any
2237 * additions to the sockbuf when we block interrupts again.
2240 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2241 SBLASTRECORDCHK(&so->so_rcv);
2242 SBLASTMBUFCHK(&so->so_rcv);
2243 SOCKBUF_UNLOCK(&so->so_rcv);
2244 if ((m->m_flags & M_EXTPG) != 0)
2245 error = m_unmapped_uiomove(m, moff, uio,
2248 error = uiomove(mtod(m, char *) + moff,
2250 SOCKBUF_LOCK(&so->so_rcv);
2253 * The MT_SONAME mbuf has already been removed
2254 * from the record, so it is necessary to
2255 * remove the data mbufs, if any, to preserve
2256 * the invariant in the case of PR_ADDR that
2257 * requires MT_SONAME mbufs at the head of
2258 * each record.
2260 if (pr->pr_flags & PR_ATOMIC &&
2261 ((flags & MSG_PEEK) == 0))
2262 (void)sbdroprecord_locked(&so->so_rcv);
2263 SOCKBUF_UNLOCK(&so->so_rcv);
2267 uio->uio_resid -= len;
2268 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2269 if (len == m->m_len - moff) {
2270 if (m->m_flags & M_EOR)
2272 if (flags & MSG_PEEK) {
2276 nextrecord = m->m_nextpkt;
2277 sbfree(&so->so_rcv, m);
2279 m->m_nextpkt = NULL;
2282 so->so_rcv.sb_mb = m = m->m_next;
2285 so->so_rcv.sb_mb = m_free(m);
2286 m = so->so_rcv.sb_mb;
2288 sockbuf_pushsync(&so->so_rcv, nextrecord);
2289 SBLASTRECORDCHK(&so->so_rcv);
2290 SBLASTMBUFCHK(&so->so_rcv);
2293 if (flags & MSG_PEEK)
2297 if (flags & MSG_DONTWAIT) {
2298 *mp = m_copym(m, 0, len,
2302 * m_copym() couldn't allocate
2304 * mbufs. Adjust uio_resid back
2306 * (it was adjusted down by len
2307 * bytes, which we didn't end
2308 * up "copying" over).
2310 uio->uio_resid += len;
2314 SOCKBUF_UNLOCK(&so->so_rcv);
2315 *mp = m_copym(m, 0, len,
2317 SOCKBUF_LOCK(&so->so_rcv);
2320 sbcut_locked(&so->so_rcv, len);
2323 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2324 if (so->so_oobmark) {
2325 if ((flags & MSG_PEEK) == 0) {
2326 so->so_oobmark -= len;
2327 if (so->so_oobmark == 0) {
2328 so->so_rcv.sb_state |= SBS_RCVATMARK;
2333 if (offset == so->so_oobmark)
2337 if (flags & MSG_EOR)
2340 * If the MSG_WAITALL flag is set (for a non-atomic socket), we
2341 * must not quit until "uio->uio_resid == 0" or an error
2342 * terminates the receive. If a signal/timeout occurs, return
2343 * with a short count but without error. Keep the sockbuf locked
2344 * against other readers.
2346 while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
2347 !sosendallatonce(so) && nextrecord == NULL) {
2348 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2349 if (so->so_error || so->so_rerror ||
2350 so->so_rcv.sb_state & SBS_CANTRCVMORE)
2353 * Notify the protocol that some data has been
2354 * drained before blocking.
2356 if (pr->pr_flags & PR_WANTRCVD) {
2357 SOCKBUF_UNLOCK(&so->so_rcv);
2359 (*pr->pr_usrreqs->pru_rcvd)(so, flags);
2360 SOCKBUF_LOCK(&so->so_rcv);
2362 SBLASTRECORDCHK(&so->so_rcv);
2363 SBLASTMBUFCHK(&so->so_rcv);
2365 * We could have received some data while we were
2366 * notifying the protocol. Skip blocking in this case.
2368 if (so->so_rcv.sb_mb == NULL) {
2369 error = sbwait(&so->so_rcv);
2371 SOCKBUF_UNLOCK(&so->so_rcv);
2375 m = so->so_rcv.sb_mb;
2377 nextrecord = m->m_nextpkt;
2381 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2382 if (m != NULL && pr->pr_flags & PR_ATOMIC) {
2384 if ((flags & MSG_PEEK) == 0)
2385 (void) sbdroprecord_locked(&so->so_rcv);
2387 if ((flags & MSG_PEEK) == 0) {
2390 * First part is an inline SB_EMPTY_FIXUP(). Second
2391 * part makes sure sb_lastrecord is up-to-date if
2392 * there is still data in the socket buffer.
2394 so->so_rcv.sb_mb = nextrecord;
2395 if (so->so_rcv.sb_mb == NULL) {
2396 so->so_rcv.sb_mbtail = NULL;
2397 so->so_rcv.sb_lastrecord = NULL;
2398 } else if (nextrecord->m_nextpkt == NULL)
2399 so->so_rcv.sb_lastrecord = nextrecord;
2401 SBLASTRECORDCHK(&so->so_rcv);
2402 SBLASTMBUFCHK(&so->so_rcv);
2404 * If soreceive() is being done from the socket callback,
2405 * then we need not generate an ACK to the peer to update
2406 * the window, since an ACK will be generated on return to TCP.
2408 if (!(flags & MSG_SOCALLBCK) &&
2409 (pr->pr_flags & PR_WANTRCVD)) {
2410 SOCKBUF_UNLOCK(&so->so_rcv);
2412 (*pr->pr_usrreqs->pru_rcvd)(so, flags);
2413 SOCKBUF_LOCK(&so->so_rcv);
2416 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2417 if (orig_resid == uio->uio_resid && orig_resid &&
2418 (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
2419 SOCKBUF_UNLOCK(&so->so_rcv);
2422 SOCKBUF_UNLOCK(&so->so_rcv);
2427 SOCK_IO_RECV_UNLOCK(so);
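/*
 * Illustrative sketch (not part of this file): the MSG_PEEK and
 * MSG_WAITALL semantics implemented by soreceive_generic() above, as seen
 * from userspace.  read_header() is a hypothetical helper that peeks at a
 * stream and then reads a fixed-size header in full.
 */
#include <sys/types.h>
#include <sys/socket.h>

static int
read_header(int s, void *hdr, size_t hdrlen)
{
	char c;
	ssize_t n;

	/* MSG_PEEK copies data out without draining the socket buffer. */
	n = recv(s, &c, 1, MSG_PEEK);
	if (n <= 0)
		return (-1);
	/*
	 * MSG_WAITALL makes the receive loop until the request is filled,
	 * an error occurs, or the connection closes; a signal or timeout
	 * may still produce a short count, as the comments above note.
	 */
	n = recv(s, hdr, hdrlen, MSG_WAITALL);
	return (n == (ssize_t)hdrlen ? 0 : -1);
}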
2432 * Optimized version of soreceive() for stream (TCP) sockets.
2435 soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio,
2436 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2438 int len = 0, error = 0, flags, oresid;
2440 struct mbuf *m, *n = NULL;
2442 /* We only do stream sockets. */
2443 if (so->so_type != SOCK_STREAM)
2448 flags = *flagsp &~ MSG_EOR;
2451 if (controlp != NULL)
2453 if (flags & MSG_OOB)
2454 return (soreceive_rcvoob(so, uio, flags));
2462 * KTLS stores TLS records as records with a control message to
2463 * describe the framing.
2465 * We check once here, before acquiring locks, to optimize the
2466 * common case.
2468 if (sb->sb_tls_info != NULL)
2469 return (soreceive_generic(so, psa, uio, mp0, controlp,
2473 /* Prevent other readers from entering the socket. */
2474 error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
2480 if (sb->sb_tls_info != NULL) {
2482 SOCK_IO_RECV_UNLOCK(so);
2483 return (soreceive_generic(so, psa, uio, mp0, controlp,
2488 /* Easy one, no space to copyout anything. */
2489 if (uio->uio_resid == 0) {
2493 oresid = uio->uio_resid;
2495 /* We will never ever get anything unless we are or were connected. */
2496 if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {
2502 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2504 /* Abort if socket has reported problems. */
2506 if (sbavail(sb) > 0)
2508 if (oresid > uio->uio_resid)
2510 error = so->so_error;
2511 if (!(flags & MSG_PEEK))
2516 /* Door is closed. Deliver what is left, if any. */
2517 if (sb->sb_state & SBS_CANTRCVMORE) {
2518 if (sbavail(sb) > 0)
2524 /* Socket buffer is empty and we shall not block. */
2525 if (sbavail(sb) == 0 &&
2526 ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) {
2531 /* Socket buffer got some data that we shall deliver now. */
2532 if (sbavail(sb) > 0 && !(flags & MSG_WAITALL) &&
2533 ((so->so_state & SS_NBIO) ||
2534 (flags & (MSG_DONTWAIT|MSG_NBIO)) ||
2535 sbavail(sb) >= sb->sb_lowat ||
2536 sbavail(sb) >= uio->uio_resid ||
2537 sbavail(sb) >= sb->sb_hiwat)) {
2541 /* On MSG_WAITALL we must wait until all data or error arrives. */
2542 if ((flags & MSG_WAITALL) &&
2543 (sbavail(sb) >= uio->uio_resid || sbavail(sb) >= sb->sb_hiwat))
2547 * Wait and block until (more) data comes in.
2548 * NB: Drops the sockbuf lock during wait.
2556 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2557 KASSERT(sbavail(sb) > 0, ("%s: sockbuf empty", __func__));
2558 KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__));
2562 uio->uio_td->td_ru.ru_msgrcv++;
2564 /* Fill uio until full or current end of socket buffer is reached. */
2565 len = min(uio->uio_resid, sbavail(sb));
2567 /* Dequeue as many mbufs as possible. */
2568 if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) {
2572 m_cat(*mp0, sb->sb_mb);
2574 m != NULL && m->m_len <= len;
2576 KASSERT(!(m->m_flags & M_NOTAVAIL),
2577 ("%s: m %p not available", __func__, m));
2579 uio->uio_resid -= m->m_len;
2585 sb->sb_lastrecord = sb->sb_mb;
2586 if (sb->sb_mb == NULL)
2589 /* Copy the remainder. */
2591 KASSERT(sb->sb_mb != NULL,
2592 ("%s: len > 0 && sb->sb_mb empty", __func__));
2594 m = m_copym(sb->sb_mb, 0, len, M_NOWAIT);
2596 len = 0; /* Don't flush data from sockbuf. */
2598 uio->uio_resid -= len;
2609 /* NB: Must unlock socket buffer as uiomove may sleep. */
2611 error = m_mbuftouio(uio, sb->sb_mb, len);
2616 SBLASTRECORDCHK(sb);
2620 * Remove the delivered data from the socket buffer unless we
2621 * were only peeking.
2623 if (!(flags & MSG_PEEK)) {
2625 sbdrop_locked(sb, len);
2627 /* Notify protocol that we drained some data. */
2628 if ((so->so_proto->pr_flags & PR_WANTRCVD) &&
2629 (((flags & MSG_WAITALL) && uio->uio_resid > 0) ||
2630 !(flags & MSG_SOCALLBCK))) {
2633 (*so->so_proto->pr_usrreqs->pru_rcvd)(so, flags);
2639 * For MSG_WAITALL we may have to loop again and wait for
2640 * more data to come in.
2642 if ((flags & MSG_WAITALL) && uio->uio_resid > 0)
2645 SBLASTRECORDCHK(sb);
2648 SOCK_IO_RECV_UNLOCK(so);
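/*
 * Illustrative sketch (not part of this file): the sbavail(sb) >= sb_lowat
 * test above is what gives SO_RCVLOWAT its meaning on stream sockets; a
 * userspace reader can ask not to be woken until enough bytes are queued.
 */
#include <sys/socket.h>

static int
set_rcv_lowat(int s, int lowat)
{
	/* Values < 1 are rejected by the kernel (see sosetopt() below). */
	return (setsockopt(s, SOL_SOCKET, SO_RCVLOWAT, &lowat,
	    sizeof(lowat)));
}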
2653 * Optimized version of soreceive() for simple datagram cases from userspace.
2654 * Unlike in the stream case, we're able to drop a datagram if copyout()
2655 * fails, and because we handle datagrams atomically, we don't need to use a
2656 * sleep lock to prevent I/O interlacing.
2659 soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio,
2660 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2662 struct mbuf *m, *m2;
2665 struct protosw *pr = so->so_proto;
2666 struct mbuf *nextrecord;
2670 if (controlp != NULL)
2673 flags = *flagsp &~ MSG_EOR;
2678 * For any complicated cases, fall back to the full
2679 * soreceive_generic().
2681 if (mp0 != NULL || (flags & MSG_PEEK) || (flags & MSG_OOB))
2682 return (soreceive_generic(so, psa, uio, mp0, controlp,
2686 * Enforce restrictions on use.
2688 KASSERT((pr->pr_flags & PR_WANTRCVD) == 0,
2689 ("soreceive_dgram: wantrcvd"));
2690 KASSERT(pr->pr_flags & PR_ATOMIC, ("soreceive_dgram: !atomic"));
2691 KASSERT((so->so_rcv.sb_state & SBS_RCVATMARK) == 0,
2692 ("soreceive_dgram: SBS_RCVATMARK"));
2693 KASSERT((so->so_proto->pr_flags & PR_CONNREQUIRED) == 0,
2694 ("soreceive_dgram: P_CONNREQUIRED"));
2697 * Loop blocking while waiting for a datagram.
2699 SOCKBUF_LOCK(&so->so_rcv);
2700 while ((m = so->so_rcv.sb_mb) == NULL) {
2701 KASSERT(sbavail(&so->so_rcv) == 0,
2702 ("soreceive_dgram: sb_mb NULL but sbavail %u",
2703 sbavail(&so->so_rcv)));
2705 error = so->so_error;
2707 SOCKBUF_UNLOCK(&so->so_rcv);
2710 if (so->so_rcv.sb_state & SBS_CANTRCVMORE ||
2711 uio->uio_resid == 0) {
2712 SOCKBUF_UNLOCK(&so->so_rcv);
2715 if ((so->so_state & SS_NBIO) ||
2716 (flags & (MSG_DONTWAIT|MSG_NBIO))) {
2717 SOCKBUF_UNLOCK(&so->so_rcv);
2718 return (EWOULDBLOCK);
2720 SBLASTRECORDCHK(&so->so_rcv);
2721 SBLASTMBUFCHK(&so->so_rcv);
2722 error = sbwait(&so->so_rcv);
2724 SOCKBUF_UNLOCK(&so->so_rcv);
2728 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2731 uio->uio_td->td_ru.ru_msgrcv++;
2732 SBLASTRECORDCHK(&so->so_rcv);
2733 SBLASTMBUFCHK(&so->so_rcv);
2734 nextrecord = m->m_nextpkt;
2735 if (nextrecord == NULL) {
2736 KASSERT(so->so_rcv.sb_lastrecord == m,
2737 ("soreceive_dgram: lastrecord != m"));
2740 KASSERT(so->so_rcv.sb_mb->m_nextpkt == nextrecord,
2741 ("soreceive_dgram: m_nextpkt != nextrecord"));
2744 * Pull 'm' and its chain off the front of the packet queue.
2746 so->so_rcv.sb_mb = NULL;
2747 sockbuf_pushsync(&so->so_rcv, nextrecord);
2750 * Walk 'm's chain and free that many bytes from the socket buffer.
2752 for (m2 = m; m2 != NULL; m2 = m2->m_next)
2753 sbfree(&so->so_rcv, m2);
2756 * Do a few last checks before we let go of the lock.
2758 SBLASTRECORDCHK(&so->so_rcv);
2759 SBLASTMBUFCHK(&so->so_rcv);
2760 SOCKBUF_UNLOCK(&so->so_rcv);
2762 if (pr->pr_flags & PR_ADDR) {
2763 KASSERT(m->m_type == MT_SONAME,
2764 ("m->m_type == %d", m->m_type));
2766 *psa = sodupsockaddr(mtod(m, struct sockaddr *),
2771 /* XXXRW: Can this happen? */
2776 * Packet to copyout() is now in 'm' and it is disconnected from the
2777 * socket buffer.
2779 * Process one or more MT_CONTROL mbufs present before any data mbufs
2780 * in the first mbuf chain on the socket buffer. We call into the
2781 * protocol to perform externalization (or freeing if controlp ==
2782 * NULL). In some cases there can be only MT_CONTROL mbufs without
2783 * MT_DATA mbufs.
2785 if (m->m_type == MT_CONTROL) {
2786 struct mbuf *cm = NULL, *cmn;
2787 struct mbuf **cme = &cm;
2793 cme = &(*cme)->m_next;
2795 } while (m != NULL && m->m_type == MT_CONTROL);
2796 while (cm != NULL) {
2799 if (pr->pr_domain->dom_externalize != NULL) {
2800 error = (*pr->pr_domain->dom_externalize)
2801 (cm, controlp, flags);
2802 } else if (controlp != NULL)
2806 if (controlp != NULL) {
2807 while (*controlp != NULL)
2808 controlp = &(*controlp)->m_next;
2813 KASSERT(m == NULL || m->m_type == MT_DATA,
2814 ("soreceive_dgram: !data"));
2815 while (m != NULL && uio->uio_resid > 0) {
2816 len = uio->uio_resid;
2819 error = uiomove(mtod(m, char *), (int)len, uio);
2824 if (len == m->m_len)
2841 soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
2842 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2846 CURVNET_SET(so->so_vnet);
2847 error = (so->so_proto->pr_usrreqs->pru_soreceive(so, psa, uio,
2848 mp0, controlp, flagsp));
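/*
 * Illustrative sketch (not part of this file): an in-kernel consumer
 * draining a socket through the public soreceive() wrapper above, using
 * the conventional struct uio setup.  Assumes the usual kernel headers
 * (sys/uio.h and friends); error handling is trimmed.
 */
static int
example_kernel_recv(struct socket *so, void *buf, size_t len, size_t *donep)
{
	struct uio auio;
	struct iovec aiov;
	int error, flags = 0;

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;	/* buffer lives in kernel space */
	auio.uio_rw = UIO_READ;
	auio.uio_td = curthread;

	error = soreceive(so, NULL, &auio, NULL, NULL, &flags);
	*donep = len - auio.uio_resid;
	return (error);
}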
2854 soshutdown(struct socket *so, int how)
2857 int error, soerror_enotconn;
2859 if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
2862 soerror_enotconn = 0;
2865 (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) {
2867 * POSIX mandates that we return ENOTCONN when shutdown(2) is
2868 * invoked on an unconnected datagram socket; however, historically
2869 * we would actually tear the socket down. This is known to be
2870 * leveraged by some applications: one process unblocks another
2871 * that is waiting in recvXXX(2) on a socket they share. Try to
2872 * meet both backward-compatibility and POSIX requirements by
2873 * forcing ENOTCONN but still asking the protocol to perform pru_shutdown().
2875 if (so->so_type != SOCK_DGRAM && !SOLISTENING(so)) {
2879 soerror_enotconn = 1;
2882 if (SOLISTENING(so)) {
2883 if (how != SHUT_WR) {
2884 so->so_error = ECONNABORTED;
2885 solisten_wakeup(so); /* unlocks so */
2893 CURVNET_SET(so->so_vnet);
2895 if (pr->pr_usrreqs->pru_flush != NULL)
2896 (*pr->pr_usrreqs->pru_flush)(so, how);
2899 if (how != SHUT_RD) {
2900 error = (*pr->pr_usrreqs->pru_shutdown)(so);
2901 wakeup(&so->so_timeo);
2903 return ((error == 0 && soerror_enotconn) ? ENOTCONN : error);
2905 wakeup(&so->so_timeo);
2909 return (soerror_enotconn ? ENOTCONN : 0);
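/*
 * Illustrative sketch (not part of this file): the compatibility behavior
 * described in the comment above.  A process sharing an unconnected
 * datagram socket can dislodge a peer process blocked in recv(2); the
 * shutdown(2) call itself reports ENOTCONN, but the reader is still woken.
 */
#include <sys/socket.h>

static void
example_unblock_reader(int shared_dgram_fd)
{
	/* Expected to fail with ENOTCONN; the side effect is the point. */
	(void)shutdown(shared_dgram_fd, SHUT_RDWR);
}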
2913 sorflush(struct socket *so)
2922 * In order to avoid calling dom_dispose with the socket buffer mutex
2923 * held, we make a partial copy of the socket buffer and clear the
2924 * original. The new socket buffer copy won't have initialized locks so
2925 * we can only call routines that won't use or assert those locks.
2926 * Ideally calling socantrcvmore() would prevent data from being added
2927 * to the buffer, but currently it merely prevents buffered data from
2928 * being read by userspace. We make this effort to free buffered data
2929 * nonetheless.
2931 * Dislodge threads currently blocked in receive and wait to acquire
2932 * a lock against other simultaneous readers before clearing the
2933 * socket buffer. Don't let our acquire be interrupted by a signal
2934 * despite any existing socket disposition on interruptible waiting.
2938 error = SOCK_IO_RECV_LOCK(so, SBL_WAIT | SBL_NOINTR);
2940 KASSERT(SOLISTENING(so),
2941 ("%s: soiolock(%p) failed", __func__, so));
2945 SOCK_RECVBUF_LOCK(so);
2946 bzero(&aso, sizeof(aso));
2947 aso.so_pcb = so->so_pcb;
2948 bcopy(&so->so_rcv.sb_startzero, &aso.so_rcv.sb_startzero,
2949 offsetof(struct sockbuf, sb_endzero) -
2950 offsetof(struct sockbuf, sb_startzero));
2951 bzero(&so->so_rcv.sb_startzero,
2952 offsetof(struct sockbuf, sb_endzero) -
2953 offsetof(struct sockbuf, sb_startzero));
2954 SOCK_RECVBUF_UNLOCK(so);
2955 SOCK_IO_RECV_UNLOCK(so);
2958 * Dispose of special rights and flush the copied socket. Don't call
2959 * any unsafe routines (that rely on locks being initialized) on aso.
2962 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
2963 (*pr->pr_domain->dom_dispose)(&aso);
2964 sbrelease_internal(&aso.so_rcv, so);
2968 * Wrapper for Socket established helper hook.
2969 * Parameters: socket, context of the hook point, hook id.
2972 hhook_run_socket(struct socket *so, void *hctx, int32_t h_id)
2974 struct socket_hhook_data hhook_data = {
2981 CURVNET_SET(so->so_vnet);
2982 HHOOKS_RUN_IF(V_socket_hhh[h_id], &hhook_data, &so->osd);
2985 /* Ugly but needed, since hhooks return void for now */
2986 return (hhook_data.status);
2990 * Perhaps this routine, and sooptcopyout(), below, ought to come in an
2991 * additional variant to handle the case where the option value needs to be
2992 * some kind of integer, but not a specific size. In addition to their use
2993 * here, these functions are also called by the protocol-level pr_ctloutput()
2994 * routines.
2997 sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
3002 * If the user gives us more than we wanted, we ignore it, but if we
3003 * don't get the minimum length the caller wants, we return EINVAL.
3004 * On success, sopt->sopt_valsize is set to however much we actually
3005 * copied.
3007 if ((valsize = sopt->sopt_valsize) < minlen)
3010 sopt->sopt_valsize = valsize = len;
3012 if (sopt->sopt_td != NULL)
3013 return (copyin(sopt->sopt_val, buf, valsize));
3015 bcopy(sopt->sopt_val, buf, valsize);
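/*
 * Illustrative sketch (not part of this file): how a protocol-level
 * pr_ctloutput() routine typically consumes sooptcopyin() on the SOPT_SET
 * path.  MYPROTO_FOO and the option variable are hypothetical.
 */
static int
example_ctloutput_set(struct socket *so, struct sockopt *sopt)
{
	int error, optval;

	switch (sopt->sopt_name) {
	case MYPROTO_FOO:		/* hypothetical option name */
		error = sooptcopyin(sopt, &optval, sizeof(optval),
		    sizeof(optval));
		if (error != 0)
			return (error);
		/* ... apply optval to so's per-protocol state ... */
		return (0);
	default:
		return (ENOPROTOOPT);
	}
}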
3020 * Kernel version of setsockopt(2).
3022 * XXX: optlen is size_t, not socklen_t
3025 so_setsockopt(struct socket *so, int level, int optname, void *optval,
3028 struct sockopt sopt;
3030 sopt.sopt_level = level;
3031 sopt.sopt_name = optname;
3032 sopt.sopt_dir = SOPT_SET;
3033 sopt.sopt_val = optval;
3034 sopt.sopt_valsize = optlen;
3035 sopt.sopt_td = NULL;
3036 return (sosetopt(so, &sopt));
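/*
 * Illustrative sketch (not part of this file): an in-kernel socket user
 * (in the style of NFS or iSCSI initiators) growing a kernel-created
 * socket's send buffer through the wrapper above.
 */
static int
example_grow_sndbuf(struct socket *so)
{
	int sndbuf = 256 * 1024;	/* example size, not a recommendation */

	return (so_setsockopt(so, SOL_SOCKET, SO_SNDBUF, &sndbuf,
	    sizeof(sndbuf)));
}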
3040 sosetopt(struct socket *so, struct sockopt *sopt)
3051 CURVNET_SET(so->so_vnet);
3053 if (sopt->sopt_level != SOL_SOCKET) {
3054 if (so->so_proto->pr_ctloutput != NULL)
3055 error = (*so->so_proto->pr_ctloutput)(so, sopt);
3057 error = ENOPROTOOPT;
3059 switch (sopt->sopt_name) {
3060 case SO_ACCEPTFILTER:
3061 error = accept_filt_setopt(so, sopt);
3067 error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
3070 if (l.l_linger < 0 ||
3071 l.l_linger > USHRT_MAX ||
3072 l.l_linger > (INT_MAX / hz)) {
3077 so->so_linger = l.l_linger;
3079 so->so_options |= SO_LINGER;
3081 so->so_options &= ~SO_LINGER;
3088 case SO_USELOOPBACK:
3092 case SO_REUSEPORT_LB:
3100 error = sooptcopyin(sopt, &optval, sizeof optval,
3106 so->so_options |= sopt->sopt_name;
3108 so->so_options &= ~sopt->sopt_name;
3113 error = sooptcopyin(sopt, &optval, sizeof optval,
3118 if (optval < 0 || optval >= rt_numfibs) {
3122 if (((so->so_proto->pr_domain->dom_family == PF_INET) ||
3123 (so->so_proto->pr_domain->dom_family == PF_INET6) ||
3124 (so->so_proto->pr_domain->dom_family == PF_ROUTE)))
3125 so->so_fibnum = optval;
3130 case SO_USER_COOKIE:
3131 error = sooptcopyin(sopt, &val32, sizeof val32,
3135 so->so_user_cookie = val32;
3142 error = sooptcopyin(sopt, &optval, sizeof optval,
3148 * Values < 1 make no sense for any of these options,
3149 * so disallow them.
3156 error = sbsetopt(so, sopt->sopt_name, optval);
3161 #ifdef COMPAT_FREEBSD32
3162 if (SV_CURPROC_FLAG(SV_ILP32)) {
3163 struct timeval32 tv32;
3165 error = sooptcopyin(sopt, &tv32, sizeof tv32,
3167 CP(tv32, tv, tv_sec);
3168 CP(tv32, tv, tv_usec);
3171 error = sooptcopyin(sopt, &tv, sizeof tv,
3175 if (tv.tv_sec < 0 || tv.tv_usec < 0 ||
3176 tv.tv_usec >= 1000000) {
3180 if (tv.tv_sec > INT32_MAX)
3184 switch (sopt->sopt_name) {
3186 so->so_snd.sb_timeo = val;
3189 so->so_rcv.sb_timeo = val;
3196 error = sooptcopyin(sopt, &extmac, sizeof extmac,
3200 error = mac_setsockopt_label(sopt->sopt_td->td_ucred,
3208 error = sooptcopyin(sopt, &optval, sizeof optval,
3212 if (optval < 0 || optval > SO_TS_CLOCK_MAX) {
3216 so->so_ts_clock = optval;
3219 case SO_MAX_PACING_RATE:
3220 error = sooptcopyin(sopt, &val32, sizeof(val32),
3224 so->so_max_pacing_rate = val32;
3228 if (V_socket_hhh[HHOOK_SOCKET_OPT]->hhh_nhooks > 0)
3229 error = hhook_run_socket(so, sopt,
3232 error = ENOPROTOOPT;
3235 if (error == 0 && so->so_proto->pr_ctloutput != NULL)
3236 (void)(*so->so_proto->pr_ctloutput)(so, sopt);
3244 * Helper routine for getsockopt.
3247 sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
3255 * Documented get behavior is that we always return a value, possibly
3256 * truncated to fit in the user's buffer. Traditional behavior is
3257 * that we always tell the user precisely how much we copied, rather
3258 * than something useful like the total amount we had available for
3259 * her. Note that this interface is not idempotent; the entire
3260 * answer must be generated ahead of time.
3262 valsize = min(len, sopt->sopt_valsize);
3263 sopt->sopt_valsize = valsize;
3264 if (sopt->sopt_val != NULL) {
3265 if (sopt->sopt_td != NULL)
3266 error = copyout(buf, sopt->sopt_val, valsize);
3268 bcopy(buf, sopt->sopt_val, valsize);
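/*
 * Illustrative sketch (not part of this file): the SOPT_GET counterpart
 * of the sooptcopyin() sketch above; a protocol pr_ctloutput() routine
 * returning an integer option with sooptcopyout().  MYPROTO_FOO and
 * curval are hypothetical.
 */
static int
example_ctloutput_get(struct socket *so, struct sockopt *sopt, int curval)
{
	switch (sopt->sopt_name) {
	case MYPROTO_FOO:		/* hypothetical option name */
		/* Truncates to the user's buffer, per the comment above. */
		return (sooptcopyout(sopt, &curval, sizeof(curval)));
	default:
		return (ENOPROTOOPT);
	}
}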
3274 sogetopt(struct socket *so, struct sockopt *sopt)
3283 CURVNET_SET(so->so_vnet);
3285 if (sopt->sopt_level != SOL_SOCKET) {
3286 if (so->so_proto->pr_ctloutput != NULL)
3287 error = (*so->so_proto->pr_ctloutput)(so, sopt);
3289 error = ENOPROTOOPT;
3293 switch (sopt->sopt_name) {
3294 case SO_ACCEPTFILTER:
3295 error = accept_filt_getopt(so, sopt);
3300 l.l_onoff = so->so_options & SO_LINGER;
3301 l.l_linger = so->so_linger;
3303 error = sooptcopyout(sopt, &l, sizeof l);
3306 case SO_USELOOPBACK:
3312 case SO_REUSEPORT_LB:
3322 optval = so->so_options & sopt->sopt_name;
3324 error = sooptcopyout(sopt, &optval, sizeof optval);
3328 optval = so->so_proto->pr_domain->dom_family;
3332 optval = so->so_type;
3336 optval = so->so_proto->pr_protocol;
3342 optval = so->so_error;
3345 optval = so->so_rerror;
3352 optval = SOLISTENING(so) ? so->sol_sbsnd_hiwat :
3353 so->so_snd.sb_hiwat;
3357 optval = SOLISTENING(so) ? so->sol_sbrcv_hiwat :
3358 so->so_rcv.sb_hiwat;
3362 optval = SOLISTENING(so) ? so->sol_sbsnd_lowat :
3363 so->so_snd.sb_lowat;
3367 optval = SOLISTENING(so) ? so->sol_sbrcv_lowat :
3368 so->so_rcv.sb_lowat;
3373 tv = sbttotv(sopt->sopt_name == SO_SNDTIMEO ?
3374 so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
3375 #ifdef COMPAT_FREEBSD32
3376 if (SV_CURPROC_FLAG(SV_ILP32)) {
3377 struct timeval32 tv32;
3379 CP(tv, tv32, tv_sec);
3380 CP(tv, tv32, tv_usec);
3381 error = sooptcopyout(sopt, &tv32, sizeof tv32);
3384 error = sooptcopyout(sopt, &tv, sizeof tv);
3389 error = sooptcopyin(sopt, &extmac, sizeof(extmac),
3393 error = mac_getsockopt_label(sopt->sopt_td->td_ucred,
3397 error = sooptcopyout(sopt, &extmac, sizeof extmac);
3405 error = sooptcopyin(sopt, &extmac, sizeof(extmac),
3409 error = mac_getsockopt_peerlabel(
3410 sopt->sopt_td->td_ucred, so, &extmac);
3413 error = sooptcopyout(sopt, &extmac, sizeof extmac);
3419 case SO_LISTENQLIMIT:
3420 optval = SOLISTENING(so) ? so->sol_qlimit : 0;
3424 optval = SOLISTENING(so) ? so->sol_qlen : 0;
3427 case SO_LISTENINCQLEN:
3428 optval = SOLISTENING(so) ? so->sol_incqlen : 0;
3432 optval = so->so_ts_clock;
3435 case SO_MAX_PACING_RATE:
3436 optval = so->so_max_pacing_rate;
3440 if (V_socket_hhh[HHOOK_SOCKET_OPT]->hhh_nhooks > 0)
3441 error = hhook_run_socket(so, sopt,
3444 error = ENOPROTOOPT;
3456 soopt_getm(struct sockopt *sopt, struct mbuf **mp)
3458 struct mbuf *m, *m_prev;
3459 int sopt_size = sopt->sopt_valsize;
3461 MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA);
3464 if (sopt_size > MLEN) {
3465 MCLGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT);
3466 if ((m->m_flags & M_EXT) == 0) {
3470 m->m_len = min(MCLBYTES, sopt_size);
3472 m->m_len = min(MLEN, sopt_size);
3474 sopt_size -= m->m_len;
3479 MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA);
3484 if (sopt_size > MLEN) {
3485 MCLGET(m, sopt->sopt_td != NULL ? M_WAITOK :
3487 if ((m->m_flags & M_EXT) == 0) {
3492 m->m_len = min(MCLBYTES, sopt_size);
3494 m->m_len = min(MLEN, sopt_size);
3496 sopt_size -= m->m_len;
3504 soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
3506 struct mbuf *m0 = m;
3508 if (sopt->sopt_val == NULL)
3510 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
3511 if (sopt->sopt_td != NULL) {
3514 error = copyin(sopt->sopt_val, mtod(m, char *),
3521 bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
3522 sopt->sopt_valsize -= m->m_len;
3523 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
3526 if (m != NULL) /* enough mbufs should have been allocated at ip6_sooptmcopyin() */
3527 panic("ip6_sooptmcopyin");
3532 soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
3534 struct mbuf *m0 = m;
3537 if (sopt->sopt_val == NULL)
3539 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
3540 if (sopt->sopt_td != NULL) {
3543 error = copyout(mtod(m, char *), sopt->sopt_val,
3550 bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
3551 sopt->sopt_valsize -= m->m_len;
3552 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
3553 valsize += m->m_len;
3557 /* a large enough sockopt buffer should be supplied from user-land */
3561 sopt->sopt_valsize = valsize;
3566 * sohasoutofband(): the protocol notifies the socket layer of the arrival
3567 * of new out-of-band data; the socket layer then notifies socket consumers.
3570 sohasoutofband(struct socket *so)
3573 if (so->so_sigio != NULL)
3574 pgsigio(&so->so_sigio, SIGURG, 0);
3575 selwakeuppri(&so->so_rdsel, PSOCK);
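/*
 * Illustrative sketch (not part of this file): the consumer side of the
 * notification above.  For pgsigio() to deliver SIGURG, the process must
 * first claim ownership of the socket with fcntl(F_SETOWN).
 */
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int
example_arm_sigurg(int s, void (*handler)(int))
{
	if (signal(SIGURG, handler) == SIG_ERR)
		return (-1);
	/* Route SIGURG raised by sohasoutofband() to this process. */
	return (fcntl(s, F_SETOWN, getpid()));
}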
3579 sopoll(struct socket *so, int events, struct ucred *active_cred,
3584 * We do not need to set or assert curvnet as long as everyone uses
3585 * sopoll_generic().
3587 return (so->so_proto->pr_usrreqs->pru_sopoll(so, events, active_cred,
3592 sopoll_generic(struct socket *so, int events, struct ucred *active_cred,
3598 if (SOLISTENING(so)) {
3599 if (!(events & (POLLIN | POLLRDNORM)))
3601 else if (!TAILQ_EMPTY(&so->sol_comp))
3602 revents = events & (POLLIN | POLLRDNORM);
3603 else if ((events & POLLINIGNEOF) == 0 && so->so_error)
3604 revents = (events & (POLLIN | POLLRDNORM)) | POLLHUP;
3606 selrecord(td, &so->so_rdsel);
3611 SOCKBUF_LOCK(&so->so_snd);
3612 SOCKBUF_LOCK(&so->so_rcv);
3613 if (events & (POLLIN | POLLRDNORM))
3614 if (soreadabledata(so))
3615 revents |= events & (POLLIN | POLLRDNORM);
3616 if (events & (POLLOUT | POLLWRNORM))
3617 if (sowriteable(so))
3618 revents |= events & (POLLOUT | POLLWRNORM);
3619 if (events & (POLLPRI | POLLRDBAND))
3620 if (so->so_oobmark ||
3621 (so->so_rcv.sb_state & SBS_RCVATMARK))
3622 revents |= events & (POLLPRI | POLLRDBAND);
3623 if ((events & POLLINIGNEOF) == 0) {
3624 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
3625 revents |= events & (POLLIN | POLLRDNORM);
3626 if (so->so_snd.sb_state & SBS_CANTSENDMORE)
3630 if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
3631 revents |= events & POLLRDHUP;
3634 (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND | POLLRDHUP)) {
3635 selrecord(td, &so->so_rdsel);
3636 so->so_rcv.sb_flags |= SB_SEL;
3638 if (events & (POLLOUT | POLLWRNORM)) {
3639 selrecord(td, &so->so_wrsel);
3640 so->so_snd.sb_flags |= SB_SEL;
3643 SOCKBUF_UNLOCK(&so->so_rcv);
3644 SOCKBUF_UNLOCK(&so->so_snd);
3651 soo_kqfilter(struct file *fp, struct knote *kn)
3653 struct socket *so = kn->kn_fp->f_data;
3657 switch (kn->kn_filter) {
3659 kn->kn_fop = &soread_filtops;
3660 knl = &so->so_rdsel.si_note;
3664 kn->kn_fop = &sowrite_filtops;
3665 knl = &so->so_wrsel.si_note;
3669 kn->kn_fop = &soempty_filtops;
3670 knl = &so->so_wrsel.si_note;
3678 if (SOLISTENING(so)) {
3679 knlist_add(knl, kn, 1);
3682 knlist_add(knl, kn, 1);
3683 sb->sb_flags |= SB_KNOTE;
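/*
 * Illustrative sketch (not part of this file): what soo_kqfilter() serves.
 * A userspace consumer registers EVFILT_READ with NOTE_LOWAT, which
 * filt_soread() below honors via kn_sfflags/kn_sdata.
 */
#include <sys/types.h>
#include <sys/event.h>

static int
example_wait_readable(int kq, int s, int lowat)
{
	struct kevent kev;

	/* NOTE_LOWAT passes the low-water threshold in the data field. */
	EV_SET(&kev, s, EVFILT_READ, EV_ADD, NOTE_LOWAT, lowat, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		return (-1);
	/* Block until at least 'lowat' bytes (or EOF/error) are pending. */
	return (kevent(kq, NULL, 0, &kev, 1, NULL));
}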
3691 * Some routines that return EOPNOTSUPP for entry points that are not
3692 * supported by a protocol. Fill in as needed.
3695 pru_accept_notsupp(struct socket *so, struct sockaddr **nam)
3702 pru_aio_queue_notsupp(struct socket *so, struct kaiocb *job)
3709 pru_attach_notsupp(struct socket *so, int proto, struct thread *td)
3716 pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
3723 pru_bindat_notsupp(int fd, struct socket *so, struct sockaddr *nam,
3731 pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
3738 pru_connectat_notsupp(int fd, struct socket *so, struct sockaddr *nam,
3746 pru_connect2_notsupp(struct socket *so1, struct socket *so2)
3753 pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data,
3754 struct ifnet *ifp, struct thread *td)
3761 pru_disconnect_notsupp(struct socket *so)
3768 pru_listen_notsupp(struct socket *so, int backlog, struct thread *td)
3775 pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam)
3782 pru_rcvd_notsupp(struct socket *so, int flags)
3789 pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags)
3796 pru_send_notsupp(struct socket *so, int flags, struct mbuf *m,
3797 struct sockaddr *addr, struct mbuf *control, struct thread *td)
3800 if (control != NULL)
3802 if ((flags & PRUS_NOTREADY) == 0)
3804 return (EOPNOTSUPP);
3808 pru_ready_notsupp(struct socket *so, struct mbuf *m, int count)
3811 return (EOPNOTSUPP);
3815 * This isn't really a ``null'' operation, but it's the default one and
3816 * doesn't do anything destructive.
3819 pru_sense_null(struct socket *so, struct stat *sb)
3822 sb->st_blksize = so->so_snd.sb_hiwat;
3827 pru_shutdown_notsupp(struct socket *so)
3834 pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam)
3841 pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
3842 struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
3849 pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
3850 struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
3857 pru_sopoll_notsupp(struct socket *so, int events, struct ucred *cred,
3865 filt_sordetach(struct knote *kn)
3867 struct socket *so = kn->kn_fp->f_data;
3870 knlist_remove(&so->so_rdsel.si_note, kn, 1);
3871 if (!SOLISTENING(so) && knlist_empty(&so->so_rdsel.si_note))
3872 so->so_rcv.sb_flags &= ~SB_KNOTE;
3873 so_rdknl_unlock(so);
3878 filt_soread(struct knote *kn, long hint)
3882 so = kn->kn_fp->f_data;
3884 if (SOLISTENING(so)) {
3885 SOCK_LOCK_ASSERT(so);
3886 kn->kn_data = so->sol_qlen;
3888 kn->kn_flags |= EV_EOF;
3889 kn->kn_fflags = so->so_error;
3892 return (!TAILQ_EMPTY(&so->sol_comp));
3895 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
3897 kn->kn_data = sbavail(&so->so_rcv) - so->so_rcv.sb_ctl;
3898 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
3899 kn->kn_flags |= EV_EOF;
3900 kn->kn_fflags = so->so_error;
3902 } else if (so->so_error || so->so_rerror)
3905 if (kn->kn_sfflags & NOTE_LOWAT) {
3906 if (kn->kn_data >= kn->kn_sdata)
3908 } else if (sbavail(&so->so_rcv) >= so->so_rcv.sb_lowat)
3911 /* This hook returning non-zero indicates an event, not an error. */
3912 return (hhook_run_socket(so, NULL, HHOOK_FILT_SOREAD));
3916 filt_sowdetach(struct knote *kn)
3918 struct socket *so = kn->kn_fp->f_data;
3921 knlist_remove(&so->so_wrsel.si_note, kn, 1);
3922 if (!SOLISTENING(so) && knlist_empty(&so->so_wrsel.si_note))
3923 so->so_snd.sb_flags &= ~SB_KNOTE;
3924 so_wrknl_unlock(so);
3929 filt_sowrite(struct knote *kn, long hint)
3933 so = kn->kn_fp->f_data;
3935 if (SOLISTENING(so))
3938 SOCKBUF_LOCK_ASSERT(&so->so_snd);
3939 kn->kn_data = sbspace(&so->so_snd);
3941 hhook_run_socket(so, kn, HHOOK_FILT_SOWRITE);
3943 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
3944 kn->kn_flags |= EV_EOF;
3945 kn->kn_fflags = so->so_error;
3947 } else if (so->so_error) /* temporary udp error */
3949 else if (((so->so_state & SS_ISCONNECTED) == 0) &&
3950 (so->so_proto->pr_flags & PR_CONNREQUIRED))
3952 else if (kn->kn_sfflags & NOTE_LOWAT)
3953 return (kn->kn_data >= kn->kn_sdata);
3955 return (kn->kn_data >= so->so_snd.sb_lowat);
3959 filt_soempty(struct knote *kn, long hint)
3963 so = kn->kn_fp->f_data;
3965 if (SOLISTENING(so))
3968 SOCKBUF_LOCK_ASSERT(&so->so_snd);
3969 kn->kn_data = sbused(&so->so_snd);
3971 if (kn->kn_data == 0)
3978 socheckuid(struct socket *so, uid_t uid)
3983 if (so->so_cred->cr_uid != uid)
3989 * These functions are used by protocols to notify the socket layer (and its
3990 * consumers) of state changes in the sockets driven by protocol-side events.
3994 * Procedures to manipulate state flags of socket and do appropriate wakeups.
3996 * Normal sequence from the active (originating) side is that
3997 * soisconnecting() is called during processing of connect() call, resulting
3998 * in an eventual call to soisconnected() if/when the connection is
3999 * established. When the connection is torn down soisdisconnecting() is
4000 * called during processing of disconnect() call, and soisdisconnected() is
4001 * called when the connection to the peer is totally severed. The semantics
4002 * of these routines are such that connectionless protocols can call
4003 * soisconnected() and soisdisconnected() only, bypassing the in-progress
4004 * calls when setting up a ``connection'' takes no time.
4006 * From the passive side, a socket is created with two queues of sockets:
4007 * so_incomp for connections in progress and so_comp for connections already
4008 * made and awaiting user acceptance. As a protocol is preparing incoming
4009 * connections, it creates a socket structure queued on so_incomp by calling
4010 * sonewconn(). When the connection is established, soisconnected() is
4011 * called, and transfers the socket structure to so_comp, making it available
4012 * to accept().
4014 * If a socket is closed with sockets on either so_incomp or so_comp, these
4015 * sockets are dropped.
4017 * If higher-level protocols are implemented in the kernel, the wakeups done
4018 * here will sometimes cause software-interrupt process scheduling.
4021 soisconnecting(struct socket *so)
4025 so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
4026 so->so_state |= SS_ISCONNECTING;
4031 soisconnected(struct socket *so)
4033 bool last __diagused;
4036 so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
4037 so->so_state |= SS_ISCONNECTED;
4039 if (so->so_qstate == SQ_INCOMP) {
4040 struct socket *head = so->so_listen;
4043 KASSERT(head, ("%s: so %p on incomp of NULL", __func__, so));
4045 * When promoting a socket from the incomplete queue to the
4046 * complete one, we need to lock in the reverse order. We first
4047 * trylock, and if that doesn't succeed, we take the hard way:
4048 * leave a reference and recheck consistency after proper
4049 * locking.
4051 if (__predict_false(SOLISTEN_TRYLOCK(head) == 0)) {
4054 SOLISTEN_LOCK(head);
4056 if (__predict_false(head != so->so_listen)) {
4058 * The socket went off the listen queue;
4059 * we probably lost a race with close(2) of the
4060 * listening socket. The socket is about to be soabort()ed.
4066 last = refcount_release(&head->so_count);
4067 KASSERT(!last, ("%s: released last reference for %p",
4071 if ((so->so_options & SO_ACCEPTFILTER) == 0) {
4072 TAILQ_REMOVE(&head->sol_incomp, so, so_list);
4073 head->sol_incqlen--;
4074 TAILQ_INSERT_TAIL(&head->sol_comp, so, so_list);
4076 so->so_qstate = SQ_COMP;
4078 solisten_wakeup(head); /* unlocks */
4080 SOCKBUF_LOCK(&so->so_rcv);
4081 soupcall_set(so, SO_RCV,
4082 head->sol_accept_filter->accf_callback,
4083 head->sol_accept_filter_arg);
4084 so->so_options &= ~SO_ACCEPTFILTER;
4085 ret = head->sol_accept_filter->accf_callback(so,
4086 head->sol_accept_filter_arg, M_NOWAIT);
4087 if (ret == SU_ISCONNECTED) {
4088 soupcall_clear(so, SO_RCV);
4089 SOCKBUF_UNLOCK(&so->so_rcv);
4092 SOCKBUF_UNLOCK(&so->so_rcv);
4094 SOLISTEN_UNLOCK(head);
4099 wakeup(&so->so_timeo);
4105 soisdisconnecting(struct socket *so)
4109 so->so_state &= ~SS_ISCONNECTING;
4110 so->so_state |= SS_ISDISCONNECTING;
4112 if (!SOLISTENING(so)) {
4113 SOCKBUF_LOCK(&so->so_rcv);
4114 socantrcvmore_locked(so);
4115 SOCKBUF_LOCK(&so->so_snd);
4116 socantsendmore_locked(so);
4119 wakeup(&so->so_timeo);
4123 soisdisconnected(struct socket *so)
4129 * There is at least one reader of so_state that does not
4130 * acquire the socket lock, namely soreceive_generic(). Ensure
4131 * that it never sees all flags that track connection status
4132 * cleared, by ordering the update with the release semantics
4133 * of a thread fence.
4135 so->so_state |= SS_ISDISCONNECTED;
4136 atomic_thread_fence_rel();
4137 so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
4139 if (!SOLISTENING(so)) {
4141 SOCKBUF_LOCK(&so->so_rcv);
4142 socantrcvmore_locked(so);
4143 SOCKBUF_LOCK(&so->so_snd);
4144 sbdrop_locked(&so->so_snd, sbused(&so->so_snd));
4145 socantsendmore_locked(so);
4148 wakeup(&so->so_timeo);
4152 soiolock(struct socket *so, struct sx *sx, int flags)
4156 KASSERT((flags & SBL_VALID) == flags,
4157 ("soiolock: invalid flags %#x", flags));
4159 if ((flags & SBL_WAIT) != 0) {
4160 if ((flags & SBL_NOINTR) != 0) {
4163 error = sx_xlock_sig(sx);
4167 } else if (!sx_try_xlock(sx)) {
4168 return (EWOULDBLOCK);
4171 if (__predict_false(SOLISTENING(so))) {
4179 soiounlock(struct sx *sx)
4185 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.
4188 sodupsockaddr(const struct sockaddr *sa, int mflags)
4190 struct sockaddr *sa2;
4192 sa2 = malloc(sa->sa_len, M_SONAME, mflags);
4194 bcopy(sa, sa2, sa->sa_len);
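/*
 * Illustrative sketch (not part of this file): the typical caller pattern
 * for sodupsockaddr(), e.g. in a protocol's pru_peeraddr implementation
 * handing a copy of a cached address back to the socket layer.  'cached'
 * is a hypothetical protocol-held address; the caller frees the copy with
 * free(sa, M_SONAME).
 */
static int
example_copy_peeraddr(struct sockaddr *cached, struct sockaddr **nam)
{
	if (cached == NULL)
		return (ENOTCONN);
	/* With M_WAITOK the allocation, and hence the copy, cannot fail. */
	*nam = sodupsockaddr(cached, M_WAITOK);
	return (0);
}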
4199 * Register per-socket destructor.
4202 sodtor_set(struct socket *so, so_dtor_t *func)
4205 SOCK_LOCK_ASSERT(so);
4210 * Register per-socket buffer upcalls.
4213 soupcall_set(struct socket *so, int which, so_upcall_t func, void *arg)
4217 KASSERT(!SOLISTENING(so), ("%s: so %p listening", __func__, so));
4227 panic("soupcall_set: bad which");
4229 SOCKBUF_LOCK_ASSERT(sb);
4230 sb->sb_upcall = func;
4231 sb->sb_upcallarg = arg;
4232 sb->sb_flags |= SB_UPCALL;
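/*
 * Illustrative sketch (not part of this file): registering a receive
 * upcall with soupcall_set() above.  Upcalls run with the sockbuf lock
 * held and must not sleep; example_upcall and its argument are
 * hypothetical.
 */
static int
example_upcall(struct socket *so, void *arg, int waitflag)
{
	/* Typically: schedule a task to drain 'so'; never sleep here. */
	return (SU_OK);
}

static void
example_arm_upcall(struct socket *so, void *arg)
{
	SOCKBUF_LOCK(&so->so_rcv);
	soupcall_set(so, SO_RCV, example_upcall, arg);
	SOCKBUF_UNLOCK(&so->so_rcv);
}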
4236 soupcall_clear(struct socket *so, int which)
4240 KASSERT(!SOLISTENING(so), ("%s: so %p listening", __func__, so));
4250 panic("soupcall_clear: bad which");
4252 SOCKBUF_LOCK_ASSERT(sb);
4253 KASSERT(sb->sb_upcall != NULL,
4254 ("%s: so %p no upcall to clear", __func__, so));
4255 sb->sb_upcall = NULL;
4256 sb->sb_upcallarg = NULL;
4257 sb->sb_flags &= ~SB_UPCALL;
4261 solisten_upcall_set(struct socket *so, so_upcall_t func, void *arg)
4264 SOLISTEN_LOCK_ASSERT(so);
4265 so->sol_upcall = func;
4266 so->sol_upcallarg = arg;
4270 so_rdknl_lock(void *arg)
4272 struct socket *so = arg;
4274 if (SOLISTENING(so))
4277 SOCKBUF_LOCK(&so->so_rcv);
4281 so_rdknl_unlock(void *arg)
4283 struct socket *so = arg;
4285 if (SOLISTENING(so))
4288 SOCKBUF_UNLOCK(&so->so_rcv);
4292 so_rdknl_assert_lock(void *arg, int what)
4294 struct socket *so = arg;
4296 if (what == LA_LOCKED) {
4297 if (SOLISTENING(so))
4298 SOCK_LOCK_ASSERT(so);
4300 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
4302 if (SOLISTENING(so))
4303 SOCK_UNLOCK_ASSERT(so);
4305 SOCKBUF_UNLOCK_ASSERT(&so->so_rcv);
4310 so_wrknl_lock(void *arg)
4312 struct socket *so = arg;
4314 if (SOLISTENING(so))
4317 SOCKBUF_LOCK(&so->so_snd);
4321 so_wrknl_unlock(void *arg)
4323 struct socket *so = arg;
4325 if (SOLISTENING(so))
4328 SOCKBUF_UNLOCK(&so->so_snd);
4332 so_wrknl_assert_lock(void *arg, int what)
4334 struct socket *so = arg;
4336 if (what == LA_LOCKED) {
4337 if (SOLISTENING(so))
4338 SOCK_LOCK_ASSERT(so);
4340 SOCKBUF_LOCK_ASSERT(&so->so_snd);
4342 if (SOLISTENING(so))
4343 SOCK_UNLOCK_ASSERT(so);
4345 SOCKBUF_UNLOCK_ASSERT(&so->so_snd);
4350 * Create an external-format (``xsocket'') structure using the information in
4351 * the kernel-format socket structure pointed to by so. This is done to
4352 * reduce the spew of irrelevant information over this interface, to isolate
4353 * user code from changes in the kernel structure, and potentially to provide
4354 * information-hiding if we decide that some of this information should be
4355 * hidden from users.
4358 sotoxsocket(struct socket *so, struct xsocket *xso)
4361 bzero(xso, sizeof(*xso));
4362 xso->xso_len = sizeof *xso;
4363 xso->xso_so = (uintptr_t)so;
4364 xso->so_type = so->so_type;
4365 xso->so_options = so->so_options;
4366 xso->so_linger = so->so_linger;
4367 xso->so_state = so->so_state;
4368 xso->so_pcb = (uintptr_t)so->so_pcb;
4369 xso->xso_protocol = so->so_proto->pr_protocol;
4370 xso->xso_family = so->so_proto->pr_domain->dom_family;
4371 xso->so_timeo = so->so_timeo;
4372 xso->so_error = so->so_error;
4373 xso->so_uid = so->so_cred->cr_uid;
4374 xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
4375 if (SOLISTENING(so)) {
4376 xso->so_qlen = so->sol_qlen;
4377 xso->so_incqlen = so->sol_incqlen;
4378 xso->so_qlimit = so->sol_qlimit;
4379 xso->so_oobmark = 0;
4381 xso->so_state |= so->so_qstate;
4382 xso->so_qlen = xso->so_incqlen = xso->so_qlimit = 0;
4383 xso->so_oobmark = so->so_oobmark;
4384 sbtoxsockbuf(&so->so_snd, &xso->so_snd);
4385 sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
4390 so_sockbuf_rcv(struct socket *so)
4393 return (&so->so_rcv);
4397 so_sockbuf_snd(struct socket *so)
4400 return (&so->so_snd);
4404 so_state_get(const struct socket *so)
4407 return (so->so_state);
4411 so_state_set(struct socket *so, int val)
4418 so_options_get(const struct socket *so)
4421 return (so->so_options);
4425 so_options_set(struct socket *so, int val)
4428 so->so_options = val;
4432 so_error_get(const struct socket *so)
4435 return (so->so_error);
4439 so_error_set(struct socket *so, int val)
4446 so_linger_get(const struct socket *so)
4449 return (so->so_linger);
4453 so_linger_set(struct socket *so, int val)
4456 KASSERT(val >= 0 && val <= USHRT_MAX && val <= (INT_MAX / hz),
4457 ("%s: val %d out of range", __func__, val));
4459 so->so_linger = val;
4463 so_protosw_get(const struct socket *so)
4466 return (so->so_proto);
4470 so_protosw_set(struct socket *so, struct protosw *val)
4477 so_sorwakeup(struct socket *so)
4484 so_sowwakeup(struct socket *so)
4491 so_sorwakeup_locked(struct socket *so)
4494 sorwakeup_locked(so);
4498 so_sowwakeup_locked(struct socket *so)
4501 sowwakeup_locked(so);
4505 so_lock(struct socket *so)
4512 so_unlock(struct socket *so)