/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2004 The FreeBSD Foundation
 * Copyright (c) 2004-2008 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Comments on the socket life cycle:
 *
 * soalloc() sets up socket layer state for a socket, called only by
 * socreate() and sonewconn().  Socket layer private.
 *
 * sodealloc() tears down socket layer state for a socket, called only by
 * sofree() and sonewconn().  Socket layer private.
 *
 * pru_attach() associates protocol layer state with an allocated socket;
 * called only once, may fail, aborting socket allocation.  This is called
 * from socreate() and sonewconn().  Socket layer private.
 *
 * pru_detach() disassociates protocol layer state from an attached socket,
 * and will be called exactly once for sockets in which pru_attach() has
 * been successfully called.  If pru_attach() returned an error,
 * pru_detach() will not be called.  Socket layer private.
 *
 * pru_abort() and pru_close() notify the protocol layer that the last
 * consumer of a socket is starting to tear down the socket, and that the
 * protocol should terminate the connection.  Historically, pru_abort() also
 * detached protocol state from the socket state, but this is no longer the
 * case.
 *
 * socreate() creates a socket and attaches protocol state.  This is a public
 * interface that may be used by socket layer consumers to create new
 * sockets.
 *
 * sonewconn() creates a socket and attaches protocol state.  This is a
 * public interface that may be used by protocols to create new sockets when
 * a new connection is received and will be available for accept() on a
 * listen socket.
 *
 * soclose() destroys a socket after possibly waiting for it to disconnect.
 * This is a public interface that socket consumers should use to close and
 * release a socket when done with it.
 *
 * soabort() destroys a socket without waiting for it to disconnect (used
 * only for incoming connections that are already partially or fully
 * connected).  This is used internally by the socket layer when clearing
 * listen socket queues (due to overflow or close on the listen socket), but
 * is also a public interface protocols may use to abort connections in
 * their incomplete listen queues should they no longer be required.  Sockets
 * placed in completed connection listen queues should not be aborted for
 * reasons described in the comment above the soclose() implementation.  This
 * is not a general purpose close routine, and except in the specific
 * circumstances described here, should not be used.
 *
 * sofree() will free a socket and its protocol state if all references on
 * the socket have been released, and is the public interface to attempt to
 * free a socket when a reference is removed.  This is a socket layer private
 * interface.
 *
 * NOTE: In addition to socreate() and soclose(), which provide a single
 * socket reference to the consumer to be managed as required, there are two
 * calls to explicitly manage socket references, soref(), and sorele().
 * Currently, these are generally required only when transitioning a socket
 * from a listen queue to a file descriptor, in order to prevent garbage
 * collection of the socket at an untimely moment.  For a number of reasons,
 * these interfaces are not preferred, and should be avoided.
 *
 * NOTE: With regard to VNETs the general rule is that callers do not set
 * curvnet.  Exceptions to this rule include soabort(), sodisconnect(),
 * sofree() (and with that sorele(), sotryfree()), as well as sonewconn()
 * and sorflush(), which are usually called from a pre-set VNET context.
 * sopoll() currently does not need a VNET context to be set.
 */
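
/*
 * Illustrative sketch (not part of the original file): a typical in-kernel
 * consumer drives the public half of this life cycle roughly as follows,
 * where 'cred' and 'td' are the caller's credentials and thread, and 'sin'
 * is a caller-prepared sockaddr_in:
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(PF_INET, &so, SOCK_STREAM, IPPROTO_TCP, cred, td);
 *	if (error != 0)
 *		return (error);
 *	error = sobind(so, (struct sockaddr *)&sin, td);
 *	...
 *	soclose(so);	(drops the reference returned by socreate())
 *
 * soalloc()/sodealloc() and the pru_*() entry points above are internal
 * steps of socreate()/soclose() and are not called directly.
 */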

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_kern_tls.h"
#include "opt_sctp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/hhook.h>
#include <sys/kernel.h>
#include <sys/khelp.h>
#include <sys/ktls.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/sbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <net/route.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <sys/unpcb.h>
#include <sys/jail.h>
#include <sys/syslog.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp.h>

#include <net/vnet.h>

#include <security/mac/mac_framework.h>

#include <vm/uma.h>

#ifdef COMPAT_FREEBSD32
#include <sys/mount.h>
#include <sys/sysent.h>
#include <compat/freebsd32/freebsd32.h>
#endif

static int	soreceive_rcvoob(struct socket *so, struct uio *uio,
		    int flags);
static void	so_rdknl_lock(void *);
static void	so_rdknl_unlock(void *);
static void	so_rdknl_assert_lock(void *, int);
static void	so_wrknl_lock(void *);
static void	so_wrknl_unlock(void *);
static void	so_wrknl_assert_lock(void *, int);

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_soempty(struct knote *kn, long hint);
static int inline hhook_run_socket(struct socket *so, void *hctx, int32_t h_id);
fo_kqfilter_t	soo_kqfilter;

static struct filterops soread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_sordetach,
	.f_event = filt_soread,
};
static struct filterops sowrite_filtops = {
	.f_isfd = 1,
	.f_detach = filt_sowdetach,
	.f_event = filt_sowrite,
};
static struct filterops soempty_filtops = {
	.f_isfd = 1,
	.f_detach = filt_sowdetach,
	.f_event = filt_soempty,
};

so_gen_t so_gencnt;	/* generation count for sockets */

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

#define	VNET_SO_ASSERT(so)						\
	VNET_ASSERT(curvnet != NULL,					\
	    ("%s:%d curvnet is NULL, so=%p", __func__, __LINE__, (so)));

VNET_DEFINE(struct hhook_head *, socket_hhh[HHOOK_SOCKET_LAST + 1]);
#define	V_socket_hhh	VNET(socket_hhh)

/*
 * Limit on the number of connections in the listen queue waiting
 * to be accepted.
 *
 * NB: The original sysctl somaxconn is still available but hidden
 * to prevent confusion about the actual purpose of this number.
 */
static u_int somaxconn = SOMAXCONN;

static int
sysctl_somaxconn(SYSCTL_HANDLER_ARGS)
{
	int error;
	int val;

	val = somaxconn;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	/*
	 * The purpose of the UINT_MAX / 3 limit is so that the formula
	 *	3 * so->sol_qlimit / 2
	 * below will not overflow.
	 */
	if (val < 1 || val > UINT_MAX / 3)
		return (EINVAL);

	somaxconn = val;
	return (0);
}

SYSCTL_PROC(_kern_ipc, OID_AUTO, soacceptqueue,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, sizeof(int),
    sysctl_somaxconn, "I",
    "Maximum listen socket pending connection accept queue size");
SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_SKIP | CTLFLAG_MPSAFE, 0,
    sizeof(int), sysctl_somaxconn, "I",
    "Maximum listen socket pending connection accept queue size (compat)");

static int numopensockets;
SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
    &numopensockets, 0, "Number of open sockets");

/*
 * so_global_mtx protects so_gencnt, numopensockets, and the per-socket
 * so_gencnt field.
 */
static struct mtx so_global_mtx;
MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_glabel", MTX_DEF);

/*
 * General IPC sysctl name space, used by sockets and a variety of other IPC
 * types.
 */
SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "IPC");

/*
 * Initialize the socket subsystem and set up the socket
 * memory allocator.
 */
static uma_zone_t socket_zone;
int	maxsockets;

static void
socket_zone_change(void *tag)
{

	maxsockets = uma_zone_set_max(socket_zone, maxsockets);
}

static void
socket_hhook_register(int subtype)
{

	if (hhook_head_register(HHOOK_TYPE_SOCKET, subtype,
	    &V_socket_hhh[subtype],
	    HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
		printf("%s: WARNING: unable to register hook\n", __func__);
}

static void
socket_hhook_deregister(int subtype)
{

	if (hhook_head_deregister(V_socket_hhh[subtype]) != 0)
		printf("%s: WARNING: unable to deregister hook\n", __func__);
}

static void
socket_init(void *tag)
{

	socket_zone = uma_zcreate("socket", sizeof(struct socket), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	maxsockets = uma_zone_set_max(socket_zone, maxsockets);
	uma_zone_set_warning(socket_zone, "kern.ipc.maxsockets limit reached");
	EVENTHANDLER_REGISTER(maxsockets_change, socket_zone_change, NULL,
	    EVENTHANDLER_PRI_FIRST);
}
SYSINIT(socket, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY, socket_init, NULL);

static void
socket_vnet_init(const void *unused __unused)
{
	int i;

	/* We expect a contiguous range */
	for (i = 0; i <= HHOOK_SOCKET_LAST; i++)
		socket_hhook_register(i);
}
VNET_SYSINIT(socket_vnet_init, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY,
    socket_vnet_init, NULL);

static void
socket_vnet_uninit(const void *unused __unused)
{
	int i;

	for (i = 0; i <= HHOOK_SOCKET_LAST; i++)
		socket_hhook_deregister(i);
}
VNET_SYSUNINIT(socket_vnet_uninit, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY,
    socket_vnet_uninit, NULL);

/*
 * Initialise maxsockets.  This SYSINIT must be run after
 * tunable_mbinit().
 */
static void
init_maxsockets(void *ignored)
{

	TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
	maxsockets = imax(maxsockets, maxfiles);
}
SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);
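
/*
 * Illustrative only (not from the original file): because the limit is
 * fetched as a tunable, it can be set from loader.conf(5) at boot, e.g.:
 *
 *	kern.ipc.maxsockets="262144"
 *
 * Runtime increases go through sysctl_maxsockets() below, which only
 * permits growing the limit, up to maxfiles.
 */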

/*
 * Sysctl to get and set the maximum global sockets limit.  Notify protocols
 * of the change so that they can update their dependent limits as required.
 */
static int
sysctl_maxsockets(SYSCTL_HANDLER_ARGS)
{
	int error, newmaxsockets;

	newmaxsockets = maxsockets;
	error = sysctl_handle_int(oidp, &newmaxsockets, 0, req);
	if (error == 0 && req->newptr && newmaxsockets != maxsockets) {
		if (newmaxsockets > maxsockets &&
		    newmaxsockets <= maxfiles) {
			maxsockets = newmaxsockets;
			EVENTHANDLER_INVOKE(maxsockets_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets,
    CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE,
    &maxsockets, 0, sysctl_maxsockets, "IU",
    "Maximum number of sockets available");

/*
 * Socket operation routines.  These routines are called by the routines in
 * sys_socket.c or from a system process, and implement the semantics of
 * socket operations by switching out to the protocol specific routines.
 */

/*
 * Get a socket structure from our zone, and initialize it.  Note that it
 * would probably be better to allocate socket and PCB at the same time, but
 * I'm not convinced that all the protocols can be easily modified to do
 * this.
 *
 * soalloc() returns a socket with a ref count of 0.
 */
static struct socket *
soalloc(struct vnet *vnet)
{
	struct socket *so;

	so = uma_zalloc(socket_zone, M_NOWAIT | M_ZERO);
	if (so == NULL)
		return (NULL);
#ifdef MAC
	if (mac_socket_init(so, M_NOWAIT) != 0) {
		uma_zfree(socket_zone, so);
		return (NULL);
	}
#endif
	if (khelp_init_osd(HELPER_CLASS_SOCKET, &so->osd)) {
		uma_zfree(socket_zone, so);
		return (NULL);
	}

	/*
	 * The socket locking protocol allows locking two sockets at a time,
	 * however, the first one must be a listening socket.  WITNESS lacks
	 * a feature to change class of an existing lock, so we use DUPOK.
	 */
	mtx_init(&so->so_lock, "socket", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&so->so_snd_mtx, "so_snd", NULL, MTX_DEF);
	mtx_init(&so->so_rcv_mtx, "so_rcv", NULL, MTX_DEF);
	so->so_rcv.sb_sel = &so->so_rdsel;
	so->so_snd.sb_sel = &so->so_wrsel;
	sx_init(&so->so_snd_sx, "so_snd_sx");
	sx_init(&so->so_rcv_sx, "so_rcv_sx");
	TAILQ_INIT(&so->so_snd.sb_aiojobq);
	TAILQ_INIT(&so->so_rcv.sb_aiojobq);
	TASK_INIT(&so->so_snd.sb_aiotask, 0, soaio_snd, so);
	TASK_INIT(&so->so_rcv.sb_aiotask, 0, soaio_rcv, so);
#ifdef VIMAGE
	VNET_ASSERT(vnet != NULL, ("%s:%d vnet is NULL, so=%p",
	    __func__, __LINE__, so));
	so->so_vnet = vnet;
#endif
	/* We shouldn't need the so_global_mtx */
	if (hhook_run_socket(so, NULL, HHOOK_SOCKET_CREATE)) {
		/* Do we need more comprehensive error returns? */
		uma_zfree(socket_zone, so);
		return (NULL);
	}
	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	++numopensockets;
#ifdef VIMAGE
	vnet->vnet_sockcnt++;
#endif
	mtx_unlock(&so_global_mtx);

	return (so);
}

/*
 * Free the storage associated with a socket at the socket layer, tear down
 * locks, labels, etc.  All protocol state is assumed already to have been
 * torn down (and possibly never set up) by the caller.
 */
static void
sodealloc(struct socket *so)
{

	KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
	KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL"));

	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	--numopensockets;	/* Could be below, but faster here. */
#ifdef VIMAGE
	VNET_ASSERT(so->so_vnet != NULL, ("%s:%d so_vnet is NULL, so=%p",
	    __func__, __LINE__, so));
	so->so_vnet->vnet_sockcnt--;
#endif
	mtx_unlock(&so_global_mtx);
#ifdef MAC
	mac_socket_destroy(so);
#endif
	hhook_run_socket(so, NULL, HHOOK_SOCKET_CLOSE);

	khelp_destroy_osd(&so->osd);
	if (SOLISTENING(so)) {
		if (so->sol_accept_filter != NULL)
			accept_filt_setopt(so, NULL);
	} else {
		if (so->so_rcv.sb_hiwat)
			(void)chgsbsize(so->so_cred->cr_uidinfo,
			    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
		if (so->so_snd.sb_hiwat)
			(void)chgsbsize(so->so_cred->cr_uidinfo,
			    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
		sx_destroy(&so->so_snd_sx);
		sx_destroy(&so->so_rcv_sx);
		mtx_destroy(&so->so_snd_mtx);
		mtx_destroy(&so->so_rcv_mtx);
	}
	crfree(so->so_cred);
	mtx_destroy(&so->so_lock);
	uma_zfree(socket_zone, so);
}

/*
 * socreate returns a socket with a ref count of 1 and a file descriptor
 * reference.  The socket should be closed with soclose().
 */
int
socreate(int dom, struct socket **aso, int type, int proto,
    struct ucred *cred, struct thread *td)
{
	struct protosw *prp;
	struct socket *so;
	int error;

	/*
	 * XXX: divert(4) historically abused PF_INET.  Keep this compatibility
	 * shim until all applications have been updated.
	 */
	if (__predict_false(dom == PF_INET && type == SOCK_RAW &&
	    proto == IPPROTO_DIVERT)) {
		dom = PF_DIVERT;
		printf("%s uses obsolete way to create divert(4) socket\n",
		    td->td_proc->p_comm);
	}

	prp = pffindproto(dom, type, proto);
	if (prp == NULL) {
		/* No support for domain. */
		if (pffinddomain(dom) == NULL)
			return (EAFNOSUPPORT);
		/* No support for socket type. */
		if (proto == 0 && type != 0)
			return (EPROTOTYPE);
		return (EPROTONOSUPPORT);
	}

	MPASS(prp->pr_attach);

	if (IN_CAPABILITY_MODE(td) && (prp->pr_flags & PR_CAPATTACH) == 0)
		return (ECAPMODE);

	if (prison_check_af(cred, prp->pr_domain->dom_family) != 0)
		return (EPROTONOSUPPORT);

	so = soalloc(CRED_TO_VNET(cred));
	if (so == NULL)
		return (ENOBUFS);

	so->so_type = type;
	so->so_cred = crhold(cred);
	if ((prp->pr_domain->dom_family == PF_INET) ||
	    (prp->pr_domain->dom_family == PF_INET6) ||
	    (prp->pr_domain->dom_family == PF_ROUTE))
		so->so_fibnum = td->td_proc->p_fibnum;
	else
		so->so_fibnum = 0;
	so->so_proto = prp;
#ifdef MAC
	mac_socket_create(cred, so);
#endif
	knlist_init(&so->so_rdsel.si_note, so, so_rdknl_lock, so_rdknl_unlock,
	    so_rdknl_assert_lock);
	knlist_init(&so->so_wrsel.si_note, so, so_wrknl_lock, so_wrknl_unlock,
	    so_wrknl_assert_lock);
	if ((prp->pr_flags & PR_SOCKBUF) == 0) {
		so->so_snd.sb_mtx = &so->so_snd_mtx;
		so->so_rcv.sb_mtx = &so->so_rcv_mtx;
	}
	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	CURVNET_SET(so->so_vnet);
	error = prp->pr_attach(so, proto, td);
	CURVNET_RESTORE();
	if (error) {
		sodealloc(so);
		return (error);
	}
	soref(so);
	*aso = so;
	return (0);
}

#ifdef REGRESSION
static int regression_sonewconn_earlytest = 1;
SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW,
    &regression_sonewconn_earlytest, 0, "Perform early sonewconn limit test");
#endif

static int sooverprio = LOG_DEBUG;
SYSCTL_INT(_kern_ipc, OID_AUTO, sooverprio, CTLFLAG_RW,
    &sooverprio, 0, "Log priority for listen socket overflows: 0..7 or -1 to disable");

static struct timeval overinterval = { 60, 0 };
SYSCTL_TIMEVAL_SEC(_kern_ipc, OID_AUTO, sooverinterval, CTLFLAG_RW,
    &overinterval,
    "Delay in seconds between warnings for listen socket overflows");

/*
 * When an attempt at a new connection is noted on a socket which supports
 * accept(2), the protocol has two options:
 * 1) Call legacy sonewconn() function, which would call protocol attach
 *    method, same as used for socket(2).
 * 2) Call solisten_clone(), do attach that is specific to a cloned connection,
 *    and then call solisten_enqueue().
 *
 * Note: the ref count on the socket is 0 on return.
 */
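
/*
 * Illustrative only (not from the original file): under option 2 a
 * protocol's incoming-connection path looks roughly like this, where
 * protocol_attach_locked() stands in for whatever cloned-connection
 * attach work the protocol performs:
 *
 *	struct socket *so;
 *
 *	so = solisten_clone(head);
 *	if (so == NULL)
 *		return;
 *	if (protocol_attach_locked(so) != 0) {
 *		sodealloc(so);
 *		return;
 *	}
 *	(void)solisten_enqueue(so, SS_ISCONNECTED);
 */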
static struct socket *
solisten_clone(struct socket *head)
{
	struct sbuf descrsb;
	struct socket *so;
	int len, overcount;
	u_int qlen;
	const char localprefix[] = "local:";
	char descrbuf[SUNPATHLEN + sizeof(localprefix)];
#if defined(INET6)
	char addrbuf[INET6_ADDRSTRLEN];
#elif defined(INET)
	char addrbuf[INET_ADDRSTRLEN];
#endif
	bool dolog, over;

	SOLISTEN_LOCK(head);
	over = (head->sol_qlen > 3 * head->sol_qlimit / 2);
#ifdef REGRESSION
	if (regression_sonewconn_earlytest && over) {
#else
	if (over) {
#endif
		head->sol_overcount++;
		dolog = (sooverprio >= 0) &&
		    !!ratecheck(&head->sol_lastover, &overinterval);

		/*
		 * If we're going to log, copy the overflow count and queue
		 * length from the listen socket before dropping the lock.
		 * Also, reset the overflow count.
		 */
		if (dolog) {
			overcount = head->sol_overcount;
			head->sol_overcount = 0;
			qlen = head->sol_qlen;
		}
		SOLISTEN_UNLOCK(head);

		if (dolog) {
			/*
			 * Try to print something descriptive about the
			 * socket for the error message.
			 */
			sbuf_new(&descrsb, descrbuf, sizeof(descrbuf),
			    SBUF_FIXEDLEN);
			switch (head->so_proto->pr_domain->dom_family) {
#if defined(INET) || defined(INET6)
			case AF_INET6:
			case AF_INET:
				/* IPv4 and/or IPv6. */
				if (head->so_proto->pr_domain->dom_family ==
				    AF_INET6 ||
				    (sotoinpcb(head)->inp_inc.inc_flags &
				    INC_ISIPV6)) {
					/* IPv6. */
					ip6_sprintf(addrbuf,
					    &sotoinpcb(head)->inp_inc.inc6_laddr);
					sbuf_printf(&descrsb, "[%s]", addrbuf);
				} else {
					/* IPv4. */
					inet_ntoa_r(
					    sotoinpcb(head)->inp_inc.inc_laddr,
					    addrbuf);
					sbuf_cat(&descrsb, addrbuf);
				}
				sbuf_printf(&descrsb, ":%hu (proto %u)",
				    ntohs(sotoinpcb(head)->inp_inc.inc_lport),
				    head->so_proto->pr_protocol);
				break;
#endif /* INET || INET6 */
			case AF_UNIX:
				sbuf_cat(&descrsb, localprefix);
				if (sotounpcb(head)->unp_addr != NULL)
					len =
					    sotounpcb(head)->unp_addr->sun_len -
					    offsetof(struct sockaddr_un,
					    sun_path);
				else
					len = 0;
				if (len > 0)
					sbuf_bcat(&descrsb,
					    sotounpcb(head)->unp_addr->sun_path,
					    len);
				else
					sbuf_cat(&descrsb, "(unknown)");
				break;
			}

			/*
			 * If we can't print something more specific, at least
			 * print the domain name.
			 */
			if (sbuf_finish(&descrsb) != 0 ||
			    sbuf_len(&descrsb) <= 0) {
				sbuf_clear(&descrsb);
				sbuf_cat(&descrsb,
				    head->so_proto->pr_domain->dom_name ?:
				    "unknown");
				sbuf_finish(&descrsb);
			}
			KASSERT(sbuf_len(&descrsb) > 0,
			    ("%s: sbuf creation failed", __func__));
			/*
			 * Preserve the historic listen queue overflow log
			 * message, that starts with "sonewconn:".  It has
			 * been known to sysadmins for years and also test
			 * sys/kern/sonewconn_overflow checks for it.
			 */
			if (head->so_cred == 0) {
				log(LOG_PRI(sooverprio),
				    "sonewconn: pcb %p (%s): "
				    "Listen queue overflow: %i already in "
				    "queue awaiting acceptance (%d "
				    "occurrences)\n", head->so_pcb,
				    sbuf_data(&descrsb),
				    qlen, overcount);
			} else {
				log(LOG_PRI(sooverprio),
				    "sonewconn: pcb %p (%s): "
				    "Listen queue overflow: "
				    "%i already in queue awaiting acceptance "
				    "(%d occurrences), euid %d, rgid %d, jail %s\n",
				    head->so_pcb, sbuf_data(&descrsb), qlen,
				    overcount, head->so_cred->cr_uid,
				    head->so_cred->cr_rgid,
				    head->so_cred->cr_prison ?
				    head->so_cred->cr_prison->pr_name :
				    "not_jailed");
			}
			sbuf_delete(&descrsb);
		}

		return (NULL);
	}
	SOLISTEN_UNLOCK(head);
	VNET_ASSERT(head->so_vnet != NULL, ("%s: so %p vnet is NULL",
	    __func__, head));
	so = soalloc(head->so_vnet);
	if (so == NULL) {
		log(LOG_DEBUG, "%s: pcb %p: New socket allocation failure: "
		    "limit reached or out of memory\n",
		    __func__, head->so_pcb);
		return (NULL);
	}
	so->so_listen = head;
	so->so_type = head->so_type;
	/*
	 * POSIX is ambiguous on what options an accept(2)ed socket should
	 * inherit from the listener.  Words "create a new socket" may be
	 * interpreted as not inheriting anything.  Best programming practice
	 * for application developers is to not rely on such inheritance.
	 * FreeBSD had historically inherited all so_options excluding
	 * SO_ACCEPTCONN, which virtually means all SOL_SOCKET level options,
	 * including those completely irrelevant to a newborn socket.  For
	 * compatibility with older versions we will inherit a list of
	 * meaningful options.
	 */
	so->so_options = head->so_options & (SO_KEEPALIVE | SO_DONTROUTE |
	    SO_LINGER | SO_OOBINLINE | SO_NOSIGPIPE);
	so->so_linger = head->so_linger;
	so->so_state = head->so_state;
	so->so_fibnum = head->so_fibnum;
	so->so_proto = head->so_proto;
	so->so_cred = crhold(head->so_cred);
#ifdef MAC
	mac_socket_newconn(head, so);
#endif
	knlist_init(&so->so_rdsel.si_note, so, so_rdknl_lock, so_rdknl_unlock,
	    so_rdknl_assert_lock);
	knlist_init(&so->so_wrsel.si_note, so, so_wrknl_lock, so_wrknl_unlock,
	    so_wrknl_assert_lock);
	VNET_SO_ASSERT(head);
	if (soreserve(so, head->sol_sbsnd_hiwat, head->sol_sbrcv_hiwat)) {
		sodealloc(so);
		log(LOG_DEBUG, "%s: pcb %p: soreserve() failed\n",
		    __func__, head->so_pcb);
		return (NULL);
	}
	so->so_rcv.sb_lowat = head->sol_sbrcv_lowat;
	so->so_snd.sb_lowat = head->sol_sbsnd_lowat;
	so->so_rcv.sb_timeo = head->sol_sbrcv_timeo;
	so->so_snd.sb_timeo = head->sol_sbsnd_timeo;
	so->so_rcv.sb_flags = head->sol_sbrcv_flags & SB_AUTOSIZE;
	so->so_snd.sb_flags = head->sol_sbsnd_flags & SB_AUTOSIZE;
	if ((so->so_proto->pr_flags & PR_SOCKBUF) == 0) {
		so->so_snd.sb_mtx = &so->so_snd_mtx;
		so->so_rcv.sb_mtx = &so->so_rcv_mtx;
	}

	return (so);
}

/* Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED. */
struct socket *
sonewconn(struct socket *head, int connstatus)
{
	struct socket *so;

	if ((so = solisten_clone(head)) == NULL)
		return (NULL);

	if (so->so_proto->pr_attach(so, 0, NULL) != 0) {
		sodealloc(so);
		log(LOG_DEBUG, "%s: pcb %p: pr_attach() failed\n",
		    __func__, head->so_pcb);
		return (NULL);
	}

	(void)solisten_enqueue(so, connstatus);

	return (so);
}

/*
 * Enqueue socket cloned by solisten_clone() to the listen queue of the
 * listener it has been cloned from.
 *
 * Return 'true' if socket landed on complete queue, otherwise 'false'.
 */
bool
solisten_enqueue(struct socket *so, int connstatus)
{
	struct socket *head = so->so_listen;

	MPASS(refcount_load(&so->so_count) == 0);
	refcount_init(&so->so_count, 1);

	SOLISTEN_LOCK(head);
	if (head->sol_accept_filter != NULL)
		connstatus = 0;
	so->so_state |= connstatus;
	soref(head);	/* A socket on (in)complete queue refs head. */
	if (connstatus) {
		TAILQ_INSERT_TAIL(&head->sol_comp, so, so_list);
		so->so_qstate = SQ_COMP;
		head->sol_qlen++;
		solisten_wakeup(head);	/* unlocks */
		return (true);
	} else {
		/*
		 * Keep removing sockets from the head until there's room for
		 * us to insert on the tail.  In pre-locking revisions, this
		 * was a simple if(), but as we could be racing with other
		 * threads and soabort() requires dropping locks, we must
		 * loop waiting for the condition to be true.
		 */
		while (head->sol_incqlen > head->sol_qlimit) {
			struct socket *sp;

			sp = TAILQ_FIRST(&head->sol_incomp);
			TAILQ_REMOVE(&head->sol_incomp, sp, so_list);
			head->sol_incqlen--;
			SOCK_LOCK(sp);
			sp->so_qstate = SQ_NONE;
			sp->so_listen = NULL;
			SOCK_UNLOCK(sp);
			sorele_locked(head);	/* does SOLISTEN_UNLOCK, head stays */
			soabort(sp);
			SOLISTEN_LOCK(head);
		}
		TAILQ_INSERT_TAIL(&head->sol_incomp, so, so_list);
		so->so_qstate = SQ_INCOMP;
		head->sol_incqlen++;
		SOLISTEN_UNLOCK(head);
		return (false);
	}
}

#if defined(SCTP) || defined(SCTP_SUPPORT)
/*
 * Socket part of sctp_peeloff().  Detach a new socket from an
 * association.  The new socket is returned with a reference.
 *
 * XXXGL: reduce copy-paste with solisten_clone().
 */
struct socket *
sopeeloff(struct socket *head)
{
	struct socket *so;

	VNET_ASSERT(head->so_vnet != NULL, ("%s:%d so_vnet is NULL, head=%p",
	    __func__, __LINE__, head));
	so = soalloc(head->so_vnet);
	if (so == NULL) {
		log(LOG_DEBUG, "%s: pcb %p: New socket allocation failure: "
		    "limit reached or out of memory\n",
		    __func__, head->so_pcb);
		return (NULL);
	}
	so->so_type = head->so_type;
	so->so_options = head->so_options;
	so->so_linger = head->so_linger;
	so->so_state = (head->so_state & SS_NBIO) | SS_ISCONNECTED;
	so->so_fibnum = head->so_fibnum;
	so->so_proto = head->so_proto;
	so->so_cred = crhold(head->so_cred);
#ifdef MAC
	mac_socket_newconn(head, so);
#endif
	knlist_init(&so->so_rdsel.si_note, so, so_rdknl_lock, so_rdknl_unlock,
	    so_rdknl_assert_lock);
	knlist_init(&so->so_wrsel.si_note, so, so_wrknl_lock, so_wrknl_unlock,
	    so_wrknl_assert_lock);
	VNET_SO_ASSERT(head);
	if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat)) {
		sodealloc(so);
		log(LOG_DEBUG, "%s: pcb %p: soreserve() failed\n",
		    __func__, head->so_pcb);
		return (NULL);
	}
	if ((*so->so_proto->pr_attach)(so, 0, NULL)) {
		sodealloc(so);
		log(LOG_DEBUG, "%s: pcb %p: pru_attach() failed\n",
		    __func__, head->so_pcb);
		return (NULL);
	}
	so->so_rcv.sb_lowat = head->so_rcv.sb_lowat;
	so->so_snd.sb_lowat = head->so_snd.sb_lowat;
	so->so_rcv.sb_timeo = head->so_rcv.sb_timeo;
	so->so_snd.sb_timeo = head->so_snd.sb_timeo;
	so->so_rcv.sb_flags |= head->so_rcv.sb_flags & SB_AUTOSIZE;
	so->so_snd.sb_flags |= head->so_snd.sb_flags & SB_AUTOSIZE;

	soref(so);

	return (so);
}
#endif	/* SCTP */

int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	CURVNET_SET(so->so_vnet);
	error = so->so_proto->pr_bind(so, nam, td);
	CURVNET_RESTORE();
	return (error);
}

int
sobindat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	CURVNET_SET(so->so_vnet);
	error = so->so_proto->pr_bindat(fd, so, nam, td);
	CURVNET_RESTORE();
	return (error);
}

/*
 * solisten() transitions a socket from a non-listening state to a listening
 * state, but can also be used to update the listen queue depth on an
 * existing listen socket.  The protocol will call back into the sockets
 * layer using solisten_proto_check() and solisten_proto() to check and set
 * socket-layer listen state.  Call backs are used so that the protocol can
 * acquire both protocol and socket layer locks in whatever order is required
 * by the protocol.
 *
 * Protocol implementors are advised to hold the socket lock across the
 * socket-layer test and set to avoid races at the socket layer.
 */
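
/*
 * Illustrative only (not from the original file): a protocol pr_listen
 * method built on these callbacks typically looks like this, where
 * PROTO_LOCK()/PROTO_UNLOCK() and protocol_listen_setup() stand in for
 * the protocol's own locking and setup work:
 *
 *	PROTO_LOCK(pcb);
 *	SOCK_LOCK(so);
 *	error = solisten_proto_check(so);
 *	if (error == 0) {
 *		error = protocol_listen_setup(pcb);
 *		if (error == 0)
 *			solisten_proto(so, backlog);
 *		else
 *			solisten_proto_abort(so);
 *	}
 *	SOCK_UNLOCK(so);
 *	PROTO_UNLOCK(pcb);
 *
 * solisten_proto_abort() is only used after solisten_proto_check() has
 * succeeded, since a failed check already releases what it acquired.
 */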
int
solisten(struct socket *so, int backlog, struct thread *td)
{
	int error;

	CURVNET_SET(so->so_vnet);
	error = so->so_proto->pr_listen(so, backlog, td);
	CURVNET_RESTORE();
	return (error);
}

/*
 * Prepare for a call to solisten_proto().  Acquire all socket buffer locks in
 * order to interlock with socket I/O.
 */
int
solisten_proto_check(struct socket *so)
{
	SOCK_LOCK_ASSERT(so);

	if ((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
	    SS_ISDISCONNECTING)) != 0)
		return (EINVAL);

	/*
	 * Sleeping is not permitted here, so simply fail if userspace is
	 * attempting to transmit or receive on the socket.  This kind of
	 * transient failure is not ideal, but it should occur only if
	 * userspace is misusing the socket interfaces.
	 */
	if (!sx_try_xlock(&so->so_snd_sx))
		return (EAGAIN);
	if (!sx_try_xlock(&so->so_rcv_sx)) {
		sx_xunlock(&so->so_snd_sx);
		return (EAGAIN);
	}
	mtx_lock(&so->so_snd_mtx);
	mtx_lock(&so->so_rcv_mtx);

	/* Interlock with soo_aio_queue() and KTLS. */
	if (!SOLISTENING(so)) {
		bool ktls;

#ifdef KERN_TLS
		ktls = so->so_snd.sb_tls_info != NULL ||
		    so->so_rcv.sb_tls_info != NULL;
#else
		ktls = false;
#endif
		if (ktls ||
		    (so->so_snd.sb_flags & (SB_AIO | SB_AIO_RUNNING)) != 0 ||
		    (so->so_rcv.sb_flags & (SB_AIO | SB_AIO_RUNNING)) != 0) {
			solisten_proto_abort(so);
			return (EBUSY);
		}
	}

	return (0);
}

/*
 * Undo the setup done by solisten_proto_check().
 */
void
solisten_proto_abort(struct socket *so)
{
	mtx_unlock(&so->so_snd_mtx);
	mtx_unlock(&so->so_rcv_mtx);
	sx_xunlock(&so->so_snd_sx);
	sx_xunlock(&so->so_rcv_sx);
}

void
solisten_proto(struct socket *so, int backlog)
{
	int sbrcv_lowat, sbsnd_lowat;
	u_int sbrcv_hiwat, sbsnd_hiwat;
	short sbrcv_flags, sbsnd_flags;
	sbintime_t sbrcv_timeo, sbsnd_timeo;

	SOCK_LOCK_ASSERT(so);
	KASSERT((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
	    SS_ISDISCONNECTING)) == 0,
	    ("%s: bad socket state %p", __func__, so));

	if (SOLISTENING(so))
		goto listening;

	/*
	 * Change this socket to listening state.
	 */
	sbrcv_lowat = so->so_rcv.sb_lowat;
	sbsnd_lowat = so->so_snd.sb_lowat;
	sbrcv_hiwat = so->so_rcv.sb_hiwat;
	sbsnd_hiwat = so->so_snd.sb_hiwat;
	sbrcv_flags = so->so_rcv.sb_flags;
	sbsnd_flags = so->so_snd.sb_flags;
	sbrcv_timeo = so->so_rcv.sb_timeo;
	sbsnd_timeo = so->so_snd.sb_timeo;

	sbdestroy(so, SO_SND);
	sbdestroy(so, SO_RCV);

#ifdef INVARIANTS
	bzero(&so->so_rcv,
	    sizeof(struct socket) - offsetof(struct socket, so_rcv));
#endif

	so->sol_sbrcv_lowat = sbrcv_lowat;
	so->sol_sbsnd_lowat = sbsnd_lowat;
	so->sol_sbrcv_hiwat = sbrcv_hiwat;
	so->sol_sbsnd_hiwat = sbsnd_hiwat;
	so->sol_sbrcv_flags = sbrcv_flags;
	so->sol_sbsnd_flags = sbsnd_flags;
	so->sol_sbrcv_timeo = sbrcv_timeo;
	so->sol_sbsnd_timeo = sbsnd_timeo;

	so->sol_qlen = so->sol_incqlen = 0;
	TAILQ_INIT(&so->sol_incomp);
	TAILQ_INIT(&so->sol_comp);

	so->sol_accept_filter = NULL;
	so->sol_accept_filter_arg = NULL;
	so->sol_accept_filter_str = NULL;

	so->sol_upcall = NULL;
	so->sol_upcallarg = NULL;

	so->so_options |= SO_ACCEPTCONN;

listening:
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->sol_qlimit = backlog;

	mtx_unlock(&so->so_snd_mtx);
	mtx_unlock(&so->so_rcv_mtx);
	sx_xunlock(&so->so_snd_sx);
	sx_xunlock(&so->so_rcv_sx);
}

/*
 * Wakeup listeners/subsystems once we have a complete connection.
 * Enters with lock, returns unlocked.
 */
static void
solisten_wakeup(struct socket *sol)
{

	if (sol->sol_upcall != NULL)
		(void)sol->sol_upcall(sol, sol->sol_upcallarg, M_NOWAIT);
	else {
		selwakeuppri(&sol->so_rdsel, PSOCK);
		KNOTE_LOCKED(&sol->so_rdsel.si_note, 0);
	}
	SOLISTEN_UNLOCK(sol);
	wakeup_one(&sol->sol_comp);
	if ((sol->so_state & SS_ASYNC) && sol->so_sigio != NULL)
		pgsigio(&sol->so_sigio, SIGIO, 0);
}

/*
 * Return single connection off a listening socket queue.  Main consumer of
 * the function is kern_accept4().  Some modules that do their own accept
 * management also use the function.  The socket reference held by the
 * listen queue is handed to the caller.
 *
 * Listening socket must be locked on entry and is returned unlocked on
 * return.
 * The flags argument is set of accept4(2) flags and ACCEPT4_INHERIT.
 */
int
solisten_dequeue(struct socket *head, struct socket **ret, int flags)
{
	struct socket *so;
	int error;

	SOLISTEN_LOCK_ASSERT(head);

	while (!(head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->sol_comp) &&
	    head->so_error == 0) {
		error = msleep(&head->sol_comp, SOCK_MTX(head), PSOCK | PCATCH,
		    "accept", 0);
		if (error != 0) {
			SOLISTEN_UNLOCK(head);
			return (error);
		}
	}
	if (head->so_error) {
		error = head->so_error;
		head->so_error = 0;
	} else if ((head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->sol_comp))
		error = EWOULDBLOCK;
	else
		error = 0;
	if (error) {
		SOLISTEN_UNLOCK(head);
		return (error);
	}
	so = TAILQ_FIRST(&head->sol_comp);
	SOCK_LOCK(so);
	KASSERT(so->so_qstate == SQ_COMP,
	    ("%s: so %p not SQ_COMP", __func__, so));
	head->sol_qlen--;
	so->so_qstate = SQ_NONE;
	so->so_listen = NULL;
	TAILQ_REMOVE(&head->sol_comp, so, so_list);
	if (flags & ACCEPT4_INHERIT)
		so->so_state |= (head->so_state & SS_NBIO);
	else
		so->so_state |= (flags & SOCK_NONBLOCK) ? SS_NBIO : 0;
	SOCK_UNLOCK(so);
	sorele_locked(head);
	*ret = so;
	return (0);
}

/*
 * Free socket upon release of the very last reference.
 */
static void
sofree(struct socket *so)
{
	struct protosw *pr = so->so_proto;

	SOCK_LOCK_ASSERT(so);
	KASSERT(refcount_load(&so->so_count) == 0,
	    ("%s: so %p has references", __func__, so));
	KASSERT(SOLISTENING(so) || so->so_qstate == SQ_NONE,
	    ("%s: so %p is on listen queue", __func__, so));

	SOCK_UNLOCK(so);

	if (so->so_dtor != NULL)
		so->so_dtor(so);

	VNET_SO_ASSERT(so);
	if ((pr->pr_flags & PR_RIGHTS) && !SOLISTENING(so)) {
		MPASS(pr->pr_domain->dom_dispose != NULL);
		(*pr->pr_domain->dom_dispose)(so);
	}
	if (pr->pr_detach != NULL)
		pr->pr_detach(so);

	/*
	 * From this point on, we assume that no other references to this
	 * socket exist anywhere else in the stack.  Therefore, no locks need
	 * to be acquired or held.
	 */
	if (!(pr->pr_flags & PR_SOCKBUF) && !SOLISTENING(so)) {
		sbdestroy(so, SO_SND);
		sbdestroy(so, SO_RCV);
	}
	seldrain(&so->so_rdsel);
	seldrain(&so->so_wrsel);
	knlist_destroy(&so->so_rdsel.si_note);
	knlist_destroy(&so->so_wrsel.si_note);
	sodealloc(so);
}

/*
 * Release a reference on a socket while holding the socket lock.
 * Unlocks the socket lock before returning.
 */
void
sorele_locked(struct socket *so)
{
	SOCK_LOCK_ASSERT(so);
	if (refcount_release(&so->so_count))
		sofree(so);
	else
		SOCK_UNLOCK(so);
}

/*
 * Close a socket on last file table reference removal.  Initiate disconnect
 * if connected.  Free socket when disconnect complete.
 *
 * This function will sorele() the socket.  Note that soclose() may be called
 * prior to the ref count reaching zero.  The actual socket structure will
 * not be freed until the ref count reaches zero.
 */
int
soclose(struct socket *so)
{
	struct accept_queue lqueue;
	int error = 0;
	bool listening, last __diagused;

	CURVNET_SET(so->so_vnet);
	funsetown(&so->so_sigio);
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error) {
				if (error == ENOTCONN)
					error = 0;
				goto drop;
			}
		}
		if ((so->so_options & SO_LINGER) != 0 && so->so_linger != 0) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo,
				    PSOCK | PCATCH, "soclos",
				    so->so_linger * hz);
				if (error)
					break;
			}
		}
	}

drop:
	if (so->so_proto->pr_close != NULL)
		so->so_proto->pr_close(so);

	SOCK_LOCK(so);
	if ((listening = SOLISTENING(so))) {
		struct socket *sp;

		TAILQ_INIT(&lqueue);
		TAILQ_SWAP(&lqueue, &so->sol_incomp, socket, so_list);
		TAILQ_CONCAT(&lqueue, &so->sol_comp, so_list);

		so->sol_qlen = so->sol_incqlen = 0;

		TAILQ_FOREACH(sp, &lqueue, so_list) {
			SOCK_LOCK(sp);
			sp->so_qstate = SQ_NONE;
			sp->so_listen = NULL;
			SOCK_UNLOCK(sp);
			last = refcount_release(&so->so_count);
			KASSERT(!last, ("%s: released last reference for %p",
			    __func__, so));
		}
	}
	sorele_locked(so);
	if (listening) {
		struct socket *sp, *tsp;

		TAILQ_FOREACH_SAFE(sp, &lqueue, so_list, tsp)
			soabort(sp);
	}
	CURVNET_RESTORE();
	return (error);
}

/*
 * soabort() is used to abruptly tear down a connection, such as when a
 * resource limit is reached (listen queue depth exceeded), or if a listen
 * socket is closed while there are sockets waiting to be accepted.
 *
 * This interface is tricky, because it is called on an unreferenced socket,
 * and must be called only by a thread that has actually removed the socket
 * from the listen queue it was on.  Likely this thread holds the last
 * reference on the socket and soabort() will proceed with sofree().  But
 * it might be not the last, as the sockets on the listen queues are seen
 * from the protocol side.
 *
 * This interface will call into the protocol code, so must not be called
 * with any socket locks held.  Protocols do call it while holding their own
 * recursible protocol mutexes, but this is something that should be subject
 * to review in the future.
 *
 * Usually socket should have a single reference left, but this is not a
 * requirement.  In the past, when we have had named references for file
 * descriptor and protocol, we asserted that none of them are being held.
 */
void
soabort(struct socket *so)
{

	VNET_SO_ASSERT(so);

	if (so->so_proto->pr_abort != NULL)
		so->so_proto->pr_abort(so);
	SOCK_LOCK(so);
	sofree(so);
}

int
soaccept(struct socket *so, struct sockaddr *sa)
{
#ifdef INVARIANTS
	u_char len = sa->sa_len;
#endif
	int error;

	CURVNET_SET(so->so_vnet);
	error = so->so_proto->pr_accept(so, sa);
	KASSERT(sa->sa_len <= len,
	    ("%s: protocol %p sockaddr overflow", __func__, so->so_proto));
	CURVNET_RESTORE();
	return (error);
}

int
sopeeraddr(struct socket *so, struct sockaddr *sa)
{
#ifdef INVARIANTS
	u_char len = sa->sa_len;
#endif
	int error;

	CURVNET_SET(so->so_vnet);
	error = so->so_proto->pr_peeraddr(so, sa);
	KASSERT(sa->sa_len <= len,
	    ("%s: protocol %p sockaddr overflow", __func__, so->so_proto));
	CURVNET_RESTORE();

	return (error);
}

int
sosockaddr(struct socket *so, struct sockaddr *sa)
{
#ifdef INVARIANTS
	u_char len = sa->sa_len;
#endif
	int error;

	CURVNET_SET(so->so_vnet);
	error = so->so_proto->pr_sockaddr(so, sa);
	KASSERT(sa->sa_len <= len,
	    ("%s: protocol %p sockaddr overflow", __func__, so->so_proto));
	CURVNET_RESTORE();

	return (error);
}

int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
{

	return (soconnectat(AT_FDCWD, so, nam, td));
}

int
soconnectat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	CURVNET_SET(so->so_vnet);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.  This allows
	 * user to disconnect by connecting to, e.g., a null address.
	 *
	 * Note, this check is racy and may need to be re-evaluated at the
	 * protocol layer.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from previous connection from
		 * biting us.
		 */
		so->so_error = 0;
		if (fd == AT_FDCWD) {
			error = so->so_proto->pr_connect(so, nam, td);
		} else {
			error = so->so_proto->pr_connectat(fd, so, nam, td);
		}
	}
	CURVNET_RESTORE();

	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	int error;

	CURVNET_SET(so1->so_vnet);
	error = so1->so_proto->pr_connect2(so1, so2);
	CURVNET_RESTORE();
	return (error);
}

int
sodisconnect(struct socket *so)
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);
	if (so->so_state & SS_ISDISCONNECTING)
		return (EALREADY);
	VNET_SO_ASSERT(so);
	error = so->so_proto->pr_disconnect(so);
	return (error);
}

int
sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	long space;
	ssize_t resid;
	int clen = 0, error, dontroute;

	KASSERT(so->so_type == SOCK_DGRAM, ("sosend_dgram: !SOCK_DGRAM"));
	KASSERT(so->so_proto->pr_flags & PR_ATOMIC,
	    ("sosend_dgram: !PR_ATOMIC"));

	if (uio != NULL)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.  However, space must be
	 * signed, as it might be less than 0 if we over-committed, and we
	 * must use a signed comparison of space and resid.  On the other
	 * hand, a negative resid causes us to loop sending 0-length
	 * segments to the protocol.
	 */
	if (resid < 0) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0;
	if (td != NULL)
		td->td_ru.ru_msgsnd++;
	if (control != NULL)
		clen = control->m_len;

	SOCKBUF_LOCK(&so->so_snd);
	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		SOCKBUF_UNLOCK(&so->so_snd);
		error = EPIPE;
		goto out;
	}
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		SOCKBUF_UNLOCK(&so->so_snd);
		goto out;
	}
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		/*
		 * `sendto' and `sendmsg' is allowed on a connection-based
		 * socket if it supports implied connect.  Return ENOTCONN if
		 * not connected and no address is supplied.
		 */
		if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
		    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
			if ((so->so_state & SS_ISCONFIRMING) == 0 &&
			    !(resid == 0 && clen != 0)) {
				SOCKBUF_UNLOCK(&so->so_snd);
				error = ENOTCONN;
				goto out;
			}
		} else if (addr == NULL) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED)
				error = ENOTCONN;
			else
				error = EDESTADDRREQ;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto out;
		}
	}

	/*
	 * Do we need MSG_OOB support in SOCK_DGRAM?  Signs here may be a
	 * problem and need fixing.
	 */
	space = sbspace(&so->so_snd);
	if (flags & MSG_OOB)
		space += 1024;
	space -= clen;
	SOCKBUF_UNLOCK(&so->so_snd);
	if (resid > space) {
		error = EMSGSIZE;
		goto out;
	}
	if (uio == NULL) {
		resid = 0;
		if (flags & MSG_EOR)
			top->m_flags |= M_EOR;
	} else {
		/*
		 * Copy the data from userland into a mbuf chain.
		 * If no data is to be copied in, a single empty mbuf
		 * is returned.
		 */
		top = m_uiotombuf(uio, M_WAITOK, space, max_hdr,
		    (M_PKTHDR | ((flags & MSG_EOR) ? M_EOR : 0)));
		if (top == NULL) {
			error = EFAULT;	/* only possible error */
			goto out;
		}
		space -= resid - uio->uio_resid;
		resid = uio->uio_resid;
	}
	KASSERT(resid == 0, ("sosend_dgram: resid != 0"));
	/*
	 * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock
	 * than with.
	 */
	if (dontroute) {
		SOCK_LOCK(so);
		so->so_options |= SO_DONTROUTE;
		SOCK_UNLOCK(so);
	}
	/*
	 * XXX all the SBS_CANTSENDMORE checks previously done could be out
	 * of date.  We could have received a reset packet in an interrupt or
	 * maybe we slept while doing page faults in uiomove() etc.  We could
	 * probably recheck again inside the locking protection here, but
	 * there are probably other places that this also happens.  We must
	 * rethink this.
	 */
	VNET_SO_ASSERT(so);
	error = so->so_proto->pr_send(so, (flags & MSG_OOB) ? PRUS_OOB :
	/*
	 * If the user set MSG_EOF, the protocol understands this flag and
	 * nothing left to send then use PRU_SEND_EOF instead of PRU_SEND.
	 */
	    ((flags & MSG_EOF) &&
	     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
	     (resid <= 0)) ?
		PRUS_EOF :
		/* If there is more to send set PRUS_MORETOCOME */
		(flags & MSG_MORETOCOME) ||
		(resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
	    top, addr, control, td);
	if (dontroute) {
		SOCK_LOCK(so);
		so->so_options &= ~SO_DONTROUTE;
		SOCK_UNLOCK(so);
	}
	clen = 0;
	control = NULL;
	top = NULL;
out:
	if (top != NULL)
		m_freem(top);
	if (control != NULL)
		m_freem(control);
	return (error);
}

/*
 * Send on a socket.  If send must go all at once and message is larger than
 * send buffering, then hard error.  Lock against other senders.  If must go
 * all at once and not enough room now, then inform user that this would
 * block and do nothing.  Otherwise, if nonblocking, send as much as
 * possible.  The data to be sent is described by "uio" if nonzero, otherwise
 * by the mbuf chain "top" (which must be null if uio is not).  Data provided
 * in mbuf chain must be small enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers must check for short
 * counts if EINTR/ERESTART are returned.  Data and control buffers are freed
 * on return.
 */
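
/*
 * Illustrative only (not from the original file): a kernel consumer that
 * already holds an mbuf chain 'm' can hand it to the protocol through
 * sosend() with a NULL uio; for datagram sockets a destination 'sa' may
 * be supplied:
 *
 *	error = sosend(so, sa, NULL, m, NULL, 0, curthread);
 *
 * Per the contract above, the data and control mbufs are consumed on
 * return regardless of error, so the caller must not touch 'm' again.
 */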
int
sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	long space;
	ssize_t resid;
	int clen = 0, error, dontroute;
	int atomic = sosendallatonce(so) || top;
	int pr_send_flag;
#ifdef KERN_TLS
	struct ktls_session *tls;
	int tls_enq_cnt, tls_send_flag;
	uint8_t tls_rtype;

	tls = NULL;
	tls_rtype = TLS_RLTYPE_APP;
#endif
	if (uio != NULL)
		resid = uio->uio_resid;
	else if ((top->m_flags & M_PKTHDR) != 0)
		resid = top->m_pkthdr.len;
	else
		resid = m_length(top, NULL);
	/*
	 * In theory resid should be unsigned.  However, space must be
	 * signed, as it might be less than 0 if we over-committed, and we
	 * must use a signed comparison of space and resid.  On the other
	 * hand, a negative resid causes us to loop sending 0-length
	 * segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td != NULL)
		td->td_ru.ru_msgsnd++;
	if (control != NULL)
		clen = control->m_len;

	error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags));
	if (error)
		goto out;

#ifdef KERN_TLS
	tls_send_flag = 0;
	tls = ktls_hold(so->so_snd.sb_tls_info);
	if (tls != NULL) {
		if (tls->mode == TCP_TLS_MODE_SW)
			tls_send_flag = PRUS_NOTREADY;

		if (control != NULL) {
			struct cmsghdr *cm = mtod(control, struct cmsghdr *);

			if (clen >= sizeof(*cm) &&
			    cm->cmsg_type == TLS_SET_RECORD_TYPE) {
				tls_rtype = *((uint8_t *)CMSG_DATA(cm));
				clen = 0;
				m_freem(control);
				control = NULL;
				atomic = 1;
			}
		}

		if (resid == 0 && !ktls_permit_empty_frames(tls)) {
			error = EINVAL;
			goto release;
		}
	}
#endif

restart:
	do {
		SOCKBUF_LOCK(&so->so_snd);
		if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = EPIPE;
			goto release;
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' is allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0)) {
					SOCKBUF_UNLOCK(&so->so_snd);
					error = ENOTCONN;
					goto release;
				}
			} else if (addr == NULL) {
				SOCKBUF_UNLOCK(&so->so_snd);
				if (so->so_proto->pr_flags & PR_CONNREQUIRED)
					error = ENOTCONN;
				else
					error = EDESTADDRREQ;
				goto release;
			}
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = EMSGSIZE;
			goto release;
		}
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if ((so->so_state & SS_NBIO) ||
			    (flags & (MSG_NBIO | MSG_DONTWAIT)) != 0) {
				SOCKBUF_UNLOCK(&so->so_snd);
				error = EWOULDBLOCK;
				goto release;
			}
			error = sbwait(so, SO_SND);
			SOCKBUF_UNLOCK(&so->so_snd);
			if (error)
				goto release;
			goto restart;
		}
		SOCKBUF_UNLOCK(&so->so_snd);
		space -= clen;
		do {
			if (uio == NULL) {
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
#ifdef KERN_TLS
				if (tls != NULL) {
					ktls_frame(top, tls, &tls_enq_cnt,
					    tls_rtype);
					tls_rtype = TLS_RLTYPE_APP;
				}
#endif
			} else {
				/*
				 * Copy the data from userland into a mbuf
				 * chain.  If resid is 0, which can happen
				 * only if we have control to send, then
				 * a single empty mbuf is returned.  This
				 * is a workaround to prevent protocol send
				 * functions to loop forever.
				 */
#ifdef KERN_TLS
				if (tls != NULL) {
					top = m_uiotombuf(uio, M_WAITOK, space,
					    tls->params.max_frame_len,
					    M_EXTPG |
					    ((flags & MSG_EOR) ? M_EOR : 0));
					if (top != NULL) {
						ktls_frame(top, tls,
						    &tls_enq_cnt, tls_rtype);
					}
					tls_rtype = TLS_RLTYPE_APP;
				} else
#endif
					top = m_uiotombuf(uio, M_WAITOK, space,
					    (atomic ? max_hdr : 0),
					    (atomic ? M_PKTHDR : 0) |
					    ((flags & MSG_EOR) ? M_EOR : 0));
				if (top == NULL) {
					error = EFAULT;	/* only possible error */
					goto release;
				}
				space -= resid - uio->uio_resid;
				resid = uio->uio_resid;
			}
			if (dontroute) {
				SOCK_LOCK(so);
				so->so_options |= SO_DONTROUTE;
				SOCK_UNLOCK(so);
			}
			/*
			 * XXX all the SBS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We
			 * could probably recheck again inside the locking
			 * protection here, but there are probably other
			 * places that this also happens.  We must rethink
			 * this.
			 */
			VNET_SO_ASSERT(so);
			pr_send_flag = (flags & MSG_OOB) ? PRUS_OOB :
			/*
			 * If the user set MSG_EOF, the protocol understands
			 * this flag and nothing left to send then use
			 * PRU_SEND_EOF instead of PRU_SEND.
			 */
			    ((flags & MSG_EOF) &&
			     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			     (resid <= 0)) ?
				PRUS_EOF :
			/* If there is more to send set PRUS_MORETOCOME. */
			    (flags & MSG_MORETOCOME) ||
			    (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0;

#ifdef KERN_TLS
			if (tls != NULL && tls->mode == TCP_TLS_MODE_SW)
				pr_send_flag |= tls_send_flag;
#endif

			error = so->so_proto->pr_send(so, pr_send_flag, top,
			    addr, control, td);

			if (dontroute) {
				SOCK_LOCK(so);
				so->so_options &= ~SO_DONTROUTE;
				SOCK_UNLOCK(so);
			}

#ifdef KERN_TLS
			if (tls != NULL && tls->mode == TCP_TLS_MODE_SW) {
				if (error != 0) {
					m_freem(top);
					top = NULL;
				} else {
					soref(so);
					ktls_enqueue(top, so, tls_enq_cnt);
				}
			}
#endif
			clen = 0;
			control = NULL;
			top = NULL;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	SOCK_IO_SEND_UNLOCK(so);
out:
#ifdef KERN_TLS
	if (tls != NULL)
		ktls_free(tls);
#endif
	if (top != NULL)
		m_freem(top);
	if (control != NULL)
		m_freem(control);
	return (error);
}

/*
 * Send to a socket from a kernel thread.
 *
 * XXXGL: in almost all cases uio is NULL and the mbuf is supplied.
 * Exception is nfs/bootp_subr.c.  It is arguable that the VNET context needs
 * to be set at all.  This function should just boil down to a static inline
 * calling the protocol method.
 */
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	int error;

	CURVNET_SET(so->so_vnet);
	error = so->so_proto->pr_sosend(so, addr, uio,
	    top, control, flags, td);
	CURVNET_RESTORE();
	return (error);
}

/*
 * send(2), write(2) or aio_write(2) on a socket.
 */
int
sousrsend(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *control, int flags, struct proc *userproc)
{
	struct thread *td = curthread;
	ssize_t len;
	int error;

	len = uio->uio_resid;
	CURVNET_SET(so->so_vnet);
	error = so->so_proto->pr_sosend(so, addr, uio, NULL, control, flags,
	    td);
	CURVNET_RESTORE();
	if (error != 0) {
		/*
		 * Clear transient errors for stream protocols if they made
		 * some progress.  Make exclusion for aio(4) that would
		 * schedule a new write in case of EWOULDBLOCK and clear
		 * error itself.  See soaio_process_job().
		 */
		if (uio->uio_resid != len &&
		    (so->so_proto->pr_flags & PR_ATOMIC) == 0 &&
		    userproc == NULL &&
		    (error == ERESTART || error == EINTR ||
		    error == EWOULDBLOCK))
			error = 0;
		/* Generation of SIGPIPE can be controlled per socket. */
		if (error == EPIPE && (so->so_options & SO_NOSIGPIPE) == 0 &&
		    (flags & MSG_NOSIGNAL) == 0) {
			if (userproc != NULL) {
				/* aio(4) job */
				PROC_LOCK(userproc);
				kern_psignal(userproc, SIGPIPE);
				PROC_UNLOCK(userproc);
			} else {
				PROC_LOCK(td->td_proc);
				tdsignal(td, SIGPIPE);
				PROC_UNLOCK(td->td_proc);
			}
		}
	}
	return (error);
}

/*
 * The part of soreceive() that implements reading non-inline out-of-band
 * data from a socket.  For more complete comments, see soreceive(), from
 * which this code originated.
 *
 * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is
 * unable to return an mbuf chain to the caller.
 */
static int
soreceive_rcvoob(struct socket *so, struct uio *uio, int flags)
{
	struct protosw *pr = so->so_proto;
	struct mbuf *m;
	int error;

	KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0"));
	VNET_SO_ASSERT(so);

	m = m_get(M_WAITOK, MT_DATA);
	error = pr->pr_rcvoob(so, m, flags & MSG_PEEK);
	if (error)
		goto bad;
	do {
		error = uiomove(mtod(m, void *),
		    (int) min(uio->uio_resid, m->m_len), uio);
		m = m_free(m);
	} while (uio->uio_resid && error == 0 && m);
bad:
	if (m != NULL)
		m_freem(m);
	return (error);
}

/*
 * Following replacement or removal of the first mbuf on the first mbuf chain
 * of a socket buffer, push necessary state changes back into the socket
 * buffer so that other consumers see the values consistently.  'nextrecord'
 * is the callers locally stored value of the original value of
 * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes.
 * NOTE: 'nextrecord' may be NULL.
 */
static __inline void
sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord)
{

	SOCKBUF_LOCK_ASSERT(sb);
	/*
	 * First, update for the new value of nextrecord.  If necessary, make
	 * it the first record.
	 */
	if (sb->sb_mb != NULL)
		sb->sb_mb->m_nextpkt = nextrecord;
	else
		sb->sb_mb = nextrecord;

	/*
	 * Now update any dependent socket buffer fields to reflect the new
	 * state.  This is an expanded inline of SB_EMPTY_FIXUP(), with the
	 * addition of a second clause that takes care of the case where
	 * sb_mb has been updated, but remains the last record.
	 */
	if (sb->sb_mb == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (sb->sb_mb->m_nextpkt == NULL)
		sb->sb_lastrecord = sb->sb_mb;
}

/*
 * Implement receive operations on a socket.  We depend on the way that
 * records are added to the sockbuf by sbappend.  In particular, each record
 * (mbufs linked through m_next) must begin with an address if the protocol
 * so specifies, followed by an optional mbuf or mbufs containing ancillary
 * data, and then zero or more mbufs of data.  In order to allow parallelism
 * between network receive and copying to user space, as well as avoid
 * sleeping with a mutex held, we release the socket buffer mutex during the
 * user space copy.  Although the sockbuf is locked, new data may still be
 * appended, and thus we must maintain consistency of the sockbuf during that
 * time.
 *
 * The caller may receive the data as a single mbuf chain by supplying an
 * mbuf **mp0 for use in returning the chain.  The uio is then used only for
 * the count in uio_resid.
 */
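
/*
 * Illustrative only (not from the original file): a caller that wants the
 * data back as an mbuf chain rather than copied out sets up a uio carrying
 * only the byte count and passes a non-NULL mp0, roughly:
 *
 *	struct uio auio = { .uio_resid = nbytes, .uio_td = curthread };
 *	struct mbuf *m = NULL;
 *	int flags = MSG_DONTWAIT;
 *
 *	error = soreceive(so, NULL, &auio, &m, NULL, &flags);
 *
 * On success 'm' holds up to nbytes of data and auio.uio_resid reflects
 * whatever was not received.
 */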
2048 soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio,
2049 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2051 struct mbuf *m, **mp;
2052 int flags, error, offset;
2054 struct protosw *pr = so->so_proto;
2055 struct mbuf *nextrecord;
2057 ssize_t orig_resid = uio->uio_resid;
2058 bool report_real_len = false;
2063 if (controlp != NULL)
2065 if (flagsp != NULL) {
2066 report_real_len = *flagsp & MSG_TRUNC;
2067 *flagsp &= ~MSG_TRUNC;
2068 flags = *flagsp &~ MSG_EOR;
2071 if (flags & MSG_OOB)
2072 return (soreceive_rcvoob(so, uio, flags));
2075 if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING)
2076 && uio->uio_resid) {
2081 error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
2086 SOCKBUF_LOCK(&so->so_rcv);
2087 m = so->so_rcv.sb_mb;
2089 * If we have less data than requested, block awaiting more (subject
2090 * to any timeout) if:
2091 * 1. the current count is less than the low water mark, or
2092 * 2. MSG_DONTWAIT is not set
2094 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
2095 sbavail(&so->so_rcv) < uio->uio_resid) &&
2096 sbavail(&so->so_rcv) < so->so_rcv.sb_lowat &&
2097 m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
2098 KASSERT(m != NULL || !sbavail(&so->so_rcv),
2099 ("receive: m == %p sbavail == %u",
2100 m, sbavail(&so->so_rcv)));
2101 if (so->so_error || so->so_rerror) {
2105 error = so->so_error;
2107 error = so->so_rerror;
2108 if ((flags & MSG_PEEK) == 0) {
2114 SOCKBUF_UNLOCK(&so->so_rcv);
2117 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2118 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
2122 else if (so->so_rcv.sb_tlsdcc == 0 &&
2123 so->so_rcv.sb_tlscc == 0) {
2127 SOCKBUF_UNLOCK(&so->so_rcv);
2131 for (; m != NULL; m = m->m_next)
2132 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
2133 m = so->so_rcv.sb_mb;
2136 if ((so->so_state & (SS_ISCONNECTING | SS_ISCONNECTED |
2137 SS_ISDISCONNECTING | SS_ISDISCONNECTED)) == 0 &&
2138 (so->so_proto->pr_flags & PR_CONNREQUIRED) != 0) {
2139 SOCKBUF_UNLOCK(&so->so_rcv);
2143 if (uio->uio_resid == 0 && !report_real_len) {
2144 SOCKBUF_UNLOCK(&so->so_rcv);
2147 if ((so->so_state & SS_NBIO) ||
2148 (flags & (MSG_DONTWAIT|MSG_NBIO))) {
2149 SOCKBUF_UNLOCK(&so->so_rcv);
2150 error = EWOULDBLOCK;
2153 SBLASTRECORDCHK(&so->so_rcv);
2154 SBLASTMBUFCHK(&so->so_rcv);
2155 error = sbwait(so, SO_RCV);
2156 SOCKBUF_UNLOCK(&so->so_rcv);
2163 * From this point onward, we maintain 'nextrecord' as a cache of the
2164 * pointer to the next record in the socket buffer. We must keep the
2165 * various socket buffer pointers and local stack versions of the
2166 * pointers in sync, pushing out modifications before dropping the
2167 * socket buffer mutex, and re-reading them when picking it up.
2169 * Otherwise, we will race with the network stack appending new data
2170 * or records onto the socket buffer by using inconsistent/stale
2171 * versions of the field, possibly resulting in socket buffer
2174 * By holding the high-level sblock(), we prevent simultaneous
2175 * readers from pulling off the front of the socket buffer.
2177 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2179 uio->uio_td->td_ru.ru_msgrcv++;
2180 KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
2181 SBLASTRECORDCHK(&so->so_rcv);
2182 SBLASTMBUFCHK(&so->so_rcv);
2183 nextrecord = m->m_nextpkt;
2184 if (pr->pr_flags & PR_ADDR) {
2185 KASSERT(m->m_type == MT_SONAME,
2186 ("m->m_type == %d", m->m_type));
2189 *psa = sodupsockaddr(mtod(m, struct sockaddr *),
2191 if (flags & MSG_PEEK) {
2194 sbfree(&so->so_rcv, m);
2195 so->so_rcv.sb_mb = m_free(m);
2196 m = so->so_rcv.sb_mb;
2197 sockbuf_pushsync(&so->so_rcv, nextrecord);
2202 * Process one or more MT_CONTROL mbufs present before any data mbufs
2203 * in the first mbuf chain on the socket buffer. If MSG_PEEK, we
2204 * just copy the data; if !MSG_PEEK, we call into the protocol to
2205 * perform externalization (or freeing if controlp == NULL).
2207 if (m != NULL && m->m_type == MT_CONTROL) {
2208 struct mbuf *cm = NULL, *cmn;
2209 struct mbuf **cme = &cm;
2211 struct cmsghdr *cmsg;
2212 struct tls_get_record tgr;
2215 * For MSG_TLSAPPDATA, check for an alert record.
2216 * If found, return ENXIO without removing
2217 * it from the receive queue. This allows a subsequent
2218 * call without MSG_TLSAPPDATA to receive it.
2219 * Note that, for TLS, there should only be a single
2220 * control mbuf with the TLS_GET_RECORD message in it.
2222 if (flags & MSG_TLSAPPDATA) {
2223 cmsg = mtod(m, struct cmsghdr *);
2224 if (cmsg->cmsg_type == TLS_GET_RECORD &&
2225 cmsg->cmsg_len == CMSG_LEN(sizeof(tgr))) {
2226 memcpy(&tgr, CMSG_DATA(cmsg), sizeof(tgr));
2227 if (__predict_false(tgr.tls_type ==
2228 TLS_RLTYPE_ALERT)) {
2229 SOCKBUF_UNLOCK(&so->so_rcv);
2238 if (flags & MSG_PEEK) {
2239 if (controlp != NULL) {
2240 *controlp = m_copym(m, 0, m->m_len,
2242 controlp = &(*controlp)->m_next;
2246 sbfree(&so->so_rcv, m);
2247 so->so_rcv.sb_mb = m->m_next;
2250 cme = &(*cme)->m_next;
2251 m = so->so_rcv.sb_mb;
2253 } while (m != NULL && m->m_type == MT_CONTROL);
2254 if ((flags & MSG_PEEK) == 0)
2255 sockbuf_pushsync(&so->so_rcv, nextrecord);
2256 while (cm != NULL) {
2259 if (pr->pr_domain->dom_externalize != NULL) {
2260 SOCKBUF_UNLOCK(&so->so_rcv);
2262 error = (*pr->pr_domain->dom_externalize)
2263 (cm, controlp, flags);
2264 SOCKBUF_LOCK(&so->so_rcv);
2265 } else if (controlp != NULL)
2269 if (controlp != NULL) {
2270 while (*controlp != NULL)
2271 controlp = &(*controlp)->m_next;
2276 nextrecord = so->so_rcv.sb_mb->m_nextpkt;
2278 nextrecord = so->so_rcv.sb_mb;
2282 if ((flags & MSG_PEEK) == 0) {
2283 KASSERT(m->m_nextpkt == nextrecord,
2284 ("soreceive: post-control, nextrecord !sync"));
2285 if (nextrecord == NULL) {
2286 KASSERT(so->so_rcv.sb_mb == m,
2287 ("soreceive: post-control, sb_mb!=m"));
2288 KASSERT(so->so_rcv.sb_lastrecord == m,
2289 ("soreceive: post-control, lastrecord!=m"));
2293 if (type == MT_OOBDATA)
2296 if ((flags & MSG_PEEK) == 0) {
2297 KASSERT(so->so_rcv.sb_mb == nextrecord,
2298 ("soreceive: sb_mb != nextrecord"));
2299 if (so->so_rcv.sb_mb == NULL) {
2300 KASSERT(so->so_rcv.sb_lastrecord == NULL,
2301 ("soreceive: sb_lastercord != NULL"));
2305 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2306 SBLASTRECORDCHK(&so->so_rcv);
2307 SBLASTMBUFCHK(&so->so_rcv);
2310 * Now continue to read any data mbufs off the head of the socket
2311 * buffer until the read request is satisfied. Note that 'type' is
2312 * used to store the type of any mbuf reads that have happened so far
2313 * such that soreceive() can stop reading if the type changes, which
2314 * causes soreceive() to return only one of regular data and inline
2315 * out-of-band data in a single socket receive operation.
2319 while (m != NULL && !(m->m_flags & M_NOTAVAIL) && uio->uio_resid > 0
2322 * If the type of mbuf has changed since the last mbuf
2323 * examined ('type'), end the receive operation.
2325 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2326 if (m->m_type == MT_OOBDATA || m->m_type == MT_CONTROL) {
2327 if (type != m->m_type)
2329 } else if (type == MT_OOBDATA)
2332 KASSERT(m->m_type == MT_DATA,
2333 ("m->m_type == %d", m->m_type));
2334 so->so_rcv.sb_state &= ~SBS_RCVATMARK;
2335 len = uio->uio_resid;
2336 if (so->so_oobmark && len > so->so_oobmark - offset)
2337 len = so->so_oobmark - offset;
2338 if (len > m->m_len - moff)
2339 len = m->m_len - moff;
2341 * If mp is set, just pass back the mbufs. Otherwise copy
2342 * them out via the uio, then free. The sockbuf must be
2343 * consistent here (sb_mb points to the current mbuf and
2344 * nextrecord to the next record) when we drop the lock; we
2345 * must note any additions to the sockbuf when we reacquire it.
2348 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2349 SBLASTRECORDCHK(&so->so_rcv);
2350 SBLASTMBUFCHK(&so->so_rcv);
2351 SOCKBUF_UNLOCK(&so->so_rcv);
2352 if ((m->m_flags & M_EXTPG) != 0)
2353 error = m_unmapped_uiomove(m, moff, uio,
2356 error = uiomove(mtod(m, char *) + moff,
2358 SOCKBUF_LOCK(&so->so_rcv);
2361 * The MT_SONAME mbuf has already been removed
2362 * from the record, so it is necessary to
2363 * remove the data mbufs, if any, to preserve
2364 * the invariant in the case of PR_ADDR that
2365 * requires MT_SONAME mbufs at the head of
2368 if (pr->pr_flags & PR_ATOMIC &&
2369 ((flags & MSG_PEEK) == 0))
2370 (void)sbdroprecord_locked(&so->so_rcv);
2371 SOCKBUF_UNLOCK(&so->so_rcv);
2375 uio->uio_resid -= len;
2376 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2377 if (len == m->m_len - moff) {
2378 if (m->m_flags & M_EOR)
2380 if (flags & MSG_PEEK) {
2384 nextrecord = m->m_nextpkt;
2385 sbfree(&so->so_rcv, m);
2387 m->m_nextpkt = NULL;
2390 so->so_rcv.sb_mb = m = m->m_next;
2393 so->so_rcv.sb_mb = m_free(m);
2394 m = so->so_rcv.sb_mb;
2396 sockbuf_pushsync(&so->so_rcv, nextrecord);
2397 SBLASTRECORDCHK(&so->so_rcv);
2398 SBLASTMBUFCHK(&so->so_rcv);
2401 if (flags & MSG_PEEK)
2405 if (flags & MSG_DONTWAIT) {
2406 *mp = m_copym(m, 0, len,
2410 * m_copym() couldn't allocate an mbuf.
2412 * Adjust uio_resid back (it was
2414 * adjusted down by len bytes,
2415 * which we didn't end
2416 * up "copying" over).
2418 uio->uio_resid += len;
2422 SOCKBUF_UNLOCK(&so->so_rcv);
2423 *mp = m_copym(m, 0, len,
2425 SOCKBUF_LOCK(&so->so_rcv);
2428 sbcut_locked(&so->so_rcv, len);
2431 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2432 if (so->so_oobmark) {
2433 if ((flags & MSG_PEEK) == 0) {
2434 so->so_oobmark -= len;
2435 if (so->so_oobmark == 0) {
2436 so->so_rcv.sb_state |= SBS_RCVATMARK;
2441 if (offset == so->so_oobmark)
2445 if (flags & MSG_EOR)
2448 * If the MSG_WAITALL flag is set (for a non-atomic socket), we
2449 * must not quit until "uio->uio_resid == 0" or an error
2450 * termination. If a signal/timeout occurs, return with a
2451 * short count but without error. Keep sockbuf locked
2452 * against other readers.
2454 while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
2455 !sosendallatonce(so) && nextrecord == NULL) {
2456 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2457 if (so->so_error || so->so_rerror ||
2458 so->so_rcv.sb_state & SBS_CANTRCVMORE)
2461 * Notify the protocol that some data has been
2462 * drained before blocking.
2464 if (pr->pr_flags & PR_WANTRCVD) {
2465 SOCKBUF_UNLOCK(&so->so_rcv);
2467 pr->pr_rcvd(so, flags);
2468 SOCKBUF_LOCK(&so->so_rcv);
2469 if (__predict_false(so->so_rcv.sb_mb == NULL &&
2470 (so->so_error || so->so_rerror ||
2471 so->so_rcv.sb_state & SBS_CANTRCVMORE)))
2474 SBLASTRECORDCHK(&so->so_rcv);
2475 SBLASTMBUFCHK(&so->so_rcv);
2477 * We could have received some data while we were notifying
2478 * the protocol. Skip blocking in this case.
2480 if (so->so_rcv.sb_mb == NULL) {
2481 error = sbwait(so, SO_RCV);
2483 SOCKBUF_UNLOCK(&so->so_rcv);
2487 m = so->so_rcv.sb_mb;
2489 nextrecord = m->m_nextpkt;
2493 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2494 if (m != NULL && pr->pr_flags & PR_ATOMIC) {
2495 if (report_real_len)
2496 uio->uio_resid -= m_length(m, NULL) - moff;
2498 if ((flags & MSG_PEEK) == 0)
2499 (void) sbdroprecord_locked(&so->so_rcv);
2501 if ((flags & MSG_PEEK) == 0) {
2504 * The first part is an inline SB_EMPTY_FIXUP(). The second
2505 * part makes sure sb_lastrecord is up-to-date if
2506 * there is still data in the socket buffer.
2508 so->so_rcv.sb_mb = nextrecord;
2509 if (so->so_rcv.sb_mb == NULL) {
2510 so->so_rcv.sb_mbtail = NULL;
2511 so->so_rcv.sb_lastrecord = NULL;
2512 } else if (nextrecord->m_nextpkt == NULL)
2513 so->so_rcv.sb_lastrecord = nextrecord;
2515 SBLASTRECORDCHK(&so->so_rcv);
2516 SBLASTMBUFCHK(&so->so_rcv);
2518 * If soreceive() is being done from the socket callback,
2519 * then we don't need to generate an ACK to the peer to update
2520 * the window, since the ACK will be generated on return to TCP.
2522 if (!(flags & MSG_SOCALLBCK) &&
2523 (pr->pr_flags & PR_WANTRCVD)) {
2524 SOCKBUF_UNLOCK(&so->so_rcv);
2526 pr->pr_rcvd(so, flags);
2527 SOCKBUF_LOCK(&so->so_rcv);
2530 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2531 if (orig_resid == uio->uio_resid && orig_resid &&
2532 (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
2533 SOCKBUF_UNLOCK(&so->so_rcv);
2536 SOCKBUF_UNLOCK(&so->so_rcv);
2541 SOCK_IO_RECV_UNLOCK(so);
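/*
 * Illustrative sketch (hypothetical caller, not compiled): how kernel
 * code drives the public soreceive() entry point with a UIO_SYSSPACE
 * uio.  MSG_DONTWAIT avoids sleeping in sbwait(); a caller that can
 * sleep would clear it.  Error handling is abbreviated.
 */
#if 0
static int
example_soreceive_read(struct socket *so, void *buf, size_t buflen,
    size_t *donep)
{
	struct uio auio;
	struct iovec aiov;
	int error, flags;

	aiov.iov_base = buf;
	aiov.iov_len = buflen;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = curthread;
	flags = MSG_DONTWAIT;		/* do not block in sbwait() */
	error = soreceive(so, NULL, &auio, NULL, NULL, &flags);
	if (error == 0)
		*donep = buflen - auio.uio_resid;
	return (error);
}
#endif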
2546 * Optimized version of soreceive() for stream (TCP) sockets.
2549 soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio,
2550 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2552 int len = 0, error = 0, flags, oresid;
2554 struct mbuf *m, *n = NULL;
2556 /* We only do stream sockets. */
2557 if (so->so_type != SOCK_STREAM)
2562 flags = *flagsp &~ MSG_EOR;
2565 if (controlp != NULL)
2567 if (flags & MSG_OOB)
2568 return (soreceive_rcvoob(so, uio, flags));
2576 * KTLS stores TLS records as records with a control message to
2577 * describe the framing.
2579 * We check once here before acquiring locks to optimize the
2582 if (sb->sb_tls_info != NULL)
2583 return (soreceive_generic(so, psa, uio, mp0, controlp,
2587 /* Prevent other readers from entering the socket. */
2588 error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
2594 if (sb->sb_tls_info != NULL) {
2596 SOCK_IO_RECV_UNLOCK(so);
2597 return (soreceive_generic(so, psa, uio, mp0, controlp,
2602 /* Easy one, no space to copyout anything. */
2603 if (uio->uio_resid == 0) {
2607 oresid = uio->uio_resid;
2609 /* We will never ever get anything unless we are or were connected. */
2610 if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {
2616 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2618 /* Abort if socket has reported problems. */
2620 if (sbavail(sb) > 0)
2622 if (oresid > uio->uio_resid)
2624 error = so->so_error;
2625 if (!(flags & MSG_PEEK))
2630 /* Door is closed. Deliver what is left, if any. */
2631 if (sb->sb_state & SBS_CANTRCVMORE) {
2632 if (sbavail(sb) > 0)
2638 /* Socket buffer is empty and we shall not block. */
2639 if (sbavail(sb) == 0 &&
2640 ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) {
2645 /* Socket buffer got some data that we shall deliver now. */
2646 if (sbavail(sb) > 0 && !(flags & MSG_WAITALL) &&
2647 ((so->so_state & SS_NBIO) ||
2648 (flags & (MSG_DONTWAIT|MSG_NBIO)) ||
2649 sbavail(sb) >= sb->sb_lowat ||
2650 sbavail(sb) >= uio->uio_resid ||
2651 sbavail(sb) >= sb->sb_hiwat)) {
2655 /* On MSG_WAITALL we must wait until all data or error arrives. */
2656 if ((flags & MSG_WAITALL) &&
2657 (sbavail(sb) >= uio->uio_resid || sbavail(sb) >= sb->sb_hiwat))
2661 * Wait and block until (more) data comes in.
2662 * NB: Drops the sockbuf lock during wait.
2664 error = sbwait(so, SO_RCV);
2670 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2671 KASSERT(sbavail(sb) > 0, ("%s: sockbuf empty", __func__));
2672 KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__));
2676 uio->uio_td->td_ru.ru_msgrcv++;
2678 /* Fill uio until full or current end of socket buffer is reached. */
2679 len = min(uio->uio_resid, sbavail(sb));
2681 /* Dequeue as many mbufs as possible. */
2682 if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) {
2686 m_cat(*mp0, sb->sb_mb);
2688 m != NULL && m->m_len <= len;
2690 KASSERT(!(m->m_flags & M_NOTAVAIL),
2691 ("%s: m %p not available", __func__, m));
2693 uio->uio_resid -= m->m_len;
2699 sb->sb_lastrecord = sb->sb_mb;
2700 if (sb->sb_mb == NULL)
2703 /* Copy the remainder. */
2705 KASSERT(sb->sb_mb != NULL,
2706 ("%s: len > 0 && sb->sb_mb empty", __func__));
2708 m = m_copym(sb->sb_mb, 0, len, M_NOWAIT);
2710 len = 0; /* Don't flush data from sockbuf. */
2712 uio->uio_resid -= len;
2723 /* NB: Must unlock socket buffer as uiomove may sleep. */
2725 error = m_mbuftouio(uio, sb->sb_mb, len);
2730 SBLASTRECORDCHK(sb);
2734 * Remove the delivered data from the socket buffer unless we
2735 * were only peeking.
2737 if (!(flags & MSG_PEEK)) {
2739 sbdrop_locked(sb, len);
2741 /* Notify protocol that we drained some data. */
2742 if ((so->so_proto->pr_flags & PR_WANTRCVD) &&
2743 (((flags & MSG_WAITALL) && uio->uio_resid > 0) ||
2744 !(flags & MSG_SOCALLBCK))) {
2747 so->so_proto->pr_rcvd(so, flags);
2753 * For MSG_WAITALL we may have to loop again and wait for
2754 * more data to come in.
2756 if ((flags & MSG_WAITALL) && uio->uio_resid > 0)
2759 SBLASTRECORDCHK(sb);
2762 SOCK_IO_RECV_UNLOCK(so);
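/*
 * Userland view of the MSG_WAITALL loop above (hypothetical snippet,
 * assuming a connected stream socket 's'): the kernel keeps looping in
 * soreceive_stream() until the full request, EOF, or an error, whereas
 * a plain recv(2) may return a short count once sb_lowat is satisfied.
 */
#if 0
	char buf[4096];
	ssize_t n;

	n = recv(s, buf, sizeof(buf), MSG_WAITALL);
	/* n == sizeof(buf) unless EOF, a signal, or an error occurred. */
#endif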
2767 * Optimized version of soreceive() for simple datagram cases from userspace.
2768 * Unlike in the stream case, we're able to drop a datagram if copyout()
2769 * fails, and because we handle datagrams atomically, we don't need to use a
2770 * sleep lock to prevent I/O interlacing.
2773 soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio,
2774 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2776 struct mbuf *m, *m2;
2779 struct protosw *pr = so->so_proto;
2780 struct mbuf *nextrecord;
2784 if (controlp != NULL)
2787 flags = *flagsp &~ MSG_EOR;
2792 * For any complicated cases, fall back to the full
2793 * soreceive_generic().
2795 if (mp0 != NULL || (flags & (MSG_PEEK | MSG_OOB | MSG_TRUNC)))
2796 return (soreceive_generic(so, psa, uio, mp0, controlp,
2800 * Enforce restrictions on use.
2802 KASSERT((pr->pr_flags & PR_WANTRCVD) == 0,
2803 ("soreceive_dgram: wantrcvd"));
2804 KASSERT(pr->pr_flags & PR_ATOMIC, ("soreceive_dgram: !atomic"));
2805 KASSERT((so->so_rcv.sb_state & SBS_RCVATMARK) == 0,
2806 ("soreceive_dgram: SBS_RCVATMARK"));
2807 KASSERT((so->so_proto->pr_flags & PR_CONNREQUIRED) == 0,
2808 ("soreceive_dgram: P_CONNREQUIRED"));
2811 * Loop blocking while waiting for a datagram.
2813 SOCKBUF_LOCK(&so->so_rcv);
2814 while ((m = so->so_rcv.sb_mb) == NULL) {
2815 KASSERT(sbavail(&so->so_rcv) == 0,
2816 ("soreceive_dgram: sb_mb NULL but sbavail %u",
2817 sbavail(&so->so_rcv)));
2819 error = so->so_error;
2821 SOCKBUF_UNLOCK(&so->so_rcv);
2824 if (so->so_rcv.sb_state & SBS_CANTRCVMORE ||
2825 uio->uio_resid == 0) {
2826 SOCKBUF_UNLOCK(&so->so_rcv);
2829 if ((so->so_state & SS_NBIO) ||
2830 (flags & (MSG_DONTWAIT|MSG_NBIO))) {
2831 SOCKBUF_UNLOCK(&so->so_rcv);
2832 return (EWOULDBLOCK);
2834 SBLASTRECORDCHK(&so->so_rcv);
2835 SBLASTMBUFCHK(&so->so_rcv);
2836 error = sbwait(so, SO_RCV);
2838 SOCKBUF_UNLOCK(&so->so_rcv);
2842 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2845 uio->uio_td->td_ru.ru_msgrcv++;
2846 SBLASTRECORDCHK(&so->so_rcv);
2847 SBLASTMBUFCHK(&so->so_rcv);
2848 nextrecord = m->m_nextpkt;
2849 if (nextrecord == NULL) {
2850 KASSERT(so->so_rcv.sb_lastrecord == m,
2851 ("soreceive_dgram: lastrecord != m"));
2854 KASSERT(so->so_rcv.sb_mb->m_nextpkt == nextrecord,
2855 ("soreceive_dgram: m_nextpkt != nextrecord"));
2858 * Pull 'm' and its chain off the front of the packet queue.
2860 so->so_rcv.sb_mb = NULL;
2861 sockbuf_pushsync(&so->so_rcv, nextrecord);
2864 * Walk 'm's chain and free that many bytes from the socket buffer.
2866 for (m2 = m; m2 != NULL; m2 = m2->m_next)
2867 sbfree(&so->so_rcv, m2);
2870 * Do a few last checks before we let go of the lock.
2872 SBLASTRECORDCHK(&so->so_rcv);
2873 SBLASTMBUFCHK(&so->so_rcv);
2874 SOCKBUF_UNLOCK(&so->so_rcv);
2876 if (pr->pr_flags & PR_ADDR) {
2877 KASSERT(m->m_type == MT_SONAME,
2878 ("m->m_type == %d", m->m_type));
2880 *psa = sodupsockaddr(mtod(m, struct sockaddr *),
2885 /* XXXRW: Can this happen? */
2890 * Packet to copyout() is now in 'm' and it is disconnected from the
2893 * Process one or more MT_CONTROL mbufs present before any data mbufs
2894 * in the first mbuf chain on the socket buffer. We call into the
2895 * protocol to perform externalization (or freeing if controlp ==
2896 * NULL). In some cases there can be only MT_CONTROL mbufs without
2899 if (m->m_type == MT_CONTROL) {
2900 struct mbuf *cm = NULL, *cmn;
2901 struct mbuf **cme = &cm;
2907 cme = &(*cme)->m_next;
2909 } while (m != NULL && m->m_type == MT_CONTROL);
2910 while (cm != NULL) {
2913 if (pr->pr_domain->dom_externalize != NULL) {
2914 error = (*pr->pr_domain->dom_externalize)
2915 (cm, controlp, flags);
2916 } else if (controlp != NULL)
2920 if (controlp != NULL) {
2921 while (*controlp != NULL)
2922 controlp = &(*controlp)->m_next;
2927 KASSERT(m == NULL || m->m_type == MT_DATA,
2928 ("soreceive_dgram: !data"));
2929 while (m != NULL && uio->uio_resid > 0) {
2930 len = uio->uio_resid;
2933 error = uiomove(mtod(m, char *), (int)len, uio);
2938 if (len == m->m_len)
2955 soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
2956 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2960 CURVNET_SET(so->so_vnet);
2961 error = so->so_proto->pr_soreceive(so, psa, uio, mp0, controlp, flagsp);
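/*
 * Illustrative sketch of how the pr_soreceive dispatch above is wired
 * up: a protocol selects one of the receive implementations in its
 * protosw.  The initializer is hypothetical and abbreviated; a real
 * protocol fills in many more members.
 */
#if 0
static struct protosw example_dgram_protosw = {
	.pr_type = SOCK_DGRAM,
	.pr_flags = PR_ATOMIC | PR_ADDR,
	.pr_soreceive = soreceive_dgram,
};
#endif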
2967 soshutdown(struct socket *so, int how)
2970 int error, soerror_enotconn;
2972 if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
2975 soerror_enotconn = 0;
2978 (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) {
2980 * POSIX mandates that we return ENOTCONN when shutdown(2) is
2981 * invoked on a datagram socket; however, historically we would
2982 * actually tear the socket down. This is known to be leveraged
2983 * by some applications: one process unblocks another that is
2984 * waiting in recvXXX(2) on a socket they share. Try to meet
2985 * both backward-compatibility and POSIX requirements by forcing
2986 * ENOTCONN but still asking the protocol to perform pru_shutdown().
2988 if (so->so_type != SOCK_DGRAM && !SOLISTENING(so)) {
2992 soerror_enotconn = 1;
2995 if (SOLISTENING(so)) {
2996 if (how != SHUT_WR) {
2997 so->so_error = ECONNABORTED;
2998 solisten_wakeup(so); /* unlocks so */
3006 CURVNET_SET(so->so_vnet);
3008 if (pr->pr_flush != NULL)
3009 pr->pr_flush(so, how);
3012 if (how != SHUT_RD) {
3013 error = pr->pr_shutdown(so);
3014 wakeup(&so->so_timeo);
3016 return ((error == 0 && soerror_enotconn) ? ENOTCONN : error);
3018 wakeup(&so->so_timeo);
3022 return (soerror_enotconn ? ENOTCONN : 0);
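/*
 * Userland illustration of the compatibility rule above (hypothetical
 * snippet, assuming a datagram socket 's'): shutdown(2) fails with
 * ENOTCONN on an unconnected datagram socket, yet a thread blocked in
 * recvfrom(2) on the same socket is still woken up, because the
 * protocol-level shutdown was performed anyway.
 */
#if 0
	if (shutdown(s, SHUT_RDWR) == -1 && errno == ENOTCONN) {
		/* Expected for SOCK_DGRAM; the blocked reader returns. */
	}
#endif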
3026 sorflush(struct socket *so)
3034 * Dislodge threads currently blocked in receive and wait to acquire
3035 * a lock against other simultaneous readers before clearing the
3036 * socket buffer. Don't let our acquire be interrupted by a signal
3037 * despite any existing socket disposition on interruptible waiting.
3041 error = SOCK_IO_RECV_LOCK(so, SBL_WAIT | SBL_NOINTR);
3043 KASSERT(SOLISTENING(so),
3044 ("%s: soiolock(%p) failed", __func__, so));
3049 if (pr->pr_flags & PR_RIGHTS) {
3050 MPASS(pr->pr_domain->dom_dispose != NULL);
3051 (*pr->pr_domain->dom_dispose)(so);
3053 sbrelease(so, SO_RCV);
3054 SOCK_IO_RECV_UNLOCK(so);
3060 * Wrapper for running a socket helper hook.
3061 * Parameters: socket, context of the hook point, hook id.
3064 hhook_run_socket(struct socket *so, void *hctx, int32_t h_id)
3066 struct socket_hhook_data hhook_data = {
3073 CURVNET_SET(so->so_vnet);
3074 HHOOKS_RUN_IF(V_socket_hhh[h_id], &hhook_data, &so->osd);
3077 /* Ugly but needed, since hhooks return void for now */
3078 return (hhook_data.status);
3082 * Perhaps this routine, and sooptcopyout(), below, ought to come in an
3083 * additional variant to handle the case where the option value needs to be
3084 * some kind of integer, but not a specific size. In addition to their use
3085 * here, these functions are also called by the protocol-level pr_ctloutput()
3089 sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
3094 * If the user gives us more than we wanted, we ignore it, but if we
3095 * don't get the minimum length the caller wants, we return EINVAL.
3096 * On success, sopt->sopt_valsize is set to however much we actually
3099 if ((valsize = sopt->sopt_valsize) < minlen)
3102 sopt->sopt_valsize = valsize = len;
3104 if (sopt->sopt_td != NULL)
3105 return (copyin(sopt->sopt_val, buf, valsize));
3107 bcopy(sopt->sopt_val, buf, valsize);
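/*
 * A minimal sketch (hypothetical handler name, not compiled) of the
 * sooptcopyin() pattern used by protocol pr_ctloutput() handlers for
 * an integer-valued option: reject short buffers, validate, then
 * apply.
 */
#if 0
static int
example_ctloutput_set(struct socket *so, struct sockopt *sopt)
{
	int error, optval;

	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
	if (error != 0)
		return (error);
	if (optval < 0)
		return (EINVAL);
	/* ... apply optval to protocol state ... */
	return (0);
}
#endif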
3112 * Kernel version of setsockopt(2).
3114 * XXX: optlen is size_t, not socklen_t
3117 so_setsockopt(struct socket *so, int level, int optname, void *optval,
3120 struct sockopt sopt;
3122 sopt.sopt_level = level;
3123 sopt.sopt_name = optname;
3124 sopt.sopt_dir = SOPT_SET;
3125 sopt.sopt_val = optval;
3126 sopt.sopt_valsize = optlen;
3127 sopt.sopt_td = NULL;
3128 return (sosetopt(so, &sopt));
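/*
 * Usage sketch for the kernel-level wrapper above (hypothetical
 * caller): enable SO_KEEPALIVE on a kernel-owned socket without
 * building a struct sockopt by hand.
 */
#if 0
	int error, one = 1;

	error = so_setsockopt(so, SOL_SOCKET, SO_KEEPALIVE, &one,
	    sizeof(one));
#endif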
3132 sosetopt(struct socket *so, struct sockopt *sopt)
3137 sbintime_t val, *valp;
3143 CURVNET_SET(so->so_vnet);
3145 if (sopt->sopt_level != SOL_SOCKET) {
3146 if (so->so_proto->pr_ctloutput != NULL)
3147 error = (*so->so_proto->pr_ctloutput)(so, sopt);
3149 error = ENOPROTOOPT;
3151 switch (sopt->sopt_name) {
3152 case SO_ACCEPTFILTER:
3153 error = accept_filt_setopt(so, sopt);
3159 error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
3162 if (l.l_linger < 0 ||
3163 l.l_linger > USHRT_MAX ||
3164 l.l_linger > (INT_MAX / hz)) {
3169 so->so_linger = l.l_linger;
3171 so->so_options |= SO_LINGER;
3173 so->so_options &= ~SO_LINGER;
3180 case SO_USELOOPBACK:
3184 case SO_REUSEPORT_LB:
3192 error = sooptcopyin(sopt, &optval, sizeof optval,
3198 so->so_options |= sopt->sopt_name;
3200 so->so_options &= ~sopt->sopt_name;
3205 error = sooptcopyin(sopt, &optval, sizeof optval,
3210 if (optval < 0 || optval >= rt_numfibs) {
3214 if (((so->so_proto->pr_domain->dom_family == PF_INET) ||
3215 (so->so_proto->pr_domain->dom_family == PF_INET6) ||
3216 (so->so_proto->pr_domain->dom_family == PF_ROUTE)))
3217 so->so_fibnum = optval;
3222 case SO_USER_COOKIE:
3223 error = sooptcopyin(sopt, &val32, sizeof val32,
3227 so->so_user_cookie = val32;
3234 error = so->so_proto->pr_setsbopt(so, sopt);
3241 #ifdef COMPAT_FREEBSD32
3242 if (SV_CURPROC_FLAG(SV_ILP32)) {
3243 struct timeval32 tv32;
3245 error = sooptcopyin(sopt, &tv32, sizeof tv32,
3247 CP(tv32, tv, tv_sec);
3248 CP(tv32, tv, tv_usec);
3251 error = sooptcopyin(sopt, &tv, sizeof tv,
3255 if (tv.tv_sec < 0 || tv.tv_usec < 0 ||
3256 tv.tv_usec >= 1000000) {
3260 if (tv.tv_sec > INT32_MAX)
3265 valp = sopt->sopt_name == SO_SNDTIMEO ?
3266 (SOLISTENING(so) ? &so->sol_sbsnd_timeo :
3267 &so->so_snd.sb_timeo) :
3268 (SOLISTENING(so) ? &so->sol_sbrcv_timeo :
3269 &so->so_rcv.sb_timeo);
3276 error = sooptcopyin(sopt, &extmac, sizeof extmac,
3280 error = mac_setsockopt_label(sopt->sopt_td->td_ucred,
3288 error = sooptcopyin(sopt, &optval, sizeof optval,
3292 if (optval < 0 || optval > SO_TS_CLOCK_MAX) {
3296 so->so_ts_clock = optval;
3299 case SO_MAX_PACING_RATE:
3300 error = sooptcopyin(sopt, &val32, sizeof(val32),
3304 so->so_max_pacing_rate = val32;
3308 if (V_socket_hhh[HHOOK_SOCKET_OPT]->hhh_nhooks > 0)
3309 error = hhook_run_socket(so, sopt,
3312 error = ENOPROTOOPT;
3315 if (error == 0 && so->so_proto->pr_ctloutput != NULL)
3316 (void)(*so->so_proto->pr_ctloutput)(so, sopt);
3324 * Helper routine for getsockopt.
3327 sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
3335 * Documented get behavior is that we always return a value, possibly
3336 * truncated to fit in the user's buffer. Traditional behavior is
3337 * that we always tell the user precisely how much we copied, rather
3338 * than something useful like the total amount we had available for
3339 * her. Note that this interface is not idempotent; the entire
3340 * answer must be generated ahead of time.
3342 valsize = min(len, sopt->sopt_valsize);
3343 sopt->sopt_valsize = valsize;
3344 if (sopt->sopt_val != NULL) {
3345 if (sopt->sopt_td != NULL)
3346 error = copyout(buf, sopt->sopt_val, valsize);
3348 bcopy(buf, sopt->sopt_val, valsize);
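/*
 * The get-side counterpart to the sooptcopyin() sketch earlier
 * (hypothetical handler name): generate the whole answer first, then
 * hand it to sooptcopyout(), which truncates to the user's buffer.
 */
#if 0
static int
example_ctloutput_get(struct socket *so, struct sockopt *sopt)
{
	int optval;

	optval = 0;	/* ... fetch from protocol state ... */
	return (sooptcopyout(sopt, &optval, sizeof(optval)));
}
#endif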
3354 sogetopt(struct socket *so, struct sockopt *sopt)
3363 CURVNET_SET(so->so_vnet);
3365 if (sopt->sopt_level != SOL_SOCKET) {
3366 if (so->so_proto->pr_ctloutput != NULL)
3367 error = (*so->so_proto->pr_ctloutput)(so, sopt);
3369 error = ENOPROTOOPT;
3373 switch (sopt->sopt_name) {
3374 case SO_ACCEPTFILTER:
3375 error = accept_filt_getopt(so, sopt);
3380 l.l_onoff = so->so_options & SO_LINGER;
3381 l.l_linger = so->so_linger;
3383 error = sooptcopyout(sopt, &l, sizeof l);
3386 case SO_USELOOPBACK:
3392 case SO_REUSEPORT_LB:
3402 optval = so->so_options & sopt->sopt_name;
3404 error = sooptcopyout(sopt, &optval, sizeof optval);
3408 optval = so->so_proto->pr_domain->dom_family;
3412 optval = so->so_type;
3416 optval = so->so_proto->pr_protocol;
3422 optval = so->so_error;
3425 optval = so->so_rerror;
3432 optval = SOLISTENING(so) ? so->sol_sbsnd_hiwat :
3433 so->so_snd.sb_hiwat;
3437 optval = SOLISTENING(so) ? so->sol_sbrcv_hiwat :
3438 so->so_rcv.sb_hiwat;
3442 optval = SOLISTENING(so) ? so->sol_sbsnd_lowat :
3443 so->so_snd.sb_lowat;
3447 optval = SOLISTENING(so) ? so->sol_sbrcv_lowat :
3448 so->so_rcv.sb_lowat;
3454 tv = sbttotv(sopt->sopt_name == SO_SNDTIMEO ?
3455 (SOLISTENING(so) ? so->sol_sbsnd_timeo :
3456 so->so_snd.sb_timeo) :
3457 (SOLISTENING(so) ? so->sol_sbrcv_timeo :
3458 so->so_rcv.sb_timeo));
3460 #ifdef COMPAT_FREEBSD32
3461 if (SV_CURPROC_FLAG(SV_ILP32)) {
3462 struct timeval32 tv32;
3464 CP(tv, tv32, tv_sec);
3465 CP(tv, tv32, tv_usec);
3466 error = sooptcopyout(sopt, &tv32, sizeof tv32);
3469 error = sooptcopyout(sopt, &tv, sizeof tv);
3474 error = sooptcopyin(sopt, &extmac, sizeof(extmac),
3478 error = mac_getsockopt_label(sopt->sopt_td->td_ucred,
3482 /* Don't copy out extmac, it is unchanged. */
3490 error = sooptcopyin(sopt, &extmac, sizeof(extmac),
3494 error = mac_getsockopt_peerlabel(
3495 sopt->sopt_td->td_ucred, so, &extmac);
3498 /* Don't copy out extmac, it is unchanged. */
3504 case SO_LISTENQLIMIT:
3505 optval = SOLISTENING(so) ? so->sol_qlimit : 0;
3509 optval = SOLISTENING(so) ? so->sol_qlen : 0;
3512 case SO_LISTENINCQLEN:
3513 optval = SOLISTENING(so) ? so->sol_incqlen : 0;
3517 optval = so->so_ts_clock;
3520 case SO_MAX_PACING_RATE:
3521 optval = so->so_max_pacing_rate;
3525 if (V_socket_hhh[HHOOK_SOCKET_OPT]->hhh_nhooks > 0)
3526 error = hhook_run_socket(so, sopt,
3529 error = ENOPROTOOPT;
3541 soopt_getm(struct sockopt *sopt, struct mbuf **mp)
3543 struct mbuf *m, *m_prev;
3544 int sopt_size = sopt->sopt_valsize;
3546 MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA);
3549 if (sopt_size > MLEN) {
3550 MCLGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT);
3551 if ((m->m_flags & M_EXT) == 0) {
3555 m->m_len = min(MCLBYTES, sopt_size);
3557 m->m_len = min(MLEN, sopt_size);
3559 sopt_size -= m->m_len;
3564 MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA);
3569 if (sopt_size > MLEN) {
3570 MCLGET(m, sopt->sopt_td != NULL ? M_WAITOK :
3572 if ((m->m_flags & M_EXT) == 0) {
3577 m->m_len = min(MCLBYTES, sopt_size);
3579 m->m_len = min(MLEN, sopt_size);
3581 sopt_size -= m->m_len;
3589 soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
3591 struct mbuf *m0 = m;
3593 if (sopt->sopt_val == NULL)
3595 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
3596 if (sopt->sopt_td != NULL) {
3599 error = copyin(sopt->sopt_val, mtod(m, char *),
3606 bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
3607 sopt->sopt_valsize -= m->m_len;
3608 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
3611 if (m != NULL) /* should have been allocated with enough space at ip6_sooptmcopyin() */
3612 panic("ip6_sooptmcopyin");
3617 soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
3619 struct mbuf *m0 = m;
3622 if (sopt->sopt_val == NULL)
3624 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
3625 if (sopt->sopt_td != NULL) {
3628 error = copyout(mtod(m, char *), sopt->sopt_val,
3635 bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
3636 sopt->sopt_valsize -= m->m_len;
3637 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
3638 valsize += m->m_len;
3642 /* a sufficiently large soopt buffer should be supplied by user-land */
3646 sopt->sopt_valsize = valsize;
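/*
 * Illustrative set-path pairing of the two helpers above (hypothetical
 * function name): soopt_getm() sizes an mbuf chain from sopt_valsize,
 * then soopt_mcopyin() fills it from the option buffer; the chain is
 * then handed to the protocol.  Error handling is abbreviated.
 */
#if 0
static int
example_soopt_to_mbuf(struct sockopt *sopt, struct mbuf **mp)
{
	int error;

	error = soopt_getm(sopt, mp);
	if (error == 0)
		error = soopt_mcopyin(sopt, *mp);
	return (error);
}
#endif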
3651 * sohasoutofband(): protocol notifies socket layer of the arrival of new
3652 * out-of-band data, which will then notify socket consumers.
3655 sohasoutofband(struct socket *so)
3658 if (so->so_sigio != NULL)
3659 pgsigio(&so->so_sigio, SIGURG, 0);
3660 selwakeuppri(&so->so_rdsel, PSOCK);
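/*
 * Userland view of the notification above (hypothetical snippet,
 * assuming a stream socket 's'): the SIGURG/selwakeup pair is what
 * makes poll(2) report POLLPRI so the urgent byte can be fetched with
 * MSG_OOB.
 */
#if 0
	struct pollfd pfd = { .fd = s, .events = POLLPRI };
	char oob;

	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLPRI))
		(void)recv(s, &oob, 1, MSG_OOB);
#endif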
3664 sopoll(struct socket *so, int events, struct ucred *active_cred,
3669 * We do not need to set or assert curvnet as long as everyone uses
3672 return (so->so_proto->pr_sopoll(so, events, active_cred, td));
3676 sopoll_generic(struct socket *so, int events, struct ucred *active_cred,
3682 if (SOLISTENING(so)) {
3683 if (!(events & (POLLIN | POLLRDNORM)))
3685 else if (!TAILQ_EMPTY(&so->sol_comp))
3686 revents = events & (POLLIN | POLLRDNORM);
3687 else if ((events & POLLINIGNEOF) == 0 && so->so_error)
3688 revents = (events & (POLLIN | POLLRDNORM)) | POLLHUP;
3690 selrecord(td, &so->so_rdsel);
3695 SOCK_SENDBUF_LOCK(so);
3696 SOCK_RECVBUF_LOCK(so);
3697 if (events & (POLLIN | POLLRDNORM))
3698 if (soreadabledata(so))
3699 revents |= events & (POLLIN | POLLRDNORM);
3700 if (events & (POLLOUT | POLLWRNORM))
3701 if (sowriteable(so))
3702 revents |= events & (POLLOUT | POLLWRNORM);
3703 if (events & (POLLPRI | POLLRDBAND))
3704 if (so->so_oobmark ||
3705 (so->so_rcv.sb_state & SBS_RCVATMARK))
3706 revents |= events & (POLLPRI | POLLRDBAND);
3707 if ((events & POLLINIGNEOF) == 0) {
3708 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
3709 revents |= events & (POLLIN | POLLRDNORM);
3710 if (so->so_snd.sb_state & SBS_CANTSENDMORE)
3714 if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
3715 revents |= events & POLLRDHUP;
3718 (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND | POLLRDHUP)) {
3719 selrecord(td, &so->so_rdsel);
3720 so->so_rcv.sb_flags |= SB_SEL;
3722 if (events & (POLLOUT | POLLWRNORM)) {
3723 selrecord(td, &so->so_wrsel);
3724 so->so_snd.sb_flags |= SB_SEL;
3727 SOCK_RECVBUF_UNLOCK(so);
3728 SOCK_SENDBUF_UNLOCK(so);
3735 soo_kqfilter(struct file *fp, struct knote *kn)
3737 struct socket *so = kn->kn_fp->f_data;
3742 switch (kn->kn_filter) {
3744 kn->kn_fop = &soread_filtops;
3745 knl = &so->so_rdsel.si_note;
3750 kn->kn_fop = &sowrite_filtops;
3751 knl = &so->so_wrsel.si_note;
3756 kn->kn_fop = &soempty_filtops;
3757 knl = &so->so_wrsel.si_note;
3766 if (SOLISTENING(so)) {
3767 knlist_add(knl, kn, 1);
3769 SOCK_BUF_LOCK(so, which);
3770 knlist_add(knl, kn, 1);
3771 sb->sb_flags |= SB_KNOTE;
3772 SOCK_BUF_UNLOCK(so, which);
3779 filt_sordetach(struct knote *kn)
3781 struct socket *so = kn->kn_fp->f_data;
3784 knlist_remove(&so->so_rdsel.si_note, kn, 1);
3785 if (!SOLISTENING(so) && knlist_empty(&so->so_rdsel.si_note))
3786 so->so_rcv.sb_flags &= ~SB_KNOTE;
3787 so_rdknl_unlock(so);
3792 filt_soread(struct knote *kn, long hint)
3796 so = kn->kn_fp->f_data;
3798 if (SOLISTENING(so)) {
3799 SOCK_LOCK_ASSERT(so);
3800 kn->kn_data = so->sol_qlen;
3802 kn->kn_flags |= EV_EOF;
3803 kn->kn_fflags = so->so_error;
3806 return (!TAILQ_EMPTY(&so->sol_comp));
3809 SOCK_RECVBUF_LOCK_ASSERT(so);
3811 kn->kn_data = sbavail(&so->so_rcv) - so->so_rcv.sb_ctl;
3812 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
3813 kn->kn_flags |= EV_EOF;
3814 kn->kn_fflags = so->so_error;
3816 } else if (so->so_error || so->so_rerror)
3819 if (kn->kn_sfflags & NOTE_LOWAT) {
3820 if (kn->kn_data >= kn->kn_sdata)
3822 } else if (sbavail(&so->so_rcv) >= so->so_rcv.sb_lowat)
3825 /* This hook returning non-zero indicates an event, not an error */
3826 return (hhook_run_socket(so, NULL, HHOOK_FILT_SOREAD));
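/*
 * Userland view of the NOTE_LOWAT comparison above (hypothetical
 * snippet, assuming a socket 's'): arming an EVFILT_READ knote so it
 * fires only once at least 128 bytes are available, i.e. when
 * kn_data >= kn_sdata.
 */
#if 0
	struct kevent kev;
	int kq = kqueue();

	EV_SET(&kev, s, EVFILT_READ, EV_ADD, NOTE_LOWAT, 128, NULL);
	(void)kevent(kq, &kev, 1, NULL, 0, NULL);
#endif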
3830 filt_sowdetach(struct knote *kn)
3832 struct socket *so = kn->kn_fp->f_data;
3835 knlist_remove(&so->so_wrsel.si_note, kn, 1);
3836 if (!SOLISTENING(so) && knlist_empty(&so->so_wrsel.si_note))
3837 so->so_snd.sb_flags &= ~SB_KNOTE;
3838 so_wrknl_unlock(so);
3843 filt_sowrite(struct knote *kn, long hint)
3847 so = kn->kn_fp->f_data;
3849 if (SOLISTENING(so))
3852 SOCK_SENDBUF_LOCK_ASSERT(so);
3853 kn->kn_data = sbspace(&so->so_snd);
3855 hhook_run_socket(so, kn, HHOOK_FILT_SOWRITE);
3857 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
3858 kn->kn_flags |= EV_EOF;
3859 kn->kn_fflags = so->so_error;
3861 } else if (so->so_error) /* temporary udp error */
3863 else if (((so->so_state & SS_ISCONNECTED) == 0) &&
3864 (so->so_proto->pr_flags & PR_CONNREQUIRED))
3866 else if (kn->kn_sfflags & NOTE_LOWAT)
3867 return (kn->kn_data >= kn->kn_sdata);
3869 return (kn->kn_data >= so->so_snd.sb_lowat);
3873 filt_soempty(struct knote *kn, long hint)
3877 so = kn->kn_fp->f_data;
3879 if (SOLISTENING(so))
3882 SOCK_SENDBUF_LOCK_ASSERT(so);
3883 kn->kn_data = sbused(&so->so_snd);
3885 if (kn->kn_data == 0)
3892 socheckuid(struct socket *so, uid_t uid)
3897 if (so->so_cred->cr_uid != uid)
3903 * These functions are used by protocols to notify the socket layer (and its
3904 * consumers) of state changes in the sockets driven by protocol-side events.
3908 * Procedures to manipulate the state flags of a socket and do appropriate wakeups.
3910 * The normal sequence from the active (originating) side is that
3911 * soisconnecting() is called during processing of a connect() call, resulting
3912 * in an eventual call to soisconnected() if/when the connection is
3913 * established. When the connection is torn down, soisdisconnecting() is
3914 * called during processing of a disconnect() call, and soisdisconnected() is
3915 * called when the connection to the peer is totally severed. The semantics
3916 * of these routines are such that connectionless protocols can call
3917 * soisconnected() and soisdisconnected() only, bypassing the in-progress
3918 * calls when setting up a ``connection'' takes no time.
3920 * From the passive side, a socket is created with two queues of sockets:
3921 * so_incomp for connections in progress and so_comp for connections already
3922 * made and awaiting user acceptance. As a protocol is preparing incoming
3923 * connections, it creates a socket structure queued on so_incomp by calling
3924 * sonewconn(). When the connection is established, soisconnected() is
3925 * called, and transfers the socket structure to so_comp, making it available
3928 * If a socket is closed with sockets on either so_incomp or so_comp, these
3929 * sockets are dropped.
3931 * If higher-level protocols are implemented in the kernel, the wakeups done
3932 * here will sometimes cause software-interrupt process scheduling.
3935 soisconnecting(struct socket *so)
3939 so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
3940 so->so_state |= SS_ISCONNECTING;
3945 soisconnected(struct socket *so)
3947 bool last __diagused;
3950 so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
3951 so->so_state |= SS_ISCONNECTED;
3953 if (so->so_qstate == SQ_INCOMP) {
3954 struct socket *head = so->so_listen;
3957 KASSERT(head, ("%s: so %p on incomp of NULL", __func__, so));
3959 * When promoting a socket from the incomplete queue to the
3960 * complete queue, we need to take the locks in reverse order.
3961 * We first trylock, and if that doesn't succeed, we go the hard
3962 * way, leaving a reference and rechecking consistency after proper
3965 if (__predict_false(SOLISTEN_TRYLOCK(head) == 0)) {
3968 SOLISTEN_LOCK(head);
3970 if (__predict_false(head != so->so_listen)) {
3972 * The socket went off the listen queue; we
3973 * presumably lost a race with close(2) on the
3974 * listening socket ("sol"). The socket is about to be aborted via soabort().
3977 sorele_locked(head);
3980 last = refcount_release(&head->so_count);
3981 KASSERT(!last, ("%s: released last reference for %p",
3985 if ((so->so_options & SO_ACCEPTFILTER) == 0) {
3986 TAILQ_REMOVE(&head->sol_incomp, so, so_list);
3987 head->sol_incqlen--;
3988 TAILQ_INSERT_TAIL(&head->sol_comp, so, so_list);
3990 so->so_qstate = SQ_COMP;
3992 solisten_wakeup(head); /* unlocks */
3994 SOCK_RECVBUF_LOCK(so);
3995 soupcall_set(so, SO_RCV,
3996 head->sol_accept_filter->accf_callback,
3997 head->sol_accept_filter_arg);
3998 so->so_options &= ~SO_ACCEPTFILTER;
3999 ret = head->sol_accept_filter->accf_callback(so,
4000 head->sol_accept_filter_arg, M_NOWAIT);
4001 if (ret == SU_ISCONNECTED) {
4002 soupcall_clear(so, SO_RCV);
4003 SOCK_RECVBUF_UNLOCK(so);
4006 SOCK_RECVBUF_UNLOCK(so);
4008 SOLISTEN_UNLOCK(head);
4013 wakeup(&so->so_timeo);
4019 soisdisconnecting(struct socket *so)
4023 so->so_state &= ~SS_ISCONNECTING;
4024 so->so_state |= SS_ISDISCONNECTING;
4026 if (!SOLISTENING(so)) {
4027 SOCK_RECVBUF_LOCK(so);
4028 socantrcvmore_locked(so);
4029 SOCK_SENDBUF_LOCK(so);
4030 socantsendmore_locked(so);
4033 wakeup(&so->so_timeo);
4037 soisdisconnected(struct socket *so)
4043 * There is at least one reader of so_state that does not
4044 * acquire socket lock, namely soreceive_generic(). Ensure
4045 * that it never sees all flags that track connection status
4046 * cleared, by ordering the update with the release
4047 * semantics of a thread fence.
4049 so->so_state |= SS_ISDISCONNECTED;
4050 atomic_thread_fence_rel();
4051 so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
4053 if (!SOLISTENING(so)) {
4055 SOCK_RECVBUF_LOCK(so);
4056 socantrcvmore_locked(so);
4057 SOCK_SENDBUF_LOCK(so);
4058 sbdrop_locked(&so->so_snd, sbused(&so->so_snd));
4059 socantsendmore_locked(so);
4062 wakeup(&so->so_timeo);
4066 soiolock(struct socket *so, struct sx *sx, int flags)
4070 KASSERT((flags & SBL_VALID) == flags,
4071 ("soiolock: invalid flags %#x", flags));
4073 if ((flags & SBL_WAIT) != 0) {
4074 if ((flags & SBL_NOINTR) != 0) {
4077 error = sx_xlock_sig(sx);
4081 } else if (!sx_try_xlock(sx)) {
4082 return (EWOULDBLOCK);
4085 if (__predict_false(SOLISTENING(so))) {
4093 soiounlock(struct sx *sx)
4099 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.
4102 sodupsockaddr(const struct sockaddr *sa, int mflags)
4104 struct sockaddr *sa2;
4106 sa2 = malloc(sa->sa_len, M_SONAME, mflags);
4108 bcopy(sa, sa2, sa->sa_len);
4113 * Register per-socket destructor.
4116 sodtor_set(struct socket *so, so_dtor_t *func)
4119 SOCK_LOCK_ASSERT(so);
4124 * Register per-socket buffer upcalls.
4127 soupcall_set(struct socket *so, sb_which which, so_upcall_t func, void *arg)
4131 KASSERT(!SOLISTENING(so), ("%s: so %p listening", __func__, so));
4141 SOCK_BUF_LOCK_ASSERT(so, which);
4142 sb->sb_upcall = func;
4143 sb->sb_upcallarg = arg;
4144 sb->sb_flags |= SB_UPCALL;
4148 soupcall_clear(struct socket *so, sb_which which)
4152 KASSERT(!SOLISTENING(so), ("%s: so %p listening", __func__, so));
4162 SOCK_BUF_LOCK_ASSERT(so, which);
4163 KASSERT(sb->sb_upcall != NULL,
4164 ("%s: so %p no upcall to clear", __func__, so));
4165 sb->sb_upcall = NULL;
4166 sb->sb_upcallarg = NULL;
4167 sb->sb_flags &= ~SB_UPCALL;
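/*
 * Registration sketch for the pair above (hypothetical callback
 * names): the receive buffer lock must be held across soupcall_set(),
 * and the callback later runs from the wakeup path with that lock
 * held, returning SU_OK to keep the upcall armed.
 */
#if 0
static int
example_rcv_upcall(struct socket *so, void *arg, int waitflag)
{
	/* Called with the receive buffer lock held. */
	return (SU_OK);
}

static void
example_arm_upcall(struct socket *so)
{
	SOCK_RECVBUF_LOCK(so);
	soupcall_set(so, SO_RCV, example_rcv_upcall, NULL);
	SOCK_RECVBUF_UNLOCK(so);
}
#endif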
4171 solisten_upcall_set(struct socket *so, so_upcall_t func, void *arg)
4174 SOLISTEN_LOCK_ASSERT(so);
4175 so->sol_upcall = func;
4176 so->sol_upcallarg = arg;
4180 so_rdknl_lock(void *arg)
4182 struct socket *so = arg;
4185 if (SOLISTENING(so)) {
4188 SOCK_RECVBUF_LOCK(so);
4189 if (__predict_false(SOLISTENING(so))) {
4190 SOCK_RECVBUF_UNLOCK(so);
4197 so_rdknl_unlock(void *arg)
4199 struct socket *so = arg;
4201 if (SOLISTENING(so))
4202 SOLISTEN_UNLOCK(so);
4204 SOCK_RECVBUF_UNLOCK(so);
4208 so_rdknl_assert_lock(void *arg, int what)
4210 struct socket *so = arg;
4212 if (what == LA_LOCKED) {
4213 if (SOLISTENING(so))
4214 SOLISTEN_LOCK_ASSERT(so);
4216 SOCK_RECVBUF_LOCK_ASSERT(so);
4218 if (SOLISTENING(so))
4219 SOLISTEN_UNLOCK_ASSERT(so);
4221 SOCK_RECVBUF_UNLOCK_ASSERT(so);
4226 so_wrknl_lock(void *arg)
4228 struct socket *so = arg;
4231 if (SOLISTENING(so)) {
4234 SOCK_SENDBUF_LOCK(so);
4235 if (__predict_false(SOLISTENING(so))) {
4236 SOCK_SENDBUF_UNLOCK(so);
4243 so_wrknl_unlock(void *arg)
4245 struct socket *so = arg;
4247 if (SOLISTENING(so))
4248 SOLISTEN_UNLOCK(so);
4250 SOCK_SENDBUF_UNLOCK(so);
4254 so_wrknl_assert_lock(void *arg, int what)
4256 struct socket *so = arg;
4258 if (what == LA_LOCKED) {
4259 if (SOLISTENING(so))
4260 SOLISTEN_LOCK_ASSERT(so);
4262 SOCK_SENDBUF_LOCK_ASSERT(so);
4264 if (SOLISTENING(so))
4265 SOLISTEN_UNLOCK_ASSERT(so);
4267 SOCK_SENDBUF_UNLOCK_ASSERT(so);
4272 * Create an external-format (``xsocket'') structure using the information in
4273 * the kernel-format socket structure pointed to by so. This is done to
4274 * reduce the spew of irrelevant information over this interface, to isolate
4275 * user code from changes in the kernel structure, and potentially to provide
4276 * information-hiding if we decide that some of this information should be
4277 * hidden from users.
4280 sotoxsocket(struct socket *so, struct xsocket *xso)
4283 bzero(xso, sizeof(*xso));
4284 xso->xso_len = sizeof *xso;
4285 xso->xso_so = (uintptr_t)so;
4286 xso->so_type = so->so_type;
4287 xso->so_options = so->so_options;
4288 xso->so_linger = so->so_linger;
4289 xso->so_state = so->so_state;
4290 xso->so_pcb = (uintptr_t)so->so_pcb;
4291 xso->xso_protocol = so->so_proto->pr_protocol;
4292 xso->xso_family = so->so_proto->pr_domain->dom_family;
4293 xso->so_timeo = so->so_timeo;
4294 xso->so_error = so->so_error;
4295 xso->so_uid = so->so_cred->cr_uid;
4296 xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
4297 if (SOLISTENING(so)) {
4298 xso->so_qlen = so->sol_qlen;
4299 xso->so_incqlen = so->sol_incqlen;
4300 xso->so_qlimit = so->sol_qlimit;
4301 xso->so_oobmark = 0;
4303 xso->so_state |= so->so_qstate;
4304 xso->so_qlen = xso->so_incqlen = xso->so_qlimit = 0;
4305 xso->so_oobmark = so->so_oobmark;
4306 sbtoxsockbuf(&so->so_snd, &xso->so_snd);
4307 sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
4312 so_sockbuf_rcv(struct socket *so)
4315 return (&so->so_rcv);
4319 so_sockbuf_snd(struct socket *so)
4322 return (&so->so_snd);
4326 so_state_get(const struct socket *so)
4329 return (so->so_state);
4333 so_state_set(struct socket *so, int val)
4340 so_options_get(const struct socket *so)
4343 return (so->so_options);
4347 so_options_set(struct socket *so, int val)
4350 so->so_options = val;
4354 so_error_get(const struct socket *so)
4357 return (so->so_error);
4361 so_error_set(struct socket *so, int val)
4368 so_linger_get(const struct socket *so)
4371 return (so->so_linger);
4375 so_linger_set(struct socket *so, int val)
4378 KASSERT(val >= 0 && val <= USHRT_MAX && val <= (INT_MAX / hz),
4379 ("%s: val %d out of range", __func__, val));
4381 so->so_linger = val;
4385 so_protosw_get(const struct socket *so)
4388 return (so->so_proto);
4392 so_protosw_set(struct socket *so, struct protosw *val)
4399 so_sorwakeup(struct socket *so)
4406 so_sowwakeup(struct socket *so)
4413 so_sorwakeup_locked(struct socket *so)
4416 sorwakeup_locked(so);
4420 so_sowwakeup_locked(struct socket *so)
4423 sowwakeup_locked(so);
4427 so_lock(struct socket *so)
4434 so_unlock(struct socket *so)