/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2004 The FreeBSD Foundation
 * Copyright (c) 2004-2007 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 */
/*
 * Comments on the socket life cycle:
 *
 * soalloc() sets up socket layer state for a socket, called only by
 * socreate() and sonewconn().  Socket layer private.
 *
 * sodealloc() tears down socket layer state for a socket, called only by
 * sofree() and sonewconn().  Socket layer private.
 *
 * pru_attach() associates protocol layer state with an allocated socket;
 * called only once, may fail, aborting socket allocation.  This is called
 * from socreate() and sonewconn().  Socket layer private.
 *
 * pru_detach() disassociates protocol layer state from an attached socket,
 * and will be called exactly once for sockets in which pru_attach() has
 * been successfully called.  If pru_attach() returned an error,
 * pru_detach() will not be called.  Socket layer private.
 *
 * pru_abort() and pru_close() notify the protocol layer that the last
 * consumer of a socket is starting to tear down the socket, and that the
 * protocol should terminate the connection.  Historically, pru_abort() also
 * detached protocol state from the socket state, but this is no longer the
 * case.
 *
 * socreate() creates a socket and attaches protocol state.  This is a public
 * interface that may be used by socket layer consumers to create new
 * sockets.
 *
 * sonewconn() creates a socket and attaches protocol state.  This is a
 * public interface that may be used by protocols to create new sockets when
 * a new connection is received and will be available for accept() on a
 * listen socket.
 *
 * soclose() destroys a socket after possibly waiting for it to disconnect.
 * This is a public interface that socket consumers should use to close and
 * release a socket when done with it.
 *
 * soabort() destroys a socket without waiting for it to disconnect (used
 * only for incoming connections that are already partially or fully
 * connected).  This is used internally by the socket layer when clearing
 * listen socket queues (due to overflow or close on the listen socket), but
 * is also a public interface protocols may use to abort connections in
 * their incomplete listen queues should they no longer be required.  Sockets
 * placed in completed connection listen queues should not be aborted for
 * reasons described in the comment above the soclose() implementation.  This
 * is not a general purpose close routine, and except in the specific
 * circumstances described here, should not be used.
 *
 * sofree() will free a socket and its protocol state if all references on
 * the socket have been released, and is the public interface to attempt to
 * free a socket when a reference is removed.  This is a socket layer private
 * interface.
 *
 * NOTE: In addition to socreate() and soclose(), which provide a single
 * socket reference to the consumer to be managed as required, there are two
 * calls to explicitly manage socket references, soref(), and sorele().
 * Currently, these are generally required only when transitioning a socket
 * from a listen queue to a file descriptor, in order to prevent garbage
 * collection of the socket at an untimely moment.  For a number of reasons,
 * these interfaces are not preferred, and should be avoided.
 */
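/*
 * Example (illustrative sketch, not normative): a kernel consumer following
 * the life cycle above pairs socreate() with soclose(), and does not
 * normally touch soref()/sorele() directly.  PF_INET/SOCK_DGRAM/IPPROTO_UDP
 * are arbitrary choices here, 'td' is assumed to be curthread, and error
 * handling is abbreviated:
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(PF_INET, &so, SOCK_DGRAM, IPPROTO_UDP,
 *	    td->td_ucred, td);
 *	if (error != 0)
 *		return (error);
 *	... use the socket via sobind()/soconnect()/sosend()/soreceive() ...
 *	soclose(so);
 */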
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_zero.h"
#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <net/route.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>

#include <security/mac/mac_framework.h>

#ifdef COMPAT_IA32
#include <sys/mount.h>
#include <compat/freebsd32/freebsd32.h>

extern struct sysentvec ia32_freebsd_sysvec;
#endif
static int	soreceive_rcvoob(struct socket *so, struct uio *uio,
		    int flags);

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static struct filterops solisten_filtops =
	{ 1, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ 1, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ 1, NULL, filt_sowdetach, filt_sowrite };

uma_zone_t socket_zone;
so_gen_t so_gencnt;	/* generation count for sockets */

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

static int somaxconn = SOMAXCONN;
static int sysctl_somaxconn(SYSCTL_HANDLER_ARGS);
/* XXX: we don't have SYSCTL_USHORT */
SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLTYPE_UINT | CTLFLAG_RW,
    0, sizeof(int), sysctl_somaxconn, "I", "Maximum pending socket connection "
    "queue size");
static int numopensockets;
SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
    &numopensockets, 0, "Number of open sockets");
#ifdef ZERO_COPY_SOCKETS
/* These aren't static because they're used in other files. */
int so_zero_copy_send = 1;
int so_zero_copy_receive = 1;
SYSCTL_NODE(_kern_ipc, OID_AUTO, zero_copy, CTLFLAG_RD, 0,
    "Zero copy controls");
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, receive, CTLFLAG_RW,
    &so_zero_copy_receive, 0, "Enable zero copy receive");
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, send, CTLFLAG_RW,
    &so_zero_copy_send, 0, "Enable zero copy send");
#endif /* ZERO_COPY_SOCKETS */
/*
 * accept_mtx locks down per-socket fields relating to accept queues.  See
 * socketvar.h for an annotation of the protected fields of struct socket.
 */
struct mtx accept_mtx;
MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);

/*
 * so_global_mtx protects so_gencnt, numopensockets, and the per-socket
 * so_gencnt field.
 */
static struct mtx so_global_mtx;
MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_glabel", MTX_DEF);

/*
 * General IPC sysctl name space, used by sockets and a variety of other IPC
 * mechanisms.
 */
SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");
/*
 * Sysctl to get and set the maximum global sockets limit.  Notify protocols
 * of the change so that they can update their dependent limits as required.
 */
static int
sysctl_maxsockets(SYSCTL_HANDLER_ARGS)
{
	int error, newmaxsockets;

	newmaxsockets = maxsockets;
	error = sysctl_handle_int(oidp, &newmaxsockets, 0, req);
	if (error == 0 && req->newptr) {
		if (newmaxsockets > maxsockets) {
			maxsockets = newmaxsockets;
			if (maxsockets > ((maxfiles / 4) * 3)) {
				maxfiles = (maxsockets * 5) / 4;
				maxfilesperproc = (maxfiles * 9) / 10;
			}
			EVENTHANDLER_INVOKE(maxsockets_change);
		} else
			error = EINVAL;
	}
	return (error);
}

SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets, CTLTYPE_INT|CTLFLAG_RW,
    &maxsockets, 0, sysctl_maxsockets, "IU",
    "Maximum number of sockets available");

/*
 * Initialise maxsockets.
 */
static void init_maxsockets(void *ignored)
{

	TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
	maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters));
}
SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);
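/*
 * Example: because maxsockets is fetched as a tunable at boot before the
 * sysctl becomes writable, it can be seeded from loader.conf, e.g.:
 *
 *	kern.ipc.maxsockets="131072"
 *
 * and later raised (but, per the handler above, never lowered) at runtime:
 *
 *	sysctl kern.ipc.maxsockets=262144
 */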
/*
 * Socket operation routines.  These routines are called by the routines in
 * sys_socket.c or from a system process, and implement the semantics of
 * socket operations by switching out to the protocol specific routines.
 */

/*
 * Get a socket structure from our zone, and initialize it.  Note that it
 * would probably be better to allocate socket and PCB at the same time, but
 * I'm not convinced that all the protocols can be easily modified to do
 * this.
 *
 * soalloc() returns a socket with a ref count of 0.
 */
static struct socket *
soalloc(void)
{
	struct socket *so;

	so = uma_zalloc(socket_zone, M_NOWAIT | M_ZERO);
	if (so == NULL)
		return (NULL);
#ifdef MAC
	if (mac_socket_init(so, M_NOWAIT) != 0) {
		uma_zfree(socket_zone, so);
		return (NULL);
	}
#endif
	SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
	SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
	sx_init(&so->so_snd.sb_sx, "so_snd_sx");
	sx_init(&so->so_rcv.sb_sx, "so_rcv_sx");
	TAILQ_INIT(&so->so_aiojobq);
	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	++numopensockets;
	mtx_unlock(&so_global_mtx);
	return (so);
}
/*
 * Free the storage associated with a socket at the socket layer, tear down
 * locks, labels, etc.  All protocol state is assumed already to have been
 * torn down (and possibly never set up) by the caller.
 */
static void
sodealloc(struct socket *so)
{

	KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
	KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL"));

	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	--numopensockets;	/* Could be below, but faster here. */
	mtx_unlock(&so_global_mtx);
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
	/* remove accept filter if one is present. */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
#endif
#ifdef MAC
	mac_socket_destroy(so);
#endif
	crfree(so->so_cred);
	sx_destroy(&so->so_snd.sb_sx);
	sx_destroy(&so->so_rcv.sb_sx);
	SOCKBUF_LOCK_DESTROY(&so->so_snd);
	SOCKBUF_LOCK_DESTROY(&so->so_rcv);
	uma_zfree(socket_zone, so);
}
/*
 * socreate returns a socket with a ref count of 1.  The socket should be
 * closed with soclose().
 */
int
socreate(int dom, struct socket **aso, int type, int proto,
    struct ucred *cred, struct thread *td)
{
	struct protosw *prp;
	struct socket *so;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL || prp->pr_usrreqs->pru_attach == NULL ||
	    prp->pr_usrreqs->pru_attach == pru_attach_notsupp)
		return (EPROTONOSUPPORT);

	if (jailed(cred) && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc();
	if (so == NULL)
		return (ENOBUFS);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(cred);
	if ((prp->pr_domain->dom_family == PF_INET) ||
	    (prp->pr_domain->dom_family == PF_ROUTE))
		so->so_fibnum = td->td_proc->p_fibnum;
	else
		so->so_fibnum = 0;
	so->so_proto = prp;
#ifdef MAC
	mac_socket_create(cred, so);
#endif
	knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv),
	    NULL, NULL, NULL);
	knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd),
	    NULL, NULL, NULL);
	so->so_count = 1;
	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
	if (error) {
		KASSERT(so->so_count == 1, ("socreate: so_count %d",
		    so->so_count));
		so->so_count = 0;
		sodealloc(so);
		return (error);
	}
	*aso = so;
	return (0);
}
#ifdef REGRESSION
static int regression_sonewconn_earlytest = 1;
SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW,
    &regression_sonewconn_earlytest, 0, "Perform early sonewconn limit test");
#endif

/*
 * When an attempt at a new connection is noted on a socket which accepts
 * connections, sonewconn is called.  If the connection is possible (subject
 * to space constraints, etc.) then we allocate a new structure, properly
 * linked into the data structure of the original socket, and return this.
 * Connstatus may be 0, or SO_ISCONFIRMING, or SO_ISCONNECTED.
 *
 * Note: the ref count on the socket is 0 on return.
 */
struct socket *
sonewconn(struct socket *head, int connstatus)
{
	struct socket *so;
	int over;

	ACCEPT_LOCK();
	over = (head->so_qlen > 3 * head->so_qlimit / 2);
	ACCEPT_UNLOCK();
#ifdef REGRESSION
	if (regression_sonewconn_earlytest && over)
		return (NULL);
#endif
	if (over)
		return (NULL);
	so = soalloc();
	if (so == NULL)
		return (NULL);
	if ((head->so_options & SO_ACCEPTFILTER) != 0)
		connstatus = 0;
	so->so_head = head;
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state | SS_NOFDREF;
	so->so_proto = head->so_proto;
	so->so_cred = crhold(head->so_cred);
#ifdef MAC
	mac_socket_newconn(head, so);
#endif
	knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv),
	    NULL, NULL, NULL);
	knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd),
	    NULL, NULL, NULL);
	if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat) ||
	    (*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
		sodealloc(so);
		return (NULL);
	}
	so->so_rcv.sb_lowat = head->so_rcv.sb_lowat;
	so->so_snd.sb_lowat = head->so_snd.sb_lowat;
	so->so_rcv.sb_timeo = head->so_rcv.sb_timeo;
	so->so_snd.sb_timeo = head->so_snd.sb_timeo;
	so->so_rcv.sb_flags |= head->so_rcv.sb_flags & SB_AUTOSIZE;
	so->so_snd.sb_flags |= head->so_snd.sb_flags & SB_AUTOSIZE;
	so->so_state |= connstatus;
	ACCEPT_LOCK();
	if (connstatus) {
		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
		so->so_qstate |= SQ_COMP;
		head->so_qlen++;
	} else {
		/*
		 * Keep removing sockets from the head until there's room for
		 * us to insert on the tail.  In pre-locking revisions, this
		 * was a simple if(), but as we could be racing with other
		 * threads and soabort() requires dropping locks, we must
		 * loop waiting for the condition to be true.
		 */
		while (head->so_incqlen > head->so_qlimit) {
			struct socket *sp;

			sp = TAILQ_FIRST(&head->so_incomp);
			TAILQ_REMOVE(&head->so_incomp, sp, so_list);
			head->so_incqlen--;
			sp->so_qstate &= ~SQ_INCOMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
		so->so_qstate |= SQ_INCOMP;
		head->so_incqlen++;
	}
	ACCEPT_UNLOCK();
	if (connstatus) {
		sowakeup(head, &head->so_rcv);
		wakeup_one(&head->so_timeo);
	}
	return (so);
}
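/*
 * Example (illustrative sketch): a protocol noting a new connection request
 * on a listening socket 'head' would typically call sonewconn() and drop
 * the request if the queue limits enforced above cause it to fail; later,
 * when the handshake completes, soisconnected() moves the socket to the
 * completed queue for accept():
 *
 *	struct socket *so;
 *
 *	so = sonewconn(head, 0);
 *	if (so == NULL)
 *		goto drop;
 *	... initialize protocol state for 'so' ...
 *	soisconnected(so);
 */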
int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{

	return ((*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td));
}
/*
 * solisten() transitions a socket from a non-listening state to a listening
 * state, but can also be used to update the listen queue depth on an
 * existing listen socket.  The protocol will call back into the sockets
 * layer using solisten_proto_check() and solisten_proto() to check and set
 * socket-layer listen state.  Call backs are used so that the protocol can
 * acquire both protocol and socket layer locks in whatever order is required
 * by the protocol.
 *
 * Protocol implementors are advised to hold the socket lock across the
 * socket-layer test and set to avoid races at the socket layer.
 */
int
solisten(struct socket *so, int backlog, struct thread *td)
{

	return ((*so->so_proto->pr_usrreqs->pru_listen)(so, backlog, td));
}
int
solisten_proto_check(struct socket *so)
{

	SOCK_LOCK_ASSERT(so);

	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
	    SS_ISDISCONNECTING))
		return (EINVAL);
	return (0);
}

void
solisten_proto(struct socket *so, int backlog)
{

	SOCK_LOCK_ASSERT(so);

	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	so->so_options |= SO_ACCEPTCONN;
}
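/*
 * Example (illustrative sketch): a protocol's pru_listen method typically
 * brackets the two callbacks above with its own lock and the socket lock.
 * FOO_LOCK()/FOO_UNLOCK() stand in for a hypothetical protocol lock:
 *
 *	static int
 *	foo_listen(struct socket *so, int backlog, struct thread *td)
 *	{
 *		int error;
 *
 *		FOO_LOCK();
 *		SOCK_LOCK(so);
 *		error = solisten_proto_check(so);
 *		if (error == 0)
 *			solisten_proto(so, backlog);
 *		SOCK_UNLOCK(so);
 *		FOO_UNLOCK();
 *		return (error);
 *	}
 */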
/*
 * Attempt to free a socket.  This should really be sotryfree().
 *
 * sofree() will succeed if:
 *
 * - There are no outstanding file descriptor references or related consumers
 *   (so_count == 0).
 *
 * - The socket has been closed by user space, if ever open (SS_NOFDREF).
 *
 * - The protocol does not have an outstanding strong reference on the socket
 *   (SS_PROTOREF).
 *
 * - The socket is not in a completed connection queue, so a process has been
 *   notified that it is present.  If it is removed, the user process may
 *   block in accept() despite select() saying the socket was ready.
 *
 * Otherwise, it will quietly abort so that a future call to sofree(), when
 * conditions are right, can succeed.
 */
void
sofree(struct socket *so)
{
	struct protosw *pr = so->so_proto;
	struct socket *head;

	ACCEPT_LOCK_ASSERT();
	SOCK_LOCK_ASSERT(so);

	if ((so->so_state & SS_NOFDREF) == 0 || so->so_count != 0 ||
	    (so->so_state & SS_PROTOREF) || (so->so_qstate & SQ_COMP)) {
		SOCK_UNLOCK(so);
		ACCEPT_UNLOCK();
		return;
	}

	head = so->so_head;
	if (head != NULL) {
		KASSERT((so->so_qstate & SQ_COMP) != 0 ||
		    (so->so_qstate & SQ_INCOMP) != 0,
		    ("sofree: so_head != NULL, but neither SQ_COMP nor "
		    "SQ_INCOMP"));
		KASSERT((so->so_qstate & SQ_COMP) == 0 ||
		    (so->so_qstate & SQ_INCOMP) == 0,
		    ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP"));
		TAILQ_REMOVE(&head->so_incomp, so, so_list);
		head->so_incqlen--;
		so->so_qstate &= ~SQ_INCOMP;
		so->so_head = NULL;
	}
	KASSERT((so->so_qstate & SQ_COMP) == 0 &&
	    (so->so_qstate & SQ_INCOMP) == 0,
	    ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)",
	    so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP));
	if (so->so_options & SO_ACCEPTCONN) {
		KASSERT((TAILQ_EMPTY(&so->so_comp)), ("sofree: so_comp populated"));
		KASSERT((TAILQ_EMPTY(&so->so_incomp)), ("sofree: so_incomp populated"));
	}
	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();

	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
		(*pr->pr_domain->dom_dispose)(so->so_rcv.sb_mb);
	if (pr->pr_usrreqs->pru_detach != NULL)
		(*pr->pr_usrreqs->pru_detach)(so);

	/*
	 * From this point on, we assume that no other references to this
	 * socket exist anywhere else in the stack.  Therefore, no locks need
	 * to be acquired or held.
	 *
	 * We used to do a lot of socket buffer and socket locking here, as
	 * well as invoke sorflush() and perform wakeups.  The direct call to
	 * dom_dispose() and sbrelease_internal() are an inlining of what was
	 * necessary from sorflush().
	 *
	 * Notice that the socket buffer and kqueue state are torn down
	 * before calling pru_detach.  This means that protocols should not
	 * assume they can perform socket wakeups, etc, in their detach code.
	 */
	sbdestroy(&so->so_snd, so);
	sbdestroy(&so->so_rcv, so);
	knlist_destroy(&so->so_rcv.sb_sel.si_note);
	knlist_destroy(&so->so_snd.sb_sel.si_note);
	sodealloc(so);
}
/*
 * Close a socket on last file table reference removal.  Initiate disconnect
 * if connected.  Free socket when disconnect complete.
 *
 * This function will sorele() the socket.  Note that soclose() may be called
 * prior to the ref count reaching zero.  The actual socket structure will
 * not be freed until the ref count reaches zero.
 */
int
soclose(struct socket *so)
{
	int error = 0;

	KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter"));

	funsetown(&so->so_sigio);
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error) {
				if (error == ENOTCONN)
					error = 0;
				goto drop;
			}
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo,
				    PSOCK | PCATCH, "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}

drop:
	if (so->so_proto->pr_usrreqs->pru_close != NULL)
		(*so->so_proto->pr_usrreqs->pru_close)(so);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;
		ACCEPT_LOCK();
		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			so->so_incqlen--;
			sp->so_qstate &= ~SQ_INCOMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			sp->so_qstate &= ~SQ_COMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		ACCEPT_UNLOCK();
	}
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF"));
	so->so_state |= SS_NOFDREF;
	sorele(so);
	return (error);
}
/*
 * soabort() is used to abruptly tear down a connection, such as when a
 * resource limit is reached (listen queue depth exceeded), or if a listen
 * socket is closed while there are sockets waiting to be accepted.
 *
 * This interface is tricky, because it is called on an unreferenced socket,
 * and must be called only by a thread that has actually removed the socket
 * from the listen queue it was on, or races with other threads are risked.
 *
 * This interface will call into the protocol code, so must not be called
 * with any socket locks held.  Protocols do call it while holding their own
 * recursible protocol mutexes, but this is something that should be subject
 * to review in the future.
 */
void
soabort(struct socket *so)
{

	/*
	 * In as much as is possible, assert that no references to this
	 * socket are held.  This is not quite the same as asserting that the
	 * current thread is responsible for arranging for no references, but
	 * is as close as we can get for now.
	 */
	KASSERT(so->so_count == 0, ("soabort: so_count"));
	KASSERT((so->so_state & SS_PROTOREF) == 0, ("soabort: SS_PROTOREF"));
	KASSERT(so->so_state & SS_NOFDREF, ("soabort: !SS_NOFDREF"));
	KASSERT((so->so_qstate & SQ_COMP) == 0, ("soabort: SQ_COMP"));
	KASSERT((so->so_qstate & SQ_INCOMP) == 0, ("soabort: SQ_INCOMP"));

	if (so->so_proto->pr_usrreqs->pru_abort != NULL)
		(*so->so_proto->pr_usrreqs->pru_abort)(so);
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	sofree(so);
}
int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int error;

	SOCK_LOCK(so);
	KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF"));
	so->so_state &= ~SS_NOFDREF;
	SOCK_UNLOCK(so);
	error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
	return (error);
}
int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.  This allows
	 * user to disconnect by connecting to, e.g., a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from previous connection from
		 * biting us.
		 */
		so->so_error = 0;
		error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, td);
	}

	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{

	return ((*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2));
}
int
sodisconnect(struct socket *so)
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);
	if (so->so_state & SS_ISDISCONNECTING)
		return (EALREADY);
	error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
	return (error);
}
#ifdef ZERO_COPY_SOCKETS
struct so_zerocopy_stats{
	int size_ok;
	int align_ok;
	int found_ifp;
};
struct so_zerocopy_stats so_zerocp_stats = {0,0,0};

#include <netinet/in.h>
#include <net/route.h>
#include <netinet/in_pcb.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
/*
 * sosend_copyin() is only used if zero copy sockets are enabled.  Otherwise
 * sosend_dgram() and sosend_generic() use m_uiotombuf().
 *
 * sosend_copyin() accepts a uio and prepares an mbuf chain holding part or
 * all of the data referenced by the uio.  If desired, it uses zero-copy.
 * *space will be updated to reflect data copied in.
 *
 * NB: If atomic I/O is requested, the caller must already have checked that
 * space can hold resid bytes.
 *
 * NB: In the event of an error, the caller may need to free the partial
 * chain pointed to by *mpp.  The contents of both *uio and *space may be
 * modified even in the case of an error.
 */
static int
sosend_copyin(struct uio *uio, struct mbuf **retmp, int atomic, long *space,
    int flags)
{
	struct mbuf *m, **mp, *top;
	long len, resid;
	int error;
#ifdef ZERO_COPY_SOCKETS
	int cow_send;
#endif

	resid = uio->uio_resid;
	mp = &top;
	do {
#ifdef ZERO_COPY_SOCKETS
		cow_send = 0;
#endif /* ZERO_COPY_SOCKETS */
		if (resid >= MINCLSIZE) {
#ifdef ZERO_COPY_SOCKETS
			if (top == NULL) {
				m = m_gethdr(M_WAITOK, MT_DATA);
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;
			} else
				m = m_get(M_WAITOK, MT_DATA);
			if (so_zero_copy_send &&
			    resid >= PAGE_SIZE &&
			    *space >= PAGE_SIZE &&
			    uio->uio_iov->iov_len>=PAGE_SIZE) {
				so_zerocp_stats.size_ok++;
				so_zerocp_stats.align_ok++;
				cow_send = socow_setup(m, uio);
				len = cow_send;
			}
			if (!cow_send) {
				m_clget(m, M_WAITOK);
				len = min(min(MCLBYTES, resid), *space);
			}
#else /* ZERO_COPY_SOCKETS */
			if (top == NULL) {
				m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;
			} else
				m = m_getcl(M_WAIT, MT_DATA, 0);
			len = min(min(MCLBYTES, resid), *space);
#endif /* ZERO_COPY_SOCKETS */
		} else {
			if (top == NULL) {
				m = m_gethdr(M_WAIT, MT_DATA);
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;

				len = min(min(MHLEN, resid), *space);
				/*
				 * For datagram protocols, leave room
				 * for protocol headers in first mbuf.
				 */
				if (atomic && m && len < MHLEN)
					MH_ALIGN(m, len);
			} else {
				m = m_get(M_WAIT, MT_DATA);
				len = min(min(MLEN, resid), *space);
			}
		}
		if (m == NULL) {
			error = ENOBUFS;
			goto out;
		}

		*space -= len;
#ifdef ZERO_COPY_SOCKETS
		if (cow_send)
			error = 0;
		else
#endif /* ZERO_COPY_SOCKETS */
		error = uiomove(mtod(m, void *), (int)len, uio);
		resid = uio->uio_resid;
		m->m_len = len;
		*mp = m;
		top->m_pkthdr.len += len;
		if (error)
			goto out;
		mp = &m->m_next;
		if (resid <= 0) {
			if (flags & MSG_EOR)
				top->m_flags |= M_EOR;
			break;
		}
	} while (*space > 0 && atomic);
out:
	*retmp = top;
	return (error);
}
#endif /*ZERO_COPY_SOCKETS*/

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)
int
sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	long space, resid;
	int clen = 0, error, dontroute;
#ifdef ZERO_COPY_SOCKETS
	int atomic = sosendallatonce(so) || top;
#endif

	KASSERT(so->so_type == SOCK_DGRAM, ("sodgram_send: !SOCK_DGRAM"));
	KASSERT(so->so_proto->pr_flags & PR_ATOMIC,
	    ("sodgram_send: !PR_ATOMIC"));

	if (uio != NULL)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.  However, space must be
	 * signed, as it might be less than 0 if we over-committed, and we
	 * must use a signed comparison of space and resid.  On the other
	 * hand, a negative resid causes us to loop sending 0-length
	 * segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0;
	if (td != NULL)
		td->td_ru.ru_msgsnd++;
	if (control != NULL)
		clen = control->m_len;

	SOCKBUF_LOCK(&so->so_snd);
	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		SOCKBUF_UNLOCK(&so->so_snd);
		error = EPIPE;
		goto out;
	}
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		SOCKBUF_UNLOCK(&so->so_snd);
		goto out;
	}
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		/*
		 * `sendto' and `sendmsg' is allowed on a connection-based
		 * socket if it supports implied connect.  Return ENOTCONN if
		 * not connected and no address is supplied.
		 */
		if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
		    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
			if ((so->so_state & SS_ISCONFIRMING) == 0 &&
			    !(resid == 0 && clen != 0)) {
				SOCKBUF_UNLOCK(&so->so_snd);
				error = ENOTCONN;
				goto out;
			}
		} else if (addr == NULL) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED)
				error = ENOTCONN;
			else
				error = EDESTADDRREQ;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto out;
		}
	}

	/*
	 * Do we need MSG_OOB support in SOCK_DGRAM?  Signs here may be a
	 * problem and need fixing.
	 */
	space = sbspace(&so->so_snd);
	if (flags & MSG_OOB)
		space += 1024;
	space -= clen;
	SOCKBUF_UNLOCK(&so->so_snd);
	if (resid > space) {
		error = EMSGSIZE;
		goto out;
	}
	if (uio == NULL) {
		resid = 0;
		if (flags & MSG_EOR)
			top->m_flags |= M_EOR;
	} else {
#ifdef ZERO_COPY_SOCKETS
		error = sosend_copyin(uio, &top, atomic, &space, flags);
		if (error)
			goto out;
#else
		/*
		 * Copy the data from userland into a mbuf chain.
		 * If no data is to be copied in, a single empty mbuf
		 * is returned.
		 */
		top = m_uiotombuf(uio, M_WAITOK, space, max_hdr,
		    (M_PKTHDR | ((flags & MSG_EOR) ? M_EOR : 0)));
		if (top == NULL) {
			error = EFAULT;	/* only possible error */
			goto out;
		}
		space -= resid - uio->uio_resid;
#endif
		resid = uio->uio_resid;
	}
	KASSERT(resid == 0, ("sosend_dgram: resid != 0"));
	/*
	 * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock
	 * than with.
	 */
	if (dontroute) {
		SOCK_LOCK(so);
		so->so_options |= SO_DONTROUTE;
		SOCK_UNLOCK(so);
	}
	/*
	 * XXX all the SBS_CANTSENDMORE checks previously done could be out
	 * of date.  We could have received a reset packet in an interrupt or
	 * maybe we slept while doing page faults in uiomove() etc.  We could
	 * probably recheck again inside the locking protection here, but
	 * there are probably other places that this also happens.  We must
	 * rethink this.
	 */
	error = (*so->so_proto->pr_usrreqs->pru_send)(so,
	    (flags & MSG_OOB) ? PRUS_OOB :
	/*
	 * If the user set MSG_EOF, the protocol understands this flag and
	 * nothing left to send then use PRU_SEND_EOF instead of PRU_SEND.
	 */
	    ((flags & MSG_EOF) &&
	     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
	     (resid <= 0)) ?
		PRUS_EOF :
		/* If there is more to send set PRUS_MORETOCOME */
		(resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
	    top, addr, control, td);
	if (dontroute) {
		SOCK_LOCK(so);
		so->so_options &= ~SO_DONTROUTE;
		SOCK_UNLOCK(so);
	}
	clen = 0;
	control = NULL;
	top = NULL;
out:
	if (top != NULL)
		m_freem(top);
	if (control != NULL)
		m_freem(control);
	return (error);
}
/*
 * Send on a socket.  If send must go all at once and message is larger than
 * send buffering, then hard error.  Lock against other senders.  If must go
 * all at once and not enough room now, then inform user that this would
 * block and do nothing.  Otherwise, if nonblocking, send as much as
 * possible.  The data to be sent is described by "uio" if nonzero, otherwise
 * by the mbuf chain "top" (which must be null if uio is not).  Data provided
 * in mbuf chain must be small enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers must check for short
 * counts if EINTR/ERESTART are returned.  Data and control buffers are freed
 * on return.
 */
int
sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	long space, resid;
	int clen = 0, error, dontroute;
	int atomic = sosendallatonce(so) || top;

	if (uio != NULL)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.  However, space must be
	 * signed, as it might be less than 0 if we over-committed, and we
	 * must use a signed comparison of space and resid.  On the other
	 * hand, a negative resid causes us to loop sending 0-length
	 * segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td != NULL)
		td->td_ru.ru_msgsnd++;
	if (control != NULL)
		clen = control->m_len;

	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

restart:
	do {
		SOCKBUF_LOCK(&so->so_snd);
		if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = EPIPE;
			goto release;
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' is allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0)) {
					SOCKBUF_UNLOCK(&so->so_snd);
					error = ENOTCONN;
					goto release;
				}
			} else if (addr == NULL) {
				SOCKBUF_UNLOCK(&so->so_snd);
				if (so->so_proto->pr_flags & PR_CONNREQUIRED)
					error = ENOTCONN;
				else
					error = EDESTADDRREQ;
				goto release;
			}
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = EMSGSIZE;
			goto release;
		}
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO)) {
				SOCKBUF_UNLOCK(&so->so_snd);
				error = EWOULDBLOCK;
				goto release;
			}
			error = sbwait(&so->so_snd);
			SOCKBUF_UNLOCK(&so->so_snd);
			if (error)
				goto release;
			goto restart;
		}
		SOCKBUF_UNLOCK(&so->so_snd);
		space -= clen;
		do {
			if (uio == NULL) {
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else {
#ifdef ZERO_COPY_SOCKETS
				error = sosend_copyin(uio, &top, atomic,
				    &space, flags);
				if (error != 0)
					goto release;
#else
				/*
				 * Copy the data from userland into a mbuf
				 * chain.  If no data is to be copied in,
				 * a single empty mbuf is returned.
				 */
				top = m_uiotombuf(uio, M_WAITOK, space,
				    (atomic ? max_hdr : 0),
				    (atomic ? M_PKTHDR : 0) |
				    ((flags & MSG_EOR) ? M_EOR : 0));
				if (top == NULL) {
					error = EFAULT; /* only possible error */
					goto release;
				}
				space -= resid - uio->uio_resid;
#endif
				resid = uio->uio_resid;
			}
			if (dontroute) {
				SOCK_LOCK(so);
				so->so_options |= SO_DONTROUTE;
				SOCK_UNLOCK(so);
			}
			/*
			 * XXX all the SBS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We
			 * could probably recheck again inside the locking
			 * protection here, but there are probably other
			 * places that this also happens.  We must rethink
			 * this.
			 */
			error = (*so->so_proto->pr_usrreqs->pru_send)(so,
			    (flags & MSG_OOB) ? PRUS_OOB :
			/*
			 * If the user set MSG_EOF, the protocol understands
			 * this flag and nothing left to send then use
			 * PRU_SEND_EOF instead of PRU_SEND.
			 */
			    ((flags & MSG_EOF) &&
			     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			     (resid <= 0)) ?
				PRUS_EOF :
			/* If there is more to send set PRUS_MORETOCOME. */
			    (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
			    top, addr, control, td);
			if (dontroute) {
				SOCK_LOCK(so);
				so->so_options &= ~SO_DONTROUTE;
				SOCK_UNLOCK(so);
			}
			clen = 0;
			control = NULL;
			top = NULL;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	sbunlock(&so->so_snd);
out:
	if (top != NULL)
		m_freem(top);
	if (control != NULL)
		m_freem(control);
	return (error);
}

int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{

	/* XXXRW: Temporary debugging. */
	KASSERT(so->so_proto->pr_usrreqs->pru_sosend != sosend,
	    ("sosend: protocol calls sosend"));

	return (so->so_proto->pr_usrreqs->pru_sosend(so, addr, uio, top,
	    control, flags, td));
}
/*
 * The part of soreceive() that implements reading non-inline out-of-band
 * data from a socket.  For more complete comments, see soreceive(), from
 * which this code originated.
 *
 * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is
 * unable to return an mbuf chain to the caller.
 */
static int
soreceive_rcvoob(struct socket *so, struct uio *uio, int flags)
{
	struct protosw *pr = so->so_proto;
	struct mbuf *m;
	int error;

	KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0"));

	m = m_get(M_WAIT, MT_DATA);
	error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
	if (error)
		goto bad;
	do {
#ifdef ZERO_COPY_SOCKETS
		if (so_zero_copy_receive) {
			int disposable;

			if ((m->m_flags & M_EXT)
			 && (m->m_ext.ext_type == EXT_DISPOSABLE))
				disposable = 1;
			else
				disposable = 0;

			error = uiomoveco(mtod(m, void *),
			    min(uio->uio_resid, m->m_len),
			    uio, disposable);
		} else
#endif /* ZERO_COPY_SOCKETS */
		error = uiomove(mtod(m, void *),
		    (int) min(uio->uio_resid, m->m_len), uio);
		m = m_free(m);
	} while (uio->uio_resid && error == 0 && m);
bad:
	if (m != NULL)
		m_freem(m);
	return (error);
}
/*
 * Following replacement or removal of the first mbuf on the first mbuf chain
 * of a socket buffer, push necessary state changes back into the socket
 * buffer so that other consumers see the values consistently.  'nextrecord'
 * is the caller's locally stored value of the original value of
 * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes.
 * NOTE: 'nextrecord' may be NULL.
 */
static __inline void
sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord)
{

	SOCKBUF_LOCK_ASSERT(sb);
	/*
	 * First, update for the new value of nextrecord.  If necessary, make
	 * it the first record.
	 */
	if (sb->sb_mb != NULL)
		sb->sb_mb->m_nextpkt = nextrecord;
	else
		sb->sb_mb = nextrecord;

	/*
	 * Now update any dependent socket buffer fields to reflect the new
	 * state.  This is an expanded inline of SB_EMPTY_FIXUP(), with the
	 * addition of a second clause that takes care of the case where
	 * sb_mb has been updated, but remains the last record.
	 */
	if (sb->sb_mb == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (sb->sb_mb->m_nextpkt == NULL)
		sb->sb_lastrecord = sb->sb_mb;
}
/*
 * Implement receive operations on a socket.  We depend on the way that
 * records are added to the sockbuf by sbappend.  In particular, each record
 * (mbufs linked through m_next) must begin with an address if the protocol
 * so specifies, followed by an optional mbuf or mbufs containing ancillary
 * data, and then zero or more mbufs of data.  In order to allow parallelism
 * between network receive and copying to user space, as well as avoid
 * sleeping with a mutex held, we release the socket buffer mutex during the
 * user space copy.  Although the sockbuf is locked, new data may still be
 * appended, and thus we must maintain consistency of the sockbuf during that
 * time.
 *
 * The caller may receive the data as a single mbuf chain by supplying an
 * mbuf **mp0 for use in returning the chain.  The uio is then used only for
 * the count in uio_resid.
 */
int
soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, **mp;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff, type = 0;
	int orig_resid = uio->uio_resid;

	mp = mp0;
	if (psa != NULL)
		*psa = NULL;
	if (controlp != NULL)
		*controlp = NULL;
	if (flagsp != NULL)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB)
		return (soreceive_rcvoob(so, uio, flags));
	if (mp != NULL)
		*mp = NULL;
	if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING)
	    && uio->uio_resid)
		(*pr->pr_usrreqs->pru_rcvd)(so, 0);

	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		return (error);

restart:
	SOCKBUF_LOCK(&so->so_rcv);
	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more (subject
	 * to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning a
	 * short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.sb_cc,
		    ("receive: m == %p so->so_rcv.sb_cc == %u",
		    m, so->so_rcv.sb_cc));
		if (so->so_error) {
			if (m != NULL)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			SOCKBUF_UNLOCK(&so->so_rcv);
			goto release;
		}
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			if (m == NULL) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				goto release;
			} else
				goto dontblock;
		}
		for (; m != NULL; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			goto release;
		}
		if ((so->so_state & SS_NBIO) ||
		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			error = EWOULDBLOCK;
			goto release;
		}
		SBLASTRECORDCHK(&so->so_rcv);
		SBLASTMBUFCHK(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		SOCKBUF_UNLOCK(&so->so_rcv);
		if (error)
			goto release;
		goto restart;
	}
dontblock:
	/*
	 * From this point onward, we maintain 'nextrecord' as a cache of the
	 * pointer to the next record in the socket buffer.  We must keep the
	 * various socket buffer pointers and local stack versions of the
	 * pointers in sync, pushing out modifications before dropping the
	 * socket buffer mutex, and re-reading them when picking it up.
	 *
	 * Otherwise, we will race with the network stack appending new data
	 * or records onto the socket buffer by using inconsistent/stale
	 * versions of the field, possibly resulting in socket buffer
	 * corruption.
	 *
	 * By holding the high-level sblock(), we prevent simultaneous
	 * readers from pulling off the front of the socket buffer.
	 */
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (uio->uio_td)
		uio->uio_td->td_ru.ru_msgrcv++;
	KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME,
		    ("m->m_type == %d", m->m_type));
		orig_resid = 0;
		if (psa != NULL)
			*psa = sodupsockaddr(mtod(m, struct sockaddr *),
			    M_NOWAIT);
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			so->so_rcv.sb_mb = m_free(m);
			m = so->so_rcv.sb_mb;
			sockbuf_pushsync(&so->so_rcv, nextrecord);
		}
	}

	/*
	 * Process one or more MT_CONTROL mbufs present before any data mbufs
	 * in the first mbuf chain on the socket buffer.  If MSG_PEEK, we
	 * just copy the data; if !MSG_PEEK, we call into the protocol to
	 * perform externalization (or freeing if controlp == NULL).
	 */
	if (m != NULL && m->m_type == MT_CONTROL) {
		struct mbuf *cm = NULL, *cmn;
		struct mbuf **cme = &cm;

		do {
			if (flags & MSG_PEEK) {
				if (controlp != NULL) {
					*controlp = m_copy(m, 0, m->m_len);
					controlp = &(*controlp)->m_next;
				}
				m = m->m_next;
			} else {
				sbfree(&so->so_rcv, m);
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = NULL;
				*cme = m;
				cme = &(*cme)->m_next;
				m = so->so_rcv.sb_mb;
			}
		} while (m != NULL && m->m_type == MT_CONTROL);
		if ((flags & MSG_PEEK) == 0)
			sockbuf_pushsync(&so->so_rcv, nextrecord);
		while (cm != NULL) {
			cmn = cm->m_next;
			cm->m_next = NULL;
			if (pr->pr_domain->dom_externalize != NULL) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				error = (*pr->pr_domain->dom_externalize)
				    (cm, controlp);
				SOCKBUF_LOCK(&so->so_rcv);
			} else if (controlp != NULL)
				*controlp = cm;
			else
				m_freem(cm);
			if (controlp != NULL) {
				orig_resid = 0;
				while (*controlp != NULL)
					controlp = &(*controlp)->m_next;
			}
			cm = cmn;
		}
		if (m != NULL)
			nextrecord = so->so_rcv.sb_mb->m_nextpkt;
		else
			nextrecord = so->so_rcv.sb_mb;
		orig_resid = 0;
	}
	if (m != NULL) {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(m->m_nextpkt == nextrecord,
			    ("soreceive: post-control, nextrecord !sync"));
			if (nextrecord == NULL) {
				KASSERT(so->so_rcv.sb_mb == m,
				    ("soreceive: post-control, sb_mb!=m"));
				KASSERT(so->so_rcv.sb_lastrecord == m,
				    ("soreceive: post-control, lastrecord!=m"));
			}
		}
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	} else {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(so->so_rcv.sb_mb == nextrecord,
			    ("soreceive: sb_mb != nextrecord"));
			if (so->so_rcv.sb_mb == NULL) {
				KASSERT(so->so_rcv.sb_lastrecord == NULL,
				    ("soreceive: sb_lastrecord != NULL"));
			}
		}
	}
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);

	/*
	 * Now continue to read any data mbufs off of the head of the socket
	 * buffer until the read request is satisfied.  Note that 'type' is
	 * used to store the type of any mbuf reads that have happened so far
	 * such that soreceive() can stop reading if the type changes, which
	 * causes soreceive() to return only one of regular data and inline
	 * out-of-band data in a single socket receive operation.
	 */
	moff = 0;
	offset = 0;
	while (m != NULL && uio->uio_resid > 0 && error == 0) {
		/*
		 * If the type of mbuf has changed since the last mbuf
		 * examined ('type'), end the receive operation.
		 */
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
			KASSERT(m->m_type == MT_DATA,
			    ("m->m_type == %d", m->m_type));
		so->so_rcv.sb_state &= ~SBS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.  Otherwise copy
		 * them out via the uio, then free.  Sockbuf must be
		 * consistent here (points to current mbuf, it points to next
		 * record) when we drop priority; we must note any additions
		 * to the sockbuf when we block interrupts again.
		 */
		if (mp == NULL) {
			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
			SBLASTRECORDCHK(&so->so_rcv);
			SBLASTMBUFCHK(&so->so_rcv);
			SOCKBUF_UNLOCK(&so->so_rcv);
#ifdef ZERO_COPY_SOCKETS
			if (so_zero_copy_receive) {
				int disposable;

				if ((m->m_flags & M_EXT)
				 && (m->m_ext.ext_type == EXT_DISPOSABLE))
					disposable = 1;
				else
					disposable = 0;

				error = uiomoveco(mtod(m, char *) + moff,
				    (int)len, uio,
				    disposable);
			} else
#endif /* ZERO_COPY_SOCKETS */
			error = uiomove(mtod(m, char *) + moff, (int)len, uio);
			SOCKBUF_LOCK(&so->so_rcv);
			if (error) {
				/*
				 * The MT_SONAME mbuf has already been removed
				 * from the record, so it is necessary to
				 * remove the data mbufs, if any, to preserve
				 * the invariant in the case of PR_ADDR that
				 * requires MT_SONAME mbufs at the head of
				 * each record.
				 */
				if (m && pr->pr_flags & PR_ATOMIC &&
				    ((flags & MSG_PEEK) == 0))
					(void)sbdroprecord_locked(&so->so_rcv);
				SOCKBUF_UNLOCK(&so->so_rcv);
				goto release;
			}
		} else
			uio->uio_resid -= len;
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp != NULL) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					so->so_rcv.sb_mb = m_free(m);
					m = so->so_rcv.sb_mb;
				}
				sockbuf_pushsync(&so->so_rcv, nextrecord);
				SBLASTRECORDCHK(&so->so_rcv);
				SBLASTMBUFCHK(&so->so_rcv);
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp != NULL) {
					int copy_flag;

					if (flags & MSG_DONTWAIT)
						copy_flag = M_DONTWAIT;
					else
						copy_flag = M_WAIT;
					if (copy_flag == M_WAIT)
						SOCKBUF_UNLOCK(&so->so_rcv);
					*mp = m_copym(m, 0, len, copy_flag);
					if (copy_flag == M_WAIT)
						SOCKBUF_LOCK(&so->so_rcv);
					if (*mp == NULL) {
						/*
						 * m_copym() couldn't
						 * allocate an mbuf.  Adjust
						 * uio_resid back (it was
						 * adjusted down by len
						 * bytes, which we didn't end
						 * up "copying" over).
						 */
						uio->uio_resid += len;
						break;
					}
					mp = &(*mp)->m_next;
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_rcv.sb_state |= SBS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket), we
		 * must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return with a
		 * short count but without error.  Keep sockbuf locked
		 * against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && nextrecord == NULL) {
			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
			if (so->so_error || so->so_rcv.sb_state & SBS_CANTRCVMORE)
				break;
			/*
			 * Notify the protocol that some data has been
			 * drained before blocking.
			 */
			if (pr->pr_flags & PR_WANTRCVD) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				(*pr->pr_usrreqs->pru_rcvd)(so, flags);
				SOCKBUF_LOCK(&so->so_rcv);
			}
			SBLASTRECORDCHK(&so->so_rcv);
			SBLASTMBUFCHK(&so->so_rcv);
			error = sbwait(&so->so_rcv);
			if (error) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				goto release;
			}
			m = so->so_rcv.sb_mb;
			if (m != NULL)
				nextrecord = m->m_nextpkt;
		}
	}

	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (m != NULL && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord_locked(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == NULL) {
			/*
			 * First part is an inline SB_EMPTY_FIXUP().  Second
			 * part makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv);
		SBLASTMBUFCHK(&so->so_rcv);
		/*
		 * If soreceive() is being done from the socket callback,
		 * then don't need to generate ACK to peer to update window,
		 * since ACK will be generated on return to TCP.
		 */
		if (!(flags & MSG_SOCALLBCK) &&
		    (pr->pr_flags & PR_WANTRCVD)) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			(*pr->pr_usrreqs->pru_rcvd)(so, flags);
			SOCKBUF_LOCK(&so->so_rcv);
		}
	}
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		goto restart;
	}
	SOCKBUF_UNLOCK(&so->so_rcv);

	if (flagsp != NULL)
		*flagsp |= flags;
release:
	sbunlock(&so->so_rcv);
	return (error);
}
/*
 * Optimized version of soreceive() for simple datagram cases from userspace;
 * this is experimental, and while heavily tested, may contain errors.
 */
int
soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *m2;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int orig_resid = uio->uio_resid;

	if (psa != NULL)
		*psa = NULL;
	if (controlp != NULL)
		*controlp = NULL;
	if (flagsp != NULL)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;

	/*
	 * For any complicated cases, fall back to the full
	 * soreceive_generic().
	 */
	if (mp0 != NULL || (flags & MSG_PEEK) || (flags & MSG_OOB))
		return (soreceive_generic(so, psa, uio, mp0, controlp,
		    flagsp));

	/*
	 * Enforce restrictions on use.
	 */
	KASSERT((pr->pr_flags & PR_WANTRCVD) == 0,
	    ("soreceive_dgram: wantrcvd"));
	KASSERT(pr->pr_flags & PR_ATOMIC, ("soreceive_dgram: !atomic"));
	KASSERT((so->so_rcv.sb_state & SBS_RCVATMARK) == 0,
	    ("soreceive_dgram: SBS_RCVATMARK"));
	KASSERT((so->so_proto->pr_flags & PR_CONNREQUIRED) == 0,
	    ("soreceive_dgram: PR_CONNREQUIRED"));

restart:
	SOCKBUF_LOCK(&so->so_rcv);
	m = so->so_rcv.sb_mb;

	/*
	 * If we have less data than requested, block awaiting more (subject
	 * to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning a
	 * short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL) {
		KASSERT(m != NULL || !so->so_rcv.sb_cc,
		    ("receive: m == %p so->so_rcv.sb_cc == %u",
		    m, so->so_rcv.sb_cc));
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			SOCKBUF_UNLOCK(&so->so_rcv);
			return (error);
		}
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			return (0);
		}
		if (uio->uio_resid == 0) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			return (0);
		}
		if ((so->so_state & SS_NBIO) ||
		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			return (EWOULDBLOCK);
		}
		SBLASTRECORDCHK(&so->so_rcv);
		SBLASTMBUFCHK(&so->so_rcv);

		/* XXXRW: sbwait() may not be as happy without sblock(). */
		error = sbwait(&so->so_rcv);
		SOCKBUF_UNLOCK(&so->so_rcv);
		if (error)
			return (error);
		goto restart;
	}

	/*
	 * From this point onward, we maintain 'nextrecord' as a cache of the
	 * pointer to the next record in the socket buffer.  We must keep the
	 * various socket buffer pointers and local stack versions of the
	 * pointers in sync, pushing out modifications before dropping the
	 * socket buffer mutex, and re-reading them when picking it up.
	 *
	 * Otherwise, we will race with the network stack appending new data
	 * or records onto the socket buffer by using inconsistent/stale
	 * versions of the field, possibly resulting in socket buffer
	 * corruption.
	 *
	 * By holding the high-level sblock(), we prevent simultaneous
	 * readers from pulling off the front of the socket buffer.
	 */
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (uio->uio_td)
		uio->uio_td->td_ru.ru_msgrcv++;
	KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME,
		    ("m->m_type == %d", m->m_type));
		orig_resid = 0;
		if (psa != NULL)
			*psa = sodupsockaddr(mtod(m, struct sockaddr *),
			    M_NOWAIT);
		sbfree(&so->so_rcv, m);
		so->so_rcv.sb_mb = m_free(m);
		m = so->so_rcv.sb_mb;
		sockbuf_pushsync(&so->so_rcv, nextrecord);
	}
	if (m == NULL) {
		/* XXXRW: Can this happen? */
		SOCKBUF_UNLOCK(&so->so_rcv);
		return (0);
	}
	KASSERT(m->m_nextpkt == nextrecord,
	    ("soreceive: post-control, nextrecord !sync"));
	if (nextrecord == NULL) {
		KASSERT(so->so_rcv.sb_mb == m,
		    ("soreceive: post-control, sb_mb!=m"));
		KASSERT(so->so_rcv.sb_lastrecord == m,
		    ("soreceive: post-control, lastrecord!=m"));
	}

	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);
	KASSERT(m == so->so_rcv.sb_mb, ("soreceive_dgram: m not sb_mb"));
	KASSERT(so->so_rcv.sb_mb->m_nextpkt == nextrecord,
	    ("soreceive_dgram: m_nextpkt != nextrecord"));

	/*
	 * Pull 'm' and its chain off the front of the packet queue.
	 */
	so->so_rcv.sb_mb = NULL;
	sockbuf_pushsync(&so->so_rcv, nextrecord);

	/*
	 * Walk 'm's chain and free that many bytes from the socket buffer.
	 */
	for (m2 = m; m2 != NULL; m2 = m2->m_next)
		sbfree(&so->so_rcv, m2);

	/*
	 * Do a few last checks before we let go of the lock.
	 */
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);
	SOCKBUF_UNLOCK(&so->so_rcv);

	/*
	 * Packet to copyout() is now in 'm' and it is disconnected from the
	 * queue.
	 *
	 * Process one or more MT_CONTROL mbufs present before any data mbufs
	 * in the first mbuf chain on the socket buffer.  If MSG_PEEK, we
	 * just copy the data; if !MSG_PEEK, we call into the protocol to
	 * perform externalization (or freeing if controlp == NULL).
	 */
	if (m->m_type == MT_CONTROL) {
		struct mbuf *cm = NULL, *cmn;
		struct mbuf **cme = &cm;

		do {
			m2 = m->m_next;
			m->m_next = NULL;
			*cme = m;
			cme = &(*cme)->m_next;
			m = m2;
		} while (m != NULL && m->m_type == MT_CONTROL);
		while (cm != NULL) {
			cmn = cm->m_next;
			cm->m_next = NULL;
			if (pr->pr_domain->dom_externalize != NULL) {
				error = (*pr->pr_domain->dom_externalize)
				    (cm, controlp);
			} else if (controlp != NULL)
				*controlp = cm;
			else
				m_freem(cm);
			if (controlp != NULL) {
				while (*controlp != NULL)
					controlp = &(*controlp)->m_next;
			}
			cm = cmn;
		}
		orig_resid = 0;	/* XXXRW: why this? */
	}

	KASSERT(m->m_type == MT_DATA, ("soreceive_dgram: !data"));

	while (m != NULL && uio->uio_resid > 0) {
		len = uio->uio_resid;
		if (len > m->m_len)
			len = m->m_len;
		error = uiomove(mtod(m, char *), (int)len, uio);
		if (error) {
			m_freem(m);
			return (error);
		}
		m = m_free(m);
	}
	if (m != NULL && pr->pr_flags & PR_ATOMIC)
		flags |= MSG_TRUNC;
	m_freem(m);
	if (flagsp != NULL)
		*flagsp |= flags;
	return (0);
}
int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{

	/* XXXRW: Temporary debugging. */
	KASSERT(so->so_proto->pr_usrreqs->pru_soreceive != soreceive,
	    ("soreceive: protocol calls soreceive"));

	return (so->so_proto->pr_usrreqs->pru_soreceive(so, psa, uio, mp0,
	    controlp, flagsp));
}
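/*
 * Example (illustrative sketch): an in-kernel caller reads data by building
 * a uio and calling soreceive().  'buf' and 'buflen' are assumed to be a
 * caller-supplied buffer and its size:
 *
 *	struct uio auio;
 *	struct iovec aiov;
 *	int error, flags = 0;
 *
 *	aiov.iov_base = buf;
 *	aiov.iov_len = buflen;
 *	auio.uio_iov = &aiov;
 *	auio.uio_iovcnt = 1;
 *	auio.uio_offset = 0;
 *	auio.uio_resid = buflen;
 *	auio.uio_segflg = UIO_SYSSPACE;
 *	auio.uio_rw = UIO_READ;
 *	auio.uio_td = curthread;
 *	error = soreceive(so, NULL, &auio, NULL, NULL, &flags);
 */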
int
soshutdown(struct socket *so, int how)
{
	struct protosw *pr = so->so_proto;

	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);
	if (pr->pr_usrreqs->pru_flush != NULL) {
		(*pr->pr_usrreqs->pru_flush)(so, how);
	}
	if (how != SHUT_WR)
		sorflush(so);
	if (how != SHUT_RD)
		return ((*pr->pr_usrreqs->pru_shutdown)(so));
	return (0);
}
void
sorflush(struct socket *so)
{
	struct sockbuf *sb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	struct sockbuf asb;

	/*
	 * In order to avoid calling dom_dispose with the socket buffer mutex
	 * held, and in order to generally avoid holding the lock for a long
	 * time, we make a copy of the socket buffer and clear the original
	 * (except locks, state).  The new socket buffer copy won't have
	 * initialized locks so we can only call routines that won't use or
	 * assert those locks.
	 *
	 * Dislodge threads currently blocked in receive and wait to acquire
	 * a lock against other simultaneous readers before clearing the
	 * socket buffer.  Don't let our acquire be interrupted by a signal
	 * despite any existing socket disposition on interruptible waiting.
	 */
	socantrcvmore(so);
	(void) sblock(sb, SBL_WAIT | SBL_NOINTR);

	/*
	 * Invalidate/clear most of the sockbuf structure, but leave selinfo
	 * and mutex data unchanged.
	 */
	SOCKBUF_LOCK(sb);
	bzero(&asb, offsetof(struct sockbuf, sb_startzero));
	bcopy(&sb->sb_startzero, &asb.sb_startzero,
	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
	bzero(&sb->sb_startzero,
	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
	SOCKBUF_UNLOCK(sb);
	sbunlock(sb);

	/*
	 * Dispose of special rights and flush the socket buffer.  Don't call
	 * any unsafe routines (that rely on locks being initialized) on asb.
	 */
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease_internal(&asb, so);
}
/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in an
 * additional variant to handle the case where the option value needs to be
 * some kind of integer, but not a specific size.  In addition to their use
 * here, these functions are also called by the protocol-level pr_ctloutput()
 * routines.
 */
int
sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	size_t	valsize;

	/*
	 * If the user gives us more than we wanted, we ignore it, but if we
	 * don't get the minimum length the caller wants, we return EINVAL.
	 * On success, sopt->sopt_valsize is set to however much we actually
	 * retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return (EINVAL);
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	if (sopt->sopt_td != NULL)
		return (copyin(sopt->sopt_val, buf, valsize));

	bcopy(sopt->sopt_val, buf, valsize);
	return (0);
}

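/*
 * Illustrative sketch (not part of this file): a protocol's pr_ctloutput()
 * set path typically pulls a fixed-size integer option via sooptcopyin()
 * before applying it to protocol state:
 *
 *	int optval;
 *
 *	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
 *	if (error)
 *		return (error);
 *	(apply optval to protocol state)
 */
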
/*
 * Kernel version of setsockopt(2).
 *
 * XXX: optlen is size_t, not socklen_t
 */
int
so_setsockopt(struct socket *so, int level, int optname, void *optval,
    size_t optlen)
{
	struct sockopt sopt;

	sopt.sopt_level = level;
	sopt.sopt_name = optname;
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_val = optval;
	sopt.sopt_valsize = optlen;
	sopt.sopt_td = NULL;
	return (sosetopt(so, &sopt));
}

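/*
 * Illustrative sketch (not part of this file): an in-kernel consumer could
 * enable SO_REUSEADDR on a socket it owns with:
 *
 *	int on = 1;
 *
 *	error = so_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
 */
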
int
sosetopt(struct socket *so, struct sockopt *sopt)
{
	int	error, optval;
	struct	linger l;
	struct	timeval tv;
	u_long	val;
#ifdef MAC
	struct mac extmac;
#endif

	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
			    (so, sopt));
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;
#endif
		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			SOCK_LOCK(so);
			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			SOCK_UNLOCK(so);
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_BINTIME:
		case SO_NOSIGPIPE:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				goto bad;
			SOCK_LOCK(so);
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			SOCK_UNLOCK(so);
			break;

		case SO_SETFIB:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (optval < 1 || optval > rt_numfibs) {
				error = EINVAL;
				goto bad;
			}
			if ((so->so_proto->pr_domain->dom_family == PF_INET) ||
			    (so->so_proto->pr_domain->dom_family == PF_ROUTE)) {
				so->so_fibnum = optval;
			}
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these options,
			 * so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so, curthread) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				(sopt->sopt_name == SO_SNDBUF ? &so->so_snd :
				    &so->so_rcv)->sb_flags &= ~SB_AUTOSIZE;
				break;

			/*
			 * Make sure the low-water is never greater than the
			 * high-water.
			 */
			case SO_SNDLOWAT:
				SOCKBUF_LOCK(&so->so_snd);
				so->so_snd.sb_lowat =
				    (optval > so->so_snd.sb_hiwat) ?
				    so->so_snd.sb_hiwat : optval;
				SOCKBUF_UNLOCK(&so->so_snd);
				break;
			case SO_RCVLOWAT:
				SOCKBUF_LOCK(&so->so_rcv);
				so->so_rcv.sb_lowat =
				    (optval > so->so_rcv.sb_hiwat) ?
				    so->so_rcv.sb_hiwat : optval;
				SOCKBUF_UNLOCK(&so->so_rcv);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
#ifdef COMPAT_IA32
			if (curthread->td_proc->p_sysent == &ia32_freebsd_sysvec) {
				struct timeval32 tv32;

				error = sooptcopyin(sopt, &tv32, sizeof tv32,
				    sizeof tv32);
				CP(tv32, tv, tv_sec);
				CP(tv32, tv, tv_usec);
			} else
#endif
				error = sooptcopyin(sopt, &tv, sizeof tv,
				    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
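			/*
			 * Worked example: with hz = 1000, tick is
			 * 1000000 / hz = 1000 microseconds per tick, so a
			 * timeout of { tv_sec = 2, tv_usec = 500000 } yields
			 * val = 2 * 1000 + 500000 / 1000 = 2500 ticks.
			 */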
			if (val > INT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;

		case SO_LABEL:
#ifdef MAC
			error = sooptcopyin(sopt, &extmac, sizeof extmac,
			    sizeof extmac);
			if (error)
				goto bad;
			error = mac_setsockopt_label(sopt->sopt_td->td_ucred,
			    so, &extmac);
#else
			error = EOPNOTSUPP;
#endif
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto != NULL &&
		    so->so_proto->pr_ctloutput != NULL) {
			(void) ((*so->so_proto->pr_ctloutput)
			    (so, sopt));
		}
	}
bad:
	return (error);
}

/*
 * Helper routine for getsockopt.
 */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	int	error;
	size_t	valsize;

	error = 0;

	/*
	 * Documented get behavior is that we always return a value, possibly
	 * truncated to fit in the user's buffer.  Traditional behavior is
	 * that we always tell the user precisely how much we copied, rather
	 * than something useful like the total amount we had available for
	 * her.  Note that this interface is not idempotent; the entire
	 * answer must be generated ahead of time.
	 */
	valsize = min(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != NULL) {
		if (sopt->sopt_td != NULL)
			error = copyout(buf, sopt->sopt_val, valsize);
		else
			bcopy(buf, sopt->sopt_val, valsize);
	}
	return (error);
}

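/*
 * Illustrative sketch (not part of this file): the get path of a
 * pr_ctloutput() routine returns a fixed-size value with sooptcopyout(),
 * generating the full answer before copying it out:
 *
 *	int optval;
 *
 *	optval = (protocol-specific value, computed ahead of time);
 *	error = sooptcopyout(sopt, &optval, sizeof(optval));
 */
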
int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int	error, optval;
	struct	linger l;
	struct	timeval tv;
#ifdef MAC
	struct mac extmac;
#endif

	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
			    (so, sopt));
		} else
			return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			error = do_getopt_accept_filter(so, sopt);
			break;
#endif
		case SO_LINGER:
			SOCK_LOCK(so);
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			SOCK_UNLOCK(so);
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_ACCEPTCONN:
		case SO_TIMESTAMP:
		case SO_BINTIME:
		case SO_NOSIGPIPE:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			SOCK_LOCK(so);
			optval = so->so_error;
			so->so_error = 0;
			SOCK_UNLOCK(so);
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.sb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.sb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.sb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.sb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
			    so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * tick;
#ifdef COMPAT_IA32
			if (curthread->td_proc->p_sysent == &ia32_freebsd_sysvec) {
				struct timeval32 tv32;

				CP(tv, tv32, tv_sec);
				CP(tv, tv32, tv_usec);
				error = sooptcopyout(sopt, &tv32, sizeof tv32);
			} else
#endif
				error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		case SO_LABEL:
#ifdef MAC
			error = sooptcopyin(sopt, &extmac, sizeof(extmac),
			    sizeof(extmac));
			if (error)
				return (error);
			error = mac_getsockopt_label(sopt->sopt_td->td_ucred,
			    so, &extmac);
			if (error)
				return (error);
			error = sooptcopyout(sopt, &extmac, sizeof extmac);
#else
			error = EOPNOTSUPP;
#endif
			break;

		case SO_PEERLABEL:
#ifdef MAC
			error = sooptcopyin(sopt, &extmac, sizeof(extmac),
			    sizeof(extmac));
			if (error)
				return (error);
			error = mac_getsockopt_peerlabel(
			    sopt->sopt_td->td_ucred, so, &extmac);
			if (error)
				return (error);
			error = sooptcopyout(sopt, &extmac, sizeof extmac);
#else
			error = EOPNOTSUPP;
#endif
			break;

		case SO_LISTENQLIMIT:
			optval = so->so_qlimit;
			goto integer;

		case SO_LISTENQLEN:
			optval = so->so_qlen;
			goto integer;

		case SO_LISTENINCQLEN:
			optval = so->so_incqlen;
			goto integer;

		default:
			error = ENOPROTOOPT;
			break;
		}
		return (error);
	}
}

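/*
 * Illustrative sketch (not part of this file): a kernel caller can query a
 * socket option by building a struct sockopt by hand, mirroring
 * so_setsockopt() above but with sopt_dir set to SOPT_GET:
 *
 *	struct sockopt sopt;
 *	int optval;
 *
 *	sopt.sopt_level = SOL_SOCKET;
 *	sopt.sopt_name = SO_SNDBUF;
 *	sopt.sopt_dir = SOPT_GET;
 *	sopt.sopt_val = &optval;
 *	sopt.sopt_valsize = sizeof(optval);
 *	sopt.sopt_td = NULL;
 *	error = sogetopt(so, &sopt);
 */
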
/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize;

	MGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;
	if (sopt_size > MLEN) {
		MCLGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return ENOBUFS;
		}
		m->m_len = min(MCLBYTES, sopt_size);
	} else {
		m->m_len = min(MLEN, sopt_size);
	}
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size) {
		MGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			m_freem(*mp);
			return ENOBUFS;
		}
		if (sopt_size > MLEN) {
			MCLGET(m, sopt->sopt_td != NULL ? M_WAIT :
			    M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				m_freem(*mp);
				return ENOBUFS;
			}
			m->m_len = min(MCLBYTES, sopt_size);
		} else {
			m->m_len = min(MLEN, sopt_size);
		}
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}

/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;

	if (sopt->sopt_val == NULL)
		return (0);
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			int error;

			error = copyin(sopt->sopt_val, mtod(m, char *),
			    m->m_len);
			if (error != 0) {
				m_freem(m0);
				return (error);
			}
		} else
			bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
		sopt->sopt_valsize -= m->m_len;
		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
		m = m->m_next;
	}
	/* The chain should have been sized by ip6_sooptmcopyin(). */
	if (m != NULL)
		panic("ip6_sooptmcopyin");
	return (0);
}

/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;

	if (sopt->sopt_val == NULL)
		return (0);
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			int error;

			error = copyout(mtod(m, char *), sopt->sopt_val,
			    m->m_len);
			if (error != 0) {
				m_freem(m0);
				return (error);
			}
		} else
			bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
		sopt->sopt_valsize -= m->m_len;
		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* The caller should have supplied a large enough buffer. */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return (0);
}

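/*
 * Illustrative sketch (not part of this file): the usual pattern for the
 * three compatibility routines above is to size an mbuf chain to the option
 * with soopt_getm(), fill it with soopt_mcopyin(), hand the chain to a
 * legacy mbuf-based option handler, and copy the result back with
 * soopt_mcopyout(), roughly:
 *
 *	struct mbuf *m = NULL;
 *
 *	error = soopt_getm(sopt, &m);
 *	if (error == 0)
 *		error = soopt_mcopyin(sopt, m);
 *	(legacy handler consumes or replaces "m")
 *	if (error == 0)
 *		error = soopt_mcopyout(sopt, m);
 */
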
/*
 * sohasoutofband(): protocol notifies socket layer of the arrival of new
 * out-of-band data, which will then notify socket consumers.
 */
void
sohasoutofband(struct socket *so)
{

	if (so->so_sigio != NULL)
		pgsigio(&so->so_sigio, SIGURG, 0);
	selwakeuppri(&so->so_rcv.sb_sel, PSOCK);
}

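/*
 * Illustrative note (not part of this file): a protocol's input path calls
 * this once it has noted urgent data, e.g. (hypothetical TCP-like code):
 *
 *	so->so_oobmark = so->so_rcv.sb_cc + (offset of urgent byte);
 *	sohasoutofband(so);
 *
 * which delivers SIGURG to the owning process or process group and wakes
 * select()/poll() waiters on the receive buffer.
 */
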
int
sopoll(struct socket *so, int events, struct ucred *active_cred,
    struct thread *td)
{

	/* XXXRW: Temporary debugging. */
	KASSERT(so->so_proto->pr_usrreqs->pru_sopoll != sopoll,
	    ("sopoll: protocol calls sopoll"));

	return (so->so_proto->pr_usrreqs->pru_sopoll(so, events, active_cred,
	    td));
}

int
sopoll_generic(struct socket *so, int events, struct ucred *active_cred,
    struct thread *td)
{
	int revents = 0;

	SOCKBUF_LOCK(&so->so_snd);
	SOCKBUF_LOCK(&so->so_rcv);
	if (events & (POLLIN | POLLRDNORM))
		if (soreadable(so))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & POLLINIGNEOF)
		if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat ||
		    !TAILQ_EMPTY(&so->so_comp) || so->so_error)
			revents |= POLLINIGNEOF;

	if (events & (POLLOUT | POLLWRNORM))
		if (sowriteable(so))
			revents |= events & (POLLOUT | POLLWRNORM);

	if (events & (POLLPRI | POLLRDBAND))
		if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK))
			revents |= events & (POLLPRI | POLLRDBAND);

	if (revents == 0) {
		if (events &
		    (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM |
		     POLLRDBAND)) {
			selrecord(td, &so->so_rcv.sb_sel);
			so->so_rcv.sb_flags |= SB_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(td, &so->so_snd.sb_sel);
			so->so_snd.sb_flags |= SB_SEL;
		}
	}

	SOCKBUF_UNLOCK(&so->so_rcv);
	SOCKBUF_UNLOCK(&so->so_snd);
	return (revents);
}

int
soo_kqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;
	struct sockbuf *sb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		sb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		sb = &so->so_snd;
		break;
	default:
		return (EINVAL);
	}

	SOCKBUF_LOCK(sb);
	knlist_add(&sb->sb_sel.si_note, kn, 1);
	sb->sb_flags |= SB_KNOTE;
	SOCKBUF_UNLOCK(sb);
	return (0);
}

/*
 * Some routines that return EOPNOTSUPP for entry points that are not
 * supported by a protocol.  Fill in as needed.
 */
int
pru_accept_notsupp(struct socket *so, struct sockaddr **nam)
{

	return EOPNOTSUPP;
}

int
pru_attach_notsupp(struct socket *so, int proto, struct thread *td)
{

	return EOPNOTSUPP;
}

int
pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
{

	return EOPNOTSUPP;
}

int
pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
{

	return EOPNOTSUPP;
}

int
pru_connect2_notsupp(struct socket *so1, struct socket *so2)
{

	return EOPNOTSUPP;
}

int
pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct thread *td)
{

	return EOPNOTSUPP;
}

int
pru_disconnect_notsupp(struct socket *so)
{

	return EOPNOTSUPP;
}

int
pru_listen_notsupp(struct socket *so, int backlog, struct thread *td)
{

	return EOPNOTSUPP;
}

int
pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam)
{

	return EOPNOTSUPP;
}

int
pru_rcvd_notsupp(struct socket *so, int flags)
{

	return EOPNOTSUPP;
}

int
pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags)
{

	return EOPNOTSUPP;
}

int
pru_send_notsupp(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr, struct mbuf *control, struct thread *td)
{

	return EOPNOTSUPP;
}

/*
 * This isn't really a ``null'' operation, but it's the default one and
 * doesn't do anything destructive.
 */
int
pru_sense_null(struct socket *so, struct stat *sb)
{

	sb->st_blksize = so->so_snd.sb_hiwat;
	return 0;
}

int
pru_shutdown_notsupp(struct socket *so)
{

	return EOPNOTSUPP;
}

int
pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam)
{

	return EOPNOTSUPP;
}

int
pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{

	return EOPNOTSUPP;
}

int
pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{

	return EOPNOTSUPP;
}

int
pru_sopoll_notsupp(struct socket *so, int events, struct ucred *cred,
    struct thread *td)
{

	return EOPNOTSUPP;
}

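/*
 * Illustrative sketch (not part of this file): a hypothetical datagram-only
 * protocol would point the entry points it does not implement at the stubs
 * above in its struct pr_usrreqs, e.g.:
 *
 *	struct pr_usrreqs foo_usrreqs = {
 *		.pru_attach =	foo_attach,
 *		.pru_send =	foo_send,
 *		.pru_listen =	pru_listen_notsupp,
 *		.pru_accept =	pru_accept_notsupp,
 *		.pru_rcvoob =	pru_rcvoob_notsupp,
 *		.pru_sense =	pru_sense_null,
 *	};
 *
 * "foo_attach" and "foo_send" are hypothetical protocol methods.
 */
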
static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	SOCKBUF_LOCK(&so->so_rcv);
	knlist_remove(&so->so_rcv.sb_sel.si_note, kn, 1);
	if (knlist_empty(&so->so_rcv.sb_sel.si_note))
		so->so_rcv.sb_flags &= ~SB_KNOTE;
	SOCKBUF_UNLOCK(&so->so_rcv);
}

/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so;

	so = kn->kn_fp->f_data;
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);

	kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	} else if (so->so_error)	/* temporary udp error */
		return (1);
	else if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	else
		return (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat);
}

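/*
 * Illustrative note (not part of this file): NOTE_LOWAT lets a userland
 * consumer substitute its own receive low-water mark for this knote:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, NOTE_LOWAT, 128, NULL);
 *
 * after which filt_soread() reports the socket readable only once at least
 * 128 bytes are queued (the kn_data >= kn_sdata test above).
 */
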
static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	SOCKBUF_LOCK(&so->so_snd);
	knlist_remove(&so->so_snd.sb_sel.si_note, kn, 1);
	if (knlist_empty(&so->so_snd.sb_sel.si_note))
		so->so_snd.sb_flags &= ~SB_KNOTE;
	SOCKBUF_UNLOCK(&so->so_snd);
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so;

	so = kn->kn_fp->f_data;
	SOCKBUF_LOCK_ASSERT(&so->so_snd);
	kn->kn_data = sbspace(&so->so_snd);
	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	} else if (so->so_error)	/* temporary udp error */
		return (1);
	else if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	else if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	else
		return (kn->kn_data >= so->so_snd.sb_lowat);
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;
	return (! TAILQ_EMPTY(&so->so_comp));
}

int
socheckuid(struct socket *so, uid_t uid)
{

	if (so == NULL)
		return (EPERM);
	if (so->so_cred->cr_uid != uid)
		return (EPERM);
	return (0);
}

static int
sysctl_somaxconn(SYSCTL_HANDLER_ARGS)
{
	int error;
	int val;

	val = somaxconn;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	if (val < 1 || val > USHRT_MAX)
		return (EINVAL);

	somaxconn = val;
	return (0);
}

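/*
 * Illustrative note (not part of this file): this handler backs the
 * kern.ipc.somaxconn sysctl, so the listen queue ceiling can be tuned at
 * runtime, e.g.:
 *
 *	sysctl kern.ipc.somaxconn=1024
 */
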
/*
 * These functions are used by protocols to notify the socket layer (and its
 * consumers) of state changes in the sockets driven by protocol-side events.
 */

/*
 * Procedures to manipulate state flags of socket and do appropriate wakeups.
 *
 * Normal sequence from the active (originating) side is that
 * soisconnecting() is called during processing of a connect() call, resulting
 * in an eventual call to soisconnected() if/when the connection is
 * established.  When the connection is torn down soisdisconnecting() is
 * called during processing of a disconnect() call, and soisdisconnected() is
 * called when the connection to the peer is totally severed.  The semantics
 * of these routines are such that connectionless protocols can call
 * soisconnected() and soisdisconnected() only, bypassing the in-progress
 * calls when setting up a ``connection'' takes no time.
 *
 * From the passive side, a socket is created with two queues of sockets:
 * so_incomp for connections in progress and so_comp for connections already
 * made and awaiting user acceptance.  As a protocol is preparing incoming
 * connections, it creates a socket structure queued on so_incomp by calling
 * sonewconn().  When the connection is established, soisconnected() is
 * called, and transfers the socket structure to so_comp, making it available
 * to accept().
 *
 * If a socket is closed with sockets on either so_incomp or so_comp, these
 * sockets are dropped.
 *
 * If higher-level protocols are implemented in the kernel, the wakeups done
 * here will sometimes cause software-interrupt process scheduling.
 */

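/*
 * Illustrative sketch (not part of this file): per the comment above, a
 * connectionless protocol's connect method can simply record the peer and
 * call
 *
 *	soisconnected(so);
 *
 * directly, while a connection-oriented protocol calls soisconnecting()
 * first and soisconnected() only once its handshake completes.
 */
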
void
soisconnecting(struct socket *so)
{

	SOCK_LOCK(so);
	so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= SS_ISCONNECTING;
	SOCK_UNLOCK(so);
}

void
soisconnected(struct socket *so)
{
	struct socket *head;

	ACCEPT_LOCK();
	SOCK_LOCK(so);
	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
	so->so_state |= SS_ISCONNECTED;
	head = so->so_head;
	if (head != NULL && (so->so_qstate & SQ_INCOMP)) {
		if ((so->so_options & SO_ACCEPTFILTER) == 0) {
			SOCK_UNLOCK(so);
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
			so->so_qstate &= ~SQ_INCOMP;
			TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
			head->so_qlen++;
			so->so_qstate |= SQ_COMP;
			ACCEPT_UNLOCK();
			sorwakeup(head);
			wakeup_one(&head->so_timeo);
		} else {
			ACCEPT_UNLOCK();
			so->so_upcall =
			    head->so_accf->so_accept_filter->accf_callback;
			so->so_upcallarg = head->so_accf->so_accept_filter_arg;
			so->so_rcv.sb_flags |= SB_UPCALL;
			so->so_options &= ~SO_ACCEPTFILTER;
			SOCK_UNLOCK(so);
			so->so_upcall(so, so->so_upcallarg, M_DONTWAIT);
		}
		return;
	}
	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();
	wakeup(&so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
}

void
soisdisconnecting(struct socket *so)
{

	/*
	 * Note: This code assumes that SOCK_LOCK(so) and
	 * SOCKBUF_LOCK(&so->so_rcv) are the same.
	 */
	SOCKBUF_LOCK(&so->so_rcv);
	so->so_state &= ~SS_ISCONNECTING;
	so->so_state |= SS_ISDISCONNECTING;
	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
	sorwakeup_locked(so);
	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_state |= SBS_CANTSENDMORE;
	sowwakeup_locked(so);
	wakeup(&so->so_timeo);
}

void
soisdisconnected(struct socket *so)
{

	/*
	 * Note: This code assumes that SOCK_LOCK(so) and
	 * SOCKBUF_LOCK(&so->so_rcv) are the same.
	 */
	SOCKBUF_LOCK(&so->so_rcv);
	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= SS_ISDISCONNECTED;
	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
	sorwakeup_locked(so);
	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_state |= SBS_CANTSENDMORE;
	sbdrop_locked(&so->so_snd, so->so_snd.sb_cc);
	sowwakeup_locked(so);
	wakeup(&so->so_timeo);
}

/*
 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.
 */
struct sockaddr *
sodupsockaddr(const struct sockaddr *sa, int mflags)
{
	struct sockaddr *sa2;

	sa2 = malloc(sa->sa_len, M_SONAME, mflags);
	if (sa2)
		bcopy(sa, sa2, sa->sa_len);
	return (sa2);
}

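/*
 * Illustrative sketch (not part of this file): a pru_peeraddr() method
 * commonly returns its peer address this way; the copy is later released
 * with free(..., M_SONAME):
 *
 *	*nam = sodupsockaddr((struct sockaddr *)&peer_addr, M_WAITOK);
 *
 * "peer_addr" is a hypothetical protocol-held sockaddr.
 */
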
/*
 * Create an external-format (``xsocket'') structure using the information in
 * the kernel-format socket structure pointed to by so.  This is done to
 * reduce the spew of irrelevant information over this interface, to isolate
 * user code from changes in the kernel structure, and potentially to provide
 * information-hiding if we decide that some of this information should be
 * hidden from users.
 */
void
sotoxsocket(struct socket *so, struct xsocket *xso)
{

	xso->xso_len = sizeof *xso;
	xso->xso_so = so;
	xso->so_type = so->so_type;
	xso->so_options = so->so_options;
	xso->so_linger = so->so_linger;
	xso->so_state = so->so_state;
	xso->so_pcb = so->so_pcb;
	xso->xso_protocol = so->so_proto->pr_protocol;
	xso->xso_family = so->so_proto->pr_domain->dom_family;
	xso->so_qlen = so->so_qlen;
	xso->so_incqlen = so->so_incqlen;
	xso->so_qlimit = so->so_qlimit;
	xso->so_timeo = so->so_timeo;
	xso->so_error = so->so_error;
	xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
	xso->so_oobmark = so->so_oobmark;
	sbtoxsockbuf(&so->so_snd, &xso->so_snd);
	sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
	xso->so_uid = so->so_cred->cr_uid;
}