2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 1982, 1986, 1989, 1991, 1993
5 * The Regents of the University of California. All Rights Reserved.
6 * Copyright (c) 2004-2009 Robert N. M. Watson All Rights Reserved.
7 * Copyright (c) 2018 Matthew Macy
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * From: @(#)uipc_usrreq.c 8.3 (Berkeley) 1/4/94
37 * UNIX Domain (Local) Sockets
39 * This is an implementation of UNIX (local) domain sockets. Each socket has
40 * an associated struct unpcb (UNIX protocol control block). Stream sockets
41 * may be connected to 0 or 1 other socket. Datagram sockets may be
42 * connected to 0, 1, or many other sockets. Sockets may be created and
43 * connected in pairs (socketpair(2)), or bound/connected to using the file
44 * system name space. For most purposes, only the receive socket buffer is
45 * used, as sending on one socket delivers directly to the receive socket
46 * buffer of a second socket.
48 * The implementation is substantially complicated by the fact that
49 * "ancillary data", such as file descriptors or credentials, may be passed
50 * across UNIX domain sockets. The potential for passing UNIX domain sockets
51 * over other UNIX domain sockets requires the implementation of a simple
52 * garbage collector to find and tear down cycles of disconnected sockets.
56 * rethink name space problems
57 * need a proper out-of-band
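/*
 * Illustrative only: a minimal userspace sketch (not part of this file) of
 * the SCM_RIGHTS ancillary-data facility described above.  The helper name
 * send_fd() is made up for the example; the control-message handling uses
 * the standard CMSG_* macros.
 *
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *	#include <string.h>
 *
 *	static int
 *	send_fd(int sock, int fd)
 *	{
 *		struct msghdr msg;
 *		struct iovec iov;
 *		struct cmsghdr *cmsg;
 *		union {
 *			struct cmsghdr hdr;
 *			char buf[CMSG_SPACE(sizeof(int))];
 *		} cmsgbuf;
 *		char c = 0;
 *
 *		memset(&msg, 0, sizeof(msg));
 *		memset(&cmsgbuf, 0, sizeof(cmsgbuf));
 *		iov.iov_base = &c;
 *		iov.iov_len = sizeof(c);
 *		msg.msg_iov = &iov;
 *		msg.msg_iovlen = 1;
 *		msg.msg_control = cmsgbuf.buf;
 *		msg.msg_controllen = sizeof(cmsgbuf.buf);
 *		cmsg = CMSG_FIRSTHDR(&msg);
 *		cmsg->cmsg_level = SOL_SOCKET;
 *		cmsg->cmsg_type = SCM_RIGHTS;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *		memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
 *		return (sendmsg(sock, &msg, 0) == -1 ? -1 : 0);
 *	}
 *
 * A caller might create a connected pair with
 * socketpair(PF_LOCAL, SOCK_STREAM, 0, sv) and then invoke send_fd(sv[0], fd).
 */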
60 #include <sys/cdefs.h>
61 __FBSDID("$FreeBSD$");
65 #include <sys/param.h>
66 #include <sys/capsicum.h>
67 #include <sys/domain.h>
68 #include <sys/eventhandler.h>
69 #include <sys/fcntl.h>
71 #include <sys/filedesc.h>
72 #include <sys/kernel.h>
74 #include <sys/malloc.h>
76 #include <sys/mount.h>
77 #include <sys/mutex.h>
78 #include <sys/namei.h>
80 #include <sys/protosw.h>
81 #include <sys/queue.h>
82 #include <sys/resourcevar.h>
83 #include <sys/rwlock.h>
84 #include <sys/socket.h>
85 #include <sys/socketvar.h>
86 #include <sys/signalvar.h>
89 #include <sys/sysctl.h>
90 #include <sys/systm.h>
91 #include <sys/taskqueue.h>
93 #include <sys/unpcb.h>
94 #include <sys/vnode.h>
102 #include <security/mac/mac_framework.h>
106 MALLOC_DECLARE(M_FILECAPS);
109 * See unpcb.h for the locking key.
112 static uma_zone_t unp_zone;
113 static unp_gen_t unp_gencnt; /* (l) */
114 static u_int unp_count; /* (l) Count of local sockets. */
115 static ino_t unp_ino; /* Prototype for fake inode numbers. */
116 static int unp_rights; /* (g) File descriptors in flight. */
117 static struct unp_head unp_shead; /* (l) List of stream sockets. */
118 static struct unp_head unp_dhead; /* (l) List of datagram sockets. */
119 static struct unp_head unp_sphead; /* (l) List of seqpacket sockets. */
122 SLIST_ENTRY(unp_defer) ud_link;
125 static SLIST_HEAD(, unp_defer) unp_defers;
126 static int unp_defers_count;
128 static const struct sockaddr sun_noname = { sizeof(sun_noname), AF_LOCAL };
131 * Garbage collection of cyclic file descriptor/socket references occurs
132 * asynchronously in a taskqueue context in order to avoid recursion and
133 * reentrance in the UNIX domain socket, file descriptor, and socket layer
134 * code. See unp_gc() for a full description.
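/*
 * Illustrative only: from userspace, a reference cycle of the kind unp_gc()
 * must resolve can be created by sending one end of a socketpair(2) across
 * the other and then closing both descriptors.  A rough sketch, reusing the
 * hypothetical send_fd() helper from the SCM_RIGHTS example near the top of
 * this file:
 *
 *	int sv[2];
 *
 *	if (socketpair(PF_LOCAL, SOCK_STREAM, 0, sv) == 0) {
 *		send_fd(sv[0], sv[1]);
 *		close(sv[0]);
 *		close(sv[1]);
 *	}
 *
 * The in-flight copy of sv[1] sitting in its own receive buffer keeps the
 * socket referenced even though no process holds a descriptor for it, so
 * only the garbage collector can reclaim it.
 */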
136 static struct timeout_task unp_gc_task;
139 * The close of UNIX domain sockets attached as SCM_RIGHTS is
140 * postponed to the taskqueue, to avoid arbitrary recursion depth.
141 * The attached sockets might themselves have other sockets attached.
143 static struct task unp_defer_task;
146 * Both send and receive buffers are allocated PIPSIZ bytes of buffering for
147 * stream sockets, although the total for sender and receiver is actually only PIPSIZ.
150 * Datagram sockets really use the sendspace as the maximum datagram size,
151 * and don't really want to reserve the sendspace. Their recvspace should be
152 * large enough for at least one max-size datagram plus address.
157 static u_long unpst_sendspace = PIPSIZ;
158 static u_long unpst_recvspace = PIPSIZ;
159 static u_long unpdg_maxdgram = 2*1024;
160 static u_long unpdg_recvspace = 16*1024; /* support 8KB syslog msgs */
161 static u_long unpsp_sendspace = PIPSIZ; /* really max datagram size */
162 static u_long unpsp_recvspace = PIPSIZ;
164 static SYSCTL_NODE(_net, PF_LOCAL, local, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
166 static SYSCTL_NODE(_net_local, SOCK_STREAM, stream,
167 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
169 static SYSCTL_NODE(_net_local, SOCK_DGRAM, dgram,
170 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
172 static SYSCTL_NODE(_net_local, SOCK_SEQPACKET, seqpacket,
173 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
176 SYSCTL_ULONG(_net_local_stream, OID_AUTO, sendspace, CTLFLAG_RW,
177 &unpst_sendspace, 0, "Default stream send space.");
178 SYSCTL_ULONG(_net_local_stream, OID_AUTO, recvspace, CTLFLAG_RW,
179 &unpst_recvspace, 0, "Default stream receive space.");
180 SYSCTL_ULONG(_net_local_dgram, OID_AUTO, maxdgram, CTLFLAG_RW,
181 &unpdg_maxdgram, 0, "Maximum datagram size.");
182 SYSCTL_ULONG(_net_local_dgram, OID_AUTO, recvspace, CTLFLAG_RW,
183 &unpdg_recvspace, 0, "Default datagram receive space.");
184 SYSCTL_ULONG(_net_local_seqpacket, OID_AUTO, maxseqpacket, CTLFLAG_RW,
185 &unpsp_sendspace, 0, "Default seqpacket send space.");
186 SYSCTL_ULONG(_net_local_seqpacket, OID_AUTO, recvspace, CTLFLAG_RW,
187 &unpsp_recvspace, 0, "Default seqpacket receive space.");
188 SYSCTL_INT(_net_local, OID_AUTO, inflight, CTLFLAG_RD, &unp_rights, 0,
189 "File descriptors in flight.");
190 SYSCTL_INT(_net_local, OID_AUTO, deferred, CTLFLAG_RD,
191 &unp_defers_count, 0,
192 "File descriptors deferred to taskqueue for close.");
195 * Locking and synchronization:
197 * Several types of locks exist in the local domain socket implementation:
198 * - a global linkage lock
199 * - a global connection list lock
200 * - the mtxpool lock
201 * - per-unpcb mutexes
203 * The linkage lock protects the global socket lists, the generation number
204 * counter and garbage collector state.
206 * The connection list lock protects the list of referring sockets in a datagram
207 * socket PCB. This lock is also overloaded to protect a global list of
208 * sockets whose buffers contain socket references in the form of SCM_RIGHTS
209 * messages. To avoid recursion, such references are released by a dedicated taskqueue thread.
212 * The mtxpool lock protects the vnode from being modified while referenced.
213 * Lock ordering rules require that it be acquired before any PCB locks.
215 * The unpcb lock (unp_mtx) protects the most commonly referenced fields in the
216 * unpcb. This includes the unp_conn field, which either links two connected
217 * PCBs together (for connected socket types) or points at the destination
218 * socket (for connectionless socket types). The operations of creating or
219 * destroying a connection therefore involve locking multiple PCBs. To avoid
220 * lock order reversals, in some cases this involves dropping a PCB lock and
221 * using a reference counter to maintain liveness.
223 * UNIX domain sockets each have an unpcb hung off of their so_pcb pointer,
224 * allocated in pru_attach() and freed in pru_detach(). The validity of that
225 * pointer is an invariant, so no lock is required to dereference the so_pcb
226 * pointer if a valid socket reference is held by the caller. In practice,
227 * this is always true during operations performed on a socket. Each unpcb
228 * has a back-pointer to its socket, unp_socket, which will be stable under
229 * the same circumstances.
231 * This pointer may only be safely dereferenced as long as a valid reference
232 * to the unpcb is held. Typically, this reference will be from the socket,
233 * or from another unpcb when the referring unpcb's lock is held (in order
234 * that the reference not be invalidated during use). For example, to follow
235 * unp->unp_conn->unp_socket, you need to hold a lock on unp_conn to guarantee
236 * that detach is not run clearing unp_socket.
238 * Blocking with UNIX domain sockets is a tricky issue: unlike most network
239 * protocols, bind() is a non-atomic operation, and connect() requires
240 * potential sleeping in the protocol, due to potentially waiting on local or
241 * distributed file systems. We try to separate "lookup" operations, which
242 * may sleep, and the IPC operations themselves, which typically can occur
243 * with relative atomicity as locks can be held over the entire operation.
245 * Another tricky issue is simultaneous multi-threaded or multi-process
246 * access to a single UNIX domain socket. These are handled by the flags
247 * UNP_CONNECTING and UNP_BINDING, which prevent concurrent connecting or
248 * binding, both of which involve dropping UNIX domain socket locks in order
249 * to perform namei() and other file system operations.
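/*
 * As a concrete illustration of the lock ordering described above,
 * unp_pcb_lock_pair() below acquires two PCB locks in address order,
 * roughly:
 *
 *	if ((uintptr_t)unp2 > (uintptr_t)unp) {
 *		UNP_PCB_LOCK(unp);
 *		UNP_PCB_LOCK(unp2);
 *	} else {
 *		UNP_PCB_LOCK(unp2);
 *		UNP_PCB_LOCK(unp);
 *	}
 *
 * so that any two threads locking the same pair always take the locks in
 * the same order, ruling out deadlock between them.
 */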
251 static struct rwlock unp_link_rwlock;
252 static struct mtx unp_defers_lock;
254 #define UNP_LINK_LOCK_INIT() rw_init(&unp_link_rwlock, \
257 #define UNP_LINK_LOCK_ASSERT() rw_assert(&unp_link_rwlock, \
259 #define UNP_LINK_UNLOCK_ASSERT() rw_assert(&unp_link_rwlock, \
262 #define UNP_LINK_RLOCK() rw_rlock(&unp_link_rwlock)
263 #define UNP_LINK_RUNLOCK() rw_runlock(&unp_link_rwlock)
264 #define UNP_LINK_WLOCK() rw_wlock(&unp_link_rwlock)
265 #define UNP_LINK_WUNLOCK() rw_wunlock(&unp_link_rwlock)
266 #define UNP_LINK_WLOCK_ASSERT() rw_assert(&unp_link_rwlock, \
268 #define UNP_LINK_WOWNED() rw_wowned(&unp_link_rwlock)
270 #define UNP_DEFERRED_LOCK_INIT() mtx_init(&unp_defers_lock, \
271 "unp_defer", NULL, MTX_DEF)
272 #define UNP_DEFERRED_LOCK() mtx_lock(&unp_defers_lock)
273 #define UNP_DEFERRED_UNLOCK() mtx_unlock(&unp_defers_lock)
275 #define UNP_REF_LIST_LOCK() UNP_DEFERRED_LOCK();
276 #define UNP_REF_LIST_UNLOCK() UNP_DEFERRED_UNLOCK();
278 #define UNP_PCB_LOCK_INIT(unp) mtx_init(&(unp)->unp_mtx, \
281 #define UNP_PCB_LOCK_DESTROY(unp) mtx_destroy(&(unp)->unp_mtx)
282 #define UNP_PCB_LOCKPTR(unp) (&(unp)->unp_mtx)
283 #define UNP_PCB_LOCK(unp) mtx_lock(&(unp)->unp_mtx)
284 #define UNP_PCB_TRYLOCK(unp) mtx_trylock(&(unp)->unp_mtx)
285 #define UNP_PCB_UNLOCK(unp) mtx_unlock(&(unp)->unp_mtx)
286 #define UNP_PCB_OWNED(unp) mtx_owned(&(unp)->unp_mtx)
287 #define UNP_PCB_LOCK_ASSERT(unp) mtx_assert(&(unp)->unp_mtx, MA_OWNED)
288 #define UNP_PCB_UNLOCK_ASSERT(unp) mtx_assert(&(unp)->unp_mtx, MA_NOTOWNED)
290 static int uipc_connect2(struct socket *, struct socket *);
291 static int uipc_ctloutput(struct socket *, struct sockopt *);
292 static int unp_connect(struct socket *, struct sockaddr *,
294 static int unp_connectat(int, struct socket *, struct sockaddr *,
296 static void unp_connect2(struct socket *so, struct socket *so2, int);
297 static void unp_disconnect(struct unpcb *unp, struct unpcb *unp2);
298 static void unp_dispose(struct socket *so);
299 static void unp_shutdown(struct unpcb *);
300 static void unp_drop(struct unpcb *);
301 static void unp_gc(__unused void *, int);
302 static void unp_scan(struct mbuf *, void (*)(struct filedescent **, int));
303 static void unp_discard(struct file *);
304 static void unp_freerights(struct filedescent **, int);
305 static int unp_internalize(struct mbuf **, struct thread *);
306 static void unp_internalize_fp(struct file *);
307 static int unp_externalize(struct mbuf *, struct mbuf **, int);
308 static int unp_externalize_fp(struct file *);
309 static struct mbuf *unp_addsockcred(struct thread *, struct mbuf *, int);
310 static void unp_process_defers(void * __unused, int);
313 unp_pcb_hold(struct unpcb *unp)
317 old = refcount_acquire(&unp->unp_refcount);
318 KASSERT(old > 0, ("%s: unpcb %p has no references", __func__, unp));
321 static __result_use_check bool
322 unp_pcb_rele(struct unpcb *unp)
326 UNP_PCB_LOCK_ASSERT(unp);
328 if ((ret = refcount_release(&unp->unp_refcount))) {
330 UNP_PCB_LOCK_DESTROY(unp);
331 uma_zfree(unp_zone, unp);
337 unp_pcb_rele_notlast(struct unpcb *unp)
341 ret = refcount_release(&unp->unp_refcount);
342 KASSERT(!ret, ("%s: unpcb %p has no references", __func__, unp));
346 unp_pcb_lock_pair(struct unpcb *unp, struct unpcb *unp2)
348 UNP_PCB_UNLOCK_ASSERT(unp);
349 UNP_PCB_UNLOCK_ASSERT(unp2);
353 } else if ((uintptr_t)unp2 > (uintptr_t)unp) {
363 unp_pcb_unlock_pair(struct unpcb *unp, struct unpcb *unp2)
367 UNP_PCB_UNLOCK(unp2);
371 * Try to lock the connected peer of an already locked socket. In some cases
372 * this requires that we unlock the current socket. The pairbusy counter is
373 * used to block concurrent connection attempts while the lock is dropped. The
374 * caller must be careful to revalidate PCB state.
376 static struct unpcb *
377 unp_pcb_lock_peer(struct unpcb *unp)
381 UNP_PCB_LOCK_ASSERT(unp);
382 unp2 = unp->unp_conn;
385 if (__predict_false(unp == unp2))
388 UNP_PCB_UNLOCK_ASSERT(unp2);
390 if (__predict_true(UNP_PCB_TRYLOCK(unp2)))
392 if ((uintptr_t)unp2 > (uintptr_t)unp) {
402 KASSERT(unp->unp_conn == unp2 || unp->unp_conn == NULL,
403 ("%s: socket %p was reconnected", __func__, unp));
404 if (--unp->unp_pairbusy == 0 && (unp->unp_flags & UNP_WAITING) != 0) {
405 unp->unp_flags &= ~UNP_WAITING;
408 if (unp_pcb_rele(unp2)) {
409 /* unp2 is unlocked. */
412 if (unp->unp_conn == NULL) {
413 UNP_PCB_UNLOCK(unp2);
420 * Definitions of protocols supported in the LOCAL domain.
422 static struct domain localdomain;
423 static struct pr_usrreqs uipc_usrreqs_dgram, uipc_usrreqs_stream;
424 static struct pr_usrreqs uipc_usrreqs_seqpacket;
425 static struct protosw localsw[] = {
427 .pr_type = SOCK_STREAM,
428 .pr_domain = &localdomain,
429 .pr_flags = PR_CONNREQUIRED|PR_WANTRCVD|PR_RIGHTS|
431 .pr_ctloutput = &uipc_ctloutput,
432 .pr_usrreqs = &uipc_usrreqs_stream
435 .pr_type = SOCK_DGRAM,
436 .pr_domain = &localdomain,
437 .pr_flags = PR_ATOMIC|PR_ADDR|PR_RIGHTS|PR_CAPATTACH,
438 .pr_ctloutput = &uipc_ctloutput,
439 .pr_usrreqs = &uipc_usrreqs_dgram
442 .pr_type = SOCK_SEQPACKET,
443 .pr_domain = &localdomain,
446 * XXXRW: For now, PR_ADDR because soreceive will bump into them
447 * due to our use of sbappendaddr. A new sbappend variant is needed
448 * that supports both atomic record writes and control data.
450 .pr_flags = PR_ADDR|PR_ATOMIC|PR_CONNREQUIRED|
451 PR_WANTRCVD|PR_RIGHTS|PR_CAPATTACH,
452 .pr_ctloutput = &uipc_ctloutput,
453 .pr_usrreqs = &uipc_usrreqs_seqpacket,
457 static struct domain localdomain = {
458 .dom_family = AF_LOCAL,
460 .dom_externalize = unp_externalize,
461 .dom_dispose = unp_dispose,
462 .dom_protosw = localsw,
463 .dom_protoswNPROTOSW = &localsw[nitems(localsw)]
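/*
 * Illustrative only: a userspace socket(2) call selects one of the protosw
 * entries above by socket type, e.g. the seqpacket entry is reached with:
 *
 *	int s = socket(PF_LOCAL, SOCK_SEQPACKET, 0);
 */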
468 uipc_abort(struct socket *so)
470 struct unpcb *unp, *unp2;
473 KASSERT(unp != NULL, ("uipc_abort: unp == NULL"));
474 UNP_PCB_UNLOCK_ASSERT(unp);
477 unp2 = unp->unp_conn;
487 uipc_accept(struct socket *so, struct sockaddr **nam)
489 struct unpcb *unp, *unp2;
490 const struct sockaddr *sa;
493 * Pass back name of connected socket, if it was bound and we are
494 * still connected (our peer may have closed already!).
497 KASSERT(unp != NULL, ("uipc_accept: unp == NULL"));
499 *nam = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK);
501 unp2 = unp_pcb_lock_peer(unp);
502 if (unp2 != NULL && unp2->unp_addr != NULL)
503 sa = (struct sockaddr *)unp2->unp_addr;
506 bcopy(sa, *nam, sa->sa_len);
508 unp_pcb_unlock_pair(unp, unp2);
515 uipc_attach(struct socket *so, int proto, struct thread *td)
517 u_long sendspace, recvspace;
522 KASSERT(so->so_pcb == NULL, ("uipc_attach: so_pcb != NULL"));
523 if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
524 switch (so->so_type) {
526 sendspace = unpst_sendspace;
527 recvspace = unpst_recvspace;
531 sendspace = unpdg_maxdgram;
532 recvspace = unpdg_recvspace;
536 sendspace = unpsp_sendspace;
537 recvspace = unpsp_recvspace;
541 panic("uipc_attach");
543 error = soreserve(so, sendspace, recvspace);
547 unp = uma_zalloc(unp_zone, M_NOWAIT | M_ZERO);
550 LIST_INIT(&unp->unp_refs);
551 UNP_PCB_LOCK_INIT(unp);
552 unp->unp_socket = so;
554 refcount_init(&unp->unp_refcount, 1);
556 if ((locked = UNP_LINK_WOWNED()) == false)
559 unp->unp_gencnt = ++unp_gencnt;
560 unp->unp_ino = ++unp_ino;
562 switch (so->so_type) {
564 LIST_INSERT_HEAD(&unp_shead, unp, unp_link);
568 LIST_INSERT_HEAD(&unp_dhead, unp, unp_link);
572 LIST_INSERT_HEAD(&unp_sphead, unp, unp_link);
576 panic("uipc_attach");
586 uipc_bindat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
588 struct sockaddr_un *soun = (struct sockaddr_un *)nam;
598 if (nam->sa_family != AF_UNIX)
599 return (EAFNOSUPPORT);
602 KASSERT(unp != NULL, ("uipc_bind: unp == NULL"));
604 if (soun->sun_len > sizeof(struct sockaddr_un))
606 namelen = soun->sun_len - offsetof(struct sockaddr_un, sun_path);
611 * We don't allow simultaneous bind() calls on a single UNIX domain
612 * socket, so flag in-progress operations, and return an error if an
613 * operation is already in progress.
615 * Historically, we have not allowed a socket to be rebound, so this
616 * also returns an error. Not allowing re-binding simplifies the
617 * implementation and avoids a great many possible failure modes.
620 if (unp->unp_vnode != NULL) {
624 if (unp->unp_flags & UNP_BINDING) {
628 unp->unp_flags |= UNP_BINDING;
631 buf = malloc(namelen + 1, M_TEMP, M_WAITOK);
632 bcopy(soun->sun_path, buf, namelen);
636 NDINIT_ATRIGHTS(&nd, CREATE, NOFOLLOW | LOCKPARENT | SAVENAME | NOCACHE,
637 UIO_SYSSPACE, buf, fd, cap_rights_init_one(&rights, CAP_BINDAT));
638 /* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */
643 if (vp != NULL || vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
654 error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH);
660 vattr.va_type = VSOCK;
661 vattr.va_mode = (ACCESSPERMS & ~td->td_proc->p_pd->pd_cmask);
663 error = mac_vnode_check_create(td->td_ucred, nd.ni_dvp, &nd.ni_cnd,
667 error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
670 VOP_VPUT_PAIR(nd.ni_dvp, NULL, true);
671 vn_finished_write(mp);
672 if (error == ERELOOKUP)
677 ASSERT_VOP_ELOCKED(vp, "uipc_bind");
678 soun = (struct sockaddr_un *)sodupsockaddr(nam, M_WAITOK);
681 VOP_UNP_BIND(vp, unp);
683 unp->unp_addr = soun;
684 unp->unp_flags &= ~UNP_BINDING;
687 VOP_VPUT_PAIR(nd.ni_dvp, &vp, true);
688 vn_finished_write(mp);
694 unp->unp_flags &= ~UNP_BINDING;
701 uipc_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
704 return (uipc_bindat(AT_FDCWD, so, nam, td));
708 uipc_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
712 KASSERT(td == curthread, ("uipc_connect: td != curthread"));
713 error = unp_connect(so, nam, td);
718 uipc_connectat(int fd, struct socket *so, struct sockaddr *nam,
723 KASSERT(td == curthread, ("uipc_connectat: td != curthread"));
724 error = unp_connectat(fd, so, nam, td);
729 uipc_close(struct socket *so)
731 struct unpcb *unp, *unp2;
732 struct vnode *vp = NULL;
736 KASSERT(unp != NULL, ("uipc_close: unp == NULL"));
739 if ((vp = unp->unp_vnode) != NULL) {
740 vplock = mtx_pool_find(mtxpool_sleep, vp);
744 if (vp && unp->unp_vnode == NULL) {
750 unp->unp_vnode = NULL;
752 if ((unp2 = unp_pcb_lock_peer(unp)) != NULL)
753 unp_disconnect(unp, unp2);
763 uipc_connect2(struct socket *so1, struct socket *so2)
765 struct unpcb *unp, *unp2;
767 if (so1->so_type != so2->so_type)
771 KASSERT(unp != NULL, ("uipc_connect2: unp == NULL"));
773 KASSERT(unp2 != NULL, ("uipc_connect2: unp2 == NULL"));
774 unp_pcb_lock_pair(unp, unp2);
775 unp_connect2(so1, so2, PRU_CONNECT2);
776 unp_pcb_unlock_pair(unp, unp2);
782 uipc_detach(struct socket *so)
784 struct unpcb *unp, *unp2;
787 int local_unp_rights;
790 KASSERT(unp != NULL, ("uipc_detach: unp == NULL"));
796 LIST_REMOVE(unp, unp_link);
797 if (unp->unp_gcflag & UNPGC_DEAD)
798 LIST_REMOVE(unp, unp_dead);
799 unp->unp_gencnt = ++unp_gencnt;
803 UNP_PCB_UNLOCK_ASSERT(unp);
805 if ((vp = unp->unp_vnode) != NULL) {
806 vplock = mtx_pool_find(mtxpool_sleep, vp);
810 if (unp->unp_vnode != vp && unp->unp_vnode != NULL) {
816 if ((vp = unp->unp_vnode) != NULL) {
818 unp->unp_vnode = NULL;
820 if ((unp2 = unp_pcb_lock_peer(unp)) != NULL)
821 unp_disconnect(unp, unp2);
826 while (!LIST_EMPTY(&unp->unp_refs)) {
827 struct unpcb *ref = LIST_FIRST(&unp->unp_refs);
830 UNP_REF_LIST_UNLOCK();
833 UNP_PCB_UNLOCK_ASSERT(ref);
837 UNP_REF_LIST_UNLOCK();
840 local_unp_rights = unp_rights;
841 unp->unp_socket->so_pcb = NULL;
842 unp->unp_socket = NULL;
843 free(unp->unp_addr, M_SONAME);
844 unp->unp_addr = NULL;
845 if (!unp_pcb_rele(unp))
851 if (local_unp_rights)
852 taskqueue_enqueue_timeout(taskqueue_thread, &unp_gc_task, -1);
856 uipc_disconnect(struct socket *so)
858 struct unpcb *unp, *unp2;
861 KASSERT(unp != NULL, ("uipc_disconnect: unp == NULL"));
864 if ((unp2 = unp_pcb_lock_peer(unp)) != NULL)
865 unp_disconnect(unp, unp2);
872 uipc_listen(struct socket *so, int backlog, struct thread *td)
877 MPASS(so->so_type != SOCK_DGRAM);
880 * Synchronize with concurrent connection attempts.
885 if (unp->unp_conn != NULL || (unp->unp_flags & UNP_CONNECTING) != 0)
887 else if (unp->unp_vnode == NULL)
888 error = EDESTADDRREQ;
895 error = solisten_proto_check(so);
897 cru2xt(td, &unp->unp_peercred);
898 solisten_proto(so, backlog);
906 uipc_peeraddr(struct socket *so, struct sockaddr **nam)
908 struct unpcb *unp, *unp2;
909 const struct sockaddr *sa;
912 KASSERT(unp != NULL, ("uipc_peeraddr: unp == NULL"));
914 *nam = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK);
917 * XXX: It seems that this test always fails even when the connection is
918 * established, so this else clause is added as a workaround to
919 * return a PF_LOCAL sockaddr.
921 unp2 = unp->unp_conn;
924 if (unp2->unp_addr != NULL)
925 sa = (struct sockaddr *) unp2->unp_addr;
928 bcopy(sa, *nam, sa->sa_len);
929 UNP_PCB_UNLOCK(unp2);
932 bcopy(sa, *nam, sa->sa_len);
939 uipc_rcvd(struct socket *so, int flags)
941 struct unpcb *unp, *unp2;
946 KASSERT(unp != NULL, ("%s: unp == NULL", __func__));
947 KASSERT(so->so_type == SOCK_STREAM || so->so_type == SOCK_SEQPACKET,
948 ("%s: socktype %d", __func__, so->so_type));
951 * Adjust backpressure on the sender and wake up any threads waiting to write.
953 * The unp lock is acquired to maintain the validity of the unp_conn
954 * pointer; no lock on unp2 is required as unp2->unp_socket will be
955 * static as long as we don't permit unp2 to disconnect from unp,
956 * which is prevented by the lock on unp. We cache values from
957 * so_rcv to avoid holding the so_rcv lock over the entire
958 * transaction on the remote so_snd.
960 SOCKBUF_LOCK(&so->so_rcv);
961 mbcnt = so->so_rcv.sb_mbcnt;
962 sbcc = sbavail(&so->so_rcv);
963 SOCKBUF_UNLOCK(&so->so_rcv);
965 * There is a benign race condition at this point. If we're planning to
966 * clear SB_STOP, but uipc_send is called on the connected socket at
967 * this instant, it might add data to the sockbuf and set SB_STOP. Then
968 * we would erroneously clear SB_STOP below, even though the sockbuf is
969 * full. The race is benign because the only ill effect is to allow the
970 * sockbuf to exceed its size limit, and the size limits are not
971 * strictly guaranteed anyway.
974 unp2 = unp->unp_conn;
979 so2 = unp2->unp_socket;
980 SOCKBUF_LOCK(&so2->so_snd);
981 if (sbcc < so2->so_snd.sb_hiwat && mbcnt < so2->so_snd.sb_mbmax)
982 so2->so_snd.sb_flags &= ~SB_STOP;
983 sowwakeup_locked(so2);
989 uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
990 struct mbuf *control, struct thread *td)
992 struct unpcb *unp, *unp2;
998 KASSERT(unp != NULL, ("%s: unp == NULL", __func__));
999 KASSERT(so->so_type == SOCK_STREAM || so->so_type == SOCK_SEQPACKET,
1000 ("%s: socktype %d", __func__, so->so_type));
1003 if (flags & PRUS_OOB) {
1007 if (control != NULL && (error = unp_internalize(&control, td)))
1011 if ((so->so_state & SS_ISCONNECTED) == 0) {
1013 if ((error = unp_connect(so, nam, td)) != 0)
1022 if ((unp2 = unp_pcb_lock_peer(unp)) == NULL) {
1023 UNP_PCB_UNLOCK(unp);
1026 } else if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
1027 unp_pcb_unlock_pair(unp, unp2);
1031 UNP_PCB_UNLOCK(unp);
1032 if ((so2 = unp2->unp_socket) == NULL) {
1033 UNP_PCB_UNLOCK(unp2);
1037 SOCKBUF_LOCK(&so2->so_rcv);
1038 if (unp2->unp_flags & UNP_WANTCRED_MASK) {
1040 * Credentials are passed only once on SOCK_STREAM and
1041 * SOCK_SEQPACKET (LOCAL_CREDS => WANTCRED_ONESHOT), or
1042 * forever (LOCAL_CREDS_PERSISTENT => WANTCRED_ALWAYS).
1044 control = unp_addsockcred(td, control, unp2->unp_flags);
1045 unp2->unp_flags &= ~UNP_WANTCRED_ONESHOT;
1049 * Send to paired receive port and wake up readers. Don't
1050 * check for space available in the receive buffer if we're
1051 * attaching ancillary data; Unix domain sockets only check
1052 * for space in the sending sockbuf, and that check is
1053 * performed one level up the stack. At that level we cannot
1054 * precisely account for the amount of buffer space used
1055 * (e.g., because control messages are not yet internalized).
1057 switch (so->so_type) {
1059 if (control != NULL) {
1060 sbappendcontrol_locked(&so2->so_rcv, m,
1064 sbappend_locked(&so2->so_rcv, m, flags);
1067 case SOCK_SEQPACKET:
1068 if (sbappendaddr_nospacecheck_locked(&so2->so_rcv,
1069 &sun_noname, m, control))
1074 mbcnt = so2->so_rcv.sb_mbcnt;
1075 sbcc = sbavail(&so2->so_rcv);
1077 sorwakeup_locked(so2);
1079 SOCKBUF_UNLOCK(&so2->so_rcv);
1082 * The PCB lock on unp2 protects the SB_STOP flag. Without it,
1083 * it would be possible for uipc_rcvd to be called at this
1084 * point, drain the receiving sockbuf, clear SB_STOP, and then
1085 * we would set SB_STOP below. That could lead to an empty
1086 * sockbuf having SB_STOP set.
1088 SOCKBUF_LOCK(&so->so_snd);
1089 if (sbcc >= so->so_snd.sb_hiwat || mbcnt >= so->so_snd.sb_mbmax)
1090 so->so_snd.sb_flags |= SB_STOP;
1091 SOCKBUF_UNLOCK(&so->so_snd);
1092 UNP_PCB_UNLOCK(unp2);
1096 * PRUS_EOF is equivalent to pru_send followed by pru_shutdown.
1098 if (flags & PRUS_EOF) {
1102 UNP_PCB_UNLOCK(unp);
1104 if (control != NULL && error != 0)
1105 unp_scan(control, unp_freerights);
1108 if (control != NULL)
1111 * In case of PRUS_NOTREADY, uipc_ready() is responsible
1112 * for freeing memory.
1114 if (m != NULL && (flags & PRUS_NOTREADY) == 0)
1120 * PF_UNIX/SOCK_DGRAM send
1123 uipc_sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
1124 struct mbuf *m, struct mbuf *c, int flags, struct thread *td)
1126 struct unpcb *unp, *unp2;
1127 const struct sockaddr *from;
1131 MPASS((uio != NULL && m == NULL) || (m != NULL && uio == NULL));
1135 if (__predict_false(flags & MSG_OOB)) {
1140 if (__predict_false(uio->uio_resid > unpdg_maxdgram)) {
1144 m = m_uiotombuf(uio, M_WAITOK, 0, max_hdr, M_PKTHDR);
1145 if (__predict_false(m == NULL)) {
1149 if (c != NULL && (error = unp_internalize(&c, td)))
1152 /* The caller of pru_sosend() with an mbuf is usually a kernel thread. */
1155 if (__predict_false(c != NULL))
1156 panic("%s: control from a kernel thread", __func__);
1158 if (__predict_false(m->m_pkthdr.len > unpdg_maxdgram)) {
1162 /* Condition the foreign mbuf to our standards. */
1164 m_tag_delete_chain(m, NULL);
1165 m->m_pkthdr.rcvif = NULL;
1166 m->m_pkthdr.flowid = 0;
1167 m->m_pkthdr.csum_flags = 0;
1168 m->m_pkthdr.fibnum = 0;
1169 m->m_pkthdr.rsstype = 0;
1172 unp = sotounpcb(so);
1176 * XXXGL: it would be nice to remove so_snd from the equation entirely
1177 * and avoid this lock, which is not only extraneous, but also gets
1178 * released, thus still leaving the possibility of a race. We can easily
1179 * handle the SBS_CANTSENDMORE/SS_ISCONNECTED complement in the unpcb, but it
1180 * is more difficult to invent something to handle so_error.
1182 error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags));
1185 SOCKBUF_LOCK(&so->so_snd);
1186 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
1187 SOCK_SENDBUF_UNLOCK(so);
1191 if (so->so_error != 0) {
1192 error = so->so_error;
1194 SOCKBUF_UNLOCK(&so->so_snd);
1197 if (((so->so_state & SS_ISCONNECTED) == 0) && addr == NULL) {
1198 SOCKBUF_UNLOCK(&so->so_snd);
1199 error = EDESTADDRREQ;
1202 SOCKBUF_UNLOCK(&so->so_snd);
1204 if (addr != NULL && (error = unp_connect(so, addr, td)))
1209 * Because connect() and send() are non-atomic in a sendto() with a
1210 * target address, it's possible that the socket will have disconnected
1211 * before the send() can run. In that case return the slightly
1212 * counter-intuitive but otherwise correct error that the socket is not connected.
1215 unp2 = unp_pcb_lock_peer(unp);
1217 UNP_PCB_UNLOCK(unp);
1222 if (unp2->unp_flags & UNP_WANTCRED_MASK)
1223 c = unp_addsockcred(td, c, unp2->unp_flags);
1224 if (unp->unp_addr != NULL)
1225 from = (struct sockaddr *)unp->unp_addr;
1228 so2 = unp2->unp_socket;
1229 SOCKBUF_LOCK(&so2->so_rcv);
1230 if (sbappendaddr_locked(&so2->so_rcv, from, m, c)) {
1231 sorwakeup_locked(so2);
1234 soroverflow_locked(so2);
1235 error = (so->so_state & SS_NBIO) ? EAGAIN : ENOBUFS;
1239 unp_disconnect(unp, unp2);
1241 unp_pcb_unlock_pair(unp, unp2);
1243 td->td_ru.ru_msgsnd++;
1246 SOCK_IO_SEND_UNLOCK(so);
1249 unp_scan(c, unp_freerights);
1260 * PF_UNIX/SOCK_DGRAM receive with MSG_PEEK
1263 uipc_peek_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio,
1264 struct mbuf **controlp, int *flagsp)
1270 SOCKBUF_UNLOCK(&so->so_rcv);
1272 m = so->so_rcv.sb_mb;
1273 KASSERT(m->m_type == MT_SONAME, ("m->m_type == %d", m->m_type));
1275 *psa = sodupsockaddr(mtod(m, struct sockaddr *), M_WAITOK);
1277 if ((m = m->m_next) == NULL) {
1278 /* XXXRW: Can this happen? */
1279 SOCK_IO_RECV_UNLOCK(so);
1284 * With MSG_PEEK the control data isn't externalized, just copied.
1286 while (m != NULL && m->m_type == MT_CONTROL) {
1287 if (controlp != NULL) {
1288 *controlp = m_copym(m, 0, m->m_len, M_WAITOK);
1289 controlp = &(*controlp)->m_next;
1293 KASSERT(m == NULL || m->m_type == MT_DATA,
1294 ("%s: not MT_DATA mbuf %p", __func__, m));
1295 while (m != NULL && uio->uio_resid > 0) {
1296 len = uio->uio_resid;
1299 error = uiomove(mtod(m, char *), (int)len, uio);
1301 SOCK_IO_RECV_UNLOCK(so);
1304 if (len == m->m_len)
1307 SOCK_IO_RECV_UNLOCK(so);
1309 if (m != NULL && flagsp != NULL)
1310 *flagsp |= MSG_TRUNC;
1316 * PF_UNIX/SOCK_DGRAM receive
1319 uipc_soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio,
1320 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
1322 struct mbuf *m, *m2;
1331 if (controlp != NULL)
1334 flags = flagsp != NULL ? *flagsp : 0;
1335 nonblock = (so->so_state & SS_NBIO) ||
1336 (flags & (MSG_DONTWAIT | MSG_NBIO));
1338 error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
1339 if (__predict_false(error))
1343 * Loop blocking while waiting for a datagram.
1345 SOCK_RECVBUF_LOCK(so);
1346 while ((m = so->so_rcv.sb_mb) == NULL) {
1347 KASSERT(sbavail(&so->so_rcv) == 0,
1348 ("soreceive_dgram: sb_mb NULL but sbavail %u",
1349 sbavail(&so->so_rcv)));
1351 error = so->so_error;
1353 SOCKBUF_UNLOCK(&so->so_rcv);
1354 SOCK_IO_RECV_UNLOCK(so);
1357 if (so->so_rcv.sb_state & SBS_CANTRCVMORE ||
1358 uio->uio_resid == 0) {
1359 SOCKBUF_UNLOCK(&so->so_rcv);
1360 SOCK_IO_RECV_UNLOCK(so);
1364 SOCKBUF_UNLOCK(&so->so_rcv);
1365 SOCK_IO_RECV_UNLOCK(so);
1366 return (EWOULDBLOCK);
1368 SBLASTRECORDCHK(&so->so_rcv);
1369 SBLASTMBUFCHK(&so->so_rcv);
1370 error = sbwait(so, SO_RCV);
1372 SOCKBUF_UNLOCK(&so->so_rcv);
1373 SOCK_IO_RECV_UNLOCK(so);
1377 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1380 uio->uio_td->td_ru.ru_msgrcv++;
1381 SBLASTRECORDCHK(&so->so_rcv);
1382 SBLASTMBUFCHK(&so->so_rcv);
1384 if (__predict_false(flags & MSG_PEEK))
1385 return (uipc_peek_dgram(so, psa, uio, controlp, flagsp));
1388 * Advance the sb_mb, update sb_lastrecord if necessary.
1390 so->so_rcv.sb_mb = m->m_nextpkt;
1391 if (so->so_rcv.sb_mb == NULL) {
1392 KASSERT(so->so_rcv.sb_lastrecord == m,
1393 ("%s: lastrecord != m", __func__));
1394 so->so_rcv.sb_lastrecord = NULL;
1395 so->so_rcv.sb_mbtail = NULL;
1396 } else if (so->so_rcv.sb_mb->m_nextpkt == NULL)
1397 so->so_rcv.sb_lastrecord = so->so_rcv.sb_mb;
1400 * Walk 'm's chain and free that many bytes from the socket buffer.
1402 for (m2 = m; m2 != NULL; m2 = m2->m_next)
1403 sbfree(&so->so_rcv, m2);
1406 * Do a few last checks before we let go of the lock.
1408 SBLASTRECORDCHK(&so->so_rcv);
1409 SBLASTMBUFCHK(&so->so_rcv);
1410 SOCKBUF_UNLOCK(&so->so_rcv);
1412 KASSERT(m->m_type == MT_SONAME, ("m->m_type == %d", m->m_type));
1414 *psa = sodupsockaddr(mtod(m, struct sockaddr *), M_WAITOK);
1418 /* XXXRW: Can this happen? */
1419 SOCK_IO_RECV_UNLOCK(so);
1424 * The packet to copyout() is now in 'm' and it is disconnected from the socket buffer.
1427 * Process one or more MT_CONTROL mbufs present before any data mbufs
1428 * in the first mbuf chain on the socket buffer. We call into
1429 * unp_externalize() to perform externalization (or freeing if
1430 * controlp == NULL). In some cases there can be only MT_CONTROL mbufs
1431 * without MT_DATA mbufs.
1433 while (m != NULL && m->m_type == MT_CONTROL) {
1436 /* XXXGL: unp_externalize() is also the dom_externalize() KBI and
1437 * it frees the whole chain, so we must disconnect the mbuf.
1439 cm = m; m = m->m_next; cm->m_next = NULL;
1440 error = unp_externalize(cm, controlp, flags);
1442 SOCK_IO_RECV_UNLOCK(so);
1443 unp_scan(m, unp_freerights);
1447 if (controlp != NULL) {
1448 while (*controlp != NULL)
1449 controlp = &(*controlp)->m_next;
1452 KASSERT(m == NULL || m->m_type == MT_DATA,
1453 ("%s: not MT_DATA mbuf %p", __func__, m));
1454 while (m != NULL && uio->uio_resid > 0) {
1455 len = uio->uio_resid;
1458 error = uiomove(mtod(m, char *), (int)len, uio);
1460 SOCK_IO_RECV_UNLOCK(so);
1464 if (len == m->m_len)
1471 SOCK_IO_RECV_UNLOCK(so);
1484 uipc_ready_scan(struct socket *so, struct mbuf *m, int count, int *errorp)
1486 struct mbuf *mb, *n;
1490 if (SOLISTENING(so)) {
1497 if (sb->sb_fnrdy != NULL) {
1498 for (mb = sb->sb_mb, n = mb->m_nextpkt; mb != NULL;) {
1500 *errorp = sbready(sb, m, count);
1513 return (mb != NULL);
1517 uipc_ready(struct socket *so, struct mbuf *m, int count)
1519 struct unpcb *unp, *unp2;
1523 unp = sotounpcb(so);
1525 KASSERT(so->so_type == SOCK_STREAM,
1526 ("%s: unexpected socket type for %p", __func__, so));
1529 if ((unp2 = unp_pcb_lock_peer(unp)) != NULL) {
1530 UNP_PCB_UNLOCK(unp);
1531 so2 = unp2->unp_socket;
1532 SOCKBUF_LOCK(&so2->so_rcv);
1533 if ((error = sbready(&so2->so_rcv, m, count)) == 0)
1534 sorwakeup_locked(so2);
1536 SOCKBUF_UNLOCK(&so2->so_rcv);
1537 UNP_PCB_UNLOCK(unp2);
1540 UNP_PCB_UNLOCK(unp);
1543 * The receiving socket has been disconnected, but may still be valid.
1544 * In this case, the now-ready mbufs are still present in its socket
1545 * buffer, so perform an exhaustive search before giving up and freeing them.
1549 LIST_FOREACH(unp, &unp_shead, unp_link) {
1550 if (uipc_ready_scan(unp->unp_socket, m, count, &error))
1556 for (i = 0; i < count; i++)
1564 uipc_sense(struct socket *so, struct stat *sb)
1568 unp = sotounpcb(so);
1569 KASSERT(unp != NULL, ("uipc_sense: unp == NULL"));
1571 sb->st_blksize = so->so_snd.sb_hiwat;
1573 sb->st_ino = unp->unp_ino;
1578 uipc_shutdown(struct socket *so)
1582 unp = sotounpcb(so);
1583 KASSERT(unp != NULL, ("uipc_shutdown: unp == NULL"));
1588 UNP_PCB_UNLOCK(unp);
1593 uipc_sockaddr(struct socket *so, struct sockaddr **nam)
1596 const struct sockaddr *sa;
1598 unp = sotounpcb(so);
1599 KASSERT(unp != NULL, ("uipc_sockaddr: unp == NULL"));
1601 *nam = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK);
1603 if (unp->unp_addr != NULL)
1604 sa = (struct sockaddr *) unp->unp_addr;
1607 bcopy(sa, *nam, sa->sa_len);
1608 UNP_PCB_UNLOCK(unp);
1612 static struct pr_usrreqs uipc_usrreqs_dgram = {
1613 .pru_abort = uipc_abort,
1614 .pru_accept = uipc_accept,
1615 .pru_attach = uipc_attach,
1616 .pru_bind = uipc_bind,
1617 .pru_bindat = uipc_bindat,
1618 .pru_connect = uipc_connect,
1619 .pru_connectat = uipc_connectat,
1620 .pru_connect2 = uipc_connect2,
1621 .pru_detach = uipc_detach,
1622 .pru_disconnect = uipc_disconnect,
1623 .pru_peeraddr = uipc_peeraddr,
1624 .pru_sosend = uipc_sosend_dgram,
1625 .pru_sense = uipc_sense,
1626 .pru_shutdown = uipc_shutdown,
1627 .pru_sockaddr = uipc_sockaddr,
1628 .pru_soreceive = uipc_soreceive_dgram,
1629 .pru_close = uipc_close,
1632 static struct pr_usrreqs uipc_usrreqs_seqpacket = {
1633 .pru_abort = uipc_abort,
1634 .pru_accept = uipc_accept,
1635 .pru_attach = uipc_attach,
1636 .pru_bind = uipc_bind,
1637 .pru_bindat = uipc_bindat,
1638 .pru_connect = uipc_connect,
1639 .pru_connectat = uipc_connectat,
1640 .pru_connect2 = uipc_connect2,
1641 .pru_detach = uipc_detach,
1642 .pru_disconnect = uipc_disconnect,
1643 .pru_listen = uipc_listen,
1644 .pru_peeraddr = uipc_peeraddr,
1645 .pru_rcvd = uipc_rcvd,
1646 .pru_send = uipc_send,
1647 .pru_sense = uipc_sense,
1648 .pru_shutdown = uipc_shutdown,
1649 .pru_sockaddr = uipc_sockaddr,
1650 .pru_soreceive = soreceive_generic, /* XXX: or...? */
1651 .pru_close = uipc_close,
1654 static struct pr_usrreqs uipc_usrreqs_stream = {
1655 .pru_abort = uipc_abort,
1656 .pru_accept = uipc_accept,
1657 .pru_attach = uipc_attach,
1658 .pru_bind = uipc_bind,
1659 .pru_bindat = uipc_bindat,
1660 .pru_connect = uipc_connect,
1661 .pru_connectat = uipc_connectat,
1662 .pru_connect2 = uipc_connect2,
1663 .pru_detach = uipc_detach,
1664 .pru_disconnect = uipc_disconnect,
1665 .pru_listen = uipc_listen,
1666 .pru_peeraddr = uipc_peeraddr,
1667 .pru_rcvd = uipc_rcvd,
1668 .pru_send = uipc_send,
1669 .pru_ready = uipc_ready,
1670 .pru_sense = uipc_sense,
1671 .pru_shutdown = uipc_shutdown,
1672 .pru_sockaddr = uipc_sockaddr,
1673 .pru_soreceive = soreceive_generic,
1674 .pru_close = uipc_close,
1678 uipc_ctloutput(struct socket *so, struct sockopt *sopt)
1684 if (sopt->sopt_level != SOL_LOCAL)
1687 unp = sotounpcb(so);
1688 KASSERT(unp != NULL, ("uipc_ctloutput: unp == NULL"));
1690 switch (sopt->sopt_dir) {
1692 switch (sopt->sopt_name) {
1693 case LOCAL_PEERCRED:
1695 if (unp->unp_flags & UNP_HAVEPC)
1696 xu = unp->unp_peercred;
1698 if (so->so_type == SOCK_STREAM)
1703 UNP_PCB_UNLOCK(unp);
1705 error = sooptcopyout(sopt, &xu, sizeof(xu));
1709 /* Unlocked read. */
1710 optval = unp->unp_flags & UNP_WANTCRED_ONESHOT ? 1 : 0;
1711 error = sooptcopyout(sopt, &optval, sizeof(optval));
1714 case LOCAL_CREDS_PERSISTENT:
1715 /* Unlocked read. */
1716 optval = unp->unp_flags & UNP_WANTCRED_ALWAYS ? 1 : 0;
1717 error = sooptcopyout(sopt, &optval, sizeof(optval));
1720 case LOCAL_CONNWAIT:
1721 /* Unlocked read. */
1722 optval = unp->unp_flags & UNP_CONNWAIT ? 1 : 0;
1723 error = sooptcopyout(sopt, &optval, sizeof(optval));
1733 switch (sopt->sopt_name) {
1735 case LOCAL_CREDS_PERSISTENT:
1736 case LOCAL_CONNWAIT:
1737 error = sooptcopyin(sopt, &optval, sizeof(optval),
1742 #define OPTSET(bit, exclusive) do { \
1743 UNP_PCB_LOCK(unp); \
1745 if ((unp->unp_flags & (exclusive)) != 0) { \
1746 UNP_PCB_UNLOCK(unp); \
1750 unp->unp_flags |= (bit); \
1752 unp->unp_flags &= ~(bit); \
1753 UNP_PCB_UNLOCK(unp); \
1756 switch (sopt->sopt_name) {
1758 OPTSET(UNP_WANTCRED_ONESHOT, UNP_WANTCRED_ALWAYS);
1761 case LOCAL_CREDS_PERSISTENT:
1762 OPTSET(UNP_WANTCRED_ALWAYS, UNP_WANTCRED_ONESHOT);
1765 case LOCAL_CONNWAIT:
1766 OPTSET(UNP_CONNWAIT, 0);
1775 error = ENOPROTOOPT;
1788 unp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
1791 return (unp_connectat(AT_FDCWD, so, nam, td));
1795 unp_connectat(int fd, struct socket *so, struct sockaddr *nam,
1799 struct sockaddr_un *soun;
1802 struct unpcb *unp, *unp2, *unp3;
1803 struct nameidata nd;
1804 char buf[SOCK_MAXADDRLEN];
1805 struct sockaddr *sa;
1806 cap_rights_t rights;
1810 if (nam->sa_family != AF_UNIX)
1811 return (EAFNOSUPPORT);
1812 if (nam->sa_len > sizeof(struct sockaddr_un))
1814 len = nam->sa_len - offsetof(struct sockaddr_un, sun_path);
1817 soun = (struct sockaddr_un *)nam;
1818 bcopy(soun->sun_path, buf, len);
1822 unp = sotounpcb(so);
1826 * Wait for connection state to stabilize. If a connection
1827 * already exists, give up. For datagram sockets, which permit
1828 * multiple consecutive connect(2) calls, upper layers are
1829 * responsible for disconnecting in advance of a subsequent
1830 * connect(2), but this is not synchronized with PCB connection state.
1833 * Also make sure that no threads are currently attempting to
1834 * lock the peer socket, to ensure that unp_conn cannot
1835 * transition between two valid sockets while locks are dropped.
1837 if (SOLISTENING(so))
1839 else if (unp->unp_conn != NULL)
1841 else if ((unp->unp_flags & UNP_CONNECTING) != 0) {
1845 UNP_PCB_UNLOCK(unp);
1848 if (unp->unp_pairbusy > 0) {
1849 unp->unp_flags |= UNP_WAITING;
1850 mtx_sleep(unp, UNP_PCB_LOCKPTR(unp), 0, "unpeer", 0);
1855 unp->unp_flags |= UNP_CONNECTING;
1856 UNP_PCB_UNLOCK(unp);
1858 connreq = (so->so_proto->pr_flags & PR_CONNREQUIRED) != 0;
1860 sa = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK);
1863 NDINIT_ATRIGHTS(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF,
1864 UIO_SYSSPACE, buf, fd, cap_rights_init_one(&rights, CAP_CONNECTAT));
1870 ASSERT_VOP_LOCKED(vp, "unp_connect");
1871 NDFREE_NOTHING(&nd);
1875 if (vp->v_type != VSOCK) {
1880 error = mac_vnode_check_open(td->td_ucred, vp, VWRITE | VREAD);
1884 error = VOP_ACCESS(vp, VWRITE, td->td_ucred, td);
1888 unp = sotounpcb(so);
1889 KASSERT(unp != NULL, ("unp_connect: unp == NULL"));
1891 vplock = mtx_pool_find(mtxpool_sleep, vp);
1893 VOP_UNP_CONNECT(vp, &unp2);
1895 error = ECONNREFUSED;
1898 so2 = unp2->unp_socket;
1899 if (so->so_type != so2->so_type) {
1904 if (SOLISTENING(so2)) {
1905 CURVNET_SET(so2->so_vnet);
1906 so2 = sonewconn(so2, 0);
1911 error = ECONNREFUSED;
1914 unp3 = sotounpcb(so2);
1915 unp_pcb_lock_pair(unp2, unp3);
1916 if (unp2->unp_addr != NULL) {
1917 bcopy(unp2->unp_addr, sa, unp2->unp_addr->sun_len);
1918 unp3->unp_addr = (struct sockaddr_un *) sa;
1922 unp_copy_peercred(td, unp3, unp, unp2);
1924 UNP_PCB_UNLOCK(unp2);
1928 * It is safe to block on the PCB lock here since unp2 is
1929 * nascent and cannot be connected to any other sockets.
1933 mac_socketpeer_set_from_socket(so, so2);
1934 mac_socketpeer_set_from_socket(so2, so);
1937 unp_pcb_lock_pair(unp, unp2);
1939 KASSERT(unp2 != NULL && so2 != NULL && unp2->unp_socket == so2 &&
1940 sotounpcb(so2) == unp2,
1941 ("%s: unp2 %p so2 %p", __func__, unp2, so2));
1942 unp_connect2(so, so2, PRU_CONNECT);
1943 KASSERT((unp->unp_flags & UNP_CONNECTING) != 0,
1944 ("%s: unp %p has UNP_CONNECTING clear", __func__, unp));
1945 unp->unp_flags &= ~UNP_CONNECTING;
1946 unp_pcb_unlock_pair(unp, unp2);
1954 if (__predict_false(error)) {
1956 KASSERT((unp->unp_flags & UNP_CONNECTING) != 0,
1957 ("%s: unp %p has UNP_CONNECTING clear", __func__, unp));
1958 unp->unp_flags &= ~UNP_CONNECTING;
1959 UNP_PCB_UNLOCK(unp);
1965 * Set socket peer credentials at connection time.
1967 * The client's PCB credentials are copied from its process structure. The
1968 * server's PCB credentials are copied from the socket on which it called
1969 * listen(2). uipc_listen cached that process's credentials at the time.
1972 unp_copy_peercred(struct thread *td, struct unpcb *client_unp,
1973 struct unpcb *server_unp, struct unpcb *listen_unp)
1975 cru2xt(td, &client_unp->unp_peercred);
1976 client_unp->unp_flags |= UNP_HAVEPC;
1978 memcpy(&server_unp->unp_peercred, &listen_unp->unp_peercred,
1979 sizeof(server_unp->unp_peercred));
1980 server_unp->unp_flags |= UNP_HAVEPC;
1981 client_unp->unp_flags |= (listen_unp->unp_flags & UNP_WANTCRED_MASK);
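/*
 * Illustrative only: the credentials cached here are what a userspace peer
 * later retrieves with the LOCAL_PEERCRED socket option handled in
 * uipc_ctloutput() above.  A minimal sketch for a connected SOCK_STREAM
 * descriptor s:
 *
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *	#include <sys/ucred.h>
 *
 *	struct xucred xuc;
 *	socklen_t len = sizeof(xuc);
 *	uid_t peer_uid = (uid_t)-1;
 *
 *	if (getsockopt(s, SOL_LOCAL, LOCAL_PEERCRED, &xuc, &len) == 0 &&
 *	    xuc.cr_version == XUCRED_VERSION)
 *		peer_uid = xuc.cr_uid;
 */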
1985 unp_connect2(struct socket *so, struct socket *so2, int req)
1990 MPASS(so2->so_type == so->so_type);
1991 unp = sotounpcb(so);
1992 KASSERT(unp != NULL, ("unp_connect2: unp == NULL"));
1993 unp2 = sotounpcb(so2);
1994 KASSERT(unp2 != NULL, ("unp_connect2: unp2 == NULL"));
1996 UNP_PCB_LOCK_ASSERT(unp);
1997 UNP_PCB_LOCK_ASSERT(unp2);
1998 KASSERT(unp->unp_conn == NULL,
1999 ("%s: socket %p is already connected", __func__, unp));
2001 unp->unp_conn = unp2;
2004 switch (so->so_type) {
2006 UNP_REF_LIST_LOCK();
2007 LIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_reflink);
2008 UNP_REF_LIST_UNLOCK();
2013 case SOCK_SEQPACKET:
2014 KASSERT(unp2->unp_conn == NULL,
2015 ("%s: socket %p is already connected", __func__, unp2));
2016 unp2->unp_conn = unp;
2017 if (req == PRU_CONNECT &&
2018 ((unp->unp_flags | unp2->unp_flags) & UNP_CONNWAIT))
2026 panic("unp_connect2");
2031 unp_disconnect(struct unpcb *unp, struct unpcb *unp2)
2033 struct socket *so, *so2;
2035 struct unpcb *unptmp;
2038 UNP_PCB_LOCK_ASSERT(unp);
2039 UNP_PCB_LOCK_ASSERT(unp2);
2040 KASSERT(unp->unp_conn == unp2,
2041 ("%s: unpcb %p is not connected to %p", __func__, unp, unp2));
2043 unp->unp_conn = NULL;
2044 so = unp->unp_socket;
2045 so2 = unp2->unp_socket;
2046 switch (unp->unp_socket->so_type) {
2048 UNP_REF_LIST_LOCK();
2050 LIST_FOREACH(unptmp, &unp2->unp_refs, unp_reflink) {
2054 KASSERT(unptmp != NULL,
2055 ("%s: %p not found in reflist of %p", __func__, unp, unp2));
2057 LIST_REMOVE(unp, unp_reflink);
2058 UNP_REF_LIST_UNLOCK();
2061 so->so_state &= ~SS_ISCONNECTED;
2067 case SOCK_SEQPACKET:
2069 soisdisconnected(so);
2070 MPASS(unp2->unp_conn == unp);
2071 unp2->unp_conn = NULL;
2073 soisdisconnected(so2);
2078 unp_pcb_rele_notlast(unp);
2079 if (!unp_pcb_rele(unp))
2080 UNP_PCB_UNLOCK(unp);
2082 if (!unp_pcb_rele(unp))
2083 UNP_PCB_UNLOCK(unp);
2084 if (!unp_pcb_rele(unp2))
2085 UNP_PCB_UNLOCK(unp2);
2090 * unp_pcblist() walks the global list of struct unpcb's to generate a
2091 * pointer list, bumping the refcount on each unpcb. It then copies them out
2092 * sequentially, validating the generation number on each to see if it has
2093 * been detached. All of this is necessary because copyout() may sleep on disk I/O.
2097 unp_pcblist(SYSCTL_HANDLER_ARGS)
2099 struct unpcb *unp, **unp_list;
2101 struct xunpgen *xug;
2102 struct unp_head *head;
2107 switch ((intptr_t)arg1) {
2116 case SOCK_SEQPACKET:
2121 panic("unp_pcblist: arg1 %d", (int)(intptr_t)arg1);
2125 * The process of preparing the PCB list is too time-consuming and
2126 * resource-intensive to repeat twice on every request.
2128 if (req->oldptr == NULL) {
2130 req->oldidx = 2 * (sizeof *xug)
2131 + (n + n/8) * sizeof(struct xunpcb);
2135 if (req->newptr != NULL)
2139 * OK, now we're committed to doing something.
2141 xug = malloc(sizeof(*xug), M_TEMP, M_WAITOK | M_ZERO);
2143 gencnt = unp_gencnt;
2147 xug->xug_len = sizeof *xug;
2149 xug->xug_gen = gencnt;
2150 xug->xug_sogen = so_gencnt;
2151 error = SYSCTL_OUT(req, xug, sizeof *xug);
2157 unp_list = malloc(n * sizeof *unp_list, M_TEMP, M_WAITOK);
2160 for (unp = LIST_FIRST(head), i = 0; unp && i < n;
2161 unp = LIST_NEXT(unp, unp_link)) {
2163 if (unp->unp_gencnt <= gencnt) {
2164 if (cr_cansee(req->td->td_ucred,
2165 unp->unp_socket->so_cred)) {
2166 UNP_PCB_UNLOCK(unp);
2169 unp_list[i++] = unp;
2172 UNP_PCB_UNLOCK(unp);
2175 n = i; /* In case we lost some during malloc. */
2178 xu = malloc(sizeof(*xu), M_TEMP, M_WAITOK | M_ZERO);
2179 for (i = 0; i < n; i++) {
2182 if (unp_pcb_rele(unp))
2185 if (unp->unp_gencnt <= gencnt) {
2186 xu->xu_len = sizeof *xu;
2187 xu->xu_unpp = (uintptr_t)unp;
2189 * XXX - need more locking here to protect against
2190 * connect/disconnect races for SMP.
2192 if (unp->unp_addr != NULL)
2193 bcopy(unp->unp_addr, &xu->xu_addr,
2194 unp->unp_addr->sun_len);
2196 bzero(&xu->xu_addr, sizeof(xu->xu_addr));
2197 if (unp->unp_conn != NULL &&
2198 unp->unp_conn->unp_addr != NULL)
2199 bcopy(unp->unp_conn->unp_addr,
2201 unp->unp_conn->unp_addr->sun_len);
2203 bzero(&xu->xu_caddr, sizeof(xu->xu_caddr));
2204 xu->unp_vnode = (uintptr_t)unp->unp_vnode;
2205 xu->unp_conn = (uintptr_t)unp->unp_conn;
2206 xu->xu_firstref = (uintptr_t)LIST_FIRST(&unp->unp_refs);
2207 xu->xu_nextref = (uintptr_t)LIST_NEXT(unp, unp_reflink);
2208 xu->unp_gencnt = unp->unp_gencnt;
2209 sotoxsocket(unp->unp_socket, &xu->xu_socket);
2210 UNP_PCB_UNLOCK(unp);
2211 error = SYSCTL_OUT(req, xu, sizeof *xu);
2213 UNP_PCB_UNLOCK(unp);
2219 * Give the user an updated idea of our state. If the
2220 * generation differs from what we told her before, she knows
2221 * that something happened while we were processing this
2222 * request, and it might be necessary to retry.
2224 xug->xug_gen = unp_gencnt;
2225 xug->xug_sogen = so_gencnt;
2226 xug->xug_count = unp_count;
2227 error = SYSCTL_OUT(req, xug, sizeof *xug);
2229 free(unp_list, M_TEMP);
2234 SYSCTL_PROC(_net_local_dgram, OID_AUTO, pcblist,
2235 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE,
2236 (void *)(intptr_t)SOCK_DGRAM, 0, unp_pcblist, "S,xunpcb",
2237 "List of active local datagram sockets");
2238 SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist,
2239 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE,
2240 (void *)(intptr_t)SOCK_STREAM, 0, unp_pcblist, "S,xunpcb",
2241 "List of active local stream sockets");
2242 SYSCTL_PROC(_net_local_seqpacket, OID_AUTO, pcblist,
2243 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE,
2244 (void *)(intptr_t)SOCK_SEQPACKET, 0, unp_pcblist, "S,xunpcb",
2245 "List of active local seqpacket sockets");
2248 unp_shutdown(struct unpcb *unp)
2253 UNP_PCB_LOCK_ASSERT(unp);
2255 unp2 = unp->unp_conn;
2256 if ((unp->unp_socket->so_type == SOCK_STREAM ||
2257 (unp->unp_socket->so_type == SOCK_SEQPACKET)) && unp2 != NULL) {
2258 so = unp2->unp_socket;
2265 unp_drop(struct unpcb *unp)
2271 * Regardless of whether the socket's peer dropped the connection
2272 * with this socket by aborting or disconnecting, POSIX requires
2273 * that ECONNRESET is returned.
2277 so = unp->unp_socket;
2279 so->so_error = ECONNRESET;
2280 if ((unp2 = unp_pcb_lock_peer(unp)) != NULL) {
2281 /* Last reference dropped in unp_disconnect(). */
2282 unp_pcb_rele_notlast(unp);
2283 unp_disconnect(unp, unp2);
2284 } else if (!unp_pcb_rele(unp)) {
2285 UNP_PCB_UNLOCK(unp);
2290 unp_freerights(struct filedescent **fdep, int fdcount)
2295 KASSERT(fdcount > 0, ("%s: fdcount %d", __func__, fdcount));
2297 for (i = 0; i < fdcount; i++) {
2298 fp = fdep[i]->fde_file;
2299 filecaps_free(&fdep[i]->fde_caps);
2302 free(fdep[0], M_FILECAPS);
2306 unp_externalize(struct mbuf *control, struct mbuf **controlp, int flags)
2308 struct thread *td = curthread; /* XXX */
2309 struct cmsghdr *cm = mtod(control, struct cmsghdr *);
2312 struct filedesc *fdesc = td->td_proc->p_fd;
2313 struct filedescent **fdep;
2315 socklen_t clen = control->m_len, datalen;
2319 UNP_LINK_UNLOCK_ASSERT();
2322 if (controlp != NULL) /* controlp == NULL => free control messages */
2324 while (cm != NULL) {
2325 MPASS(clen >= sizeof(*cm) && clen >= cm->cmsg_len);
2327 data = CMSG_DATA(cm);
2328 datalen = (caddr_t)cm + cm->cmsg_len - (caddr_t)data;
2329 if (cm->cmsg_level == SOL_SOCKET
2330 && cm->cmsg_type == SCM_RIGHTS) {
2331 newfds = datalen / sizeof(*fdep);
2336 /* If we're not outputting the descriptors, free them. */
2337 if (error || controlp == NULL) {
2338 unp_freerights(fdep, newfds);
2341 FILEDESC_XLOCK(fdesc);
2344 * Now change each pointer to an fd in the global
2345 * table to an integer that is the index to the local
2346 * fd table entry that we set up to point to the
2347 * global one we are transferring.
2349 newlen = newfds * sizeof(int);
2350 *controlp = sbcreatecontrol(NULL, newlen,
2351 SCM_RIGHTS, SOL_SOCKET, M_WAITOK);
2354 CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2355 if (fdallocn(td, 0, fdp, newfds) != 0) {
2356 FILEDESC_XUNLOCK(fdesc);
2358 unp_freerights(fdep, newfds);
2363 for (i = 0; i < newfds; i++, fdp++) {
2364 _finstall(fdesc, fdep[i]->fde_file, *fdp,
2365 (flags & MSG_CMSG_CLOEXEC) != 0 ? O_CLOEXEC : 0,
2366 &fdep[i]->fde_caps);
2367 unp_externalize_fp(fdep[i]->fde_file);
2371 * The new type indicates that the mbuf data refers to
2372 * kernel resources that may need to be released before
2373 * the mbuf is freed.
2375 m_chtype(*controlp, MT_EXTCONTROL);
2376 FILEDESC_XUNLOCK(fdesc);
2377 free(fdep[0], M_FILECAPS);
2379 /* We can just copy anything else across. */
2380 if (error || controlp == NULL)
2382 *controlp = sbcreatecontrol(NULL, datalen,
2383 cm->cmsg_type, cm->cmsg_level, M_WAITOK);
2385 CMSG_DATA(mtod(*controlp, struct cmsghdr *)),
2388 controlp = &(*controlp)->m_next;
2391 if (CMSG_SPACE(datalen) < clen) {
2392 clen -= CMSG_SPACE(datalen);
2393 cm = (struct cmsghdr *)
2394 ((caddr_t)cm + CMSG_SPACE(datalen));
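/*
 * Illustrative only: the descriptors installed above arrive in userspace as
 * an SCM_RIGHTS control message on recvmsg(2).  A minimal sketch of the
 * receive side for a single descriptor (the helper name recv_fd() is made
 * up for the example); passing MSG_CMSG_CLOEXEC to recvmsg() makes the
 * installed descriptor close-on-exec, as handled above:
 *
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *	#include <string.h>
 *
 *	static int
 *	recv_fd(int sock)
 *	{
 *		struct msghdr msg;
 *		struct iovec iov;
 *		struct cmsghdr *cmsg;
 *		union {
 *			struct cmsghdr hdr;
 *			char buf[CMSG_SPACE(sizeof(int))];
 *		} cmsgbuf;
 *		char c;
 *		int fd = -1;
 *
 *		memset(&msg, 0, sizeof(msg));
 *		iov.iov_base = &c;
 *		iov.iov_len = sizeof(c);
 *		msg.msg_iov = &iov;
 *		msg.msg_iovlen = 1;
 *		msg.msg_control = cmsgbuf.buf;
 *		msg.msg_controllen = sizeof(cmsgbuf.buf);
 *		if (recvmsg(sock, &msg, 0) == -1)
 *			return (-1);
 *		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
 *		    cmsg = CMSG_NXTHDR(&msg, cmsg))
 *			if (cmsg->cmsg_level == SOL_SOCKET &&
 *			    cmsg->cmsg_type == SCM_RIGHTS)
 *				memcpy(&fd, CMSG_DATA(cmsg), sizeof(int));
 *		return (fd);
 *	}
 */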
2406 unp_zone_change(void *tag)
2409 uma_zone_set_max(unp_zone, maxsockets);
2414 unp_zdtor(void *mem, int size __unused, void *arg __unused)
2420 KASSERT(LIST_EMPTY(&unp->unp_refs),
2421 ("%s: unpcb %p has lingering refs", __func__, unp));
2422 KASSERT(unp->unp_socket == NULL,
2423 ("%s: unpcb %p has socket backpointer", __func__, unp));
2424 KASSERT(unp->unp_vnode == NULL,
2425 ("%s: unpcb %p has vnode references", __func__, unp));
2426 KASSERT(unp->unp_conn == NULL,
2427 ("%s: unpcb %p is still connected", __func__, unp));
2428 KASSERT(unp->unp_addr == NULL,
2429 ("%s: unpcb %p has leaked addr", __func__, unp));
2434 unp_init(void *arg __unused)
2443 unp_zone = uma_zcreate("unpcb", sizeof(struct unpcb), NULL, dtor,
2444 NULL, NULL, UMA_ALIGN_CACHE, 0);
2445 uma_zone_set_max(unp_zone, maxsockets);
2446 uma_zone_set_warning(unp_zone, "kern.ipc.maxsockets limit reached");
2447 EVENTHANDLER_REGISTER(maxsockets_change, unp_zone_change,
2448 NULL, EVENTHANDLER_PRI_ANY);
2449 LIST_INIT(&unp_dhead);
2450 LIST_INIT(&unp_shead);
2451 LIST_INIT(&unp_sphead);
2452 SLIST_INIT(&unp_defers);
2453 TIMEOUT_TASK_INIT(taskqueue_thread, &unp_gc_task, 0, unp_gc, NULL);
2454 TASK_INIT(&unp_defer_task, 0, unp_process_defers, NULL);
2455 UNP_LINK_LOCK_INIT();
2456 UNP_DEFERRED_LOCK_INIT();
2458 SYSINIT(unp_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_SECOND, unp_init, NULL);
2461 unp_internalize_cleanup_rights(struct mbuf *control)
2468 for (m = control; m != NULL; m = m->m_next) {
2469 cp = mtod(m, struct cmsghdr *);
2470 if (cp->cmsg_level != SOL_SOCKET ||
2471 cp->cmsg_type != SCM_RIGHTS)
2473 data = CMSG_DATA(cp);
2474 datalen = (caddr_t)cp + cp->cmsg_len - (caddr_t)data;
2475 unp_freerights(data, datalen / sizeof(struct filedesc *));
2480 unp_internalize(struct mbuf **controlp, struct thread *td)
2482 struct mbuf *control, **initial_controlp;
2484 struct filedesc *fdesc;
2487 struct cmsgcred *cmcred;
2488 struct filedescent *fde, **fdep, *fdev;
2491 struct timespec *ts;
2493 socklen_t clen, datalen;
2494 int i, j, error, *fdp, oldfds;
2497 UNP_LINK_UNLOCK_ASSERT();
2502 control = *controlp;
2504 initial_controlp = controlp;
2505 for (clen = control->m_len, cm = mtod(control, struct cmsghdr *),
2506 data = CMSG_DATA(cm);
2508 clen >= sizeof(*cm) && cm->cmsg_level == SOL_SOCKET &&
2509 clen >= cm->cmsg_len && cm->cmsg_len >= sizeof(*cm) &&
2510 (char *)cm + cm->cmsg_len >= (char *)data;
2512 clen -= min(CMSG_SPACE(datalen), clen),
2513 cm = (struct cmsghdr *) ((char *)cm + CMSG_SPACE(datalen)),
2514 data = CMSG_DATA(cm)) {
2515 datalen = (char *)cm + cm->cmsg_len - (char *)data;
2516 switch (cm->cmsg_type) {
2518 *controlp = sbcreatecontrol(NULL, sizeof(*cmcred),
2519 SCM_CREDS, SOL_SOCKET, M_WAITOK);
2520 cmcred = (struct cmsgcred *)
2521 CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2522 cmcred->cmcred_pid = p->p_pid;
2523 cmcred->cmcred_uid = td->td_ucred->cr_ruid;
2524 cmcred->cmcred_gid = td->td_ucred->cr_rgid;
2525 cmcred->cmcred_euid = td->td_ucred->cr_uid;
2526 cmcred->cmcred_ngroups = MIN(td->td_ucred->cr_ngroups,
2528 for (i = 0; i < cmcred->cmcred_ngroups; i++)
2529 cmcred->cmcred_groups[i] =
2530 td->td_ucred->cr_groups[i];
2534 oldfds = datalen / sizeof (int);
2537 /* On some machines a pointer is bigger than an int, so we
2538 * need to check whether the data fits into a
2539 * single mbuf. We could allocate several mbufs, and
2540 * unp_externalize() should even properly handle that.
2541 * But it is not worth complicating the code for the insane
2542 * scenario of passing over 200 file descriptors at once.
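/*
 * Illustrative arithmetic (added note, not in the original source):
 * assuming MCLBYTES is 2048, pointers are 8 bytes, and CMSG_SPACE() adds a
 * 16-byte aligned header, the single-cluster limit enforced below works out
 * to roughly (2048 - 16) / 8 = 254 descriptors per control message, which
 * is where the "over 200" figure above comes from.
 */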
2545 newlen = oldfds * sizeof(fdep[0]);
2546 if (CMSG_SPACE(newlen) > MCLBYTES) {
2551 * Check that all the FDs passed in refer to legal
2552 * files. If not, reject the entire operation.
2555 FILEDESC_SLOCK(fdesc);
2556 for (i = 0; i < oldfds; i++, fdp++) {
2557 fp = fget_noref(fdesc, *fdp);
2559 FILEDESC_SUNLOCK(fdesc);
2563 if (!(fp->f_ops->fo_flags & DFLAG_PASSABLE)) {
2564 FILEDESC_SUNLOCK(fdesc);
2571 * Now replace the integer FDs with pointers to the
2572 * file structure and capability rights.
2574 *controlp = sbcreatecontrol(NULL, newlen,
2575 SCM_RIGHTS, SOL_SOCKET, M_WAITOK);
2577 for (i = 0; i < oldfds; i++, fdp++) {
2578 if (!fhold(fdesc->fd_ofiles[*fdp].fde_file)) {
2580 for (j = 0; j < i; j++, fdp++) {
2581 fdrop(fdesc->fd_ofiles[*fdp].
2584 FILEDESC_SUNLOCK(fdesc);
2590 fdep = (struct filedescent **)
2591 CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2592 fdev = malloc(sizeof(*fdev) * oldfds, M_FILECAPS,
2594 for (i = 0; i < oldfds; i++, fdev++, fdp++) {
2595 fde = &fdesc->fd_ofiles[*fdp];
2597 fdep[i]->fde_file = fde->fde_file;
2598 filecaps_copy(&fde->fde_caps,
2599 &fdep[i]->fde_caps, true);
2600 unp_internalize_fp(fdep[i]->fde_file);
2602 FILEDESC_SUNLOCK(fdesc);
2606 *controlp = sbcreatecontrol(NULL, sizeof(*tv),
2607 SCM_TIMESTAMP, SOL_SOCKET, M_WAITOK);
2608 tv = (struct timeval *)
2609 CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2614 *controlp = sbcreatecontrol(NULL, sizeof(*bt),
2615 SCM_BINTIME, SOL_SOCKET, M_WAITOK);
2616 bt = (struct bintime *)
2617 CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2622 *controlp = sbcreatecontrol(NULL, sizeof(*ts),
2623 SCM_REALTIME, SOL_SOCKET, M_WAITOK);
2624 ts = (struct timespec *)
2625 CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2630 *controlp = sbcreatecontrol(NULL, sizeof(*ts),
2631 SCM_MONOTONIC, SOL_SOCKET, M_WAITOK);
2632 ts = (struct timespec *)
2633 CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2642 controlp = &(*controlp)->m_next;
2648 if (error != 0 && initial_controlp != NULL)
2649 unp_internalize_cleanup_rights(*initial_controlp);
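/*
 * Added illustration (not part of the original source): a minimal userland
 * sketch of the sender side that unp_internalize() services.  The integer
 * descriptors placed in an SCM_RIGHTS control message by sendmsg(2) are
 * what the SCM_RIGHTS case above converts into struct filedescent pointers.
 * The helper name send_fd() is hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

static int
send_fd(int sock, int fd_to_pass)
{
	union {
		struct cmsghdr hdr;
		char buf[CMSG_SPACE(sizeof(int))];
	} cmsgbuf;
	struct msghdr msg;
	struct cmsghdr *cmsg;
	struct iovec iov;
	char byte = 0;

	memset(&msg, 0, sizeof(msg));
	memset(&cmsgbuf, 0, sizeof(cmsgbuf));
	iov.iov_base = &byte;		/* at least one byte of payload */
	iov.iov_len = sizeof(byte);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cmsgbuf.buf;
	msg.msg_controllen = sizeof(cmsgbuf.buf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

	return (sendmsg(sock, &msg, 0) == -1 ? -1 : 0);
}
#endif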
2654 static struct mbuf *
2655 unp_addsockcred(struct thread *td, struct mbuf *control, int mode)
2657 struct mbuf *m, *n, *n_prev;
2658 const struct cmsghdr *cm;
2659 int ngroups, i, cmsgtype;
2662 ngroups = MIN(td->td_ucred->cr_ngroups, CMGROUP_MAX);
2663 if (mode & UNP_WANTCRED_ALWAYS) {
2664 ctrlsz = SOCKCRED2SIZE(ngroups);
2665 cmsgtype = SCM_CREDS2;
2667 ctrlsz = SOCKCREDSIZE(ngroups);
2668 cmsgtype = SCM_CREDS;
2671 m = sbcreatecontrol(NULL, ctrlsz, cmsgtype, SOL_SOCKET, M_NOWAIT);
2675 if (mode & UNP_WANTCRED_ALWAYS) {
2676 struct sockcred2 *sc;
2678 sc = (void *)CMSG_DATA(mtod(m, struct cmsghdr *));
2680 sc->sc_pid = td->td_proc->p_pid;
2681 sc->sc_uid = td->td_ucred->cr_ruid;
2682 sc->sc_euid = td->td_ucred->cr_uid;
2683 sc->sc_gid = td->td_ucred->cr_rgid;
2684 sc->sc_egid = td->td_ucred->cr_gid;
2685 sc->sc_ngroups = ngroups;
2686 for (i = 0; i < sc->sc_ngroups; i++)
2687 sc->sc_groups[i] = td->td_ucred->cr_groups[i];
2689 struct sockcred *sc;
2691 sc = (void *)CMSG_DATA(mtod(m, struct cmsghdr *));
2692 sc->sc_uid = td->td_ucred->cr_ruid;
2693 sc->sc_euid = td->td_ucred->cr_uid;
2694 sc->sc_gid = td->td_ucred->cr_rgid;
2695 sc->sc_egid = td->td_ucred->cr_gid;
2696 sc->sc_ngroups = ngroups;
2697 for (i = 0; i < sc->sc_ngroups; i++)
2698 sc->sc_groups[i] = td->td_ucred->cr_groups[i];
2702 * Unlink SCM_CREDS control messages (struct cmsgcred), since the just
2703 * created SCM_CREDS control message (struct sockcred) has another format.
2706 if (control != NULL && cmsgtype == SCM_CREDS)
2707 for (n = control, n_prev = NULL; n != NULL;) {
2708 cm = mtod(n, struct cmsghdr *);
2709 if (cm->cmsg_level == SOL_SOCKET &&
2710 cm->cmsg_type == SCM_CREDS) {
2712 control = n->m_next;
2714 n_prev->m_next = n->m_next;
2722 /* Prepend it to the head. */
2723 m->m_next = control;
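/*
 * Added illustration (not part of the original source): a userland sketch of
 * how a receiver asks for the credentials that unp_addsockcred() attaches.
 * LOCAL_CREDS requests one-shot SCM_CREDS (struct sockcred) messages, while
 * LOCAL_CREDS_PERSISTENT requests SCM_CREDS2 (struct sockcred2) on every
 * message.  The option and level names come from sys/un.h; the helper
 * itself is hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>

static int
want_peer_creds(int sock)
{
	int on = 1;

	/* After this, each recvmsg(2) should carry an SCM_CREDS2 cmsg. */
	return (setsockopt(sock, SOL_LOCAL, LOCAL_CREDS_PERSISTENT,
	    &on, sizeof(on)));
}
#endif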
2727 static struct unpcb *
2728 fptounp(struct file *fp)
2732 if (fp->f_type != DTYPE_SOCKET)
2734 if ((so = fp->f_data) == NULL)
2736 if (so->so_proto->pr_domain != &localdomain)
2738 return (sotounpcb(so));
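/*
 * Added note: unp_discard() releases a file reference that was carried in a
 * control message.  When the file is itself a UNIX domain socket (in which
 * case unp_externalize_fp() returns non-zero), the close is queued on
 * unp_defers and performed later by unp_process_defers() on the thread
 * taskqueue; this appears intended to avoid closing a socket, and possibly
 * recursing into this code, from a context that may already hold socket or
 * PCB locks.  Otherwise the file is closed directly.
 */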
2742 unp_discard(struct file *fp)
2744 struct unp_defer *dr;
2746 if (unp_externalize_fp(fp)) {
2747 dr = malloc(sizeof(*dr), M_TEMP, M_WAITOK);
2749 UNP_DEFERRED_LOCK();
2750 SLIST_INSERT_HEAD(&unp_defers, dr, ud_link);
2751 UNP_DEFERRED_UNLOCK();
2752 atomic_add_int(&unp_defers_count, 1);
2753 taskqueue_enqueue(taskqueue_thread, &unp_defer_task);
2755 closef_nothread(fp);
2759 unp_process_defers(void *arg __unused, int pending)
2761 struct unp_defer *dr;
2762 SLIST_HEAD(, unp_defer) drl;
2767 UNP_DEFERRED_LOCK();
2768 if (SLIST_FIRST(&unp_defers) == NULL) {
2769 UNP_DEFERRED_UNLOCK();
2772 SLIST_SWAP(&unp_defers, &drl, unp_defer);
2773 UNP_DEFERRED_UNLOCK();
2775 while ((dr = SLIST_FIRST(&drl)) != NULL) {
2776 SLIST_REMOVE_HEAD(&drl, ud_link);
2777 closef_nothread(dr->ud_fp);
2781 atomic_add_int(&unp_defers_count, -count);
2786 unp_internalize_fp(struct file *fp)
2791 if ((unp = fptounp(fp)) != NULL) {
2793 unp->unp_msgcount++;
2800 unp_externalize_fp(struct file *fp)
2806 if ((unp = fptounp(fp)) != NULL) {
2807 unp->unp_msgcount--;
2817 * unp_defer indicates whether additional work has been deferred for a future
2818 * pass through unp_gc(). It is thread local and does not require explicit synchronization.
2821 static int unp_marked;
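/*
 * Added note: unp_remove_dead_ref() and unp_restore_undead_ref() are the two
 * callbacks handed to unp_gc_scan().  The first subtracts, from each
 * UNPGC_DEAD socket's unp_gcrefs, the references that originate in the
 * queues of other candidate sockets, so that afterwards unp_gcrefs counts
 * only externally held references.  The second puts those references back
 * for sockets found to be reachable and bumps unp_marked so the caller
 * knows another pass is needed.
 */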
2824 unp_remove_dead_ref(struct filedescent **fdep, int fdcount)
2831 * This function can only be called from the gc task.
2833 KASSERT(taskqueue_member(taskqueue_thread, curthread) != 0,
2834 ("%s: not on gc callout", __func__));
2835 UNP_LINK_LOCK_ASSERT();
2837 for (i = 0; i < fdcount; i++) {
2838 fp = fdep[i]->fde_file;
2839 if ((unp = fptounp(fp)) == NULL)
2841 if ((unp->unp_gcflag & UNPGC_DEAD) == 0)
2848 unp_restore_undead_ref(struct filedescent **fdep, int fdcount)
2855 * This function can only be called from the gc task.
2857 KASSERT(taskqueue_member(taskqueue_thread, curthread) != 0,
2858 ("%s: not on gc callout", __func__));
2859 UNP_LINK_LOCK_ASSERT();
2861 for (i = 0; i < fdcount; i++) {
2862 fp = fdep[i]->fde_file;
2863 if ((unp = fptounp(fp)) == NULL)
2865 if ((unp->unp_gcflag & UNPGC_DEAD) == 0)
2873 unp_gc_scan(struct unpcb *unp, void (*op)(struct filedescent **, int))
2875 struct socket *so, *soa;
2877 so = unp->unp_socket;
2879 if (SOLISTENING(so)) {
2881 * Mark all sockets in our accept queue.
2883 TAILQ_FOREACH(soa, &so->sol_comp, so_list) {
2884 if (sotounpcb(soa)->unp_gcflag & UNPGC_IGNORE_RIGHTS)
2886 SOCKBUF_LOCK(&soa->so_rcv);
2887 unp_scan(soa->so_rcv.sb_mb, op);
2888 SOCKBUF_UNLOCK(&soa->so_rcv);
2892 * Mark all sockets we reference with RIGHTS.
2894 if ((unp->unp_gcflag & UNPGC_IGNORE_RIGHTS) == 0) {
2895 SOCKBUF_LOCK(&so->so_rcv);
2896 unp_scan(so->so_rcv.sb_mb, op);
2897 SOCKBUF_UNLOCK(&so->so_rcv);
2903 static int unp_recycled;
2904 SYSCTL_INT(_net_local, OID_AUTO, recycled, CTLFLAG_RD, &unp_recycled, 0,
2905 "Number of unreachable sockets claimed by the garbage collector.");
2907 static int unp_taskcount;
2908 SYSCTL_INT(_net_local, OID_AUTO, taskcount, CTLFLAG_RD, &unp_taskcount, 0,
2909 "Number of times the garbage collector has run.");
2911 SYSCTL_UINT(_net_local, OID_AUTO, sockcount, CTLFLAG_RD, &unp_count, 0,
2912 "Number of active local sockets.");
2915 unp_gc(__unused void *arg, int pending)
2917 struct unp_head *heads[] = { &unp_dhead, &unp_shead, &unp_sphead,
2919 struct unp_head **head;
2920 struct unp_head unp_deadhead; /* List of potentially-dead sockets. */
2921 struct file *f, **unref;
2922 struct unpcb *unp, *unptmp;
2923 int i, total, unp_unreachable;
2925 LIST_INIT(&unp_deadhead);
2929 * First determine which sockets may be in cycles.
2931 unp_unreachable = 0;
2933 for (head = heads; *head != NULL; head++)
2934 LIST_FOREACH(unp, *head, unp_link) {
2935 KASSERT((unp->unp_gcflag & ~UNPGC_IGNORE_RIGHTS) == 0,
2936 ("%s: unp %p has unexpected gc flags 0x%x",
2937 __func__, unp, (unsigned int)unp->unp_gcflag));
2942 * Check for an unreachable socket potentially in a
2943 * cycle. It must be in a queue as indicated by
2944 * msgcount, and this must equal the file reference
2945 * count. Note that when msgcount is 0 the file is NULL.
2948 if (f != NULL && unp->unp_msgcount != 0 &&
2949 refcount_load(&f->f_count) == unp->unp_msgcount) {
2950 LIST_INSERT_HEAD(&unp_deadhead, unp, unp_dead);
2951 unp->unp_gcflag |= UNPGC_DEAD;
2952 unp->unp_gcrefs = unp->unp_msgcount;
2958 * Scan all sockets previously marked as potentially being in a cycle
2959 * and remove the references each socket holds on any UNPGC_DEAD
2960 * sockets in its queue. After this step, all remaining references on
2961 * sockets marked UNPGC_DEAD should not be part of any cycle.
2963 LIST_FOREACH(unp, &unp_deadhead, unp_dead)
2964 unp_gc_scan(unp, unp_remove_dead_ref);
2967 * If a socket still has a positive refcount, it cannot be in a
2968 * cycle. In this case increment refcount of all children iteratively.
2969 * Stop the scan once we do a complete loop without discovering
2970 * a new reachable socket.
2974 LIST_FOREACH_SAFE(unp, &unp_deadhead, unp_dead, unptmp)
2975 if (unp->unp_gcrefs > 0) {
2976 unp->unp_gcflag &= ~UNPGC_DEAD;
2977 LIST_REMOVE(unp, unp_dead);
2978 KASSERT(unp_unreachable > 0,
2979 ("%s: unp_unreachable underflow.",
2982 unp_gc_scan(unp, unp_restore_undead_ref);
2984 } while (unp_marked);
2988 if (unp_unreachable == 0)
2992 * Allocate space for a local array of dead unpcbs.
2993 * TODO: can this path be simplified by instead using the local
2994 * dead list at unp_deadhead, after taking out references
2995 * on the file object and/or unpcb and dropping the link lock?
2997 unref = malloc(unp_unreachable * sizeof(struct file *),
3001 * Iterate looking for sockets which have been specifically marked
3002 * as unreachable and store them locally.
3006 LIST_FOREACH(unp, &unp_deadhead, unp_dead) {
3007 KASSERT((unp->unp_gcflag & UNPGC_DEAD) != 0,
3008 ("%s: unp %p not marked UNPGC_DEAD", __func__, unp));
3009 unp->unp_gcflag &= ~UNPGC_DEAD;
3011 if (unp->unp_msgcount == 0 || f == NULL ||
3012 refcount_load(&f->f_count) != unp->unp_msgcount ||
3016 KASSERT(total <= unp_unreachable,
3017 ("%s: incorrect unreachable count.", __func__));
3022 * Now flush all sockets, freeing rights. This will free the
3023 * struct files associated with these sockets but leave each socket
3024 * with one remaining ref.
3026 for (i = 0; i < total; i++) {
3029 so = unref[i]->f_data;
3030 CURVNET_SET(so->so_vnet);
3036 * And finally release the sockets so they can be reclaimed.
3038 for (i = 0; i < total; i++)
3039 fdrop(unref[i], NULL);
3040 unp_recycled += total;
3041 free(unref, M_TEMP);
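/*
 * Added illustration (not part of the original source): the simplest way to
 * manufacture the garbage that unp_gc() reclaims is to send each end of a
 * socketpair across the other and then close both descriptors.  The
 * in-flight references keep both files alive even though no process can
 * reach them any more, so only the collector can free them.  The helper
 * pass_fd() is hypothetical and mirrors the send_fd() sketch shown earlier.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>
#include <unistd.h>

static void
pass_fd(int sock, int fd)
{
	union {
		struct cmsghdr hdr;
		char buf[CMSG_SPACE(sizeof(int))];
	} cmsgbuf;
	struct msghdr msg;
	struct cmsghdr *cmsg;
	struct iovec iov;
	char byte = 0;

	memset(&msg, 0, sizeof(msg));
	memset(&cmsgbuf, 0, sizeof(cmsgbuf));
	iov.iov_base = &byte;
	iov.iov_len = sizeof(byte);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cmsgbuf.buf;
	msg.msg_controllen = sizeof(cmsgbuf.buf);
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
	(void)sendmsg(sock, &msg, 0);
}

static void
make_unreachable_cycle(void)
{
	int sv[2];

	if (socketpair(PF_LOCAL, SOCK_STREAM, 0, sv) == -1)
		return;
	pass_fd(sv[0], sv[1]);	/* sv[1]'s file is now in flight */
	pass_fd(sv[1], sv[0]);	/* sv[0]'s file is now in flight */
	close(sv[0]);
	close(sv[1]);
	/* Only the garbage collector can reclaim these sockets now. */
}
#endif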
3045 * Synchronize against unp_gc, which can trip over data as we are freeing it.
3048 unp_dispose(struct socket *so)
3050 struct sockbuf *sb = &so->so_rcv;
3054 MPASS(!SOLISTENING(so));
3056 unp = sotounpcb(so);
3058 unp->unp_gcflag |= UNPGC_IGNORE_RIGHTS;
3062 * Grab our special mbufs before calling sbrelease().
3064 SOCK_RECVBUF_LOCK(so);
3065 m = sbcut_locked(sb, sb->sb_ccc);
3066 KASSERT(sb->sb_ccc == 0 && sb->sb_mb == 0 && sb->sb_mbcnt == 0,
3067 ("%s: ccc %u mb %p mbcnt %u", __func__,
3068 sb->sb_ccc, (void *)sb->sb_mb, sb->sb_mbcnt));
3069 sbrelease_locked(so, SO_RCV);
3070 SOCK_RECVBUF_UNLOCK(so);
3071 if (SOCK_IO_RECV_OWNED(so))
3072 SOCK_IO_RECV_UNLOCK(so);
3075 unp_scan(m, unp_freerights);
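/*
 * Added note: unp_scan() below walks a chain of records (m0, linked via
 * m_nextpkt) and, within each record, every MT_CONTROL mbuf; for each
 * SCM_RIGHTS control message found it invokes *op on the array of struct
 * filedescent pointers the message carries.
 */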
3081 unp_scan(struct mbuf *m0, void (*op)(struct filedescent **, int))
3086 socklen_t clen, datalen;
3088 while (m0 != NULL) {
3089 for (m = m0; m; m = m->m_next) {
3090 if (m->m_type != MT_CONTROL)
3093 cm = mtod(m, struct cmsghdr *);
3096 while (cm != NULL) {
3097 if (sizeof(*cm) > clen || cm->cmsg_len > clen)
3100 data = CMSG_DATA(cm);
3101 datalen = (caddr_t)cm + cm->cmsg_len
3104 if (cm->cmsg_level == SOL_SOCKET &&
3105 cm->cmsg_type == SCM_RIGHTS) {
3106 (*op)(data, datalen /
3107 sizeof(struct filedescent *));
3110 if (CMSG_SPACE(datalen) < clen) {
3111 clen -= CMSG_SPACE(datalen);
3112 cm = (struct cmsghdr *)
3113 ((caddr_t)cm + CMSG_SPACE(datalen));
3125 * A helper function called by VFS before socket-type vnode reclamation.
3126 * For an active vnode it clears the unp_vnode pointer and drops the vnode reference held through unp_vnode.
3130 vfs_unp_reclaim(struct vnode *vp)
3136 ASSERT_VOP_ELOCKED(vp, "vfs_unp_reclaim");
3137 KASSERT(vp->v_type == VSOCK,
3138 ("vfs_unp_reclaim: vp->v_type != VSOCK"));
3141 vplock = mtx_pool_find(mtxpool_sleep, vp);
3143 VOP_UNP_CONNECT(vp, &unp);
3147 if (unp->unp_vnode == vp) {
3149 unp->unp_vnode = NULL;
3152 UNP_PCB_UNLOCK(unp);
3161 db_print_indent(int indent)
3165 for (i = 0; i < indent; i++)
3170 db_print_unpflags(int unp_flags)
3175 if (unp_flags & UNP_HAVEPC) {
3176 db_printf("%sUNP_HAVEPC", comma ? ", " : "");
3179 if (unp_flags & UNP_WANTCRED_ALWAYS) {
3180 db_printf("%sUNP_WANTCRED_ALWAYS", comma ? ", " : "");
3183 if (unp_flags & UNP_WANTCRED_ONESHOT) {
3184 db_printf("%sUNP_WANTCRED_ONESHOT", comma ? ", " : "");
3187 if (unp_flags & UNP_CONNWAIT) {
3188 db_printf("%sUNP_CONNWAIT", comma ? ", " : "");
3191 if (unp_flags & UNP_CONNECTING) {
3192 db_printf("%sUNP_CONNECTING", comma ? ", " : "");
3195 if (unp_flags & UNP_BINDING) {
3196 db_printf("%sUNP_BINDING", comma ? ", " : "");
3202 db_print_xucred(int indent, struct xucred *xu)
3206 db_print_indent(indent);
3207 db_printf("cr_version: %u cr_uid: %u cr_pid: %d cr_ngroups: %d\n",
3208 xu->cr_version, xu->cr_uid, xu->cr_pid, xu->cr_ngroups);
3209 db_print_indent(indent);
3210 db_printf("cr_groups: ");
3212 for (i = 0; i < xu->cr_ngroups; i++) {
3213 db_printf("%s%u", comma ? ", " : "", xu->cr_groups[i]);
3220 db_print_unprefs(int indent, struct unp_head *uh)
3226 LIST_FOREACH(unp, uh, unp_reflink) {
3227 if (counter % 4 == 0)
3228 db_print_indent(indent);
3229 db_printf("%p ", unp);
3230 if (counter % 4 == 3)
3234 if (counter != 0 && counter % 4 != 0)
3238 DB_SHOW_COMMAND(unpcb, db_show_unpcb)
3243 db_printf("usage: show unpcb <addr>\n");
3246 unp = (struct unpcb *)addr;
3248 db_printf("unp_socket: %p unp_vnode: %p\n", unp->unp_socket,
3251 db_printf("unp_ino: %ju unp_conn: %p\n", (uintmax_t)unp->unp_ino,
3254 db_printf("unp_refs:\n");
3255 db_print_unprefs(2, &unp->unp_refs);
3257 /* XXXRW: Would be nice to print the full address, if any. */
3258 db_printf("unp_addr: %p\n", unp->unp_addr);
3260 db_printf("unp_gencnt: %llu\n",
3261 (unsigned long long)unp->unp_gencnt);
3263 db_printf("unp_flags: %x (", unp->unp_flags);
3264 db_print_unpflags(unp->unp_flags);
3267 db_printf("unp_peercred:\n");
3268 db_print_xucred(2, &unp->unp_peercred);
3270 db_printf("unp_refcount: %u\n", unp->unp_refcount);