2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 1982, 1986, 1989, 1991, 1993
5 * The Regents of the University of California. All Rights Reserved.
6 * Copyright (c) 2004-2009 Robert N. M. Watson All Rights Reserved.
7 * Copyright (c) 2018 Matthew Macy
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * From: @(#)uipc_usrreq.c 8.3 (Berkeley) 1/4/94
37 * UNIX Domain (Local) Sockets
39 * This is an implementation of UNIX (local) domain sockets. Each socket has
40 * an associated struct unpcb (UNIX protocol control block). Stream sockets
41 * may be connected to 0 or 1 other socket. Datagram sockets may be
42 * connected to 0, 1, or many other sockets. Sockets may be created and
43 * connected in pairs (socketpair(2)), or bound/connected to using the file
44 * system name space. For most purposes, only the receive socket buffer is
45 * used, as sending on one socket delivers directly to the receive socket
46 * buffer of a second socket.
48 * The implementation is substantially complicated by the fact that
49 * "ancillary data", such as file descriptors or credentials, may be passed
50 * across UNIX domain sockets. The potential for passing UNIX domain sockets
51 * over other UNIX domain sockets requires the implementation of a simple
52 * garbage collector to find and tear down cycles of disconnected sockets.
56 * rethink name space problems
57 * need a proper out-of-band
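/*
 * Illustrative userland sketch (not part of the kernel, error handling
 * omitted): a connected pair of local stream sockets created with
 * socketpair(2), passing one descriptor to the peer as SCM_RIGHTS
 * ancillary data.  Names such as fd_to_send are placeholders.
 *
 *	int sv[2], fd_to_send;
 *	char byte = 0, cbuf[CMSG_SPACE(sizeof(int))];
 *	struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *	    .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm;
 *
 *	socketpair(PF_LOCAL, SOCK_STREAM, 0, sv);
 *	cm = CMSG_FIRSTHDR(&msg);
 *	cm->cmsg_level = SOL_SOCKET;
 *	cm->cmsg_type = SCM_RIGHTS;
 *	cm->cmsg_len = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cm), &fd_to_send, sizeof(int));
 *	sendmsg(sv[0], &msg, 0);	-- the descriptor arrives on sv[1]
 */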
60 #include <sys/cdefs.h>
61 __FBSDID("$FreeBSD$");
65 #include <sys/param.h>
66 #include <sys/capsicum.h>
67 #include <sys/domain.h>
68 #include <sys/eventhandler.h>
69 #include <sys/fcntl.h>
71 #include <sys/filedesc.h>
72 #include <sys/kernel.h>
74 #include <sys/malloc.h>
76 #include <sys/mount.h>
77 #include <sys/mutex.h>
78 #include <sys/namei.h>
80 #include <sys/protosw.h>
81 #include <sys/queue.h>
82 #include <sys/resourcevar.h>
83 #include <sys/rwlock.h>
84 #include <sys/socket.h>
85 #include <sys/socketvar.h>
86 #include <sys/signalvar.h>
89 #include <sys/sysctl.h>
90 #include <sys/systm.h>
91 #include <sys/taskqueue.h>
93 #include <sys/unpcb.h>
94 #include <sys/vnode.h>
102 #include <security/mac/mac_framework.h>
106 MALLOC_DECLARE(M_FILECAPS);
109 * See unpcb.h for the locking key.
112 static uma_zone_t unp_zone;
113 static unp_gen_t unp_gencnt; /* (l) */
114 static u_int unp_count; /* (l) Count of local sockets. */
115 static ino_t unp_ino; /* Prototype for fake inode numbers. */
116 static int unp_rights; /* (g) File descriptors in flight. */
117 static struct unp_head unp_shead; /* (l) List of stream sockets. */
118 static struct unp_head unp_dhead; /* (l) List of datagram sockets. */
119 static struct unp_head unp_sphead; /* (l) List of seqpacket sockets. */
122 SLIST_ENTRY(unp_defer) ud_link;
125 static SLIST_HEAD(, unp_defer) unp_defers;
126 static int unp_defers_count;
128 static const struct sockaddr sun_noname = { sizeof(sun_noname), AF_LOCAL };
131 * Garbage collection of cyclic file descriptor/socket references occurs
132 * asynchronously in a taskqueue context in order to avoid recursion and
133 * reentrance in the UNIX domain socket, file descriptor, and socket layer
134 * code. See unp_gc() for a full description.
136 static struct timeout_task unp_gc_task;
139 * The close of UNIX domain sockets attached as SCM_RIGHTS is
140 * postponed to the taskqueue, to avoid arbitrary recursion depth.
141 * The attached sockets might themselves have other sockets attached.
143 static struct task unp_defer_task;
146 * Both send and receive buffers are allocated PIPSIZ bytes of buffering for
147 * stream sockets, although the total for sender and receiver is actually
150 * Datagram sockets use the sendspace only as the maximum datagram size,
151 * and do not actually reserve send-side buffering. Their recvspace should be
152 * large enough for at least one max-size datagram plus address.
157 static u_long unpst_sendspace = PIPSIZ;
158 static u_long unpst_recvspace = PIPSIZ;
159 static u_long unpdg_sendspace = 2*1024; /* really max datagram size */
160 static u_long unpdg_recvspace = 4*1024;
161 static u_long unpsp_sendspace = PIPSIZ; /* really max datagram size */
162 static u_long unpsp_recvspace = PIPSIZ;
164 static SYSCTL_NODE(_net, PF_LOCAL, local, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
166 static SYSCTL_NODE(_net_local, SOCK_STREAM, stream,
167 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
169 static SYSCTL_NODE(_net_local, SOCK_DGRAM, dgram,
170 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
172 static SYSCTL_NODE(_net_local, SOCK_SEQPACKET, seqpacket,
173 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
176 SYSCTL_ULONG(_net_local_stream, OID_AUTO, sendspace, CTLFLAG_RW,
177 &unpst_sendspace, 0, "Default stream send space.");
178 SYSCTL_ULONG(_net_local_stream, OID_AUTO, recvspace, CTLFLAG_RW,
179 &unpst_recvspace, 0, "Default stream receive space.");
180 SYSCTL_ULONG(_net_local_dgram, OID_AUTO, maxdgram, CTLFLAG_RW,
181 &unpdg_sendspace, 0, "Default datagram send space.");
182 SYSCTL_ULONG(_net_local_dgram, OID_AUTO, recvspace, CTLFLAG_RW,
183 &unpdg_recvspace, 0, "Default datagram receive space.");
184 SYSCTL_ULONG(_net_local_seqpacket, OID_AUTO, maxseqpacket, CTLFLAG_RW,
185 &unpsp_sendspace, 0, "Default seqpacket send space.");
186 SYSCTL_ULONG(_net_local_seqpacket, OID_AUTO, recvspace, CTLFLAG_RW,
187 &unpsp_recvspace, 0, "Default seqpacket receive space.");
188 SYSCTL_INT(_net_local, OID_AUTO, inflight, CTLFLAG_RD, &unp_rights, 0,
189 "File descriptors in flight.");
190 SYSCTL_INT(_net_local, OID_AUTO, deferred, CTLFLAG_RD,
191 &unp_defers_count, 0,
192 "File descriptors deferred to taskqueue for close.");
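/*
 * Usage note (illustrative values): the buffer defaults above are exported
 * as sysctls and can be tuned at runtime, e.g. with sysctl(8):
 *
 *	sysctl net.local.stream.sendspace=16384
 *	sysctl net.local.dgram.recvspace=8192
 */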
195 * Locking and synchronization:
197 * Several types of locks exist in the local domain socket implementation:
198 * - a global linkage lock
199 * - a global connection list lock
201 * - per-unpcb mutexes
203 * The linkage lock protects the global socket lists, the generation number
204 * counter and garbage collector state.
206 * The connection list lock protects the list of referring sockets in a datagram
207 * socket PCB. This lock is also overloaded to protect a global list of
208 * sockets whose buffers contain socket references in the form of SCM_RIGHTS
209 * messages. To avoid recursion, such references are released by a dedicated
212 * The mtxpool lock protects the vnode from being modified while referenced.
213 * Lock ordering rules require that it be acquired before any PCB locks.
215 * The unpcb lock (unp_mtx) protects the most commonly referenced fields in the
216 * unpcb. This includes the unp_conn field, which either links two connected
217 * PCBs together (for connected socket types) or points at the destination
218 * socket (for connectionless socket types). The operations of creating or
219 * destroying a connection therefore involve locking multiple PCBs. To avoid
220 * lock order reversals, in some cases this involves dropping a PCB lock and
221 * using a reference counter to maintain liveness.
223 * UNIX domain sockets each have an unpcb hung off of their so_pcb pointer,
224 * allocated in pru_attach() and freed in pru_detach(). The validity of that
225 * pointer is an invariant, so no lock is required to dereference the so_pcb
226 * pointer if a valid socket reference is held by the caller. In practice,
227 * this is always true during operations performed on a socket. Each unpcb
228 * has a back-pointer to its socket, unp_socket, which will be stable under
229 * the same circumstances.
231 * This pointer may only be safely dereferenced as long as a valid reference
232 * to the unpcb is held. Typically, this reference will be from the socket,
233 * or from another unpcb when the referring unpcb's lock is held (in order
234 * that the reference not be invalidated during use). For example, to follow
235 * unp->unp_conn->unp_socket, you need to hold a lock on unp_conn to guarantee
236 * that detach is not run clearing unp_socket.
238 * Blocking with UNIX domain sockets is a tricky issue: unlike most network
239 * protocols, bind() is a non-atomic operation, and connect() may sleep in
240 * the protocol while waiting on local or distributed file systems. We try
241 * to separate "lookup" operations, which may sleep, from the IPC operations
242 * themselves, which typically can occur
243 * with relative atomicity as locks can be held over the entire operation.
245 * Another tricky issue is simultaneous multi-threaded or multi-process
246 * access to a single UNIX domain socket. These are handled by the flags
247 * UNP_CONNECTING and UNP_BINDING, which prevent concurrent connecting or
248 * binding, both of which involve dropping UNIX domain socket locks in order
249 * to perform namei() and other file system operations.
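/*
 * Sketch of the pair-locking convention relied upon below (see
 * unp_pcb_lock_pair() and unp_pcb_lock_peer() for the real logic): when
 * two PCB locks must be held at once, the lock at the lower address is
 * taken first, so all lockers agree on an order and cannot deadlock.
 *
 *	if ((uintptr_t)unp < (uintptr_t)unp2) {
 *		UNP_PCB_LOCK(unp);
 *		UNP_PCB_LOCK(unp2);
 *	} else {
 *		UNP_PCB_LOCK(unp2);
 *		UNP_PCB_LOCK(unp);
 *	}
 */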
251 static struct rwlock unp_link_rwlock;
252 static struct mtx unp_defers_lock;
254 #define UNP_LINK_LOCK_INIT() rw_init(&unp_link_rwlock, \
257 #define UNP_LINK_LOCK_ASSERT() rw_assert(&unp_link_rwlock, \
259 #define UNP_LINK_UNLOCK_ASSERT() rw_assert(&unp_link_rwlock, \
262 #define UNP_LINK_RLOCK() rw_rlock(&unp_link_rwlock)
263 #define UNP_LINK_RUNLOCK() rw_runlock(&unp_link_rwlock)
264 #define UNP_LINK_WLOCK() rw_wlock(&unp_link_rwlock)
265 #define UNP_LINK_WUNLOCK() rw_wunlock(&unp_link_rwlock)
266 #define UNP_LINK_WLOCK_ASSERT() rw_assert(&unp_link_rwlock, \
268 #define UNP_LINK_WOWNED() rw_wowned(&unp_link_rwlock)
270 #define UNP_DEFERRED_LOCK_INIT() mtx_init(&unp_defers_lock, \
271 "unp_defer", NULL, MTX_DEF)
272 #define UNP_DEFERRED_LOCK() mtx_lock(&unp_defers_lock)
273 #define UNP_DEFERRED_UNLOCK() mtx_unlock(&unp_defers_lock)
275 #define UNP_REF_LIST_LOCK() UNP_DEFERRED_LOCK();
276 #define UNP_REF_LIST_UNLOCK() UNP_DEFERRED_UNLOCK();
278 #define UNP_PCB_LOCK_INIT(unp) mtx_init(&(unp)->unp_mtx, \
281 #define UNP_PCB_LOCK_DESTROY(unp) mtx_destroy(&(unp)->unp_mtx)
282 #define UNP_PCB_LOCKPTR(unp) (&(unp)->unp_mtx)
283 #define UNP_PCB_LOCK(unp) mtx_lock(&(unp)->unp_mtx)
284 #define UNP_PCB_TRYLOCK(unp) mtx_trylock(&(unp)->unp_mtx)
285 #define UNP_PCB_UNLOCK(unp) mtx_unlock(&(unp)->unp_mtx)
286 #define UNP_PCB_OWNED(unp) mtx_owned(&(unp)->unp_mtx)
287 #define UNP_PCB_LOCK_ASSERT(unp) mtx_assert(&(unp)->unp_mtx, MA_OWNED)
288 #define UNP_PCB_UNLOCK_ASSERT(unp) mtx_assert(&(unp)->unp_mtx, MA_NOTOWNED)
290 static int uipc_connect2(struct socket *, struct socket *);
291 static int uipc_ctloutput(struct socket *, struct sockopt *);
292 static int unp_connect(struct socket *, struct sockaddr *,
294 static int unp_connectat(int, struct socket *, struct sockaddr *,
296 static int unp_connect2(struct socket *so, struct socket *so2, int);
297 static void unp_disconnect(struct unpcb *unp, struct unpcb *unp2);
298 static void unp_dispose(struct socket *so);
299 static void unp_dispose_mbuf(struct mbuf *);
300 static void unp_shutdown(struct unpcb *);
301 static void unp_drop(struct unpcb *);
302 static void unp_gc(__unused void *, int);
303 static void unp_scan(struct mbuf *, void (*)(struct filedescent **, int));
304 static void unp_discard(struct file *);
305 static void unp_freerights(struct filedescent **, int);
306 static void unp_init(void);
307 static int unp_internalize(struct mbuf **, struct thread *);
308 static void unp_internalize_fp(struct file *);
309 static int unp_externalize(struct mbuf *, struct mbuf **, int);
310 static int unp_externalize_fp(struct file *);
311 static struct mbuf *unp_addsockcred(struct thread *, struct mbuf *, int);
312 static void unp_process_defers(void * __unused, int);
315 unp_pcb_hold(struct unpcb *unp)
319 old = refcount_acquire(&unp->unp_refcount);
320 KASSERT(old > 0, ("%s: unpcb %p has no references", __func__, unp));
323 static __result_use_check bool
324 unp_pcb_rele(struct unpcb *unp)
328 UNP_PCB_LOCK_ASSERT(unp);
330 if ((ret = refcount_release(&unp->unp_refcount))) {
332 UNP_PCB_LOCK_DESTROY(unp);
333 uma_zfree(unp_zone, unp);
339 unp_pcb_rele_notlast(struct unpcb *unp)
343 ret = refcount_release(&unp->unp_refcount);
344 KASSERT(!ret, ("%s: unpcb %p has no references", __func__, unp));
348 unp_pcb_lock_pair(struct unpcb *unp, struct unpcb *unp2)
350 UNP_PCB_UNLOCK_ASSERT(unp);
351 UNP_PCB_UNLOCK_ASSERT(unp2);
355 } else if ((uintptr_t)unp2 > (uintptr_t)unp) {
365 unp_pcb_unlock_pair(struct unpcb *unp, struct unpcb *unp2)
369 UNP_PCB_UNLOCK(unp2);
373 * Try to lock the connected peer of an already locked socket. In some cases
374 * this requires that we unlock the current socket. The pairbusy counter is
375 * used to block concurrent connection attempts while the lock is dropped. The
376 * caller must be careful to revalidate PCB state.
378 static struct unpcb *
379 unp_pcb_lock_peer(struct unpcb *unp)
383 UNP_PCB_LOCK_ASSERT(unp);
384 unp2 = unp->unp_conn;
387 if (__predict_false(unp == unp2))
390 UNP_PCB_UNLOCK_ASSERT(unp2);
392 if (__predict_true(UNP_PCB_TRYLOCK(unp2)))
394 if ((uintptr_t)unp2 > (uintptr_t)unp) {
404 KASSERT(unp->unp_conn == unp2 || unp->unp_conn == NULL,
405 ("%s: socket %p was reconnected", __func__, unp));
406 if (--unp->unp_pairbusy == 0 && (unp->unp_flags & UNP_WAITING) != 0) {
407 unp->unp_flags &= ~UNP_WAITING;
410 if (unp_pcb_rele(unp2)) {
411 /* unp2 is unlocked. */
414 if (unp->unp_conn == NULL) {
415 UNP_PCB_UNLOCK(unp2);
422 * Definitions of protocols supported in the LOCAL domain.
424 static struct domain localdomain;
425 static struct pr_usrreqs uipc_usrreqs_dgram, uipc_usrreqs_stream;
426 static struct pr_usrreqs uipc_usrreqs_seqpacket;
427 static struct protosw localsw[] = {
429 .pr_type = SOCK_STREAM,
430 .pr_domain = &localdomain,
431 .pr_flags = PR_CONNREQUIRED|PR_WANTRCVD|PR_RIGHTS|
433 .pr_ctloutput = &uipc_ctloutput,
434 .pr_usrreqs = &uipc_usrreqs_stream
437 .pr_type = SOCK_DGRAM,
438 .pr_domain = &localdomain,
439 .pr_flags = PR_ATOMIC|PR_ADDR|PR_RIGHTS|PR_CAPATTACH,
440 .pr_ctloutput = &uipc_ctloutput,
441 .pr_usrreqs = &uipc_usrreqs_dgram
444 .pr_type = SOCK_SEQPACKET,
445 .pr_domain = &localdomain,
448 * XXXRW: For now, PR_ADDR because soreceive will bump into them
449 * due to our use of sbappendaddr. A new sbappend variant is needed
450 * that supports both atomic record writes and control data.
452 .pr_flags = PR_ADDR|PR_ATOMIC|PR_CONNREQUIRED|
453 PR_WANTRCVD|PR_RIGHTS|PR_CAPATTACH,
454 .pr_ctloutput = &uipc_ctloutput,
455 .pr_usrreqs = &uipc_usrreqs_seqpacket,
459 static struct domain localdomain = {
460 .dom_family = AF_LOCAL,
462 .dom_init = unp_init,
463 .dom_externalize = unp_externalize,
464 .dom_dispose = unp_dispose,
465 .dom_protosw = localsw,
466 .dom_protoswNPROTOSW = &localsw[nitems(localsw)]
471 uipc_abort(struct socket *so)
473 struct unpcb *unp, *unp2;
476 KASSERT(unp != NULL, ("uipc_abort: unp == NULL"));
477 UNP_PCB_UNLOCK_ASSERT(unp);
480 unp2 = unp->unp_conn;
490 uipc_accept(struct socket *so, struct sockaddr **nam)
492 struct unpcb *unp, *unp2;
493 const struct sockaddr *sa;
496 * Pass back name of connected socket, if it was bound and we are
497 * still connected (our peer may have closed already!).
500 KASSERT(unp != NULL, ("uipc_accept: unp == NULL"));
502 *nam = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK);
504 unp2 = unp_pcb_lock_peer(unp);
505 if (unp2 != NULL && unp2->unp_addr != NULL)
506 sa = (struct sockaddr *)unp2->unp_addr;
509 bcopy(sa, *nam, sa->sa_len);
511 unp_pcb_unlock_pair(unp, unp2);
518 uipc_attach(struct socket *so, int proto, struct thread *td)
520 u_long sendspace, recvspace;
525 KASSERT(so->so_pcb == NULL, ("uipc_attach: so_pcb != NULL"));
526 if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
527 switch (so->so_type) {
529 sendspace = unpst_sendspace;
530 recvspace = unpst_recvspace;
534 sendspace = unpdg_sendspace;
535 recvspace = unpdg_recvspace;
539 sendspace = unpsp_sendspace;
540 recvspace = unpsp_recvspace;
544 panic("uipc_attach");
546 error = soreserve(so, sendspace, recvspace);
550 unp = uma_zalloc(unp_zone, M_NOWAIT | M_ZERO);
553 LIST_INIT(&unp->unp_refs);
554 UNP_PCB_LOCK_INIT(unp);
555 unp->unp_socket = so;
557 refcount_init(&unp->unp_refcount, 1);
559 if ((locked = UNP_LINK_WOWNED()) == false)
562 unp->unp_gencnt = ++unp_gencnt;
563 unp->unp_ino = ++unp_ino;
565 switch (so->so_type) {
567 LIST_INSERT_HEAD(&unp_shead, unp, unp_link);
571 LIST_INSERT_HEAD(&unp_dhead, unp, unp_link);
575 LIST_INSERT_HEAD(&unp_sphead, unp, unp_link);
579 panic("uipc_attach");
589 uipc_bindat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
591 struct sockaddr_un *soun = (struct sockaddr_un *)nam;
601 if (nam->sa_family != AF_UNIX)
602 return (EAFNOSUPPORT);
605 KASSERT(unp != NULL, ("uipc_bind: unp == NULL"));
607 if (soun->sun_len > sizeof(struct sockaddr_un))
609 namelen = soun->sun_len - offsetof(struct sockaddr_un, sun_path);
614 * We don't allow simultaneous bind() calls on a single UNIX domain
615 * socket, so flag in-progress operations, and return an error if an
616 * operation is already in progress.
618 * Historically, we have not allowed a socket to be rebound, so this
619 * also returns an error. Not allowing re-binding simplifies the
620 * implementation and avoids a great many possible failure modes.
623 if (unp->unp_vnode != NULL) {
627 if (unp->unp_flags & UNP_BINDING) {
631 unp->unp_flags |= UNP_BINDING;
634 buf = malloc(namelen + 1, M_TEMP, M_WAITOK);
635 bcopy(soun->sun_path, buf, namelen);
639 NDINIT_ATRIGHTS(&nd, CREATE, NOFOLLOW | LOCKPARENT | SAVENAME | NOCACHE,
640 UIO_SYSSPACE, buf, fd, cap_rights_init_one(&rights, CAP_BINDAT),
642 /* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */
647 if (vp != NULL || vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
648 NDFREE(&nd, NDF_ONLY_PNBUF);
658 error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH);
664 vattr.va_type = VSOCK;
665 vattr.va_mode = (ACCESSPERMS & ~td->td_proc->p_pd->pd_cmask);
667 error = mac_vnode_check_create(td->td_ucred, nd.ni_dvp, &nd.ni_cnd,
671 error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
672 NDFREE(&nd, NDF_ONLY_PNBUF);
674 VOP_VPUT_PAIR(nd.ni_dvp, NULL, true);
675 vn_finished_write(mp);
676 if (error == ERELOOKUP)
681 ASSERT_VOP_ELOCKED(vp, "uipc_bind");
682 soun = (struct sockaddr_un *)sodupsockaddr(nam, M_WAITOK);
685 VOP_UNP_BIND(vp, unp);
687 unp->unp_addr = soun;
688 unp->unp_flags &= ~UNP_BINDING;
691 VOP_VPUT_PAIR(nd.ni_dvp, &vp, true);
692 vn_finished_write(mp);
698 unp->unp_flags &= ~UNP_BINDING;
705 uipc_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
708 return (uipc_bindat(AT_FDCWD, so, nam, td));
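/*
 * Illustrative userland sketch (path and backlog are examples): binding a
 * local stream socket to a filesystem name and listening on it, which is
 * the setup the bind and listen paths in this file implement.
 *
 *	struct sockaddr_un sun = { .sun_family = AF_LOCAL };
 *	int s = socket(PF_LOCAL, SOCK_STREAM, 0);
 *
 *	strlcpy(sun.sun_path, "/tmp/example.sock", sizeof(sun.sun_path));
 *	sun.sun_len = SUN_LEN(&sun);
 *	bind(s, (struct sockaddr *)&sun, SUN_LEN(&sun));
 *	listen(s, 5);
 */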
712 uipc_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
716 KASSERT(td == curthread, ("uipc_connect: td != curthread"));
717 error = unp_connect(so, nam, td);
722 uipc_connectat(int fd, struct socket *so, struct sockaddr *nam,
727 KASSERT(td == curthread, ("uipc_connectat: td != curthread"));
728 error = unp_connectat(fd, so, nam, td);
733 uipc_close(struct socket *so)
735 struct unpcb *unp, *unp2;
736 struct vnode *vp = NULL;
740 KASSERT(unp != NULL, ("uipc_close: unp == NULL"));
743 if ((vp = unp->unp_vnode) != NULL) {
744 vplock = mtx_pool_find(mtxpool_sleep, vp);
748 if (vp && unp->unp_vnode == NULL) {
754 unp->unp_vnode = NULL;
756 if ((unp2 = unp_pcb_lock_peer(unp)) != NULL)
757 unp_disconnect(unp, unp2);
767 uipc_connect2(struct socket *so1, struct socket *so2)
769 struct unpcb *unp, *unp2;
773 KASSERT(unp != NULL, ("uipc_connect2: unp == NULL"));
775 KASSERT(unp2 != NULL, ("uipc_connect2: unp2 == NULL"));
776 unp_pcb_lock_pair(unp, unp2);
777 error = unp_connect2(so1, so2, PRU_CONNECT2);
778 unp_pcb_unlock_pair(unp, unp2);
783 uipc_detach(struct socket *so)
785 struct unpcb *unp, *unp2;
788 int local_unp_rights;
791 KASSERT(unp != NULL, ("uipc_detach: unp == NULL"));
797 if (!SOLISTENING(so)) {
799 * Once the socket is removed from the global lists,
800 * uipc_ready() will not be able to locate its socket buffer, so
801 * clear the buffer now. At this point internalized rights have
802 * already been disposed of.
804 sbrelease(&so->so_rcv, so);
809 LIST_REMOVE(unp, unp_link);
810 if (unp->unp_gcflag & UNPGC_DEAD)
811 LIST_REMOVE(unp, unp_dead);
812 unp->unp_gencnt = ++unp_gencnt;
816 UNP_PCB_UNLOCK_ASSERT(unp);
818 if ((vp = unp->unp_vnode) != NULL) {
819 vplock = mtx_pool_find(mtxpool_sleep, vp);
823 if (unp->unp_vnode != vp && unp->unp_vnode != NULL) {
829 if ((vp = unp->unp_vnode) != NULL) {
831 unp->unp_vnode = NULL;
833 if ((unp2 = unp_pcb_lock_peer(unp)) != NULL)
834 unp_disconnect(unp, unp2);
839 while (!LIST_EMPTY(&unp->unp_refs)) {
840 struct unpcb *ref = LIST_FIRST(&unp->unp_refs);
843 UNP_REF_LIST_UNLOCK();
846 UNP_PCB_UNLOCK_ASSERT(ref);
850 UNP_REF_LIST_UNLOCK();
853 local_unp_rights = unp_rights;
854 unp->unp_socket->so_pcb = NULL;
855 unp->unp_socket = NULL;
856 free(unp->unp_addr, M_SONAME);
857 unp->unp_addr = NULL;
858 if (!unp_pcb_rele(unp))
864 if (local_unp_rights)
865 taskqueue_enqueue_timeout(taskqueue_thread, &unp_gc_task, -1);
869 uipc_disconnect(struct socket *so)
871 struct unpcb *unp, *unp2;
874 KASSERT(unp != NULL, ("uipc_disconnect: unp == NULL"));
877 if ((unp2 = unp_pcb_lock_peer(unp)) != NULL)
878 unp_disconnect(unp, unp2);
885 uipc_listen(struct socket *so, int backlog, struct thread *td)
890 if (so->so_type != SOCK_STREAM && so->so_type != SOCK_SEQPACKET)
894 KASSERT(unp != NULL, ("uipc_listen: unp == NULL"));
897 if (unp->unp_vnode == NULL) {
898 /* Already connected or not bound to an address. */
899 error = unp->unp_conn != NULL ? EINVAL : EDESTADDRREQ;
905 error = solisten_proto_check(so);
907 cru2xt(td, &unp->unp_peercred);
908 solisten_proto(so, backlog);
916 uipc_peeraddr(struct socket *so, struct sockaddr **nam)
918 struct unpcb *unp, *unp2;
919 const struct sockaddr *sa;
922 KASSERT(unp != NULL, ("uipc_peeraddr: unp == NULL"));
924 *nam = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK);
927 * XXX: It seems that this test always fails even when the connection is
928 * established, so this else clause is added as a workaround to
929 * return a PF_LOCAL sockaddr.
931 unp2 = unp->unp_conn;
934 if (unp2->unp_addr != NULL)
935 sa = (struct sockaddr *) unp2->unp_addr;
938 bcopy(sa, *nam, sa->sa_len);
939 UNP_PCB_UNLOCK(unp2);
942 bcopy(sa, *nam, sa->sa_len);
949 uipc_rcvd(struct socket *so, int flags)
951 struct unpcb *unp, *unp2;
956 KASSERT(unp != NULL, ("%s: unp == NULL", __func__));
957 KASSERT(so->so_type == SOCK_STREAM || so->so_type == SOCK_SEQPACKET,
958 ("%s: socktype %d", __func__, so->so_type));
961 * Adjust backpressure on sender and wakeup any waiting to write.
963 * The unp lock is acquired to maintain the validity of the unp_conn
964 * pointer; no lock on unp2 is required as unp2->unp_socket will be
965 * static as long as we don't permit unp2 to disconnect from unp,
966 * which is prevented by the lock on unp. We cache values from
967 * so_rcv to avoid holding the so_rcv lock over the entire
968 * transaction on the remote so_snd.
970 SOCKBUF_LOCK(&so->so_rcv);
971 mbcnt = so->so_rcv.sb_mbcnt;
972 sbcc = sbavail(&so->so_rcv);
973 SOCKBUF_UNLOCK(&so->so_rcv);
975 * There is a benign race condition at this point. If we're planning to
976 * clear SB_STOP, but uipc_send is called on the connected socket at
977 * this instant, it might add data to the sockbuf and set SB_STOP. Then
978 * we would erroneously clear SB_STOP below, even though the sockbuf is
979 * full. The race is benign because the only ill effect is to allow the
980 * sockbuf to exceed its size limit, and the size limits are not
981 * strictly guaranteed anyway.
984 unp2 = unp->unp_conn;
989 so2 = unp2->unp_socket;
990 SOCKBUF_LOCK(&so2->so_snd);
991 if (sbcc < so2->so_snd.sb_hiwat && mbcnt < so2->so_snd.sb_mbmax)
992 so2->so_snd.sb_flags &= ~SB_STOP;
993 sowwakeup_locked(so2);
999 uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
1000 struct mbuf *control, struct thread *td)
1002 struct unpcb *unp, *unp2;
1007 unp = sotounpcb(so);
1008 KASSERT(unp != NULL, ("%s: unp == NULL", __func__));
1009 KASSERT(so->so_type == SOCK_STREAM || so->so_type == SOCK_DGRAM ||
1010 so->so_type == SOCK_SEQPACKET,
1011 ("%s: socktype %d", __func__, so->so_type));
1014 if (flags & PRUS_OOB) {
1018 if (control != NULL && (error = unp_internalize(&control, td)))
1022 switch (so->so_type) {
1025 const struct sockaddr *from;
1028 error = unp_connect(so, nam, td);
1035 * Because connect() and send() are non-atomic in a sendto()
1036 * with a target address, it's possible that the socket will
1037 * have disconnected before the send() can run. In that case
1038 * return the slightly counter-intuitive but otherwise
1039 * correct error that the socket is not connected.
1041 unp2 = unp_pcb_lock_peer(unp);
1043 UNP_PCB_UNLOCK(unp);
1048 if (unp2->unp_flags & UNP_WANTCRED_MASK)
1049 control = unp_addsockcred(td, control,
1051 if (unp->unp_addr != NULL)
1052 from = (struct sockaddr *)unp->unp_addr;
1055 so2 = unp2->unp_socket;
1056 SOCKBUF_LOCK(&so2->so_rcv);
1057 if (sbappendaddr_locked(&so2->so_rcv, from, m,
1059 sorwakeup_locked(so2);
1063 SOCKBUF_UNLOCK(&so2->so_rcv);
1067 unp_disconnect(unp, unp2);
1069 unp_pcb_unlock_pair(unp, unp2);
1073 case SOCK_SEQPACKET:
1075 if ((so->so_state & SS_ISCONNECTED) == 0) {
1077 error = unp_connect(so, nam, td);
1087 if ((unp2 = unp_pcb_lock_peer(unp)) == NULL) {
1088 UNP_PCB_UNLOCK(unp);
1091 } else if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
1092 unp_pcb_unlock_pair(unp, unp2);
1096 UNP_PCB_UNLOCK(unp);
1097 if ((so2 = unp2->unp_socket) == NULL) {
1098 UNP_PCB_UNLOCK(unp2);
1102 SOCKBUF_LOCK(&so2->so_rcv);
1103 if (unp2->unp_flags & UNP_WANTCRED_MASK) {
1105 * Credentials are passed only once on SOCK_STREAM and
1106 * SOCK_SEQPACKET (LOCAL_CREDS => WANTCRED_ONESHOT), or
1107 * forever (LOCAL_CREDS_PERSISTENT => WANTCRED_ALWAYS).
1109 control = unp_addsockcred(td, control, unp2->unp_flags);
1110 unp2->unp_flags &= ~UNP_WANTCRED_ONESHOT;
1114 * Send to paired receive port and wake up readers. Don't
1115 * check for space available in the receive buffer if we're
1116 * attaching ancillary data; Unix domain sockets only check
1117 * for space in the sending sockbuf, and that check is
1118 * performed one level up the stack. At that level we cannot
1119 * precisely account for the amount of buffer space used
1120 * (e.g., because control messages are not yet internalized).
1122 switch (so->so_type) {
1124 if (control != NULL) {
1125 sbappendcontrol_locked(&so2->so_rcv, m,
1129 sbappend_locked(&so2->so_rcv, m, flags);
1132 case SOCK_SEQPACKET:
1133 if (sbappendaddr_nospacecheck_locked(&so2->so_rcv,
1134 &sun_noname, m, control))
1139 mbcnt = so2->so_rcv.sb_mbcnt;
1140 sbcc = sbavail(&so2->so_rcv);
1142 sorwakeup_locked(so2);
1144 SOCKBUF_UNLOCK(&so2->so_rcv);
1147 * The PCB lock on unp2 protects the SB_STOP flag. Without it,
1148 * it would be possible for uipc_rcvd to be called at this
1149 * point, drain the receiving sockbuf, clear SB_STOP, and then
1150 * we would set SB_STOP below. That could lead to an empty
1151 * sockbuf having SB_STOP set.
1153 SOCKBUF_LOCK(&so->so_snd);
1154 if (sbcc >= so->so_snd.sb_hiwat || mbcnt >= so->so_snd.sb_mbmax)
1155 so->so_snd.sb_flags |= SB_STOP;
1156 SOCKBUF_UNLOCK(&so->so_snd);
1157 UNP_PCB_UNLOCK(unp2);
1163 * PRUS_EOF is equivalent to pru_send followed by pru_shutdown.
1165 if (flags & PRUS_EOF) {
1169 UNP_PCB_UNLOCK(unp);
1171 if (control != NULL && error != 0)
1172 unp_dispose_mbuf(control);
1175 if (control != NULL)
1178 * In case of PRUS_NOTREADY, uipc_ready() is responsible
1179 * for freeing memory.
1181 if (m != NULL && (flags & PRUS_NOTREADY) == 0)
1187 uipc_ready_scan(struct socket *so, struct mbuf *m, int count, int *errorp)
1189 struct mbuf *mb, *n;
1193 if (SOLISTENING(so)) {
1200 if (sb->sb_fnrdy != NULL) {
1201 for (mb = sb->sb_mb, n = mb->m_nextpkt; mb != NULL;) {
1203 *errorp = sbready(sb, m, count);
1216 return (mb != NULL);
1220 uipc_ready(struct socket *so, struct mbuf *m, int count)
1222 struct unpcb *unp, *unp2;
1226 unp = sotounpcb(so);
1228 KASSERT(so->so_type == SOCK_STREAM,
1229 ("%s: unexpected socket type for %p", __func__, so));
1232 if ((unp2 = unp_pcb_lock_peer(unp)) != NULL) {
1233 UNP_PCB_UNLOCK(unp);
1234 so2 = unp2->unp_socket;
1235 SOCKBUF_LOCK(&so2->so_rcv);
1236 if ((error = sbready(&so2->so_rcv, m, count)) == 0)
1237 sorwakeup_locked(so2);
1239 SOCKBUF_UNLOCK(&so2->so_rcv);
1240 UNP_PCB_UNLOCK(unp2);
1243 UNP_PCB_UNLOCK(unp);
1246 * The receiving socket has been disconnected, but may still be valid.
1247 * In this case, the now-ready mbufs are still present in its socket
1248 * buffer, so perform an exhaustive search before giving up and freeing
1252 LIST_FOREACH(unp, &unp_shead, unp_link) {
1253 if (uipc_ready_scan(unp->unp_socket, m, count, &error))
1259 for (i = 0; i < count; i++)
1267 uipc_sense(struct socket *so, struct stat *sb)
1271 unp = sotounpcb(so);
1272 KASSERT(unp != NULL, ("uipc_sense: unp == NULL"));
1274 sb->st_blksize = so->so_snd.sb_hiwat;
1276 sb->st_ino = unp->unp_ino;
1281 uipc_shutdown(struct socket *so)
1285 unp = sotounpcb(so);
1286 KASSERT(unp != NULL, ("uipc_shutdown: unp == NULL"));
1291 UNP_PCB_UNLOCK(unp);
1296 uipc_sockaddr(struct socket *so, struct sockaddr **nam)
1299 const struct sockaddr *sa;
1301 unp = sotounpcb(so);
1302 KASSERT(unp != NULL, ("uipc_sockaddr: unp == NULL"));
1304 *nam = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK);
1306 if (unp->unp_addr != NULL)
1307 sa = (struct sockaddr *) unp->unp_addr;
1310 bcopy(sa, *nam, sa->sa_len);
1311 UNP_PCB_UNLOCK(unp);
1315 static struct pr_usrreqs uipc_usrreqs_dgram = {
1316 .pru_abort = uipc_abort,
1317 .pru_accept = uipc_accept,
1318 .pru_attach = uipc_attach,
1319 .pru_bind = uipc_bind,
1320 .pru_bindat = uipc_bindat,
1321 .pru_connect = uipc_connect,
1322 .pru_connectat = uipc_connectat,
1323 .pru_connect2 = uipc_connect2,
1324 .pru_detach = uipc_detach,
1325 .pru_disconnect = uipc_disconnect,
1326 .pru_listen = uipc_listen,
1327 .pru_peeraddr = uipc_peeraddr,
1328 .pru_rcvd = uipc_rcvd,
1329 .pru_send = uipc_send,
1330 .pru_sense = uipc_sense,
1331 .pru_shutdown = uipc_shutdown,
1332 .pru_sockaddr = uipc_sockaddr,
1333 .pru_soreceive = soreceive_dgram,
1334 .pru_close = uipc_close,
1337 static struct pr_usrreqs uipc_usrreqs_seqpacket = {
1338 .pru_abort = uipc_abort,
1339 .pru_accept = uipc_accept,
1340 .pru_attach = uipc_attach,
1341 .pru_bind = uipc_bind,
1342 .pru_bindat = uipc_bindat,
1343 .pru_connect = uipc_connect,
1344 .pru_connectat = uipc_connectat,
1345 .pru_connect2 = uipc_connect2,
1346 .pru_detach = uipc_detach,
1347 .pru_disconnect = uipc_disconnect,
1348 .pru_listen = uipc_listen,
1349 .pru_peeraddr = uipc_peeraddr,
1350 .pru_rcvd = uipc_rcvd,
1351 .pru_send = uipc_send,
1352 .pru_sense = uipc_sense,
1353 .pru_shutdown = uipc_shutdown,
1354 .pru_sockaddr = uipc_sockaddr,
1355 .pru_soreceive = soreceive_generic, /* XXX: or...? */
1356 .pru_close = uipc_close,
1359 static struct pr_usrreqs uipc_usrreqs_stream = {
1360 .pru_abort = uipc_abort,
1361 .pru_accept = uipc_accept,
1362 .pru_attach = uipc_attach,
1363 .pru_bind = uipc_bind,
1364 .pru_bindat = uipc_bindat,
1365 .pru_connect = uipc_connect,
1366 .pru_connectat = uipc_connectat,
1367 .pru_connect2 = uipc_connect2,
1368 .pru_detach = uipc_detach,
1369 .pru_disconnect = uipc_disconnect,
1370 .pru_listen = uipc_listen,
1371 .pru_peeraddr = uipc_peeraddr,
1372 .pru_rcvd = uipc_rcvd,
1373 .pru_send = uipc_send,
1374 .pru_ready = uipc_ready,
1375 .pru_sense = uipc_sense,
1376 .pru_shutdown = uipc_shutdown,
1377 .pru_sockaddr = uipc_sockaddr,
1378 .pru_soreceive = soreceive_generic,
1379 .pru_close = uipc_close,
1383 uipc_ctloutput(struct socket *so, struct sockopt *sopt)
1389 if (sopt->sopt_level != SOL_LOCAL)
1392 unp = sotounpcb(so);
1393 KASSERT(unp != NULL, ("uipc_ctloutput: unp == NULL"));
1395 switch (sopt->sopt_dir) {
1397 switch (sopt->sopt_name) {
1398 case LOCAL_PEERCRED:
1400 if (unp->unp_flags & UNP_HAVEPC)
1401 xu = unp->unp_peercred;
1403 if (so->so_type == SOCK_STREAM)
1408 UNP_PCB_UNLOCK(unp);
1410 error = sooptcopyout(sopt, &xu, sizeof(xu));
1414 /* Unlocked read. */
1415 optval = unp->unp_flags & UNP_WANTCRED_ONESHOT ? 1 : 0;
1416 error = sooptcopyout(sopt, &optval, sizeof(optval));
1419 case LOCAL_CREDS_PERSISTENT:
1420 /* Unlocked read. */
1421 optval = unp->unp_flags & UNP_WANTCRED_ALWAYS ? 1 : 0;
1422 error = sooptcopyout(sopt, &optval, sizeof(optval));
1425 case LOCAL_CONNWAIT:
1426 /* Unlocked read. */
1427 optval = unp->unp_flags & UNP_CONNWAIT ? 1 : 0;
1428 error = sooptcopyout(sopt, &optval, sizeof(optval));
1438 switch (sopt->sopt_name) {
1440 case LOCAL_CREDS_PERSISTENT:
1441 case LOCAL_CONNWAIT:
1442 error = sooptcopyin(sopt, &optval, sizeof(optval),
1447 #define OPTSET(bit, exclusive) do { \
1448 UNP_PCB_LOCK(unp); \
1450 if ((unp->unp_flags & (exclusive)) != 0) { \
1451 UNP_PCB_UNLOCK(unp); \
1455 unp->unp_flags |= (bit); \
1457 unp->unp_flags &= ~(bit); \
1458 UNP_PCB_UNLOCK(unp); \
1461 switch (sopt->sopt_name) {
1463 OPTSET(UNP_WANTCRED_ONESHOT, UNP_WANTCRED_ALWAYS);
1466 case LOCAL_CREDS_PERSISTENT:
1467 OPTSET(UNP_WANTCRED_ALWAYS, UNP_WANTCRED_ONESHOT);
1470 case LOCAL_CONNWAIT:
1471 OPTSET(UNP_CONNWAIT, 0);
1480 error = ENOPROTOOPT;
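/*
 * Illustrative userland sketch: querying the credentials of a connected
 * peer with LOCAL_PEERCRED, one of the options handled above.  Assumes
 * <sys/ucred.h> for struct xucred; error handling omitted.
 *
 *	struct xucred xu;
 *	socklen_t len = sizeof(xu);
 *
 *	if (getsockopt(s, SOL_LOCAL, LOCAL_PEERCRED, &xu, &len) == 0)
 *		printf("peer euid %d\n", (int)xu.cr_uid);
 */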
1493 unp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
1496 return (unp_connectat(AT_FDCWD, so, nam, td));
1500 unp_connectat(int fd, struct socket *so, struct sockaddr *nam,
1504 struct sockaddr_un *soun;
1507 struct unpcb *unp, *unp2, *unp3;
1508 struct nameidata nd;
1509 char buf[SOCK_MAXADDRLEN];
1510 struct sockaddr *sa;
1511 cap_rights_t rights;
1515 if (nam->sa_family != AF_UNIX)
1516 return (EAFNOSUPPORT);
1517 if (nam->sa_len > sizeof(struct sockaddr_un))
1519 len = nam->sa_len - offsetof(struct sockaddr_un, sun_path);
1522 soun = (struct sockaddr_un *)nam;
1523 bcopy(soun->sun_path, buf, len);
1526 unp = sotounpcb(so);
1530 * Wait for connection state to stabilize. If a connection
1531 * already exists, give up. For datagram sockets, which permit
1532 * multiple consecutive connect(2) calls, upper layers are
1533 * responsible for disconnecting in advance of a subsequent
1534 * connect(2), but this is not synchronized with PCB connection
1537 * Also make sure that no threads are currently attempting to
1538 * lock the peer socket, to ensure that unp_conn cannot
1539 * transition between two valid sockets while locks are dropped.
1541 if (unp->unp_conn != NULL) {
1542 UNP_PCB_UNLOCK(unp);
1545 if ((unp->unp_flags & UNP_CONNECTING) != 0) {
1546 UNP_PCB_UNLOCK(unp);
1549 if (unp->unp_pairbusy > 0) {
1550 unp->unp_flags |= UNP_WAITING;
1551 mtx_sleep(unp, UNP_PCB_LOCKPTR(unp), 0, "unpeer", 0);
1556 unp->unp_flags |= UNP_CONNECTING;
1557 UNP_PCB_UNLOCK(unp);
1559 connreq = (so->so_proto->pr_flags & PR_CONNREQUIRED) != 0;
1561 sa = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK);
1564 NDINIT_ATRIGHTS(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF,
1565 UIO_SYSSPACE, buf, fd, cap_rights_init_one(&rights, CAP_CONNECTAT),
1572 ASSERT_VOP_LOCKED(vp, "unp_connect");
1573 NDFREE_NOTHING(&nd);
1577 if (vp->v_type != VSOCK) {
1582 error = mac_vnode_check_open(td->td_ucred, vp, VWRITE | VREAD);
1586 error = VOP_ACCESS(vp, VWRITE, td->td_ucred, td);
1590 unp = sotounpcb(so);
1591 KASSERT(unp != NULL, ("unp_connect: unp == NULL"));
1593 vplock = mtx_pool_find(mtxpool_sleep, vp);
1595 VOP_UNP_CONNECT(vp, &unp2);
1597 error = ECONNREFUSED;
1600 so2 = unp2->unp_socket;
1601 if (so->so_type != so2->so_type) {
1606 if (SOLISTENING(so2)) {
1607 CURVNET_SET(so2->so_vnet);
1608 so2 = sonewconn(so2, 0);
1613 error = ECONNREFUSED;
1616 unp3 = sotounpcb(so2);
1617 unp_pcb_lock_pair(unp2, unp3);
1618 if (unp2->unp_addr != NULL) {
1619 bcopy(unp2->unp_addr, sa, unp2->unp_addr->sun_len);
1620 unp3->unp_addr = (struct sockaddr_un *) sa;
1624 unp_copy_peercred(td, unp3, unp, unp2);
1626 UNP_PCB_UNLOCK(unp2);
1630 * It is safe to block on the PCB lock here since unp2 is
1631 * nascent and cannot be connected to any other sockets.
1635 mac_socketpeer_set_from_socket(so, so2);
1636 mac_socketpeer_set_from_socket(so2, so);
1639 unp_pcb_lock_pair(unp, unp2);
1641 KASSERT(unp2 != NULL && so2 != NULL && unp2->unp_socket == so2 &&
1642 sotounpcb(so2) == unp2,
1643 ("%s: unp2 %p so2 %p", __func__, unp2, so2));
1644 error = unp_connect2(so, so2, PRU_CONNECT);
1645 unp_pcb_unlock_pair(unp, unp2);
1654 KASSERT((unp->unp_flags & UNP_CONNECTING) != 0,
1655 ("%s: unp %p has UNP_CONNECTING clear", __func__, unp));
1656 unp->unp_flags &= ~UNP_CONNECTING;
1657 UNP_PCB_UNLOCK(unp);
1662 * Set socket peer credentials at connection time.
1664 * The client's PCB credentials are copied from its process structure. The
1665 * server's PCB credentials are copied from the socket on which it called
1666 * listen(2). uipc_listen cached that process's credentials at the time.
1669 unp_copy_peercred(struct thread *td, struct unpcb *client_unp,
1670 struct unpcb *server_unp, struct unpcb *listen_unp)
1672 cru2xt(td, &client_unp->unp_peercred);
1673 client_unp->unp_flags |= UNP_HAVEPC;
1675 memcpy(&server_unp->unp_peercred, &listen_unp->unp_peercred,
1676 sizeof(server_unp->unp_peercred));
1677 server_unp->unp_flags |= UNP_HAVEPC;
1678 client_unp->unp_flags |= (listen_unp->unp_flags & UNP_WANTCRED_MASK);
1682 unp_connect2(struct socket *so, struct socket *so2, int req)
1687 unp = sotounpcb(so);
1688 KASSERT(unp != NULL, ("unp_connect2: unp == NULL"));
1689 unp2 = sotounpcb(so2);
1690 KASSERT(unp2 != NULL, ("unp_connect2: unp2 == NULL"));
1692 UNP_PCB_LOCK_ASSERT(unp);
1693 UNP_PCB_LOCK_ASSERT(unp2);
1694 KASSERT(unp->unp_conn == NULL,
1695 ("%s: socket %p is already connected", __func__, unp));
1697 if (so2->so_type != so->so_type)
1698 return (EPROTOTYPE);
1699 unp->unp_conn = unp2;
1702 switch (so->so_type) {
1704 UNP_REF_LIST_LOCK();
1705 LIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_reflink);
1706 UNP_REF_LIST_UNLOCK();
1711 case SOCK_SEQPACKET:
1712 KASSERT(unp2->unp_conn == NULL,
1713 ("%s: socket %p is already connected", __func__, unp2));
1714 unp2->unp_conn = unp;
1715 if (req == PRU_CONNECT &&
1716 ((unp->unp_flags | unp2->unp_flags) & UNP_CONNWAIT))
1724 panic("unp_connect2");
1730 unp_disconnect(struct unpcb *unp, struct unpcb *unp2)
1732 struct socket *so, *so2;
1734 struct unpcb *unptmp;
1737 UNP_PCB_LOCK_ASSERT(unp);
1738 UNP_PCB_LOCK_ASSERT(unp2);
1739 KASSERT(unp->unp_conn == unp2,
1740 ("%s: unpcb %p is not connected to %p", __func__, unp, unp2));
1742 unp->unp_conn = NULL;
1743 so = unp->unp_socket;
1744 so2 = unp2->unp_socket;
1745 switch (unp->unp_socket->so_type) {
1747 UNP_REF_LIST_LOCK();
1749 LIST_FOREACH(unptmp, &unp2->unp_refs, unp_reflink) {
1753 KASSERT(unptmp != NULL,
1754 ("%s: %p not found in reflist of %p", __func__, unp, unp2));
1756 LIST_REMOVE(unp, unp_reflink);
1757 UNP_REF_LIST_UNLOCK();
1760 so->so_state &= ~SS_ISCONNECTED;
1766 case SOCK_SEQPACKET:
1768 soisdisconnected(so);
1769 MPASS(unp2->unp_conn == unp);
1770 unp2->unp_conn = NULL;
1772 soisdisconnected(so2);
1777 unp_pcb_rele_notlast(unp);
1778 if (!unp_pcb_rele(unp))
1779 UNP_PCB_UNLOCK(unp);
1781 if (!unp_pcb_rele(unp))
1782 UNP_PCB_UNLOCK(unp);
1783 if (!unp_pcb_rele(unp2))
1784 UNP_PCB_UNLOCK(unp2);
1789 * unp_pcblist() walks the global list of struct unpcb's to generate a
1790 * pointer list, bumping the refcount on each unpcb. It then copies them out
1791 * sequentially, validating the generation number on each to see if it has
1792 * been detached. All of this is necessary because copyout() may sleep on
1796 unp_pcblist(SYSCTL_HANDLER_ARGS)
1798 struct unpcb *unp, **unp_list;
1800 struct xunpgen *xug;
1801 struct unp_head *head;
1806 switch ((intptr_t)arg1) {
1815 case SOCK_SEQPACKET:
1820 panic("unp_pcblist: arg1 %d", (int)(intptr_t)arg1);
1824 * The process of preparing the PCB list is too time-consuming and
1825 * resource-intensive to repeat twice on every request.
1827 if (req->oldptr == NULL) {
1829 req->oldidx = 2 * (sizeof *xug)
1830 + (n + n/8) * sizeof(struct xunpcb);
1834 if (req->newptr != NULL)
1838 * OK, now we're committed to doing something.
1840 xug = malloc(sizeof(*xug), M_TEMP, M_WAITOK | M_ZERO);
1842 gencnt = unp_gencnt;
1846 xug->xug_len = sizeof *xug;
1848 xug->xug_gen = gencnt;
1849 xug->xug_sogen = so_gencnt;
1850 error = SYSCTL_OUT(req, xug, sizeof *xug);
1856 unp_list = malloc(n * sizeof *unp_list, M_TEMP, M_WAITOK);
1859 for (unp = LIST_FIRST(head), i = 0; unp && i < n;
1860 unp = LIST_NEXT(unp, unp_link)) {
1862 if (unp->unp_gencnt <= gencnt) {
1863 if (cr_cansee(req->td->td_ucred,
1864 unp->unp_socket->so_cred)) {
1865 UNP_PCB_UNLOCK(unp);
1868 unp_list[i++] = unp;
1871 UNP_PCB_UNLOCK(unp);
1874 n = i; /* In case we lost some during malloc. */
1877 xu = malloc(sizeof(*xu), M_TEMP, M_WAITOK | M_ZERO);
1878 for (i = 0; i < n; i++) {
1881 if (unp_pcb_rele(unp))
1884 if (unp->unp_gencnt <= gencnt) {
1885 xu->xu_len = sizeof *xu;
1886 xu->xu_unpp = (uintptr_t)unp;
1888 * XXX - need more locking here to protect against
1889 * connect/disconnect races for SMP.
1891 if (unp->unp_addr != NULL)
1892 bcopy(unp->unp_addr, &xu->xu_addr,
1893 unp->unp_addr->sun_len);
1895 bzero(&xu->xu_addr, sizeof(xu->xu_addr));
1896 if (unp->unp_conn != NULL &&
1897 unp->unp_conn->unp_addr != NULL)
1898 bcopy(unp->unp_conn->unp_addr,
1900 unp->unp_conn->unp_addr->sun_len);
1902 bzero(&xu->xu_caddr, sizeof(xu->xu_caddr));
1903 xu->unp_vnode = (uintptr_t)unp->unp_vnode;
1904 xu->unp_conn = (uintptr_t)unp->unp_conn;
1905 xu->xu_firstref = (uintptr_t)LIST_FIRST(&unp->unp_refs);
1906 xu->xu_nextref = (uintptr_t)LIST_NEXT(unp, unp_reflink);
1907 xu->unp_gencnt = unp->unp_gencnt;
1908 sotoxsocket(unp->unp_socket, &xu->xu_socket);
1909 UNP_PCB_UNLOCK(unp);
1910 error = SYSCTL_OUT(req, xu, sizeof *xu);
1912 UNP_PCB_UNLOCK(unp);
1918 * Give the user an updated idea of our state. If the
1919 * generation differs from what we told her before, she knows
1920 * that something happened while we were processing this
1921 * request, and it might be necessary to retry.
1923 xug->xug_gen = unp_gencnt;
1924 xug->xug_sogen = so_gencnt;
1925 xug->xug_count = unp_count;
1926 error = SYSCTL_OUT(req, xug, sizeof *xug);
1928 free(unp_list, M_TEMP);
1933 SYSCTL_PROC(_net_local_dgram, OID_AUTO, pcblist,
1934 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE,
1935 (void *)(intptr_t)SOCK_DGRAM, 0, unp_pcblist, "S,xunpcb",
1936 "List of active local datagram sockets");
1937 SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist,
1938 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE,
1939 (void *)(intptr_t)SOCK_STREAM, 0, unp_pcblist, "S,xunpcb",
1940 "List of active local stream sockets");
1941 SYSCTL_PROC(_net_local_seqpacket, OID_AUTO, pcblist,
1942 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE,
1943 (void *)(intptr_t)SOCK_SEQPACKET, 0, unp_pcblist, "S,xunpcb",
1944 "List of active local seqpacket sockets");
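/*
 * Usage note: these opaque pcblist sysctls are intended for userland
 * consumers; for example, netstat(1) renders them when displaying local
 * domain sockets (netstat -f unix).
 */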
1947 unp_shutdown(struct unpcb *unp)
1952 UNP_PCB_LOCK_ASSERT(unp);
1954 unp2 = unp->unp_conn;
1955 if ((unp->unp_socket->so_type == SOCK_STREAM ||
1956 (unp->unp_socket->so_type == SOCK_SEQPACKET)) && unp2 != NULL) {
1957 so = unp2->unp_socket;
1964 unp_drop(struct unpcb *unp)
1966 struct socket *so = unp->unp_socket;
1970 * Regardless of whether the socket's peer dropped the connection
1971 * with this socket by aborting or disconnecting, POSIX requires
1972 * that ECONNRESET is returned.
1977 so->so_error = ECONNRESET;
1978 if ((unp2 = unp_pcb_lock_peer(unp)) != NULL) {
1979 /* Last reference dropped in unp_disconnect(). */
1980 unp_pcb_rele_notlast(unp);
1981 unp_disconnect(unp, unp2);
1982 } else if (!unp_pcb_rele(unp)) {
1983 UNP_PCB_UNLOCK(unp);
1988 unp_freerights(struct filedescent **fdep, int fdcount)
1993 KASSERT(fdcount > 0, ("%s: fdcount %d", __func__, fdcount));
1995 for (i = 0; i < fdcount; i++) {
1996 fp = fdep[i]->fde_file;
1997 filecaps_free(&fdep[i]->fde_caps);
2000 free(fdep[0], M_FILECAPS);
2004 unp_externalize(struct mbuf *control, struct mbuf **controlp, int flags)
2006 struct thread *td = curthread; /* XXX */
2007 struct cmsghdr *cm = mtod(control, struct cmsghdr *);
2010 struct filedesc *fdesc = td->td_proc->p_fd;
2011 struct filedescent **fdep;
2013 socklen_t clen = control->m_len, datalen;
2017 UNP_LINK_UNLOCK_ASSERT();
2020 if (controlp != NULL) /* controlp == NULL => free control messages */
2022 while (cm != NULL) {
2023 if (sizeof(*cm) > clen || cm->cmsg_len > clen) {
2027 data = CMSG_DATA(cm);
2028 datalen = (caddr_t)cm + cm->cmsg_len - (caddr_t)data;
2029 if (cm->cmsg_level == SOL_SOCKET
2030 && cm->cmsg_type == SCM_RIGHTS) {
2031 newfds = datalen / sizeof(*fdep);
2036 /* If we're not outputting the descriptors, free them. */
2037 if (error || controlp == NULL) {
2038 unp_freerights(fdep, newfds);
2041 FILEDESC_XLOCK(fdesc);
2044 * Now change each pointer to an fd in the global
2045 * table to an integer that is the index to the local
2046 * fd table entry that we set up to point to the
2047 * global one we are transferring.
2049 newlen = newfds * sizeof(int);
2050 *controlp = sbcreatecontrol(NULL, newlen,
2051 SCM_RIGHTS, SOL_SOCKET);
2052 if (*controlp == NULL) {
2053 FILEDESC_XUNLOCK(fdesc);
2055 unp_freerights(fdep, newfds);
2060 CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2061 if (fdallocn(td, 0, fdp, newfds) != 0) {
2062 FILEDESC_XUNLOCK(fdesc);
2064 unp_freerights(fdep, newfds);
2069 for (i = 0; i < newfds; i++, fdp++) {
2070 _finstall(fdesc, fdep[i]->fde_file, *fdp,
2071 (flags & MSG_CMSG_CLOEXEC) != 0 ? O_CLOEXEC : 0,
2072 &fdep[i]->fde_caps);
2073 unp_externalize_fp(fdep[i]->fde_file);
2077 * The new type indicates that the mbuf data refers to
2078 * kernel resources that may need to be released before
2079 * the mbuf is freed.
2081 m_chtype(*controlp, MT_EXTCONTROL);
2082 FILEDESC_XUNLOCK(fdesc);
2083 free(fdep[0], M_FILECAPS);
2085 /* We can just copy anything else across. */
2086 if (error || controlp == NULL)
2088 *controlp = sbcreatecontrol(NULL, datalen,
2089 cm->cmsg_type, cm->cmsg_level);
2090 if (*controlp == NULL) {
2095 CMSG_DATA(mtod(*controlp, struct cmsghdr *)),
2098 controlp = &(*controlp)->m_next;
2101 if (CMSG_SPACE(datalen) < clen) {
2102 clen -= CMSG_SPACE(datalen);
2103 cm = (struct cmsghdr *)
2104 ((caddr_t)cm + CMSG_SPACE(datalen));
2116 unp_zone_change(void *tag)
2119 uma_zone_set_max(unp_zone, maxsockets);
2124 unp_zdtor(void *mem, int size __unused, void *arg __unused)
2130 KASSERT(LIST_EMPTY(&unp->unp_refs),
2131 ("%s: unpcb %p has lingering refs", __func__, unp));
2132 KASSERT(unp->unp_socket == NULL,
2133 ("%s: unpcb %p has socket backpointer", __func__, unp));
2134 KASSERT(unp->unp_vnode == NULL,
2135 ("%s: unpcb %p has vnode references", __func__, unp));
2136 KASSERT(unp->unp_conn == NULL,
2137 ("%s: unpcb %p is still connected", __func__, unp));
2138 KASSERT(unp->unp_addr == NULL,
2139 ("%s: unpcb %p has leaked addr", __func__, unp));
2149 if (!IS_DEFAULT_VNET(curvnet))
2158 unp_zone = uma_zcreate("unpcb", sizeof(struct unpcb), NULL, dtor,
2159 NULL, NULL, UMA_ALIGN_CACHE, 0);
2160 uma_zone_set_max(unp_zone, maxsockets);
2161 uma_zone_set_warning(unp_zone, "kern.ipc.maxsockets limit reached");
2162 EVENTHANDLER_REGISTER(maxsockets_change, unp_zone_change,
2163 NULL, EVENTHANDLER_PRI_ANY);
2164 LIST_INIT(&unp_dhead);
2165 LIST_INIT(&unp_shead);
2166 LIST_INIT(&unp_sphead);
2167 SLIST_INIT(&unp_defers);
2168 TIMEOUT_TASK_INIT(taskqueue_thread, &unp_gc_task, 0, unp_gc, NULL);
2169 TASK_INIT(&unp_defer_task, 0, unp_process_defers, NULL);
2170 UNP_LINK_LOCK_INIT();
2171 UNP_DEFERRED_LOCK_INIT();
2175 unp_internalize_cleanup_rights(struct mbuf *control)
2182 for (m = control; m != NULL; m = m->m_next) {
2183 cp = mtod(m, struct cmsghdr *);
2184 if (cp->cmsg_level != SOL_SOCKET ||
2185 cp->cmsg_type != SCM_RIGHTS)
2187 data = CMSG_DATA(cp);
2188 datalen = (caddr_t)cp + cp->cmsg_len - (caddr_t)data;
2189 unp_freerights(data, datalen / sizeof(struct filedesc *));
2194 unp_internalize(struct mbuf **controlp, struct thread *td)
2196 struct mbuf *control, **initial_controlp;
2198 struct filedesc *fdesc;
2201 struct cmsgcred *cmcred;
2202 struct filedescent *fde, **fdep, *fdev;
2205 struct timespec *ts;
2207 socklen_t clen, datalen;
2208 int i, j, error, *fdp, oldfds;
2211 UNP_LINK_UNLOCK_ASSERT();
2216 control = *controlp;
2217 clen = control->m_len;
2219 initial_controlp = controlp;
2220 for (cm = mtod(control, struct cmsghdr *); cm != NULL;) {
2221 if (sizeof(*cm) > clen || cm->cmsg_level != SOL_SOCKET
2222 || cm->cmsg_len > clen || cm->cmsg_len < sizeof(*cm)) {
2226 data = CMSG_DATA(cm);
2227 datalen = (caddr_t)cm + cm->cmsg_len - (caddr_t)data;
2229 switch (cm->cmsg_type) {
2231 * Fill in credential information.
2234 *controlp = sbcreatecontrol(NULL, sizeof(*cmcred),
2235 SCM_CREDS, SOL_SOCKET);
2236 if (*controlp == NULL) {
2240 cmcred = (struct cmsgcred *)
2241 CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2242 cmcred->cmcred_pid = p->p_pid;
2243 cmcred->cmcred_uid = td->td_ucred->cr_ruid;
2244 cmcred->cmcred_gid = td->td_ucred->cr_rgid;
2245 cmcred->cmcred_euid = td->td_ucred->cr_uid;
2246 cmcred->cmcred_ngroups = MIN(td->td_ucred->cr_ngroups,
2248 for (i = 0; i < cmcred->cmcred_ngroups; i++)
2249 cmcred->cmcred_groups[i] =
2250 td->td_ucred->cr_groups[i];
2254 oldfds = datalen / sizeof (int);
2258 * Check that all the FDs passed in refer to legal
2259 * files. If not, reject the entire operation.
2262 FILEDESC_SLOCK(fdesc);
2263 for (i = 0; i < oldfds; i++, fdp++) {
2264 fp = fget_locked(fdesc, *fdp);
2266 FILEDESC_SUNLOCK(fdesc);
2270 if (!(fp->f_ops->fo_flags & DFLAG_PASSABLE)) {
2271 FILEDESC_SUNLOCK(fdesc);
2278 * Now replace the integer FDs with pointers to the
2279 * file structure and capability rights.
2281 newlen = oldfds * sizeof(fdep[0]);
2282 *controlp = sbcreatecontrol(NULL, newlen,
2283 SCM_RIGHTS, SOL_SOCKET);
2284 if (*controlp == NULL) {
2285 FILEDESC_SUNLOCK(fdesc);
2290 for (i = 0; i < oldfds; i++, fdp++) {
2291 if (!fhold(fdesc->fd_ofiles[*fdp].fde_file)) {
2293 for (j = 0; j < i; j++, fdp++) {
2294 fdrop(fdesc->fd_ofiles[*fdp].
2297 FILEDESC_SUNLOCK(fdesc);
2303 fdep = (struct filedescent **)
2304 CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2305 fdev = malloc(sizeof(*fdev) * oldfds, M_FILECAPS,
2307 for (i = 0; i < oldfds; i++, fdev++, fdp++) {
2308 fde = &fdesc->fd_ofiles[*fdp];
2310 fdep[i]->fde_file = fde->fde_file;
2311 filecaps_copy(&fde->fde_caps,
2312 &fdep[i]->fde_caps, true);
2313 unp_internalize_fp(fdep[i]->fde_file);
2315 FILEDESC_SUNLOCK(fdesc);
2319 *controlp = sbcreatecontrol(NULL, sizeof(*tv),
2320 SCM_TIMESTAMP, SOL_SOCKET);
2321 if (*controlp == NULL) {
2325 tv = (struct timeval *)
2326 CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2331 *controlp = sbcreatecontrol(NULL, sizeof(*bt),
2332 SCM_BINTIME, SOL_SOCKET);
2333 if (*controlp == NULL) {
2337 bt = (struct bintime *)
2338 CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2343 *controlp = sbcreatecontrol(NULL, sizeof(*ts),
2344 SCM_REALTIME, SOL_SOCKET);
2345 if (*controlp == NULL) {
2349 ts = (struct timespec *)
2350 CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2355 *controlp = sbcreatecontrol(NULL, sizeof(*ts),
2356 SCM_MONOTONIC, SOL_SOCKET);
2357 if (*controlp == NULL) {
2361 ts = (struct timespec *)
2362 CMSG_DATA(mtod(*controlp, struct cmsghdr *));
2371 if (*controlp != NULL)
2372 controlp = &(*controlp)->m_next;
2373 if (CMSG_SPACE(datalen) < clen) {
2374 clen -= CMSG_SPACE(datalen);
2375 cm = (struct cmsghdr *)
2376 ((caddr_t)cm + CMSG_SPACE(datalen));
2384 if (error != 0 && initial_controlp != NULL)
2385 unp_internalize_cleanup_rights(*initial_controlp);
2390 static struct mbuf *
2391 unp_addsockcred(struct thread *td, struct mbuf *control, int mode)
2393 struct mbuf *m, *n, *n_prev;
2394 const struct cmsghdr *cm;
2395 int ngroups, i, cmsgtype;
2398 ngroups = MIN(td->td_ucred->cr_ngroups, CMGROUP_MAX);
2399 if (mode & UNP_WANTCRED_ALWAYS) {
2400 ctrlsz = SOCKCRED2SIZE(ngroups);
2401 cmsgtype = SCM_CREDS2;
2403 ctrlsz = SOCKCREDSIZE(ngroups);
2404 cmsgtype = SCM_CREDS;
2407 m = sbcreatecontrol(NULL, ctrlsz, cmsgtype, SOL_SOCKET);
2411 if (mode & UNP_WANTCRED_ALWAYS) {
2412 struct sockcred2 *sc;
2414 sc = (void *)CMSG_DATA(mtod(m, struct cmsghdr *));
2416 sc->sc_pid = td->td_proc->p_pid;
2417 sc->sc_uid = td->td_ucred->cr_ruid;
2418 sc->sc_euid = td->td_ucred->cr_uid;
2419 sc->sc_gid = td->td_ucred->cr_rgid;
2420 sc->sc_egid = td->td_ucred->cr_gid;
2421 sc->sc_ngroups = ngroups;
2422 for (i = 0; i < sc->sc_ngroups; i++)
2423 sc->sc_groups[i] = td->td_ucred->cr_groups[i];
2425 struct sockcred *sc;
2427 sc = (void *)CMSG_DATA(mtod(m, struct cmsghdr *));
2428 sc->sc_uid = td->td_ucred->cr_ruid;
2429 sc->sc_euid = td->td_ucred->cr_uid;
2430 sc->sc_gid = td->td_ucred->cr_rgid;
2431 sc->sc_egid = td->td_ucred->cr_gid;
2432 sc->sc_ngroups = ngroups;
2433 for (i = 0; i < sc->sc_ngroups; i++)
2434 sc->sc_groups[i] = td->td_ucred->cr_groups[i];
2438 * Unlink any existing SCM_CREDS control messages (struct cmsgcred), since
2439 * the just-created SCM_CREDS control message (struct sockcred) has another
2442 if (control != NULL && cmsgtype == SCM_CREDS)
2443 for (n = control, n_prev = NULL; n != NULL;) {
2444 cm = mtod(n, struct cmsghdr *);
2445 if (cm->cmsg_level == SOL_SOCKET &&
2446 cm->cmsg_type == SCM_CREDS) {
2448 control = n->m_next;
2450 n_prev->m_next = n->m_next;
2458 /* Prepend it to the head. */
2459 m->m_next = control;
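/*
 * Illustrative userland sketch (receive setup abbreviated): a receiver
 * that has enabled LOCAL_CREDS sees the sender's credentials arrive as a
 * SCM_CREDS control message carrying a struct sockcred.
 *
 *	int on = 1;
 *	struct cmsghdr *cm;
 *	struct sockcred *sc;
 *
 *	setsockopt(s, SOL_LOCAL, LOCAL_CREDS, &on, sizeof(on));
 *	recvmsg(s, &msg, 0);	-- msg set up with a control buffer
 *	for (cm = CMSG_FIRSTHDR(&msg); cm != NULL; cm = CMSG_NXTHDR(&msg, cm))
 *		if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_CREDS) {
 *			sc = (struct sockcred *)CMSG_DATA(cm);
 *			printf("sender uid %d\n", (int)sc->sc_uid);
 *		}
 */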
2463 static struct unpcb *
2464 fptounp(struct file *fp)
2468 if (fp->f_type != DTYPE_SOCKET)
2470 if ((so = fp->f_data) == NULL)
2472 if (so->so_proto->pr_domain != &localdomain)
2474 return sotounpcb(so);
2478 unp_discard(struct file *fp)
2480 struct unp_defer *dr;
2482 if (unp_externalize_fp(fp)) {
2483 dr = malloc(sizeof(*dr), M_TEMP, M_WAITOK);
2485 UNP_DEFERRED_LOCK();
2486 SLIST_INSERT_HEAD(&unp_defers, dr, ud_link);
2487 UNP_DEFERRED_UNLOCK();
2488 atomic_add_int(&unp_defers_count, 1);
2489 taskqueue_enqueue(taskqueue_thread, &unp_defer_task);
2491 closef_nothread(fp);
2495 unp_process_defers(void *arg __unused, int pending)
2497 struct unp_defer *dr;
2498 SLIST_HEAD(, unp_defer) drl;
2503 UNP_DEFERRED_LOCK();
2504 if (SLIST_FIRST(&unp_defers) == NULL) {
2505 UNP_DEFERRED_UNLOCK();
2508 SLIST_SWAP(&unp_defers, &drl, unp_defer);
2509 UNP_DEFERRED_UNLOCK();
2511 while ((dr = SLIST_FIRST(&drl)) != NULL) {
2512 SLIST_REMOVE_HEAD(&drl, ud_link);
2513 closef_nothread(dr->ud_fp);
2517 atomic_add_int(&unp_defers_count, -count);
2522 unp_internalize_fp(struct file *fp)
2527 if ((unp = fptounp(fp)) != NULL) {
2529 unp->unp_msgcount++;
2536 unp_externalize_fp(struct file *fp)
2542 if ((unp = fptounp(fp)) != NULL) {
2543 unp->unp_msgcount--;
2553 * unp_marked indicates whether additional work has been deferred for a future
2554 * scan pass within unp_gc(). It is thread local and does not require explicit
2557 static int unp_marked;
2560 unp_remove_dead_ref(struct filedescent **fdep, int fdcount)
2567 * This function can only be called from the gc task.
2569 KASSERT(taskqueue_member(taskqueue_thread, curthread) != 0,
2570 ("%s: not on gc callout", __func__));
2571 UNP_LINK_LOCK_ASSERT();
2573 for (i = 0; i < fdcount; i++) {
2574 fp = fdep[i]->fde_file;
2575 if ((unp = fptounp(fp)) == NULL)
2577 if ((unp->unp_gcflag & UNPGC_DEAD) == 0)
2584 unp_restore_undead_ref(struct filedescent **fdep, int fdcount)
2591 * This function can only be called from the gc task.
2593 KASSERT(taskqueue_member(taskqueue_thread, curthread) != 0,
2594 ("%s: not on gc callout", __func__));
2595 UNP_LINK_LOCK_ASSERT();
2597 for (i = 0; i < fdcount; i++) {
2598 fp = fdep[i]->fde_file;
2599 if ((unp = fptounp(fp)) == NULL)
2601 if ((unp->unp_gcflag & UNPGC_DEAD) == 0)
static void
unp_gc_scan(struct unpcb *unp, void (*op)(struct filedescent **, int))
{
	struct socket *so, *soa;

	so = unp->unp_socket;
	SOCK_LOCK(so);
	if (SOLISTENING(so)) {
		/*
		 * Mark all sockets in our accept queue.
		 */
		TAILQ_FOREACH(soa, &so->sol_comp, so_list) {
			if (sotounpcb(soa)->unp_gcflag & UNPGC_IGNORE_RIGHTS)
				continue;
			SOCKBUF_LOCK(&soa->so_rcv);
			unp_scan(soa->so_rcv.sb_mb, op);
			SOCKBUF_UNLOCK(&soa->so_rcv);
		}
	} else {
		/*
		 * Mark all sockets we reference with RIGHTS.
		 */
		if ((unp->unp_gcflag & UNPGC_IGNORE_RIGHTS) == 0) {
			SOCKBUF_LOCK(&so->so_rcv);
			unp_scan(so->so_rcv.sb_mb, op);
			SOCKBUF_UNLOCK(&so->so_rcv);
		}
	}
	SOCK_UNLOCK(so);
}
static int unp_recycled;
SYSCTL_INT(_net_local, OID_AUTO, recycled, CTLFLAG_RD, &unp_recycled, 0,
    "Number of unreachable sockets claimed by the garbage collector.");

static int unp_taskcount;
SYSCTL_INT(_net_local, OID_AUTO, taskcount, CTLFLAG_RD, &unp_taskcount, 0,
    "Number of times the garbage collector has run.");

SYSCTL_UINT(_net_local, OID_AUTO, sockcount, CTLFLAG_RD, &unp_count, 0,
    "Number of active local sockets.");
static void
unp_gc(__unused void *arg, int pending)
{
	struct unp_head *heads[] = { &unp_dhead, &unp_shead, &unp_sphead,
				    NULL };
	struct unp_head **head;
	struct unp_head unp_deadhead;	/* List of potentially-dead sockets. */
	struct file *f, **unref;
	struct unpcb *unp, *unptmp;
	int i, total, unp_unreachable;

	LIST_INIT(&unp_deadhead);
	unp_taskcount++;
	UNP_LINK_RLOCK();
	/*
	 * First determine which sockets may be in cycles.
	 */
	unp_unreachable = 0;

	for (head = heads; *head != NULL; head++)
		LIST_FOREACH(unp, *head, unp_link) {
			KASSERT((unp->unp_gcflag & ~UNPGC_IGNORE_RIGHTS) == 0,
			    ("%s: unp %p has unexpected gc flags 0x%x",
			    __func__, unp, (unsigned int)unp->unp_gcflag));

			f = unp->unp_file;

			/*
			 * Check for an unreachable socket potentially in a
			 * cycle.  It must be in a queue as indicated by
			 * msgcount, and this must equal the file reference
			 * count.  Note that when msgcount is 0 the file is
			 * NULL.
			 */
			if (f != NULL && unp->unp_msgcount != 0 &&
			    refcount_load(&f->f_count) == unp->unp_msgcount) {
				LIST_INSERT_HEAD(&unp_deadhead, unp, unp_dead);
				unp->unp_gcflag |= UNPGC_DEAD;
				unp->unp_gcrefs = unp->unp_msgcount;
				unp_unreachable++;
			}
		}

	/*
	 * Scan all sockets previously marked as potentially being in a cycle
	 * and remove the references each socket holds on any UNPGC_DEAD
	 * sockets in its queue.  After this step, all remaining references on
	 * sockets marked UNPGC_DEAD should not be part of any cycle.
	 */
	LIST_FOREACH(unp, &unp_deadhead, unp_dead)
		unp_gc_scan(unp, unp_remove_dead_ref);

	/*
	 * If a socket still has a positive refcount, it cannot be in a
	 * cycle.  In this case, iteratively restore the refcounts of all
	 * of its children.  Stop the scan once we complete a full pass
	 * without discovering a new reachable socket.
	 */
	do {
		unp_marked = 0;
		LIST_FOREACH_SAFE(unp, &unp_deadhead, unp_dead, unptmp)
			if (unp->unp_gcrefs > 0) {
				unp->unp_gcflag &= ~UNPGC_DEAD;
				LIST_REMOVE(unp, unp_dead);
				KASSERT(unp_unreachable > 0,
				    ("%s: unp_unreachable underflow.",
				    __func__));
				unp_unreachable--;
				unp_gc_scan(unp, unp_restore_undead_ref);
			}
	} while (unp_marked);

	UNP_LINK_RUNLOCK();

	if (unp_unreachable == 0)
		return;

	/*
	 * Allocate space for a local array of dead unpcbs.
	 * TODO: can this path be simplified by instead using the local
	 * dead list at unp_deadhead, after taking out references
	 * on the file object and/or unpcb and dropping the link lock?
	 */
	unref = malloc(unp_unreachable * sizeof(struct file *),
	    M_TEMP, M_WAITOK);

	/*
	 * Iterate looking for sockets which have been specifically marked
	 * as unreachable and store them locally.
	 */
	UNP_LINK_RLOCK();
	total = 0;
	LIST_FOREACH(unp, &unp_deadhead, unp_dead) {
		KASSERT((unp->unp_gcflag & UNPGC_DEAD) != 0,
		    ("%s: unp %p not marked UNPGC_DEAD", __func__, unp));
		unp->unp_gcflag &= ~UNPGC_DEAD;
		f = unp->unp_file;
		if (unp->unp_msgcount == 0 || f == NULL ||
		    refcount_load(&f->f_count) != unp->unp_msgcount ||
		    !fhold(f))
			continue;
		unref[total++] = f;
		KASSERT(total <= unp_unreachable,
		    ("%s: incorrect unreachable count.", __func__));
	}
	UNP_LINK_RUNLOCK();

	/*
	 * Now flush all sockets, freeing rights.  This will free the
	 * struct files associated with these sockets but leave each socket
	 * with one remaining ref.
	 */
	for (i = 0; i < total; i++) {
		struct socket *so;

		so = unref[i]->f_data;
		CURVNET_SET(so->so_vnet);
		sorflush(so);
		CURVNET_RESTORE();
	}

	/*
	 * And finally release the sockets so they can be reclaimed.
	 */
	for (i = 0; i < total; i++)
		fdrop(unref[i], NULL);
	unp_recycled += total;
	free(unref, M_TEMP);
}
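/*
 * Illustrative userland sketch (editor's addition, not part of the kernel
 * build): the canonical way to create garbage that only unp_gc() can
 * reclaim.  A socket's descriptor is sent over the pair itself and both
 * ends are closed; the in-flight SCM_RIGHTS copy then holds the only
 * reference to the file, so f_count == unp_msgcount and the cycle is
 * collected on a subsequent GC pass.
 */
#if 0
#include <sys/socket.h>
#include <string.h>
#include <unistd.h>

static void
make_unp_garbage(void)
{
	union {
		struct cmsghdr hdr;
		char buf[CMSG_SPACE(sizeof(int))];
	} cmsgbuf;
	struct msghdr msg;
	struct cmsghdr *cm;
	struct iovec iov;
	char c = 0;
	int sv[2];

	if (socketpair(PF_LOCAL, SOCK_STREAM, 0, sv) != 0)
		return;

	memset(&cmsgbuf, 0, sizeof(cmsgbuf));
	memset(&msg, 0, sizeof(msg));
	iov.iov_base = &c;
	iov.iov_len = sizeof(c);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cmsgbuf.buf;
	msg.msg_controllen = CMSG_SPACE(sizeof(int));
	cm = CMSG_FIRSTHDR(&msg);
	cm->cmsg_len = CMSG_LEN(sizeof(int));
	cm->cmsg_level = SOL_SOCKET;
	cm->cmsg_type = SCM_RIGHTS;
	memcpy(CMSG_DATA(cm), &sv[0], sizeof(int));

	/* sv[0]'s file is now also referenced from a socket buffer... */
	sendmsg(sv[1], &msg, 0);

	/* ...so closing both descriptors leaves an unreachable cycle. */
	close(sv[0]);
	close(sv[1]);
}
#endif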
static void
unp_dispose_mbuf(struct mbuf *m)
{

	if (m)
		unp_scan(m, unp_freerights);
}
 * Synchronize against unp_gc, which can trip over data as we are freeing it.
 */
static void
unp_dispose(struct socket *so)
{
	struct unpcb *unp;

	unp = sotounpcb(so);
	UNP_LINK_WLOCK();
	unp->unp_gcflag |= UNPGC_IGNORE_RIGHTS;
	UNP_LINK_WUNLOCK();
	if (!SOLISTENING(so))
		unp_dispose_mbuf(so->so_rcv.sb_mb);
}
static void
unp_scan(struct mbuf *m0, void (*op)(struct filedescent **, int))
{
	struct mbuf *m;
	struct cmsghdr *cm;
	void *data;
	socklen_t clen, datalen;

	while (m0 != NULL) {
		for (m = m0; m; m = m->m_next) {
			if (m->m_type != MT_CONTROL)
				continue;

			cm = mtod(m, struct cmsghdr *);
			clen = m->m_len;
			while (cm != NULL) {
				if (sizeof(*cm) > clen || cm->cmsg_len > clen)
					break;

				data = CMSG_DATA(cm);
				datalen = (caddr_t)cm + cm->cmsg_len
				    - (caddr_t)data;

				if (cm->cmsg_level == SOL_SOCKET &&
				    cm->cmsg_type == SCM_RIGHTS) {
					(*op)(data, datalen /
					    sizeof(struct filedescent *));
				}

				if (CMSG_SPACE(datalen) < clen) {
					clen -= CMSG_SPACE(datalen);
					cm = (struct cmsghdr *)
					    ((caddr_t)cm + CMSG_SPACE(datalen));
				} else {
					clen = 0;
					cm = NULL;
				}
			}
		}
		m0 = m0->m_nextpkt;
	}
}
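/*
 * Illustrative userland sketch (editor's addition, not part of the kernel
 * build): the userland analogue of the walk above, using the portable
 * CMSG_FIRSTHDR()/CMSG_NXTHDR() iterators.  Note that userland sees plain
 * ints as SCM_RIGHTS payload where the in-kernel representation scanned
 * above stores struct filedescent pointers.
 */
#if 0
#include <sys/socket.h>

static void
scan_rights(struct msghdr *msg, void (*op)(int *, int))
{
	struct cmsghdr *cm;
	int nfds;

	for (cm = CMSG_FIRSTHDR(msg); cm != NULL;
	    cm = CMSG_NXTHDR(msg, cm)) {
		if (cm->cmsg_level != SOL_SOCKET ||
		    cm->cmsg_type != SCM_RIGHTS)
			continue;
		nfds = (cm->cmsg_len - CMSG_LEN(0)) / sizeof(int);
		(*op)((int *)(void *)CMSG_DATA(cm), nfds);
	}
}
#endif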
 * A helper function called by VFS before socket-type vnode reclamation.
 * For an active vnode it clears the unp_vnode pointer and drops the use
 * count the bound socket held on the vnode.
 */
void
vfs_unp_reclaim(struct vnode *vp)
{
	struct unpcb *unp;
	struct mtx *vplock;
	int active;

	ASSERT_VOP_ELOCKED(vp, "vfs_unp_reclaim");
	KASSERT(vp->v_type == VSOCK,
	    ("vfs_unp_reclaim: vp->v_type != VSOCK"));

	active = 0;
	vplock = mtx_pool_find(mtxpool_sleep, vp);
	mtx_lock(vplock);
	VOP_UNP_CONNECT(vp, &unp);
	if (unp == NULL)
		goto done;
	UNP_PCB_LOCK(unp);
	if (unp->unp_vnode == vp) {
		VOP_UNP_DETACH(vp);
		unp->unp_vnode = NULL;
		active = 1;
	}
	UNP_PCB_UNLOCK(unp);
 done:
	mtx_unlock(vplock);
	if (active)
		vunref(vp);
}
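/*
 * Note (editor's addition): VFS reaches this helper from vnode reclamation
 * (vgone() and friends) for VSOCK vnodes, ensuring a socket bound in the
 * file system name space does not retain a dangling unp_vnode pointer once
 * its vnode is recycled.
 */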
#ifdef DDB
static void
db_print_indent(int indent)
{
	int i;

	for (i = 0; i < indent; i++)
		db_printf(" ");
}
static void
db_print_unpflags(int unp_flags)
{
	int comma;

	comma = 0;
	if (unp_flags & UNP_HAVEPC) {
		db_printf("%sUNP_HAVEPC", comma ? ", " : "");
		comma = 1;
	}
	if (unp_flags & UNP_WANTCRED_ALWAYS) {
		db_printf("%sUNP_WANTCRED_ALWAYS", comma ? ", " : "");
		comma = 1;
	}
	if (unp_flags & UNP_WANTCRED_ONESHOT) {
		db_printf("%sUNP_WANTCRED_ONESHOT", comma ? ", " : "");
		comma = 1;
	}
	if (unp_flags & UNP_CONNWAIT) {
		db_printf("%sUNP_CONNWAIT", comma ? ", " : "");
		comma = 1;
	}
	if (unp_flags & UNP_CONNECTING) {
		db_printf("%sUNP_CONNECTING", comma ? ", " : "");
		comma = 1;
	}
	if (unp_flags & UNP_BINDING) {
		db_printf("%sUNP_BINDING", comma ? ", " : "");
		comma = 1;
	}
}
static void
db_print_xucred(int indent, struct xucred *xu)
{
	int comma, i;

	db_print_indent(indent);
	db_printf("cr_version: %u cr_uid: %u cr_pid: %d cr_ngroups: %d\n",
	    xu->cr_version, xu->cr_uid, xu->cr_pid, xu->cr_ngroups);
	db_print_indent(indent);
	db_printf("cr_groups: ");
	comma = 0;
	for (i = 0; i < xu->cr_ngroups; i++) {
		db_printf("%s%u", comma ? ", " : "", xu->cr_groups[i]);
		comma = 1;
	}
	db_printf("\n");
}
static void
db_print_unprefs(int indent, struct unp_head *uh)
{
	struct unpcb *unp;
	int counter;

	counter = 0;
	LIST_FOREACH(unp, uh, unp_reflink) {
		if (counter % 4 == 0)
			db_print_indent(indent);
		db_printf("%p ", unp);
		if (counter % 4 == 3)
			db_printf("\n");
		counter++;
	}
	if (counter != 0 && counter % 4 != 0)
		db_printf("\n");
}
DB_SHOW_COMMAND(unpcb, db_show_unpcb)
{
	struct unpcb *unp;

	if (!have_addr) {
		db_printf("usage: show unpcb <addr>\n");
		return;
	}
	unp = (struct unpcb *)addr;

	db_printf("unp_socket: %p unp_vnode: %p\n", unp->unp_socket,
	    unp->unp_vnode);

	db_printf("unp_ino: %ju unp_conn: %p\n", (uintmax_t)unp->unp_ino,
	    unp->unp_conn);

	db_printf("unp_refs:\n");
	db_print_unprefs(2, &unp->unp_refs);

	/* XXXRW: Would be nice to print the full address, if any. */
	db_printf("unp_addr: %p\n", unp->unp_addr);

	db_printf("unp_gencnt: %llu\n",
	    (unsigned long long)unp->unp_gencnt);

	db_printf("unp_flags: %x (", unp->unp_flags);
	db_print_unpflags(unp->unp_flags);
	db_printf(")\n");

	db_printf("unp_peercred:\n");
	db_print_xucred(2, &unp->unp_peercred);

	db_printf("unp_refcount: %u\n", unp->unp_refcount);
}
#endif