1 /*-
2  * Copyright (c) 1982, 1986, 1989, 1991, 1993
3  *      The Regents of the University of California.
4  * Copyright (c) 2004-2009 Robert N. M. Watson
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 4. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  *      From: @(#)uipc_usrreq.c 8.3 (Berkeley) 1/4/94
32  */
33
34 /*
35  * UNIX Domain (Local) Sockets
36  *
37  * This is an implementation of UNIX (local) domain sockets.  Each socket has
38  * an associated struct unpcb (UNIX protocol control block).  Stream sockets
39  * may be connected to 0 or 1 other socket.  Datagram sockets may be
40  * connected to 0, 1, or many other sockets.  Sockets may be created and
41  * connected in pairs (socketpair(2)), or bound/connected to using the file
42  * system name space.  For most purposes, only the receive socket buffer is
43  * used, as sending on one socket delivers directly to the receive socket
44  * buffer of a second socket.
45  *
46  * The implementation is substantially complicated by the fact that
47  * "ancillary data", such as file descriptors or credentials, may be passed
48  * across UNIX domain sockets.  The potential for passing UNIX domain sockets
49  * over other UNIX domain sockets requires the implementation of a simple
50  * garbage collector to find and tear down cycles of disconnected sockets.
51  *
52  * TODO:
53  *      RDM
54  *      distinguish datagram size limits from flow control limits in SEQPACKET
55  *      rethink name space problems
56  *      need a proper out-of-band
57  */
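
/*
 * Illustrative userland sketch (not part of this kernel file): a connected
 * pair of local stream sockets is created with socketpair(2), and a file
 * descriptor is passed across it as SCM_RIGHTS ancillary data, which
 * exercises the unp_internalize()/unp_externalize() paths below.  The
 * function and variable names here are hypothetical.
 *
 *	#include <sys/socket.h>
 *	#include <string.h>
 *
 *	static int
 *	send_fd(int s, int fd)
 *	{
 *		char buf[CMSG_SPACE(sizeof(int))];
 *		struct msghdr msg;
 *		struct cmsghdr *cmsg;
 *		struct iovec iov;
 *		char c = 0;
 *
 *		memset(&msg, 0, sizeof(msg));
 *		memset(buf, 0, sizeof(buf));
 *		iov.iov_base = &c;
 *		iov.iov_len = 1;
 *		msg.msg_iov = &iov;
 *		msg.msg_iovlen = 1;
 *		msg.msg_control = buf;
 *		msg.msg_controllen = sizeof(buf);
 *		cmsg = CMSG_FIRSTHDR(&msg);
 *		cmsg->cmsg_level = SOL_SOCKET;
 *		cmsg->cmsg_type = SCM_RIGHTS;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *		memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
 *		return (sendmsg(s, &msg, 0) == -1 ? -1 : 0);
 *	}
 *
 *	int fds[2];
 *
 *	if (socketpair(PF_LOCAL, SOCK_STREAM, 0, fds) == 0)
 *		(void)send_fd(fds[0], fd_to_pass);
 */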
58
59 #include <sys/cdefs.h>
60 __FBSDID("$FreeBSD$");
61
62 #include "opt_ddb.h"
63
64 #include <sys/param.h>
65 #include <sys/domain.h>
66 #include <sys/fcntl.h>
67 #include <sys/malloc.h>         /* XXX must be before <sys/file.h> */
68 #include <sys/eventhandler.h>
69 #include <sys/file.h>
70 #include <sys/filedesc.h>
71 #include <sys/kernel.h>
72 #include <sys/lock.h>
73 #include <sys/mbuf.h>
74 #include <sys/mount.h>
75 #include <sys/mutex.h>
76 #include <sys/namei.h>
77 #include <sys/proc.h>
78 #include <sys/protosw.h>
79 #include <sys/queue.h>
80 #include <sys/resourcevar.h>
81 #include <sys/rwlock.h>
82 #include <sys/socket.h>
83 #include <sys/socketvar.h>
84 #include <sys/signalvar.h>
85 #include <sys/stat.h>
86 #include <sys/sx.h>
87 #include <sys/sysctl.h>
88 #include <sys/systm.h>
89 #include <sys/taskqueue.h>
90 #include <sys/un.h>
91 #include <sys/unpcb.h>
92 #include <sys/vnode.h>
93
94 #include <net/vnet.h>
95
96 #ifdef DDB
97 #include <ddb/ddb.h>
98 #endif
99
100 #include <security/mac/mac_framework.h>
101
102 #include <vm/uma.h>
103
104 /*
105  * Locking key:
106  * (l)  Locked using list lock
107  * (g)  Locked using linkage lock
108  */
109
110 static uma_zone_t       unp_zone;
111 static unp_gen_t        unp_gencnt;     /* (l) */
112 static u_int            unp_count;      /* (l) Count of local sockets. */
113 static ino_t            unp_ino;        /* Prototype for fake inode numbers. */
114 static int              unp_rights;     /* (g) File descriptors in flight. */
115 static struct unp_head  unp_shead;      /* (l) List of stream sockets. */
116 static struct unp_head  unp_dhead;      /* (l) List of datagram sockets. */
117 static struct unp_head  unp_sphead;     /* (l) List of seqpacket sockets. */
118
119 struct unp_defer {
120         SLIST_ENTRY(unp_defer) ud_link;
121         struct file *ud_fp;
122 };
123 static SLIST_HEAD(, unp_defer) unp_defers;
124 static int unp_defers_count;
125
126 static const struct sockaddr    sun_noname = { sizeof(sun_noname), AF_LOCAL };
127
128 /*
129  * Garbage collection of cyclic file descriptor/socket references occurs
130  * asynchronously in a taskqueue context in order to avoid recursion and
131  * reentrance in the UNIX domain socket, file descriptor, and socket layer
132  * code.  See unp_gc() for a full description.
133  */
134 static struct task      unp_gc_task;
135
136 /*
137  * The close of UNIX domain sockets attached as SCM_RIGHTS is
138  * postponed to the taskqueue to avoid arbitrary recursion depth.
139  * The attached sockets might themselves have further sockets attached.
140  */
141 static struct task      unp_defer_task;
142
143 /*
144  * For stream sockets, both the send and receive buffers are allocated
145  * PIPSIZ bytes, although since only the receive buffer actually holds
146  * data, the effective total for a sender/receiver pair is only PIPSIZ.
147  *
148  * Datagram sockets really use the sendspace as the maximum datagram size,
149  * and don't really want to reserve the sendspace.  Their recvspace should be
150  * large enough for at least one max-size datagram plus address.
151  */
152 #ifndef PIPSIZ
153 #define PIPSIZ  8192
154 #endif
155 static u_long   unpst_sendspace = PIPSIZ;
156 static u_long   unpst_recvspace = PIPSIZ;
157 static u_long   unpdg_sendspace = 2*1024;       /* really max datagram size */
158 static u_long   unpdg_recvspace = 4*1024;
159 static u_long   unpsp_sendspace = PIPSIZ;       /* really max datagram size */
160 static u_long   unpsp_recvspace = PIPSIZ;
161
162 SYSCTL_NODE(_net, PF_LOCAL, local, CTLFLAG_RW, 0, "Local domain");
163 SYSCTL_NODE(_net_local, SOCK_STREAM, stream, CTLFLAG_RW, 0, "SOCK_STREAM");
164 SYSCTL_NODE(_net_local, SOCK_DGRAM, dgram, CTLFLAG_RW, 0, "SOCK_DGRAM");
165 SYSCTL_NODE(_net_local, SOCK_SEQPACKET, seqpacket, CTLFLAG_RW, 0,
166     "SOCK_SEQPACKET");
167
168 SYSCTL_ULONG(_net_local_stream, OID_AUTO, sendspace, CTLFLAG_RW,
169            &unpst_sendspace, 0, "Default stream send space.");
170 SYSCTL_ULONG(_net_local_stream, OID_AUTO, recvspace, CTLFLAG_RW,
171            &unpst_recvspace, 0, "Default stream receive space.");
172 SYSCTL_ULONG(_net_local_dgram, OID_AUTO, maxdgram, CTLFLAG_RW,
173            &unpdg_sendspace, 0, "Default datagram send space.");
174 SYSCTL_ULONG(_net_local_dgram, OID_AUTO, recvspace, CTLFLAG_RW,
175            &unpdg_recvspace, 0, "Default datagram receive space.");
176 SYSCTL_ULONG(_net_local_seqpacket, OID_AUTO, maxseqpacket, CTLFLAG_RW,
177            &unpsp_sendspace, 0, "Default seqpacket send space.");
178 SYSCTL_ULONG(_net_local_seqpacket, OID_AUTO, recvspace, CTLFLAG_RW,
179            &unpsp_recvspace, 0, "Default seqpacket receive space.");
180 SYSCTL_INT(_net_local, OID_AUTO, inflight, CTLFLAG_RD, &unp_rights, 0,
181     "File descriptors in flight.");
182 SYSCTL_INT(_net_local, OID_AUTO, deferred, CTLFLAG_RD,
183     &unp_defers_count, 0,
184     "File descriptors deferred to taskqueue for close.");
185
186 /*-
187  * Locking and synchronization:
188  *
189  * Three types of locks exist in the local domain socket implementation: a
190  * global list mutex, a global linkage rwlock, and per-unpcb mutexes.  Of the
191  * global locks, the list lock protects the socket count, global generation
192  * number, and stream/datagram global lists.  The linkage lock protects the
193  * interconnection of unpcbs, the v_socket and unp_vnode pointers, and can be
194  * held exclusively over the acquisition of multiple unpcb locks to prevent
195  * deadlock.
196  *
197  * UNIX domain sockets each have an unpcb hung off of their so_pcb pointer,
198  * allocated in pru_attach() and freed in pru_detach().  The validity of that
199  * pointer is an invariant, so no lock is required to dereference the so_pcb
200  * pointer if a valid socket reference is held by the caller.  In practice,
201  * this is always true during operations performed on a socket.  Each unpcb
202  * has a back-pointer to its socket, unp_socket, which will be stable under
203  * the same circumstances.
204  *
205  * This pointer may only be safely dereferenced as long as a valid reference
206  * to the unpcb is held.  Typically, this reference will be from the socket,
207  * or from another unpcb when the referring unpcb's lock is held (in order
208  * that the reference not be invalidated during use).  For example, to follow
209  * unp->unp_conn->unp_socket, you need to hold the lock on unp, not unp_conn,
210  * as unp_socket remains valid as long as the reference to unp_conn is valid.
211  *
212  * Fields of unpcbs are locked using a per-unpcb lock, unp_mtx.  Individual
213  * reads that are atomic may be performed "lockless" without the lock, but more
214  * complex reads and read-modify-writes require the mutex to be held.  No
215  * lock order is defined between unpcb locks -- multiple unpcb locks may be
216  * acquired at the same time only when holding the linkage rwlock
217  * exclusively, which prevents deadlocks.
218  *
219  * Blocking with UNIX domain sockets is a tricky issue: unlike most network
220  * protocols, bind() is a non-atomic operation, and connect() may need to
221  * sleep in the protocol while waiting on local or distributed file
222  * systems.  We try to separate "lookup" operations, which
223  * may sleep, and the IPC operations themselves, which typically can occur
224  * with relative atomicity as locks can be held over the entire operation.
225  *
226  * Another tricky issue is simultaneous multi-threaded or multi-process
227  * access to a single UNIX domain socket.  These are handled by the flags
228  * UNP_CONNECTING and UNP_BINDING, which prevent concurrent connecting or
229  * binding, both of which involve dropping UNIX domain socket locks in order
230  * to perform namei() and other file system operations.
231  */
232 static struct rwlock    unp_link_rwlock;
233 static struct mtx       unp_list_lock;
234 static struct mtx       unp_defers_lock;
235
236 #define UNP_LINK_LOCK_INIT()            rw_init(&unp_link_rwlock,       \
237                                             "unp_link_rwlock")
238
239 #define UNP_LINK_LOCK_ASSERT()  rw_assert(&unp_link_rwlock,     \
240                                             RA_LOCKED)
241 #define UNP_LINK_UNLOCK_ASSERT()        rw_assert(&unp_link_rwlock,     \
242                                             RA_UNLOCKED)
243
244 #define UNP_LINK_RLOCK()                rw_rlock(&unp_link_rwlock)
245 #define UNP_LINK_RUNLOCK()              rw_runlock(&unp_link_rwlock)
246 #define UNP_LINK_WLOCK()                rw_wlock(&unp_link_rwlock)
247 #define UNP_LINK_WUNLOCK()              rw_wunlock(&unp_link_rwlock)
248 #define UNP_LINK_WLOCK_ASSERT()         rw_assert(&unp_link_rwlock,     \
249                                             RA_WLOCKED)
250
251 #define UNP_LIST_LOCK_INIT()            mtx_init(&unp_list_lock,        \
252                                             "unp_list_lock", NULL, MTX_DEF)
253 #define UNP_LIST_LOCK()                 mtx_lock(&unp_list_lock)
254 #define UNP_LIST_UNLOCK()               mtx_unlock(&unp_list_lock)
255
256 #define UNP_DEFERRED_LOCK_INIT()        mtx_init(&unp_defers_lock, \
257                                             "unp_defer", NULL, MTX_DEF)
258 #define UNP_DEFERRED_LOCK()             mtx_lock(&unp_defers_lock)
259 #define UNP_DEFERRED_UNLOCK()           mtx_unlock(&unp_defers_lock)
260
261 #define UNP_PCB_LOCK_INIT(unp)          mtx_init(&(unp)->unp_mtx,       \
262                                             "unp_mtx", "unp_mtx",       \
263                                             MTX_DUPOK|MTX_DEF|MTX_RECURSE)
264 #define UNP_PCB_LOCK_DESTROY(unp)       mtx_destroy(&(unp)->unp_mtx)
265 #define UNP_PCB_LOCK(unp)               mtx_lock(&(unp)->unp_mtx)
266 #define UNP_PCB_UNLOCK(unp)             mtx_unlock(&(unp)->unp_mtx)
267 #define UNP_PCB_LOCK_ASSERT(unp)        mtx_assert(&(unp)->unp_mtx, MA_OWNED)
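
/*
 * Sketch of the typical use of the macros above: when two unpcbs must be
 * locked at once (e.g., while connecting or disconnecting a pair), the
 * linkage rwlock is taken exclusively first, which makes the otherwise
 * unordered per-unpcb locks safe to acquire together:
 *
 *	UNP_LINK_WLOCK();
 *	UNP_PCB_LOCK(unp);
 *	UNP_PCB_LOCK(unp2);
 *	... update the interconnection ...
 *	UNP_PCB_UNLOCK(unp2);
 *	UNP_PCB_UNLOCK(unp);
 *	UNP_LINK_WUNLOCK();
 *
 * uipc_connect2() and uipc_disconnect() below are concrete instances of
 * this pattern.
 */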
268
269 static int      uipc_connect2(struct socket *, struct socket *);
270 static int      uipc_ctloutput(struct socket *, struct sockopt *);
271 static int      unp_connect(struct socket *, struct sockaddr *,
272                     struct thread *);
273 static int      unp_connect2(struct socket *so, struct socket *so2, int);
274 static void     unp_disconnect(struct unpcb *unp, struct unpcb *unp2);
275 static void     unp_dispose(struct mbuf *);
276 static void     unp_shutdown(struct unpcb *);
277 static void     unp_drop(struct unpcb *, int);
278 static void     unp_gc(__unused void *, int);
279 static void     unp_scan(struct mbuf *, void (*)(struct file *));
280 static void     unp_discard(struct file *);
281 static void     unp_freerights(struct file **, int);
282 static void     unp_init(void);
283 static int      unp_internalize(struct mbuf **, struct thread *);
284 static void     unp_internalize_fp(struct file *);
285 static int      unp_externalize(struct mbuf *, struct mbuf **);
286 static int      unp_externalize_fp(struct file *);
287 static struct mbuf      *unp_addsockcred(struct thread *, struct mbuf *);
288 static void     unp_process_defers(void * __unused, int);
289
290 /*
291  * Definitions of protocols supported in the LOCAL domain.
292  */
293 static struct domain localdomain;
294 static struct pr_usrreqs uipc_usrreqs_dgram, uipc_usrreqs_stream;
295 static struct pr_usrreqs uipc_usrreqs_seqpacket;
296 static struct protosw localsw[] = {
297 {
298         .pr_type =              SOCK_STREAM,
299         .pr_domain =            &localdomain,
300         .pr_flags =             PR_CONNREQUIRED|PR_WANTRCVD|PR_RIGHTS,
301         .pr_ctloutput =         &uipc_ctloutput,
302         .pr_usrreqs =           &uipc_usrreqs_stream
303 },
304 {
305         .pr_type =              SOCK_DGRAM,
306         .pr_domain =            &localdomain,
307         .pr_flags =             PR_ATOMIC|PR_ADDR|PR_RIGHTS,
308         .pr_usrreqs =           &uipc_usrreqs_dgram
309 },
310 {
311         .pr_type =              SOCK_SEQPACKET,
312         .pr_domain =            &localdomain,
313
314         /*
315          * XXXRW: For now, PR_ADDR because soreceive will bump into the
316          * addresses prepended via sbappendaddr.  A new sbappend variant is
317          * needed that supports both atomic record writes and control data.
318          */
319         .pr_flags =             PR_ADDR|PR_ATOMIC|PR_CONNREQUIRED|PR_WANTRCVD|
320                                     PR_RIGHTS,
321         .pr_ctloutput =         &uipc_ctloutput,
322         .pr_usrreqs =           &uipc_usrreqs_seqpacket,
323 },
324 };
325
326 static struct domain localdomain = {
327         .dom_family =           AF_LOCAL,
328         .dom_name =             "local",
329         .dom_init =             unp_init,
330         .dom_externalize =      unp_externalize,
331         .dom_dispose =          unp_dispose,
332         .dom_protosw =          localsw,
333         .dom_protoswNPROTOSW =  &localsw[sizeof(localsw)/sizeof(localsw[0])]
334 };
335 DOMAIN_SET(local);
336
337 static void
338 uipc_abort(struct socket *so)
339 {
340         struct unpcb *unp, *unp2;
341
342         unp = sotounpcb(so);
343         KASSERT(unp != NULL, ("uipc_abort: unp == NULL"));
344
345         UNP_LINK_WLOCK();
346         UNP_PCB_LOCK(unp);
347         unp2 = unp->unp_conn;
348         if (unp2 != NULL) {
349                 UNP_PCB_LOCK(unp2);
350                 unp_drop(unp2, ECONNABORTED);
351                 UNP_PCB_UNLOCK(unp2);
352         }
353         UNP_PCB_UNLOCK(unp);
354         UNP_LINK_WUNLOCK();
355 }
356
357 static int
358 uipc_accept(struct socket *so, struct sockaddr **nam)
359 {
360         struct unpcb *unp, *unp2;
361         const struct sockaddr *sa;
362
363         /*
364          * Pass back name of connected socket, if it was bound and we are
365          * still connected (our peer may have closed already!).
366          */
367         unp = sotounpcb(so);
368         KASSERT(unp != NULL, ("uipc_accept: unp == NULL"));
369
370         *nam = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK);
371         UNP_LINK_RLOCK();
372         unp2 = unp->unp_conn;
373         if (unp2 != NULL && unp2->unp_addr != NULL) {
374                 UNP_PCB_LOCK(unp2);
375                 sa = (struct sockaddr *) unp2->unp_addr;
376                 bcopy(sa, *nam, sa->sa_len);
377                 UNP_PCB_UNLOCK(unp2);
378         } else {
379                 sa = &sun_noname;
380                 bcopy(sa, *nam, sa->sa_len);
381         }
382         UNP_LINK_RUNLOCK();
383         return (0);
384 }
385
386 static int
387 uipc_attach(struct socket *so, int proto, struct thread *td)
388 {
389         u_long sendspace, recvspace;
390         struct unpcb *unp;
391         int error;
392
393         KASSERT(so->so_pcb == NULL, ("uipc_attach: so_pcb != NULL"));
394         if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
395                 switch (so->so_type) {
396                 case SOCK_STREAM:
397                         sendspace = unpst_sendspace;
398                         recvspace = unpst_recvspace;
399                         break;
400
401                 case SOCK_DGRAM:
402                         sendspace = unpdg_sendspace;
403                         recvspace = unpdg_recvspace;
404                         break;
405
406                 case SOCK_SEQPACKET:
407                         sendspace = unpsp_sendspace;
408                         recvspace = unpsp_recvspace;
409                         break;
410
411                 default:
412                         panic("uipc_attach");
413                 }
414                 error = soreserve(so, sendspace, recvspace);
415                 if (error)
416                         return (error);
417         }
418         unp = uma_zalloc(unp_zone, M_NOWAIT | M_ZERO);
419         if (unp == NULL)
420                 return (ENOBUFS);
421         LIST_INIT(&unp->unp_refs);
422         UNP_PCB_LOCK_INIT(unp);
423         unp->unp_socket = so;
424         so->so_pcb = unp;
425         unp->unp_refcount = 1;
426
427         UNP_LIST_LOCK();
428         unp->unp_gencnt = ++unp_gencnt;
429         unp_count++;
430         switch (so->so_type) {
431         case SOCK_STREAM:
432                 LIST_INSERT_HEAD(&unp_shead, unp, unp_link);
433                 break;
434
435         case SOCK_DGRAM:
436                 LIST_INSERT_HEAD(&unp_dhead, unp, unp_link);
437                 break;
438
439         case SOCK_SEQPACKET:
440                 LIST_INSERT_HEAD(&unp_sphead, unp, unp_link);
441                 break;
442
443         default:
444                 panic("uipc_attach");
445         }
446         UNP_LIST_UNLOCK();
447
448         return (0);
449 }
450
451 static int
452 uipc_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
453 {
454         struct sockaddr_un *soun = (struct sockaddr_un *)nam;
455         struct vattr vattr;
456         int error, namelen, vfslocked;
457         struct nameidata nd;
458         struct unpcb *unp;
459         struct vnode *vp;
460         struct mount *mp;
461         char *buf;
462
463         unp = sotounpcb(so);
464         KASSERT(unp != NULL, ("uipc_bind: unp == NULL"));
465
466         if (soun->sun_len > sizeof(struct sockaddr_un))
467                 return (EINVAL);
468         namelen = soun->sun_len - offsetof(struct sockaddr_un, sun_path);
469         if (namelen <= 0)
470                 return (EINVAL);
471
472         /*
473          * We don't allow simultaneous bind() calls on a single UNIX domain
474          * socket, so flag in-progress operations, and return an error if an
475          * operation is already in progress.
476          *
477          * Historically, we have not allowed a socket to be rebound, so this
478          * also returns an error.  Not allowing re-binding simplifies the
479          * implementation and avoids a great many possible failure modes.
480          */
481         UNP_PCB_LOCK(unp);
482         if (unp->unp_vnode != NULL) {
483                 UNP_PCB_UNLOCK(unp);
484                 return (EINVAL);
485         }
486         if (unp->unp_flags & UNP_BINDING) {
487                 UNP_PCB_UNLOCK(unp);
488                 return (EALREADY);
489         }
490         unp->unp_flags |= UNP_BINDING;
491         UNP_PCB_UNLOCK(unp);
492
493         buf = malloc(namelen + 1, M_TEMP, M_WAITOK);
494         bcopy(soun->sun_path, buf, namelen);
495         buf[namelen] = 0;
496
497 restart:
498         vfslocked = 0;
499         NDINIT(&nd, CREATE, MPSAFE | NOFOLLOW | LOCKPARENT | SAVENAME,
500             UIO_SYSSPACE, buf, td);
501 /* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */
502         error = namei(&nd);
503         if (error)
504                 goto error;
505         vp = nd.ni_vp;
506         vfslocked = NDHASGIANT(&nd);
507         if (vp != NULL || vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) {
508                 NDFREE(&nd, NDF_ONLY_PNBUF);
509                 if (nd.ni_dvp == vp)
510                         vrele(nd.ni_dvp);
511                 else
512                         vput(nd.ni_dvp);
513                 if (vp != NULL) {
514                         vrele(vp);
515                         error = EADDRINUSE;
516                         goto error;
517                 }
518                 error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH);
519                 if (error)
520                         goto error;
521                 VFS_UNLOCK_GIANT(vfslocked);
522                 goto restart;
523         }
524         VATTR_NULL(&vattr);
525         vattr.va_type = VSOCK;
526         vattr.va_mode = (ACCESSPERMS & ~td->td_proc->p_fd->fd_cmask);
527 #ifdef MAC
528         error = mac_vnode_check_create(td->td_ucred, nd.ni_dvp, &nd.ni_cnd,
529             &vattr);
530 #endif
531         if (error == 0)
532                 error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
533         NDFREE(&nd, NDF_ONLY_PNBUF);
534         vput(nd.ni_dvp);
535         if (error) {
536                 vn_finished_write(mp);
537                 goto error;
538         }
539         vp = nd.ni_vp;
540         ASSERT_VOP_ELOCKED(vp, "uipc_bind");
541         soun = (struct sockaddr_un *)sodupsockaddr(nam, M_WAITOK);
542
543         UNP_LINK_WLOCK();
544         UNP_PCB_LOCK(unp);
545         vp->v_socket = unp->unp_socket;
546         unp->unp_vnode = vp;
547         unp->unp_addr = soun;
548         unp->unp_flags &= ~UNP_BINDING;
549         UNP_PCB_UNLOCK(unp);
550         UNP_LINK_WUNLOCK();
551         VOP_UNLOCK(vp, 0);
552         vn_finished_write(mp);
553         VFS_UNLOCK_GIANT(vfslocked);
554         free(buf, M_TEMP);
555         return (0);
556
557 error:
558         VFS_UNLOCK_GIANT(vfslocked);
559         UNP_PCB_LOCK(unp);
560         unp->unp_flags &= ~UNP_BINDING;
561         UNP_PCB_UNLOCK(unp);
562         free(buf, M_TEMP);
563         return (error);
564 }
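
/*
 * Illustrative userland sketch (not part of this file): binding a local
 * socket to a path in the file system name space ends up in uipc_bind()
 * above.  As the code above shows, re-binding is not permitted and an
 * existing file at the path yields EADDRINUSE, so callers commonly
 * unlink(2) a stale path first.  The path used here is hypothetical.
 *
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	struct sockaddr_un sun;
 *	int s;
 *
 *	s = socket(PF_LOCAL, SOCK_STREAM, 0);
 *	memset(&sun, 0, sizeof(sun));
 *	sun.sun_family = AF_LOCAL;
 *	strlcpy(sun.sun_path, "/tmp/example.sock", sizeof(sun.sun_path));
 *	sun.sun_len = SUN_LEN(&sun);
 *	(void)unlink(sun.sun_path);
 *	if (bind(s, (struct sockaddr *)&sun, SUN_LEN(&sun)) == 0)
 *		(void)listen(s, 5);
 */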
565
566 static int
567 uipc_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
568 {
569         int error;
570
571         KASSERT(td == curthread, ("uipc_connect: td != curthread"));
572         UNP_LINK_WLOCK();
573         error = unp_connect(so, nam, td);
574         UNP_LINK_WUNLOCK();
575         return (error);
576 }
577
578 static void
579 uipc_close(struct socket *so)
580 {
581         struct unpcb *unp, *unp2;
582
583         unp = sotounpcb(so);
584         KASSERT(unp != NULL, ("uipc_close: unp == NULL"));
585
586         UNP_LINK_WLOCK();
587         UNP_PCB_LOCK(unp);
588         unp2 = unp->unp_conn;
589         if (unp2 != NULL) {
590                 UNP_PCB_LOCK(unp2);
591                 unp_disconnect(unp, unp2);
592                 UNP_PCB_UNLOCK(unp2);
593         }
594         UNP_PCB_UNLOCK(unp);
595         UNP_LINK_WUNLOCK();
596 }
597
598 static int
599 uipc_connect2(struct socket *so1, struct socket *so2)
600 {
601         struct unpcb *unp, *unp2;
602         int error;
603
604         UNP_LINK_WLOCK();
605         unp = so1->so_pcb;
606         KASSERT(unp != NULL, ("uipc_connect2: unp == NULL"));
607         UNP_PCB_LOCK(unp);
608         unp2 = so2->so_pcb;
609         KASSERT(unp2 != NULL, ("uipc_connect2: unp2 == NULL"));
610         UNP_PCB_LOCK(unp2);
611         error = unp_connect2(so1, so2, PRU_CONNECT2);
612         UNP_PCB_UNLOCK(unp2);
613         UNP_PCB_UNLOCK(unp);
614         UNP_LINK_WUNLOCK();
615         return (error);
616 }
617
618 static void
619 uipc_detach(struct socket *so)
620 {
621         struct unpcb *unp, *unp2;
622         struct sockaddr_un *saved_unp_addr;
623         struct vnode *vp;
624         int freeunp, local_unp_rights;
625
626         unp = sotounpcb(so);
627         KASSERT(unp != NULL, ("uipc_detach: unp == NULL"));
628
629         UNP_LINK_WLOCK();
630         UNP_LIST_LOCK();
631         UNP_PCB_LOCK(unp);
632         LIST_REMOVE(unp, unp_link);
633         unp->unp_gencnt = ++unp_gencnt;
634         --unp_count;
635         UNP_LIST_UNLOCK();
636
637         /*
638          * XXXRW: Should assert vp->v_socket == so.
639          */
640         if ((vp = unp->unp_vnode) != NULL) {
641                 unp->unp_vnode->v_socket = NULL;
642                 unp->unp_vnode = NULL;
643         }
644         unp2 = unp->unp_conn;
645         if (unp2 != NULL) {
646                 UNP_PCB_LOCK(unp2);
647                 unp_disconnect(unp, unp2);
648                 UNP_PCB_UNLOCK(unp2);
649         }
650
651         /*
652          * We hold the linkage lock exclusively, so it's OK to acquire
653          * multiple pcb locks at a time.
654          */
655         while (!LIST_EMPTY(&unp->unp_refs)) {
656                 struct unpcb *ref = LIST_FIRST(&unp->unp_refs);
657
658                 UNP_PCB_LOCK(ref);
659                 unp_drop(ref, ECONNRESET);
660                 UNP_PCB_UNLOCK(ref);
661         }
662         local_unp_rights = unp_rights;
663         UNP_LINK_WUNLOCK();
664         unp->unp_socket->so_pcb = NULL;
665         saved_unp_addr = unp->unp_addr;
666         unp->unp_addr = NULL;
667         unp->unp_refcount--;
668         freeunp = (unp->unp_refcount == 0);
669         if (saved_unp_addr != NULL)
670                 free(saved_unp_addr, M_SONAME);
671         if (freeunp) {
672                 UNP_PCB_LOCK_DESTROY(unp);
673                 uma_zfree(unp_zone, unp);
674         } else
675                 UNP_PCB_UNLOCK(unp);
676         if (vp) {
677                 int vfslocked;
678
679                 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
680                 vrele(vp);
681                 VFS_UNLOCK_GIANT(vfslocked);
682         }
683         if (local_unp_rights)
684                 taskqueue_enqueue(taskqueue_thread, &unp_gc_task);
685 }
686
687 static int
688 uipc_disconnect(struct socket *so)
689 {
690         struct unpcb *unp, *unp2;
691
692         unp = sotounpcb(so);
693         KASSERT(unp != NULL, ("uipc_disconnect: unp == NULL"));
694
695         UNP_LINK_WLOCK();
696         UNP_PCB_LOCK(unp);
697         unp2 = unp->unp_conn;
698         if (unp2 != NULL) {
699                 UNP_PCB_LOCK(unp2);
700                 unp_disconnect(unp, unp2);
701                 UNP_PCB_UNLOCK(unp2);
702         }
703         UNP_PCB_UNLOCK(unp);
704         UNP_LINK_WUNLOCK();
705         return (0);
706 }
707
708 static int
709 uipc_listen(struct socket *so, int backlog, struct thread *td)
710 {
711         struct unpcb *unp;
712         int error;
713
714         unp = sotounpcb(so);
715         KASSERT(unp != NULL, ("uipc_listen: unp == NULL"));
716
717         UNP_PCB_LOCK(unp);
718         if (unp->unp_vnode == NULL) {
719                 UNP_PCB_UNLOCK(unp);
720                 return (EINVAL);
721         }
722
723         SOCK_LOCK(so);
724         error = solisten_proto_check(so);
725         if (error == 0) {
726                 cru2x(td->td_ucred, &unp->unp_peercred);
727                 unp->unp_flags |= UNP_HAVEPCCACHED;
728                 solisten_proto(so, backlog);
729         }
730         SOCK_UNLOCK(so);
731         UNP_PCB_UNLOCK(unp);
732         return (error);
733 }
734
735 static int
736 uipc_peeraddr(struct socket *so, struct sockaddr **nam)
737 {
738         struct unpcb *unp, *unp2;
739         const struct sockaddr *sa;
740
741         unp = sotounpcb(so);
742         KASSERT(unp != NULL, ("uipc_peeraddr: unp == NULL"));
743
744         *nam = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK);
745         UNP_LINK_RLOCK();
746         /*
747          * XXX: It seems that this test always fails even when connection is
748          * established.  So, this else clause is added as workaround to
749          * return PF_LOCAL sockaddr.
750          */
751         unp2 = unp->unp_conn;
752         if (unp2 != NULL) {
753                 UNP_PCB_LOCK(unp2);
754                 if (unp2->unp_addr != NULL)
755                         sa = (struct sockaddr *) unp2->unp_addr;
756                 else
757                         sa = &sun_noname;
758                 bcopy(sa, *nam, sa->sa_len);
759                 UNP_PCB_UNLOCK(unp2);
760         } else {
761                 sa = &sun_noname;
762                 bcopy(sa, *nam, sa->sa_len);
763         }
764         UNP_LINK_RUNLOCK();
765         return (0);
766 }
767
768 static int
769 uipc_rcvd(struct socket *so, int flags)
770 {
771         struct unpcb *unp, *unp2;
772         struct socket *so2;
773         u_int mbcnt, sbcc;
774         u_long newhiwat;
775
776         unp = sotounpcb(so);
777         KASSERT(unp != NULL, ("uipc_rcvd: unp == NULL"));
778
779         if (so->so_type != SOCK_STREAM && so->so_type != SOCK_SEQPACKET)
780                 panic("uipc_rcvd socktype %d", so->so_type);
781
782         /*
783          * Adjust backpressure on the sender and wake up anyone waiting to write.
784          *
785          * The unp lock is acquired to maintain the validity of the unp_conn
786          * pointer; no lock on unp2 is required as unp2->unp_socket will be
787          * static as long as we don't permit unp2 to disconnect from unp,
788          * which is prevented by the lock on unp.  We cache values from
789          * so_rcv to avoid holding the so_rcv lock over the entire
790          * transaction on the remote so_snd.
791          */
792         SOCKBUF_LOCK(&so->so_rcv);
793         mbcnt = so->so_rcv.sb_mbcnt;
794         sbcc = so->so_rcv.sb_cc;
795         SOCKBUF_UNLOCK(&so->so_rcv);
796         UNP_PCB_LOCK(unp);
797         unp2 = unp->unp_conn;
798         if (unp2 == NULL) {
799                 UNP_PCB_UNLOCK(unp);
800                 return (0);
801         }
802         so2 = unp2->unp_socket;
803         SOCKBUF_LOCK(&so2->so_snd);
804         so2->so_snd.sb_mbmax += unp->unp_mbcnt - mbcnt;
805         newhiwat = so2->so_snd.sb_hiwat + unp->unp_cc - sbcc;
806         (void)chgsbsize(so2->so_cred->cr_uidinfo, &so2->so_snd.sb_hiwat,
807             newhiwat, RLIM_INFINITY);
808         sowwakeup_locked(so2);
809         unp->unp_mbcnt = mbcnt;
810         unp->unp_cc = sbcc;
811         UNP_PCB_UNLOCK(unp);
812         return (0);
813 }
814
815 static int
816 uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
817     struct mbuf *control, struct thread *td)
818 {
819         struct unpcb *unp, *unp2;
820         struct socket *so2;
821         u_int mbcnt_delta, sbcc;
822         u_int newhiwat;
823         int error = 0;
824
825         unp = sotounpcb(so);
826         KASSERT(unp != NULL, ("uipc_send: unp == NULL"));
827
828         if (flags & PRUS_OOB) {
829                 error = EOPNOTSUPP;
830                 goto release;
831         }
832         if (control != NULL && (error = unp_internalize(&control, td)))
833                 goto release;
834         if ((nam != NULL) || (flags & PRUS_EOF))
835                 UNP_LINK_WLOCK();
836         else
837                 UNP_LINK_RLOCK();
838         switch (so->so_type) {
839         case SOCK_DGRAM:
840         {
841                 const struct sockaddr *from;
842
843                 unp2 = unp->unp_conn;
844                 if (nam != NULL) {
845                         UNP_LINK_WLOCK_ASSERT();
846                         if (unp2 != NULL) {
847                                 error = EISCONN;
848                                 break;
849                         }
850                         error = unp_connect(so, nam, td);
851                         if (error)
852                                 break;
853                         unp2 = unp->unp_conn;
854                 }
855
856                 /*
857                  * Because connect() and send() are non-atomic in a sendto()
858                  * with a target address, it's possible that the socket will
859                  * have disconnected before the send() can run.  In that case
860                  * return the slightly counter-intuitive but otherwise
861                  * correct error that the socket is not connected.
862                  */
863                 if (unp2 == NULL) {
864                         error = ENOTCONN;
865                         break;
866                 }
867                 /* Lockless read. */
868                 if (unp2->unp_flags & UNP_WANTCRED)
869                         control = unp_addsockcred(td, control);
870                 UNP_PCB_LOCK(unp);
871                 if (unp->unp_addr != NULL)
872                         from = (struct sockaddr *)unp->unp_addr;
873                 else
874                         from = &sun_noname;
875                 so2 = unp2->unp_socket;
876                 SOCKBUF_LOCK(&so2->so_rcv);
877                 if (sbappendaddr_locked(&so2->so_rcv, from, m, control)) {
878                         sorwakeup_locked(so2);
879                         m = NULL;
880                         control = NULL;
881                 } else {
882                         SOCKBUF_UNLOCK(&so2->so_rcv);
883                         error = ENOBUFS;
884                 }
885                 if (nam != NULL) {
886                         UNP_LINK_WLOCK_ASSERT();
887                         UNP_PCB_LOCK(unp2);
888                         unp_disconnect(unp, unp2);
889                         UNP_PCB_UNLOCK(unp2);
890                 }
891                 UNP_PCB_UNLOCK(unp);
892                 break;
893         }
894
895         case SOCK_SEQPACKET:
896         case SOCK_STREAM:
897                 if ((so->so_state & SS_ISCONNECTED) == 0) {
898                         if (nam != NULL) {
899                                 UNP_LINK_WLOCK_ASSERT();
900                                 error = unp_connect(so, nam, td);
901                                 if (error)
902                                         break;  /* XXX */
903                         } else {
904                                 error = ENOTCONN;
905                                 break;
906                         }
907                 }
908
909                 /* Lockless read. */
910                 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
911                         error = EPIPE;
912                         break;
913                 }
914
915                 /*
916                  * Because connect() and send() are non-atomic in a sendto()
917                  * with a target address, it's possible that the socket will
918                  * have disconnected before the send() can run.  In that case
919                  * return the slightly counter-intuitive but otherwise
920                  * correct error that the socket is not connected.
921                  *
922                  * Locking here must be done carefully: the linkage lock
923                  * prevents interconnections between unpcbs from changing, so
924                  * we can traverse from unp to unp2 without acquiring unp's
925                  * lock.  Socket buffer locks follow unpcb locks, so we can
926                  * acquire both the remote and local socket buffer locks.
927                  */
928                 unp2 = unp->unp_conn;
929                 if (unp2 == NULL) {
930                         error = ENOTCONN;
931                         break;
932                 }
933                 so2 = unp2->unp_socket;
934                 UNP_PCB_LOCK(unp2);
935                 SOCKBUF_LOCK(&so2->so_rcv);
936                 if (unp2->unp_flags & UNP_WANTCRED) {
937                         /*
938                          * Credentials are passed only once on SOCK_STREAM
939                          * and SOCK_SEQPACKET.
940                          */
941                         unp2->unp_flags &= ~UNP_WANTCRED;
942                         control = unp_addsockcred(td, control);
943                 }
944                 /*
945                  * Send to paired receive port, and then reduce send buffer
946                  * hiwater marks to maintain backpressure.  Wake up readers.
947                  */
948                 switch (so->so_type) {
949                 case SOCK_STREAM:
950                         if (control != NULL) {
951                                 if (sbappendcontrol_locked(&so2->so_rcv, m,
952                                     control))
953                                         control = NULL;
954                         } else
955                                 sbappend_locked(&so2->so_rcv, m);
956                         break;
957
958                 case SOCK_SEQPACKET: {
959                         const struct sockaddr *from;
960
961                         from = &sun_noname;
962                         if (sbappendaddr_locked(&so2->so_rcv, from, m,
963                             control))
964                                 control = NULL;
965                         break;
966                         }
967                 }
968
969                 /*
970                  * XXXRW: While fine for SOCK_STREAM, this conflates maximum
971                  * datagram size and back-pressure for SOCK_SEQPACKET, which
972                  * can lead to undesired return of EMSGSIZE on send instead
973                  * of more desirable blocking.
974                  */
975                 mbcnt_delta = so2->so_rcv.sb_mbcnt - unp2->unp_mbcnt;
976                 unp2->unp_mbcnt = so2->so_rcv.sb_mbcnt;
977                 sbcc = so2->so_rcv.sb_cc;
978                 sorwakeup_locked(so2);
979
980                 SOCKBUF_LOCK(&so->so_snd);
981                 if ((int)so->so_snd.sb_hiwat >= (int)(sbcc - unp2->unp_cc))
982                         newhiwat = so->so_snd.sb_hiwat - (sbcc - unp2->unp_cc);
983                 else
984                         newhiwat = 0;
985                 (void)chgsbsize(so->so_cred->cr_uidinfo, &so->so_snd.sb_hiwat,
986                     newhiwat, RLIM_INFINITY);
987                 so->so_snd.sb_mbmax -= mbcnt_delta;
988                 SOCKBUF_UNLOCK(&so->so_snd);
989                 unp2->unp_cc = sbcc;
990                 UNP_PCB_UNLOCK(unp2);
991                 m = NULL;
992                 break;
993
994         default:
995                 panic("uipc_send unknown socktype");
996         }
997
998         /*
999          * PRUS_EOF is equivalent to pru_send followed by pru_shutdown.
1000          */
1001         if (flags & PRUS_EOF) {
1002                 UNP_PCB_LOCK(unp);
1003                 socantsendmore(so);
1004                 unp_shutdown(unp);
1005                 UNP_PCB_UNLOCK(unp);
1006         }
1007
1008         if ((nam != NULL) || (flags & PRUS_EOF))
1009                 UNP_LINK_WUNLOCK();
1010         else
1011                 UNP_LINK_RUNLOCK();
1012
1013         if (control != NULL && error != 0)
1014                 unp_dispose(control);
1015
1016 release:
1017         if (control != NULL)
1018                 m_freem(control);
1019         if (m != NULL)
1020                 m_freem(m);
1021         return (error);
1022 }
1023
1024 static int
1025 uipc_sense(struct socket *so, struct stat *sb)
1026 {
1027         struct unpcb *unp, *unp2;
1028         struct socket *so2;
1029
1030         unp = sotounpcb(so);
1031         KASSERT(unp != NULL, ("uipc_sense: unp == NULL"));
1032
1033         sb->st_blksize = so->so_snd.sb_hiwat;
1034         UNP_LINK_RLOCK();
1035         UNP_PCB_LOCK(unp);
1036         unp2 = unp->unp_conn;
1037         if ((so->so_type == SOCK_STREAM || so->so_type == SOCK_SEQPACKET) &&
1038             unp2 != NULL) {
1039                 so2 = unp2->unp_socket;
1040                 sb->st_blksize += so2->so_rcv.sb_cc;
1041         }
1042         sb->st_dev = NODEV;
1043         if (unp->unp_ino == 0)
1044                 unp->unp_ino = (++unp_ino == 0) ? ++unp_ino : unp_ino;
1045         sb->st_ino = unp->unp_ino;
1046         UNP_PCB_UNLOCK(unp);
1047         UNP_LINK_RUNLOCK();
1048         return (0);
1049 }
1050
1051 static int
1052 uipc_shutdown(struct socket *so)
1053 {
1054         struct unpcb *unp;
1055
1056         unp = sotounpcb(so);
1057         KASSERT(unp != NULL, ("uipc_shutdown: unp == NULL"));
1058
1059         UNP_LINK_WLOCK();
1060         UNP_PCB_LOCK(unp);
1061         socantsendmore(so);
1062         unp_shutdown(unp);
1063         UNP_PCB_UNLOCK(unp);
1064         UNP_LINK_WUNLOCK();
1065         return (0);
1066 }
1067
1068 static int
1069 uipc_sockaddr(struct socket *so, struct sockaddr **nam)
1070 {
1071         struct unpcb *unp;
1072         const struct sockaddr *sa;
1073
1074         unp = sotounpcb(so);
1075         KASSERT(unp != NULL, ("uipc_sockaddr: unp == NULL"));
1076
1077         *nam = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK);
1078         UNP_PCB_LOCK(unp);
1079         if (unp->unp_addr != NULL)
1080                 sa = (struct sockaddr *) unp->unp_addr;
1081         else
1082                 sa = &sun_noname;
1083         bcopy(sa, *nam, sa->sa_len);
1084         UNP_PCB_UNLOCK(unp);
1085         return (0);
1086 }
1087
1088 static struct pr_usrreqs uipc_usrreqs_dgram = {
1089         .pru_abort =            uipc_abort,
1090         .pru_accept =           uipc_accept,
1091         .pru_attach =           uipc_attach,
1092         .pru_bind =             uipc_bind,
1093         .pru_connect =          uipc_connect,
1094         .pru_connect2 =         uipc_connect2,
1095         .pru_detach =           uipc_detach,
1096         .pru_disconnect =       uipc_disconnect,
1097         .pru_listen =           uipc_listen,
1098         .pru_peeraddr =         uipc_peeraddr,
1099         .pru_rcvd =             uipc_rcvd,
1100         .pru_send =             uipc_send,
1101         .pru_sense =            uipc_sense,
1102         .pru_shutdown =         uipc_shutdown,
1103         .pru_sockaddr =         uipc_sockaddr,
1104         .pru_soreceive =        soreceive_dgram,
1105         .pru_close =            uipc_close,
1106 };
1107
1108 static struct pr_usrreqs uipc_usrreqs_seqpacket = {
1109         .pru_abort =            uipc_abort,
1110         .pru_accept =           uipc_accept,
1111         .pru_attach =           uipc_attach,
1112         .pru_bind =             uipc_bind,
1113         .pru_connect =          uipc_connect,
1114         .pru_connect2 =         uipc_connect2,
1115         .pru_detach =           uipc_detach,
1116         .pru_disconnect =       uipc_disconnect,
1117         .pru_listen =           uipc_listen,
1118         .pru_peeraddr =         uipc_peeraddr,
1119         .pru_rcvd =             uipc_rcvd,
1120         .pru_send =             uipc_send,
1121         .pru_sense =            uipc_sense,
1122         .pru_shutdown =         uipc_shutdown,
1123         .pru_sockaddr =         uipc_sockaddr,
1124         .pru_soreceive =        soreceive_generic,      /* XXX: or...? */
1125         .pru_close =            uipc_close,
1126 };
1127
1128 static struct pr_usrreqs uipc_usrreqs_stream = {
1129         .pru_abort =            uipc_abort,
1130         .pru_accept =           uipc_accept,
1131         .pru_attach =           uipc_attach,
1132         .pru_bind =             uipc_bind,
1133         .pru_connect =          uipc_connect,
1134         .pru_connect2 =         uipc_connect2,
1135         .pru_detach =           uipc_detach,
1136         .pru_disconnect =       uipc_disconnect,
1137         .pru_listen =           uipc_listen,
1138         .pru_peeraddr =         uipc_peeraddr,
1139         .pru_rcvd =             uipc_rcvd,
1140         .pru_send =             uipc_send,
1141         .pru_sense =            uipc_sense,
1142         .pru_shutdown =         uipc_shutdown,
1143         .pru_sockaddr =         uipc_sockaddr,
1144         .pru_soreceive =        soreceive_generic,
1145         .pru_close =            uipc_close,
1146 };
1147
1148 static int
1149 uipc_ctloutput(struct socket *so, struct sockopt *sopt)
1150 {
1151         struct unpcb *unp;
1152         struct xucred xu;
1153         int error, optval;
1154
1155         if (sopt->sopt_level != 0)
1156                 return (EINVAL);
1157
1158         unp = sotounpcb(so);
1159         KASSERT(unp != NULL, ("uipc_ctloutput: unp == NULL"));
1160         error = 0;
1161         switch (sopt->sopt_dir) {
1162         case SOPT_GET:
1163                 switch (sopt->sopt_name) {
1164                 case LOCAL_PEERCRED:
1165                         UNP_PCB_LOCK(unp);
1166                         if (unp->unp_flags & UNP_HAVEPC)
1167                                 xu = unp->unp_peercred;
1168                         else {
1169                                 if (so->so_type == SOCK_STREAM)
1170                                         error = ENOTCONN;
1171                                 else
1172                                         error = EINVAL;
1173                         }
1174                         UNP_PCB_UNLOCK(unp);
1175                         if (error == 0)
1176                                 error = sooptcopyout(sopt, &xu, sizeof(xu));
1177                         break;
1178
1179                 case LOCAL_CREDS:
1180                         /* Unlocked read. */
1181                         optval = unp->unp_flags & UNP_WANTCRED ? 1 : 0;
1182                         error = sooptcopyout(sopt, &optval, sizeof(optval));
1183                         break;
1184
1185                 case LOCAL_CONNWAIT:
1186                         /* Unlocked read. */
1187                         optval = unp->unp_flags & UNP_CONNWAIT ? 1 : 0;
1188                         error = sooptcopyout(sopt, &optval, sizeof(optval));
1189                         break;
1190
1191                 default:
1192                         error = EOPNOTSUPP;
1193                         break;
1194                 }
1195                 break;
1196
1197         case SOPT_SET:
1198                 switch (sopt->sopt_name) {
1199                 case LOCAL_CREDS:
1200                 case LOCAL_CONNWAIT:
1201                         error = sooptcopyin(sopt, &optval, sizeof(optval),
1202                                             sizeof(optval));
1203                         if (error)
1204                                 break;
1205
1206 #define OPTSET(bit) do {                                                \
1207         UNP_PCB_LOCK(unp);                                              \
1208         if (optval)                                                     \
1209                 unp->unp_flags |= bit;                                  \
1210         else                                                            \
1211                 unp->unp_flags &= ~bit;                                 \
1212         UNP_PCB_UNLOCK(unp);                                            \
1213 } while (0)
1214
1215                         switch (sopt->sopt_name) {
1216                         case LOCAL_CREDS:
1217                                 OPTSET(UNP_WANTCRED);
1218                                 break;
1219
1220                         case LOCAL_CONNWAIT:
1221                                 OPTSET(UNP_CONNWAIT);
1222                                 break;
1223
1224                         default:
1225                                 break;
1226                         }
1227                         break;
1228 #undef  OPTSET
1229                 default:
1230                         error = ENOPROTOOPT;
1231                         break;
1232                 }
1233                 break;
1234
1235         default:
1236                 error = EOPNOTSUPP;
1237                 break;
1238         }
1239         return (error);
1240 }
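
/*
 * Illustrative userland sketch (not part of this file): the socket options
 * handled above are accessed at option level 0, the only level
 * uipc_ctloutput() accepts.  A server might fetch the connected peer's
 * cached credentials with LOCAL_PEERCRED, or request per-message
 * credentials with LOCAL_CREDS:
 *
 *	#include <sys/socket.h>
 *	#include <sys/ucred.h>
 *	#include <sys/un.h>
 *	#include <stdio.h>
 *
 *	struct xucred xucred;
 *	socklen_t len = sizeof(xucred);
 *	int on = 1;
 *
 *	if (getsockopt(s, 0, LOCAL_PEERCRED, &xucred, &len) == 0 &&
 *	    xucred.cr_version == XUCRED_VERSION)
 *		printf("peer uid %d\n", (int)xucred.cr_uid);
 *	(void)setsockopt(s, 0, LOCAL_CREDS, &on, sizeof(on));
 */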
1241
1242 static int
1243 unp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
1244 {
1245         struct sockaddr_un *soun = (struct sockaddr_un *)nam;
1246         struct vnode *vp;
1247         struct socket *so2, *so3;
1248         struct unpcb *unp, *unp2, *unp3;
1249         int error, len, vfslocked;
1250         struct nameidata nd;
1251         char buf[SOCK_MAXADDRLEN];
1252         struct sockaddr *sa;
1253
1254         UNP_LINK_WLOCK_ASSERT();
1255
1256         unp = sotounpcb(so);
1257         KASSERT(unp != NULL, ("unp_connect: unp == NULL"));
1258
1259         if (nam->sa_len > sizeof(struct sockaddr_un))
1260                 return (EINVAL);
1261         len = nam->sa_len - offsetof(struct sockaddr_un, sun_path);
1262         if (len <= 0)
1263                 return (EINVAL);
1264         bcopy(soun->sun_path, buf, len);
1265         buf[len] = 0;
1266
1267         UNP_PCB_LOCK(unp);
1268         if (unp->unp_flags & UNP_CONNECTING) {
1269                 UNP_PCB_UNLOCK(unp);
1270                 return (EALREADY);
1271         }
1272         UNP_LINK_WUNLOCK();
1273         unp->unp_flags |= UNP_CONNECTING;
1274         UNP_PCB_UNLOCK(unp);
1275
1276         sa = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK);
1277         NDINIT(&nd, LOOKUP, MPSAFE | FOLLOW | LOCKSHARED | LOCKLEAF,
1278             UIO_SYSSPACE, buf, td);
1279         error = namei(&nd);
1280         if (error)
1281                 vp = NULL;
1282         else
1283                 vp = nd.ni_vp;
1284         ASSERT_VOP_LOCKED(vp, "unp_connect");
1285         vfslocked = NDHASGIANT(&nd);
1286         NDFREE(&nd, NDF_ONLY_PNBUF);
1287         if (error)
1288                 goto bad;
1289
1290         if (vp->v_type != VSOCK) {
1291                 error = ENOTSOCK;
1292                 goto bad;
1293         }
1294 #ifdef MAC
1295         error = mac_vnode_check_open(td->td_ucred, vp, VWRITE | VREAD);
1296         if (error)
1297                 goto bad;
1298 #endif
1299         error = VOP_ACCESS(vp, VWRITE, td->td_ucred, td);
1300         if (error)
1301                 goto bad;
1302         VFS_UNLOCK_GIANT(vfslocked);
1303
1304         unp = sotounpcb(so);
1305         KASSERT(unp != NULL, ("unp_connect: unp == NULL"));
1306
1307         /*
1308          * Lock the linkage lock for two reasons: to make sure v_socket is
1309          * stable, and to protect the simultaneous locking of multiple pcbs.
1310          */
1311         UNP_LINK_WLOCK();
1312         so2 = vp->v_socket;
1313         if (so2 == NULL) {
1314                 error = ECONNREFUSED;
1315                 goto bad2;
1316         }
1317         if (so->so_type != so2->so_type) {
1318                 error = EPROTOTYPE;
1319                 goto bad2;
1320         }
1321         if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
1322                 if (so2->so_options & SO_ACCEPTCONN) {
1323                         CURVNET_SET(so2->so_vnet);
1324                         so3 = sonewconn(so2, 0);
1325                         CURVNET_RESTORE();
1326                 } else
1327                         so3 = NULL;
1328                 if (so3 == NULL) {
1329                         error = ECONNREFUSED;
1330                         goto bad2;
1331                 }
1332                 unp = sotounpcb(so);
1333                 unp2 = sotounpcb(so2);
1334                 unp3 = sotounpcb(so3);
1335                 UNP_PCB_LOCK(unp);
1336                 UNP_PCB_LOCK(unp2);
1337                 UNP_PCB_LOCK(unp3);
1338                 if (unp2->unp_addr != NULL) {
1339                         bcopy(unp2->unp_addr, sa, unp2->unp_addr->sun_len);
1340                         unp3->unp_addr = (struct sockaddr_un *) sa;
1341                         sa = NULL;
1342                 }
1343
1344                 /*
1345                  * The connector's (client's) credentials are copied from its
1346                  * process structure at the time of connect() (which is now).
1347                  */
1348                 cru2x(td->td_ucred, &unp3->unp_peercred);
1349                 unp3->unp_flags |= UNP_HAVEPC;
1350
1351                 /*
1352                  * The receiver's (server's) credentials are copied from the
1353                  * unp_peercred member of the socket on which the server called
1354                  * listen(); uipc_listen() cached that process's credentials
1355                  * at that time so we can use them now.
1356                  */
1357                 KASSERT(unp2->unp_flags & UNP_HAVEPCCACHED,
1358                     ("unp_connect: listener without cached peercred"));
1359                 memcpy(&unp->unp_peercred, &unp2->unp_peercred,
1360                     sizeof(unp->unp_peercred));
1361                 unp->unp_flags |= UNP_HAVEPC;
1362                 if (unp2->unp_flags & UNP_WANTCRED)
1363                         unp3->unp_flags |= UNP_WANTCRED;
1364                 UNP_PCB_UNLOCK(unp3);
1365                 UNP_PCB_UNLOCK(unp2);
1366                 UNP_PCB_UNLOCK(unp);
1367 #ifdef MAC
1368                 mac_socketpeer_set_from_socket(so, so3);
1369                 mac_socketpeer_set_from_socket(so3, so);
1370 #endif
1371
1372                 so2 = so3;
1373         }
1374         unp = sotounpcb(so);
1375         KASSERT(unp != NULL, ("unp_connect: unp == NULL"));
1376         unp2 = sotounpcb(so2);
1377         KASSERT(unp2 != NULL, ("unp_connect: unp2 == NULL"));
1378         UNP_PCB_LOCK(unp);
1379         UNP_PCB_LOCK(unp2);
1380         error = unp_connect2(so, so2, PRU_CONNECT);
1381         UNP_PCB_UNLOCK(unp2);
1382         UNP_PCB_UNLOCK(unp);
1383 bad2:
1384         UNP_LINK_WUNLOCK();
1385         if (vfslocked)
1386                 /* 
1387                  * Giant has been previously acquired.  This means the
1388                  * filesystem isn't MPSAFE.  Acquire it once again.
1389                  */
1390                 mtx_lock(&Giant);
1391 bad:
1392         if (vp != NULL)
1393                 vput(vp);
1394         VFS_UNLOCK_GIANT(vfslocked);
1395         free(sa, M_SONAME);
1396         UNP_LINK_WLOCK();
1397         UNP_PCB_LOCK(unp);
1398         unp->unp_flags &= ~UNP_CONNECTING;
1399         UNP_PCB_UNLOCK(unp);
1400         return (error);
1401 }
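
/*
 * Illustrative userland sketch (not part of this file): the client side of
 * the path handled by unp_connect() above.  Per the code above, connecting
 * to a path whose vnode has no listening socket attached fails with
 * ECONNREFUSED, and a socket type mismatch fails with EPROTOTYPE.  The
 * path used here is hypothetical.
 *
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *	#include <err.h>
 *	#include <string.h>
 *
 *	struct sockaddr_un sun;
 *	int s;
 *
 *	s = socket(PF_LOCAL, SOCK_STREAM, 0);
 *	memset(&sun, 0, sizeof(sun));
 *	sun.sun_family = AF_LOCAL;
 *	strlcpy(sun.sun_path, "/tmp/example.sock", sizeof(sun.sun_path));
 *	sun.sun_len = SUN_LEN(&sun);
 *	if (connect(s, (struct sockaddr *)&sun, SUN_LEN(&sun)) == -1)
 *		warn("connect");
 */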
1402
1403 static int
1404 unp_connect2(struct socket *so, struct socket *so2, int req)
1405 {
1406         struct unpcb *unp;
1407         struct unpcb *unp2;
1408
1409         unp = sotounpcb(so);
1410         KASSERT(unp != NULL, ("unp_connect2: unp == NULL"));
1411         unp2 = sotounpcb(so2);
1412         KASSERT(unp2 != NULL, ("unp_connect2: unp2 == NULL"));
1413
1414         UNP_LINK_WLOCK_ASSERT();
1415         UNP_PCB_LOCK_ASSERT(unp);
1416         UNP_PCB_LOCK_ASSERT(unp2);
1417
1418         if (so2->so_type != so->so_type)
1419                 return (EPROTOTYPE);
1420         unp->unp_conn = unp2;
1421
1422         switch (so->so_type) {
1423         case SOCK_DGRAM:
1424                 LIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_reflink);
1425                 soisconnected(so);
1426                 break;
1427
1428         case SOCK_STREAM:
1429         case SOCK_SEQPACKET:
1430                 unp2->unp_conn = unp;
1431                 if (req == PRU_CONNECT &&
1432                     ((unp->unp_flags | unp2->unp_flags) & UNP_CONNWAIT))
1433                         soisconnecting(so);
1434                 else
1435                         soisconnected(so);
1436                 soisconnected(so2);
1437                 break;
1438
1439         default:
1440                 panic("unp_connect2");
1441         }
1442         return (0);
1443 }
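
/*
 * Example (userland sketch; illustrative only): socketpair(2) reaches
 * unp_connect2() via the protocol's pru_connect2 method, so both new
 * sockets come back already cross-linked and connected to one another.
 * make_pair() is a hypothetical helper.
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int
 *	make_pair(int sv[2])
 *	{
 *		char c = 'x';
 *
 *		if (socketpair(PF_LOCAL, SOCK_STREAM, 0, sv) == -1)
 *			return (-1);
 *		(void)write(sv[0], &c, 1);
 *		(void)read(sv[1], &c, 1);
 *		return (0);
 *	}
 */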
1444
1445 static void
1446 unp_disconnect(struct unpcb *unp, struct unpcb *unp2)
1447 {
1448         struct socket *so;
1449
1450         KASSERT(unp2 != NULL, ("unp_disconnect: unp2 == NULL"));
1451
1452         UNP_LINK_WLOCK_ASSERT();
1453         UNP_PCB_LOCK_ASSERT(unp);
1454         UNP_PCB_LOCK_ASSERT(unp2);
1455
1456         unp->unp_conn = NULL;
1457         switch (unp->unp_socket->so_type) {
1458         case SOCK_DGRAM:
1459                 LIST_REMOVE(unp, unp_reflink);
1460                 so = unp->unp_socket;
1461                 SOCK_LOCK(so);
1462                 so->so_state &= ~SS_ISCONNECTED;
1463                 SOCK_UNLOCK(so);
1464                 break;
1465
1466         case SOCK_STREAM:
1467         case SOCK_SEQPACKET:
1468                 soisdisconnected(unp->unp_socket);
1469                 unp2->unp_conn = NULL;
1470                 soisdisconnected(unp2->unp_socket);
1471                 break;
1472         }
1473 }
1474
1475 /*
1476  * unp_pcblist() walks the global list of struct unpcb's to generate a
1477  * pointer list, bumping the refcount on each unpcb.  It then copies them out
1478  * sequentially, validating the generation number on each to see if it has
1479  * been detached.  All of this is necessary because copyout() may sleep on
1480  * disk I/O.
1481  */
1482 static int
1483 unp_pcblist(SYSCTL_HANDLER_ARGS)
1484 {
1485         int error, i, n;
1486         int freeunp;
1487         struct unpcb *unp, **unp_list;
1488         unp_gen_t gencnt;
1489         struct xunpgen *xug;
1490         struct unp_head *head;
1491         struct xunpcb *xu;
1492
1493         switch ((intptr_t)arg1) {
1494         case SOCK_STREAM:
1495                 head = &unp_shead;
1496                 break;
1497
1498         case SOCK_DGRAM:
1499                 head = &unp_dhead;
1500                 break;
1501
1502         case SOCK_SEQPACKET:
1503                 head = &unp_sphead;
1504                 break;
1505
1506         default:
1507                 panic("unp_pcblist: arg1 %d", (int)(intptr_t)arg1);
1508         }
1509
1510         /*
1511          * Preparing the PCB list is too time-consuming and resource-intensive
1512          * to do twice per request, so a size-only probe just gets an estimate.
1513          */
1514         if (req->oldptr == NULL) {
1515                 n = unp_count;
1516                 req->oldidx = 2 * (sizeof *xug)
1517                         + (n + n/8) * sizeof(struct xunpcb);
1518                 return (0);
1519         }
1520
1521         if (req->newptr != NULL)
1522                 return (EPERM);
1523
1524         /*
1525          * OK, now we're committed to doing something.
1526          */
1527         xug = malloc(sizeof(*xug), M_TEMP, M_WAITOK);
1528         UNP_LIST_LOCK();
1529         gencnt = unp_gencnt;
1530         n = unp_count;
1531         UNP_LIST_UNLOCK();
1532
1533         xug->xug_len = sizeof *xug;
1534         xug->xug_count = n;
1535         xug->xug_gen = gencnt;
1536         xug->xug_sogen = so_gencnt;
1537         error = SYSCTL_OUT(req, xug, sizeof *xug);
1538         if (error) {
1539                 free(xug, M_TEMP);
1540                 return (error);
1541         }
1542
1543         unp_list = malloc(n * sizeof *unp_list, M_TEMP, M_WAITOK);
1544
1545         UNP_LIST_LOCK();
1546         for (unp = LIST_FIRST(head), i = 0; unp && i < n;
1547              unp = LIST_NEXT(unp, unp_link)) {
1548                 UNP_PCB_LOCK(unp);
1549                 if (unp->unp_gencnt <= gencnt) {
1550                         if (cr_cansee(req->td->td_ucred,
1551                             unp->unp_socket->so_cred)) {
1552                                 UNP_PCB_UNLOCK(unp);
1553                                 continue;
1554                         }
1555                         unp_list[i++] = unp;
1556                         unp->unp_refcount++;
1557                 }
1558                 UNP_PCB_UNLOCK(unp);
1559         }
1560         UNP_LIST_UNLOCK();
1561         n = i;                  /* In case we lost some during malloc. */
1562
1563         error = 0;
1564         xu = malloc(sizeof(*xu), M_TEMP, M_WAITOK | M_ZERO);
1565         for (i = 0; i < n; i++) {
1566                 unp = unp_list[i];
1567                 UNP_PCB_LOCK(unp);
1568                 unp->unp_refcount--;
1569                 if (unp->unp_refcount != 0 && unp->unp_gencnt <= gencnt) {
1570                         xu->xu_len = sizeof *xu;
1571                         xu->xu_unpp = unp;
1572                         /*
1573                          * XXX - need more locking here to protect against
1574                          * connect/disconnect races for SMP.
1575                          */
1576                         if (unp->unp_addr != NULL)
1577                                 bcopy(unp->unp_addr, &xu->xu_addr,
1578                                       unp->unp_addr->sun_len);
1579                         if (unp->unp_conn != NULL &&
1580                             unp->unp_conn->unp_addr != NULL)
1581                                 bcopy(unp->unp_conn->unp_addr,
1582                                       &xu->xu_caddr,
1583                                       unp->unp_conn->unp_addr->sun_len);
1584                         bcopy(unp, &xu->xu_unp, sizeof *unp);
1585                         sotoxsocket(unp->unp_socket, &xu->xu_socket);
1586                         UNP_PCB_UNLOCK(unp);
1587                         error = SYSCTL_OUT(req, xu, sizeof *xu);
1588                 } else {
1589                         freeunp = (unp->unp_refcount == 0);
1590                         UNP_PCB_UNLOCK(unp);
1591                         if (freeunp) {
1592                                 UNP_PCB_LOCK_DESTROY(unp);
1593                                 uma_zfree(unp_zone, unp);
1594                         }
1595                 }
1596         }
1597         free(xu, M_TEMP);
1598         if (!error) {
1599                 /*
1600                  * Give the user an updated idea of our state.  If the
1601                  * generation differs from what we told her before, she knows
1602                  * that something happened while we were processing this
1603                  * request, and it might be necessary to retry.
1604                  */
1605                 xug->xug_gen = unp_gencnt;
1606                 xug->xug_sogen = so_gencnt;
1607                 xug->xug_count = unp_count;
1608                 error = SYSCTL_OUT(req, xug, sizeof *xug);
1609         }
1610         free(unp_list, M_TEMP);
1611         free(xug, M_TEMP);
1612         return (error);
1613 }
1614
1615 SYSCTL_PROC(_net_local_dgram, OID_AUTO, pcblist, CTLFLAG_RD,
1616             (caddr_t)(long)SOCK_DGRAM, 0, unp_pcblist, "S,xunpcb",
1617             "List of active local datagram sockets");
1618 SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist, CTLFLAG_RD,
1619             (caddr_t)(long)SOCK_STREAM, 0, unp_pcblist, "S,xunpcb",
1620             "List of active local stream sockets");
1621 SYSCTL_PROC(_net_local_seqpacket, OID_AUTO, pcblist, CTLFLAG_RD,
1622             (caddr_t)(long)SOCK_SEQPACKET, 0, unp_pcblist, "S,xunpcb",
1623             "List of active local seqpacket sockets");
1624
1625 static void
1626 unp_shutdown(struct unpcb *unp)
1627 {
1628         struct unpcb *unp2;
1629         struct socket *so;
1630
1631         UNP_LINK_WLOCK_ASSERT();
1632         UNP_PCB_LOCK_ASSERT(unp);
1633
1634         unp2 = unp->unp_conn;
1635         if ((unp->unp_socket->so_type == SOCK_STREAM ||
1636             unp->unp_socket->so_type == SOCK_SEQPACKET) && unp2 != NULL) {
1637                 so = unp2->unp_socket;
1638                 if (so != NULL)
1639                         socantrcvmore(so);
1640         }
1641 }
1642
1643 static void
1644 unp_drop(struct unpcb *unp, int errno)
1645 {
1646         struct socket *so = unp->unp_socket;
1647         struct unpcb *unp2;
1648
1649         UNP_LINK_WLOCK_ASSERT();
1650         UNP_PCB_LOCK_ASSERT(unp);
1651
1652         so->so_error = errno;
1653         unp2 = unp->unp_conn;
1654         if (unp2 == NULL)
1655                 return;
1656         UNP_PCB_LOCK(unp2);
1657         unp_disconnect(unp, unp2);
1658         UNP_PCB_UNLOCK(unp2);
1659 }
1660
1661 static void
1662 unp_freerights(struct file **rp, int fdcount)
1663 {
1664         int i;
1665         struct file *fp;
1666
1667         for (i = 0; i < fdcount; i++) {
1668                 fp = *rp;
1669                 *rp++ = NULL;
1670                 unp_discard(fp);
1671         }
1672 }
1673
1674 static int
1675 unp_externalize(struct mbuf *control, struct mbuf **controlp)
1676 {
1677         struct thread *td = curthread;          /* XXX */
1678         struct cmsghdr *cm = mtod(control, struct cmsghdr *);
1679         int i;
1680         int *fdp;
1681         struct file **rp;
1682         struct file *fp;
1683         void *data;
1684         socklen_t clen = control->m_len, datalen;
1685         int error, newfds;
1686         int f;
1687         u_int newlen;
1688
1689         UNP_LINK_UNLOCK_ASSERT();
1690
1691         error = 0;
1692         if (controlp != NULL) /* controlp == NULL => free control messages */
1693                 *controlp = NULL;
1694         while (cm != NULL) {
1695                 if (sizeof(*cm) > clen || cm->cmsg_len > clen) {
1696                         error = EINVAL;
1697                         break;
1698                 }
1699                 data = CMSG_DATA(cm);
1700                 datalen = (caddr_t)cm + cm->cmsg_len - (caddr_t)data;
1701                 if (cm->cmsg_level == SOL_SOCKET
1702                     && cm->cmsg_type == SCM_RIGHTS) {
1703                         newfds = datalen / sizeof(struct file *);
1704                         rp = data;
1705
1706                         /* If we're not outputting the descriptors, free them. */
1707                         if (error || controlp == NULL) {
1708                                 unp_freerights(rp, newfds);
1709                                 goto next;
1710                         }
1711                         FILEDESC_XLOCK(td->td_proc->p_fd);
1712                         /* If the new FDs will not fit, free them. */
1713                         if (!fdavail(td, newfds)) {
1714                                 FILEDESC_XUNLOCK(td->td_proc->p_fd);
1715                                 error = EMSGSIZE;
1716                                 unp_freerights(rp, newfds);
1717                                 goto next;
1718                         }
1719
1720                         /*
1721                          * Now change each pointer into the global file table
1722                          * to an integer: the index of the local fd table
1723                          * entry that we set up to point to the global file
1724                          * being transferred.
1725                          */
1726                         newlen = newfds * sizeof(int);
1727                         *controlp = sbcreatecontrol(NULL, newlen,
1728                             SCM_RIGHTS, SOL_SOCKET);
1729                         if (*controlp == NULL) {
1730                                 FILEDESC_XUNLOCK(td->td_proc->p_fd);
1731                                 error = E2BIG;
1732                                 unp_freerights(rp, newfds);
1733                                 goto next;
1734                         }
1735
1736                         fdp = (int *)
1737                             CMSG_DATA(mtod(*controlp, struct cmsghdr *));
1738                         for (i = 0; i < newfds; i++) {
1739                                 if (fdalloc(td, 0, &f))
1740                                         panic("unp_externalize fdalloc failed");
1741                                 fp = *rp++;
1742                                 td->td_proc->p_fd->fd_ofiles[f] = fp;
1743                                 unp_externalize_fp(fp);
1744                                 *fdp++ = f;
1745                         }
1746                         FILEDESC_XUNLOCK(td->td_proc->p_fd);
1747                 } else {
1748                         /* We can just copy anything else across. */
1749                         if (error || controlp == NULL)
1750                                 goto next;
1751                         *controlp = sbcreatecontrol(NULL, datalen,
1752                             cm->cmsg_type, cm->cmsg_level);
1753                         if (*controlp == NULL) {
1754                                 error = ENOBUFS;
1755                                 goto next;
1756                         }
1757                         bcopy(data,
1758                             CMSG_DATA(mtod(*controlp, struct cmsghdr *)),
1759                             datalen);
1760                 }
1761                 controlp = &(*controlp)->m_next;
1762
1763 next:
1764                 if (CMSG_SPACE(datalen) < clen) {
1765                         clen -= CMSG_SPACE(datalen);
1766                         cm = (struct cmsghdr *)
1767                             ((caddr_t)cm + CMSG_SPACE(datalen));
1768                 } else {
1769                         clen = 0;
1770                         cm = NULL;
1771                 }
1772         }
1773
1774         m_freem(control);
1775         return (error);
1776 }
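
/*
 * Example (userland sketch; illustrative only): the receive side of
 * descriptor passing.  A recvmsg(2) call with sufficient control space makes
 * unp_externalize() allocate descriptors in the caller's fd table and
 * rewrite the SCM_RIGHTS payload into integer fds.  recv_fd() is a
 * hypothetical helper that expects exactly one descriptor.
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *	#include <string.h>
 *
 *	int
 *	recv_fd(int s)
 *	{
 *		union {
 *			struct cmsghdr hdr;
 *			char buf[CMSG_SPACE(sizeof(int))];
 *		} cmsgbuf;
 *		struct msghdr msg;
 *		struct cmsghdr *cm;
 *		struct iovec iov;
 *		char c;
 *		int fd = -1;
 *
 *		iov.iov_base = &c;
 *		iov.iov_len = sizeof(c);
 *		memset(&msg, 0, sizeof(msg));
 *		msg.msg_iov = &iov;
 *		msg.msg_iovlen = 1;
 *		msg.msg_control = cmsgbuf.buf;
 *		msg.msg_controllen = sizeof(cmsgbuf.buf);
 *		if (recvmsg(s, &msg, 0) == -1)
 *			return (-1);
 *		for (cm = CMSG_FIRSTHDR(&msg); cm != NULL;
 *		    cm = CMSG_NXTHDR(&msg, cm))
 *			if (cm->cmsg_level == SOL_SOCKET &&
 *			    cm->cmsg_type == SCM_RIGHTS)
 *				memcpy(&fd, CMSG_DATA(cm), sizeof(fd));
 *		return (fd);
 *	}
 */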
1777
1778 static void
1779 unp_zone_change(void *tag)
1780 {
1781
1782         uma_zone_set_max(unp_zone, maxsockets);
1783 }
1784
1785 static void
1786 unp_init(void)
1787 {
1788
1789 #ifdef VIMAGE
1790         if (!IS_DEFAULT_VNET(curvnet))
1791                 return;
1792 #endif
1793         unp_zone = uma_zcreate("unpcb", sizeof(struct unpcb), NULL, NULL,
1794             NULL, NULL, UMA_ALIGN_PTR, 0);
1795         if (unp_zone == NULL)
1796                 panic("unp_init");
1797         uma_zone_set_max(unp_zone, maxsockets);
1798         EVENTHANDLER_REGISTER(maxsockets_change, unp_zone_change,
1799             NULL, EVENTHANDLER_PRI_ANY);
1800         LIST_INIT(&unp_dhead);
1801         LIST_INIT(&unp_shead);
1802         LIST_INIT(&unp_sphead);
1803         SLIST_INIT(&unp_defers);
1804         TASK_INIT(&unp_gc_task, 0, unp_gc, NULL);
1805         TASK_INIT(&unp_defer_task, 0, unp_process_defers, NULL);
1806         UNP_LINK_LOCK_INIT();
1807         UNP_LIST_LOCK_INIT();
1808         UNP_DEFERRED_LOCK_INIT();
1809 }
1810
1811 static int
1812 unp_internalize(struct mbuf **controlp, struct thread *td)
1813 {
1814         struct mbuf *control = *controlp;
1815         struct proc *p = td->td_proc;
1816         struct filedesc *fdescp = p->p_fd;
1817         struct bintime *bt;
1818         struct cmsghdr *cm = mtod(control, struct cmsghdr *);
1819         struct cmsgcred *cmcred;
1820         struct file **rp;
1821         struct file *fp;
1822         struct timeval *tv;
1823         int i, fd, *fdp;
1824         void *data;
1825         socklen_t clen = control->m_len, datalen;
1826         int error, oldfds;
1827         u_int newlen;
1828
1829         UNP_LINK_UNLOCK_ASSERT();
1830
1831         error = 0;
1832         *controlp = NULL;
1833         while (cm != NULL) {
1834                 if (sizeof(*cm) > clen || cm->cmsg_level != SOL_SOCKET
1835                     || cm->cmsg_len > clen) {
1836                         error = EINVAL;
1837                         goto out;
1838                 }
1839                 data = CMSG_DATA(cm);
1840                 datalen = (caddr_t)cm + cm->cmsg_len - (caddr_t)data;
1841
1842                 switch (cm->cmsg_type) {
1843                 /*
1844                  * Fill in credential information.
1845                  */
1846                 case SCM_CREDS:
1847                         *controlp = sbcreatecontrol(NULL, sizeof(*cmcred),
1848                             SCM_CREDS, SOL_SOCKET);
1849                         if (*controlp == NULL) {
1850                                 error = ENOBUFS;
1851                                 goto out;
1852                         }
1853                         cmcred = (struct cmsgcred *)
1854                             CMSG_DATA(mtod(*controlp, struct cmsghdr *));
1855                         cmcred->cmcred_pid = p->p_pid;
1856                         cmcred->cmcred_uid = td->td_ucred->cr_ruid;
1857                         cmcred->cmcred_gid = td->td_ucred->cr_rgid;
1858                         cmcred->cmcred_euid = td->td_ucred->cr_uid;
1859                         cmcred->cmcred_ngroups = MIN(td->td_ucred->cr_ngroups,
1860                             CMGROUP_MAX);
1861                         for (i = 0; i < cmcred->cmcred_ngroups; i++)
1862                                 cmcred->cmcred_groups[i] =
1863                                     td->td_ucred->cr_groups[i];
1864                         break;
1865
1866                 case SCM_RIGHTS:
1867                         oldfds = datalen / sizeof (int);
1868                         /*
1869                          * Check that all the FDs passed in refer to legal
1870                          * files.  If not, reject the entire operation.
1871                          */
1872                         fdp = data;
1873                         FILEDESC_SLOCK(fdescp);
1874                         for (i = 0; i < oldfds; i++) {
1875                                 fd = *fdp++;
1876                                 if ((unsigned)fd >= fdescp->fd_nfiles ||
1877                                     fdescp->fd_ofiles[fd] == NULL) {
1878                                         FILEDESC_SUNLOCK(fdescp);
1879                                         error = EBADF;
1880                                         goto out;
1881                                 }
1882                                 fp = fdescp->fd_ofiles[fd];
1883                                 if (!(fp->f_ops->fo_flags & DFLAG_PASSABLE)) {
1884                                         FILEDESC_SUNLOCK(fdescp);
1885                                         error = EOPNOTSUPP;
1886                                         goto out;
1887                                 }
1888
1889                         }
1890
1891                         /*
1892                          * Now replace the integer FDs with pointers to the
1893                          * associated global file table entries.
1894                          */
1895                         newlen = oldfds * sizeof(struct file *);
1896                         *controlp = sbcreatecontrol(NULL, newlen,
1897                             SCM_RIGHTS, SOL_SOCKET);
1898                         if (*controlp == NULL) {
1899                                 FILEDESC_SUNLOCK(fdescp);
1900                                 error = E2BIG;
1901                                 goto out;
1902                         }
1903                         fdp = data;
1904                         rp = (struct file **)
1905                             CMSG_DATA(mtod(*controlp, struct cmsghdr *));
1906                         for (i = 0; i < oldfds; i++) {
1907                                 fp = fdescp->fd_ofiles[*fdp++];
1908                                 *rp++ = fp;
1909                                 unp_internalize_fp(fp);
1910                         }
1911                         FILEDESC_SUNLOCK(fdescp);
1912                         break;
1913
1914                 case SCM_TIMESTAMP:
1915                         *controlp = sbcreatecontrol(NULL, sizeof(*tv),
1916                             SCM_TIMESTAMP, SOL_SOCKET);
1917                         if (*controlp == NULL) {
1918                                 error = ENOBUFS;
1919                                 goto out;
1920                         }
1921                         tv = (struct timeval *)
1922                             CMSG_DATA(mtod(*controlp, struct cmsghdr *));
1923                         microtime(tv);
1924                         break;
1925
1926                 case SCM_BINTIME:
1927                         *controlp = sbcreatecontrol(NULL, sizeof(*bt),
1928                             SCM_BINTIME, SOL_SOCKET);
1929                         if (*controlp == NULL) {
1930                                 error = ENOBUFS;
1931                                 goto out;
1932                         }
1933                         bt = (struct bintime *)
1934                             CMSG_DATA(mtod(*controlp, struct cmsghdr *));
1935                         bintime(bt);
1936                         break;
1937
1938                 default:
1939                         error = EINVAL;
1940                         goto out;
1941                 }
1942
1943                 controlp = &(*controlp)->m_next;
1944                 if (CMSG_SPACE(datalen) < clen) {
1945                         clen -= CMSG_SPACE(datalen);
1946                         cm = (struct cmsghdr *)
1947                             ((caddr_t)cm + CMSG_SPACE(datalen));
1948                 } else {
1949                         clen = 0;
1950                         cm = NULL;
1951                 }
1952         }
1953
1954 out:
1955         m_freem(control);
1956         return (error);
1957 }
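
/*
 * Example (userland sketch; illustrative only): the send side that
 * unp_internalize() services.  sendmsg(2) with an SCM_RIGHTS control message
 * hands the integer fds to the kernel, which converts them to struct file
 * pointers as above.  send_fd() is a hypothetical helper passing a single
 * descriptor.
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *	#include <string.h>
 *
 *	int
 *	send_fd(int s, int fd)
 *	{
 *		union {
 *			struct cmsghdr hdr;
 *			char buf[CMSG_SPACE(sizeof(int))];
 *		} cmsgbuf;
 *		struct msghdr msg;
 *		struct cmsghdr *cm;
 *		struct iovec iov;
 *		char c = 0;
 *
 *		iov.iov_base = &c;
 *		iov.iov_len = sizeof(c);
 *		memset(&msg, 0, sizeof(msg));
 *		msg.msg_iov = &iov;
 *		msg.msg_iovlen = 1;
 *		msg.msg_control = cmsgbuf.buf;
 *		msg.msg_controllen = CMSG_SPACE(sizeof(int));
 *		cm = CMSG_FIRSTHDR(&msg);
 *		cm->cmsg_len = CMSG_LEN(sizeof(int));
 *		cm->cmsg_level = SOL_SOCKET;
 *		cm->cmsg_type = SCM_RIGHTS;
 *		memcpy(CMSG_DATA(cm), &fd, sizeof(fd));
 *		return (sendmsg(s, &msg, 0) == -1 ? -1 : 0);
 *	}
 */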
1958
1959 static struct mbuf *
1960 unp_addsockcred(struct thread *td, struct mbuf *control)
1961 {
1962         struct mbuf *m, *n, *n_prev;
1963         struct sockcred *sc;
1964         const struct cmsghdr *cm;
1965         int ngroups;
1966         int i;
1967
1968         ngroups = MIN(td->td_ucred->cr_ngroups, CMGROUP_MAX);
1969         m = sbcreatecontrol(NULL, SOCKCREDSIZE(ngroups), SCM_CREDS, SOL_SOCKET);
1970         if (m == NULL)
1971                 return (control);
1972
1973         sc = (struct sockcred *) CMSG_DATA(mtod(m, struct cmsghdr *));
1974         sc->sc_uid = td->td_ucred->cr_ruid;
1975         sc->sc_euid = td->td_ucred->cr_uid;
1976         sc->sc_gid = td->td_ucred->cr_rgid;
1977         sc->sc_egid = td->td_ucred->cr_gid;
1978         sc->sc_ngroups = ngroups;
1979         for (i = 0; i < sc->sc_ngroups; i++)
1980                 sc->sc_groups[i] = td->td_ucred->cr_groups[i];
1981
1982         /*
1983          * Unlink any existing SCM_CREDS control messages (struct cmsgcred),
1984          * since the just-created SCM_CREDS control message (struct sockcred)
1985          * has a different format.
1986          */
1987         if (control != NULL)
1988                 for (n = control, n_prev = NULL; n != NULL;) {
1989                         cm = mtod(n, struct cmsghdr *);
1990                         if (cm->cmsg_level == SOL_SOCKET &&
1991                             cm->cmsg_type == SCM_CREDS) {
1992                                 if (n_prev == NULL)
1993                                         control = n->m_next;
1994                                 else
1995                                         n_prev->m_next = n->m_next;
1996                                 n = m_free(n);
1997                         } else {
1998                                 n_prev = n;
1999                                 n = n->m_next;
2000                         }
2001                 }
2002
2003         /* Prepend it to the head. */
2004         m->m_next = control;
2005         return (m);
2006 }
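
/*
 * Example (userland sketch; illustrative only): a receiver opts in to
 * credential passing with LOCAL_CREDS, after which each received message
 * carries the struct sockcred built by unp_addsockcred() as SCM_CREDS
 * control data.  want_creds() is a hypothetical helper; LOCAL_CREDS is
 * assumed from <sys/un.h>, with 0 as the local-domain option level.
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *
 *	int
 *	want_creds(int s)
 *	{
 *		int on = 1;
 *
 *		return (setsockopt(s, 0, LOCAL_CREDS, &on, sizeof(on)));
 *	}
 *
 * The SCM_CREDS payload is then parsed from the control data in the same
 * way the SCM_RIGHTS loop after unp_externalize() walks its cmsghdrs.
 */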
2007
2008 static struct unpcb *
2009 fptounp(struct file *fp)
2010 {
2011         struct socket *so;
2012
2013         if (fp->f_type != DTYPE_SOCKET)
2014                 return (NULL);
2015         if ((so = fp->f_data) == NULL)
2016                 return (NULL);
2017         if (so->so_proto->pr_domain != &localdomain)
2018                 return (NULL);
2019         return sotounpcb(so);
2020 }
2021
2022 static void
2023 unp_discard(struct file *fp)
2024 {
2025         struct unp_defer *dr;
2026
2027         if (unp_externalize_fp(fp)) {
2028                 dr = malloc(sizeof(*dr), M_TEMP, M_WAITOK);
2029                 dr->ud_fp = fp;
2030                 UNP_DEFERRED_LOCK();
2031                 SLIST_INSERT_HEAD(&unp_defers, dr, ud_link);
2032                 UNP_DEFERRED_UNLOCK();
2033                 atomic_add_int(&unp_defers_count, 1);
2034                 taskqueue_enqueue(taskqueue_thread, &unp_defer_task);
2035         } else
2036                 (void) closef(fp, (struct thread *)NULL);
2037 }
2038
2039 static void
2040 unp_process_defers(void *arg __unused, int pending)
2041 {
2042         struct unp_defer *dr;
2043         SLIST_HEAD(, unp_defer) drl;
2044         int count;
2045
2046         SLIST_INIT(&drl);
2047         for (;;) {
2048                 UNP_DEFERRED_LOCK();
2049                 if (SLIST_FIRST(&unp_defers) == NULL) {
2050                         UNP_DEFERRED_UNLOCK();
2051                         break;
2052                 }
2053                 SLIST_SWAP(&unp_defers, &drl, unp_defer);
2054                 UNP_DEFERRED_UNLOCK();
2055                 count = 0;
2056                 while ((dr = SLIST_FIRST(&drl)) != NULL) {
2057                         SLIST_REMOVE_HEAD(&drl, ud_link);
2058                         closef(dr->ud_fp, NULL);
2059                         free(dr, M_TEMP);
2060                         count++;
2061                 }
2062                 atomic_add_int(&unp_defers_count, -count);
2063         }
2064 }
2065
2066 static void
2067 unp_internalize_fp(struct file *fp)
2068 {
2069         struct unpcb *unp;
2070
2071         UNP_LINK_WLOCK();
2072         if ((unp = fptounp(fp)) != NULL) {
2073                 unp->unp_file = fp;
2074                 unp->unp_msgcount++;
2075         }
2076         fhold(fp);
2077         unp_rights++;
2078         UNP_LINK_WUNLOCK();
2079 }
2080
2081 static int
2082 unp_externalize_fp(struct file *fp)
2083 {
2084         struct unpcb *unp;
2085         int ret;
2086
2087         UNP_LINK_WLOCK();
2088         if ((unp = fptounp(fp)) != NULL) {
2089                 unp->unp_msgcount--;
2090                 ret = 1;
2091         } else
2092                 ret = 0;
2093         unp_rights--;
2094         UNP_LINK_WUNLOCK();
2095         return (ret);
2096 }
2097
2098 /*
2099  * unp_marked records whether another pass through unp_gc() is needed, and
2100  * unp_unreachable counts sockets found unreachable.  Both are used only by
2101  * the GC task and do not require explicit synchronization.
2102  */
2103 static int      unp_marked;
2104 static int      unp_unreachable;
2105
2106 static void
2107 unp_accessable(struct file *fp)
2108 {
2109         struct unpcb *unp;
2110
2111         if ((unp = fptounp(fp)) == NULL)
2112                 return;
2113         if (unp->unp_gcflag & UNPGC_REF)
2114                 return;
2115         unp->unp_gcflag &= ~UNPGC_DEAD;
2116         unp->unp_gcflag |= UNPGC_REF;
2117         unp_marked++;
2118 }
2119
2120 static void
2121 unp_gc_process(struct unpcb *unp)
2122 {
2123         struct socket *soa;
2124         struct socket *so;
2125         struct file *fp;
2126
2127         /* Already processed. */
2128         if (unp->unp_gcflag & UNPGC_SCANNED)
2129                 return;
2130         fp = unp->unp_file;
2131
2132         /*
2133          * Check for a socket potentially in a cycle.  It must be in a
2134          * queue as indicated by msgcount, and this must equal the file
2135          * reference count.  Note that when msgcount is 0 the file is NULL.
2136          */
2137         if ((unp->unp_gcflag & UNPGC_REF) == 0 && fp &&
2138             unp->unp_msgcount != 0 && fp->f_count == unp->unp_msgcount) {
2139                 unp->unp_gcflag |= UNPGC_DEAD;
2140                 unp_unreachable++;
2141                 return;
2142         }
2143
2144         /*
2145          * Mark all sockets referenced via SCM_RIGHTS in our receive buffer.
2146          */
2147         so = unp->unp_socket;
2148         SOCKBUF_LOCK(&so->so_rcv);
2149         unp_scan(so->so_rcv.sb_mb, unp_accessable);
2150         SOCKBUF_UNLOCK(&so->so_rcv);
2151
2152         /*
2153          * Mark all sockets in our accept queue.
2154          */
2155         ACCEPT_LOCK();
2156         TAILQ_FOREACH(soa, &so->so_comp, so_list) {
2157                 SOCKBUF_LOCK(&soa->so_rcv);
2158                 unp_scan(soa->so_rcv.sb_mb, unp_accessable);
2159                 SOCKBUF_UNLOCK(&soa->so_rcv);
2160         }
2161         ACCEPT_UNLOCK();
2162         unp->unp_gcflag |= UNPGC_SCANNED;
2163 }
2164
2165 static int unp_recycled;
2166 SYSCTL_INT(_net_local, OID_AUTO, recycled, CTLFLAG_RD, &unp_recycled, 0, 
2167     "Number of unreachable sockets claimed by the garbage collector.");
2168
2169 static int unp_taskcount;
2170 SYSCTL_INT(_net_local, OID_AUTO, taskcount, CTLFLAG_RD, &unp_taskcount, 0, 
2171     "Number of times the garbage collector has run.");
2172
2173 static void
2174 unp_gc(__unused void *arg, int pending)
2175 {
2176         struct unp_head *heads[] = { &unp_dhead, &unp_shead, &unp_sphead,
2177                                     NULL };
2178         struct unp_head **head;
2179         struct file *f, **unref;
2180         struct unpcb *unp;
2181         int i, total;
2182
2183         unp_taskcount++;
2184         UNP_LIST_LOCK();
2185         /*
2186          * First clear all gc flags from previous runs.
2187          */
2188         for (head = heads; *head != NULL; head++)
2189                 LIST_FOREACH(unp, *head, unp_link)
2190                         unp->unp_gcflag = 0;
2191
2192         /*
2193          * Scan marking all reachable sockets with UNPGC_REF.  Once a socket
2194          * is reachable, all of the sockets it references are reachable.
2195          * Stop the scan once we do a complete loop without discovering
2196          * a new reachable socket.
2197          */
2198         do {
2199                 unp_unreachable = 0;
2200                 unp_marked = 0;
2201                 for (head = heads; *head != NULL; head++)
2202                         LIST_FOREACH(unp, *head, unp_link)
2203                                 unp_gc_process(unp);
2204         } while (unp_marked);
2205         UNP_LIST_UNLOCK();
2206         if (unp_unreachable == 0)
2207                 return;
2208
2209         /*
2210          * Allocate space for a local array of file pointers for the dead unpcbs.
2211          */
2212         unref = malloc(unp_unreachable * sizeof(struct file *),
2213             M_TEMP, M_WAITOK);
2214
2215         /*
2216          * Iterate looking for sockets which have been specifically marked
2217          * as unreachable and store them locally.
2218          */
2219         UNP_LINK_RLOCK();
2220         UNP_LIST_LOCK();
2221         for (total = 0, head = heads; *head != NULL; head++)
2222                 LIST_FOREACH(unp, *head, unp_link)
2223                         if ((unp->unp_gcflag & UNPGC_DEAD) != 0) {
2224                                 f = unp->unp_file;
2225                                 if (unp->unp_msgcount == 0 || f == NULL ||
2226                                     f->f_count != unp->unp_msgcount)
2227                                         continue;
2228                                 unref[total++] = f;
2229                                 fhold(f);
2230                                 KASSERT(total <= unp_unreachable,
2231                                     ("unp_gc: incorrect unreachable count."));
2232                         }
2233         UNP_LIST_UNLOCK();
2234         UNP_LINK_RUNLOCK();
2235
2236         /*
2237          * Now flush all sockets, freeing rights.  This will free the
2238          * struct files associated with these sockets but leave each socket
2239          * with one remaining ref.
2240          */
2241         for (i = 0; i < total; i++) {
2242                 struct socket *so;
2243
2244                 so = unref[i]->f_data;
2245                 CURVNET_SET(so->so_vnet);
2246                 sorflush(so);
2247                 CURVNET_RESTORE();
2248         }
2249
2250         /*
2251          * And finally release the sockets so they can be reclaimed.
2252          */
2253         for (i = 0; i < total; i++)
2254                 fdrop(unref[i], NULL);
2255         unp_recycled += total;
2256         free(unref, M_TEMP);
2257 }
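
/*
 * Example (userland sketch; illustrative only): the kind of reference cycle
 * the collector above reclaims.  Each end of a socketpair is queued in the
 * other end's receive buffer and both descriptors are then closed, so the
 * only remaining references are the in-flight ones counted by unp_msgcount;
 * unp_gc() detects the cycle and recycles both sockets.  send_fd() is the
 * hypothetical helper sketched after unp_internalize() above, and
 * make_unreachable_cycle() is likewise hypothetical.
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	void
 *	make_unreachable_cycle(void)
 *	{
 *		int sv[2];
 *
 *		if (socketpair(PF_LOCAL, SOCK_STREAM, 0, sv) == -1)
 *			return;
 *		(void)send_fd(sv[0], sv[1]);
 *		(void)send_fd(sv[1], sv[0]);
 *		(void)close(sv[0]);
 *		(void)close(sv[1]);
 *	}
 */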
2258
2259 static void
2260 unp_dispose(struct mbuf *m)
2261 {
2262
2263         if (m)
2264                 unp_scan(m, unp_discard);
2265 }
2266
2267 static void
2268 unp_scan(struct mbuf *m0, void (*op)(struct file *))
2269 {
2270         struct mbuf *m;
2271         struct file **rp;
2272         struct cmsghdr *cm;
2273         void *data;
2274         int i;
2275         socklen_t clen, datalen;
2276         int qfds;
2277
2278         while (m0 != NULL) {
2279                 for (m = m0; m; m = m->m_next) {
2280                         if (m->m_type != MT_CONTROL)
2281                                 continue;
2282
2283                         cm = mtod(m, struct cmsghdr *);
2284                         clen = m->m_len;
2285
2286                         while (cm != NULL) {
2287                                 if (sizeof(*cm) > clen || cm->cmsg_len > clen)
2288                                         break;
2289
2290                                 data = CMSG_DATA(cm);
2291                                 datalen = (caddr_t)cm + cm->cmsg_len
2292                                     - (caddr_t)data;
2293
2294                                 if (cm->cmsg_level == SOL_SOCKET &&
2295                                     cm->cmsg_type == SCM_RIGHTS) {
2296                                         qfds = datalen / sizeof (struct file *);
2297                                         rp = data;
2298                                         for (i = 0; i < qfds; i++)
2299                                                 (*op)(*rp++);
2300                                 }
2301
2302                                 if (CMSG_SPACE(datalen) < clen) {
2303                                         clen -= CMSG_SPACE(datalen);
2304                                         cm = (struct cmsghdr *)
2305                                             ((caddr_t)cm + CMSG_SPACE(datalen));
2306                                 } else {
2307                                         clen = 0;
2308                                         cm = NULL;
2309                                 }
2310                         }
2311                 }
2312                 m0 = m0->m_act;
2313         }
2314 }
2315
2316 #ifdef DDB
2317 static void
2318 db_print_indent(int indent)
2319 {
2320         int i;
2321
2322         for (i = 0; i < indent; i++)
2323                 db_printf(" ");
2324 }
2325
2326 static void
2327 db_print_unpflags(int unp_flags)
2328 {
2329         int comma;
2330
2331         comma = 0;
2332         if (unp_flags & UNP_HAVEPC) {
2333                 db_printf("%sUNP_HAVEPC", comma ? ", " : "");
2334                 comma = 1;
2335         }
2336         if (unp_flags & UNP_HAVEPCCACHED) {
2337                 db_printf("%sUNP_HAVEPCCACHED", comma ? ", " : "");
2338                 comma = 1;
2339         }
2340         if (unp_flags & UNP_WANTCRED) {
2341                 db_printf("%sUNP_WANTCRED", comma ? ", " : "");
2342                 comma = 1;
2343         }
2344         if (unp_flags & UNP_CONNWAIT) {
2345                 db_printf("%sUNP_CONNWAIT", comma ? ", " : "");
2346                 comma = 1;
2347         }
2348         if (unp_flags & UNP_CONNECTING) {
2349                 db_printf("%sUNP_CONNECTING", comma ? ", " : "");
2350                 comma = 1;
2351         }
2352         if (unp_flags & UNP_BINDING) {
2353                 db_printf("%sUNP_BINDING", comma ? ", " : "");
2354                 comma = 1;
2355         }
2356 }
2357
2358 static void
2359 db_print_xucred(int indent, struct xucred *xu)
2360 {
2361         int comma, i;
2362
2363         db_print_indent(indent);
2364         db_printf("cr_version: %u   cr_uid: %u   cr_ngroups: %d\n",
2365             xu->cr_version, xu->cr_uid, xu->cr_ngroups);
2366         db_print_indent(indent);
2367         db_printf("cr_groups: ");
2368         comma = 0;
2369         for (i = 0; i < xu->cr_ngroups; i++) {
2370                 db_printf("%s%u", comma ? ", " : "", xu->cr_groups[i]);
2371                 comma = 1;
2372         }
2373         db_printf("\n");
2374 }
2375
2376 static void
2377 db_print_unprefs(int indent, struct unp_head *uh)
2378 {
2379         struct unpcb *unp;
2380         int counter;
2381
2382         counter = 0;
2383         LIST_FOREACH(unp, uh, unp_reflink) {
2384                 if (counter % 4 == 0)
2385                         db_print_indent(indent);
2386                 db_printf("%p  ", unp);
2387                 if (counter % 4 == 3)
2388                         db_printf("\n");
2389                 counter++;
2390         }
2391         if (counter != 0 && counter % 4 != 0)
2392                 db_printf("\n");
2393 }
2394
2395 DB_SHOW_COMMAND(unpcb, db_show_unpcb)
2396 {
2397         struct unpcb *unp;
2398
2399         if (!have_addr) {
2400                 db_printf("usage: show unpcb <addr>\n");
2401                 return;
2402         }
2403         unp = (struct unpcb *)addr;
2404
2405         db_printf("unp_socket: %p   unp_vnode: %p\n", unp->unp_socket,
2406             unp->unp_vnode);
2407
2408         db_printf("unp_ino: %d   unp_conn: %p\n", unp->unp_ino,
2409             unp->unp_conn);
2410
2411         db_printf("unp_refs:\n");
2412         db_print_unprefs(2, &unp->unp_refs);
2413
2414         /* XXXRW: Would be nice to print the full address, if any. */
2415         db_printf("unp_addr: %p\n", unp->unp_addr);
2416
2417         db_printf("unp_cc: %d   unp_mbcnt: %d   unp_gencnt: %llu\n",
2418             unp->unp_cc, unp->unp_mbcnt,
2419             (unsigned long long)unp->unp_gencnt);
2420
2421         db_printf("unp_flags: %x (", unp->unp_flags);
2422         db_print_unpflags(unp->unp_flags);
2423         db_printf(")\n");
2424
2425         db_printf("unp_peercred:\n");
2426         db_print_xucred(2, &unp->unp_peercred);
2427
2428         db_printf("unp_refcount: %u\n", unp->unp_refcount);
2429 }
2430 #endif