4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
27 /* All Rights Reserved */
30 * University Copyright- Copyright (c) 1982, 1986, 1988
31 * The Regents of the University of California
34 * University Acknowledgment- Portions of this document are derived from
35 * software developed by the University of California, Berkeley, and its
40 #pragma ident "%Z%%M% %I% %E% SMI"
42 #include <sys/types.h>
43 #include <sys/param.h>
45 #include <sys/vnode.h>
47 /* Extensible attribute (xva) routines. */
50 * Zero out the structure, set the size of the requested/returned bitmaps,
51 * set AT_XVATTR in the embedded vattr_t's va_mask, and set up the pointer
52 * to the returned attributes array.
/*
 * xva_init: prepare an xvattr_t for an extensible-attribute request.
 * Zeroes the whole structure, records the request/return bitmap size
 * and the magic number, flags the embedded vattr_t with AT_XVATTR so
 * consumers know the extended form is in use, and points
 * xva_rtnattrmapp at the embedded returned-attributes bitmap.
 *
 * NOTE(review): this chunk is truncated — the return-type line and the
 * function's opening/closing braces are not visible here.
 */
55 xva_init(xvattr_t *xvap)
57 bzero(xvap, sizeof (xvattr_t));
58 xvap->xva_mapsize = XVA_MAPSIZE;
59 xvap->xva_magic = XVA_MAGIC;
60 xvap->xva_vattr.va_mask = AT_XVATTR;
/* Point the "returned attributes" pointer at the embedded array. */
61 xvap->xva_rtnattrmapp = &(xvap->xva_rtnattrmap)[0];
/*
 * xva_getxoptattr: return a pointer to the embedded xoptattr_t when the
 * caller requested extensible attributes (AT_XVATTR set in va_mask),
 * otherwise NULL.
 *
 * NOTE(review): truncated chunk — the comment delimiters, return-type
 * line, braces, and the trailing "return (xoap);" are not visible, but
 * the visible logic only makes sense with that tail; confirm against
 * the full file.
 */
65 * If AT_XVATTR is set, returns a pointer to the embedded xoptattr_t
66 * structure. Otherwise, returns NULL.
69 xva_getxoptattr(xvattr_t *xvap)
71 xoptattr_t *xoap = NULL;
72 if (xvap->xva_vattr.va_mask & AT_XVATTR)
73 xoap = &xvap->xva_xoptattrs;
/*
 * Shared state for the asynchronous-vrele machinery:
 *   vn_rele_async_list        - STAILQ of vnodes awaiting release
 *   vn_rele_async_lock        - protects the list, length and exit flag
 *   vn_rele_async_cv          - wakes the cleaner thread / shutdown sync
 *   vn_rele_list_length       - number of queued vnodes
 *   vn_rele_async_thread_exit - shutdown handshake flag (set by fini,
 *                               cleared back by the cleaner on exit)
 */
77 static STAILQ_HEAD(, vnode) vn_rele_async_list;
78 static struct mtx vn_rele_async_lock;
79 static struct cv vn_rele_async_cv;
80 static int vn_rele_list_length;
81 static int vn_rele_async_thread_exit;
/*
 * NOTE(review): this field appears to be the body of a vnode_link
 * typedef (the code below casts &vp->v_cstart to vnode_link_t * to
 * borrow that field as an STAILQ link), but the surrounding
 * "typedef struct ... { ... } vnode_link_t;" lines are missing from
 * this chunk — confirm against the full file.
 */
84 struct vnode *stqe_next;
/*
 * vn_rele_async: drop a vnode reference, deferring the final
 * VOP_INACTIVE() work to a dedicated cleaner kthread so the caller
 * never re-enters the filesystem.  The original block comment below is
 * preserved as-is.
 *
 * NOTE(review): truncated chunk — the return-type line, braces,
 * v_interlock acquire/release, the fast-path vrele()/v_usecount
 * decrement, and the "cleaner exiting, release synchronously" body
 * under the thread_exit check are all missing here; confirm against
 * the full file before relying on this listing.
 */
88 * Like vn_rele() except if we are going to call VOP_INACTIVE() then do it
89 * asynchronously using a taskq. This can avoid deadlocks caused by re-entering
90 * the file system as a result of releasing the vnode. Note, file systems
91 * already have to handle the race where the vnode is incremented before the
92 * inactive routine is called and does its locking.
94 * Warning: Excessive use of this routine can lead to performance problems.
95 * This is because taskqs throttle back allocation if too many are created.
98 vn_rele_async(vnode_t *vp, taskq_t *taskq /* unused */)
101 KASSERT(vp != NULL, ("vrele: null vp"));
102 VFS_ASSERT_GIANT(vp->v_mount);
/*
 * Fast path: another reference remains, or inactivation is already in
 * progress — presumably just drops the refcount here (body not
 * visible in this chunk).
 */
105 if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
106 vp->v_usecount == 1)) {
/* Sanity check: we must hold the last reference at this point. */
111 if (vp->v_usecount != 1) {
113 vprint("vrele: negative ref count", vp);
116 panic("vrele: negative ref cnt");
/*
 * Cleaner thread is shutting down: presumably fall back to a
 * synchronous release — body not visible in this chunk.
 */
121 if (vn_rele_async_thread_exit != 0) {
126 mtx_lock(&vn_rele_async_lock);
/*
 * Open-coded STAILQ_INSERT_TAIL: the vnode's v_cstart field is
 * borrowed as the STAILQ linkage via a vnode_link_t cast, since
 * struct vnode carries no dedicated link for this queue.
 * NOTE(review): confirm v_cstart is genuinely unused while the vnode
 * sits on this queue.
 */
128 /* STAILQ_INSERT_TAIL */
129 (*(vnode_link_t *)&vp->v_cstart).stqe_next = NULL;
130 *vn_rele_async_list.stqh_last = vp;
131 vn_rele_async_list.stqh_last =
132 &((vnode_link_t *)&vp->v_cstart)->stqe_next;
134 /****************************************/
135 vn_rele_list_length++;
/* Wake the cleaner only every 100 entries to amortize wakeup cost. */
136 if ((vn_rele_list_length % 100) == 0)
137 cv_signal(&vn_rele_async_cv);
138 mtx_unlock(&vn_rele_async_lock);
/*
 * vn_rele_async_init: SYSINIT hook that sets up the async-vrele state —
 * the mutex, the pending-vnode queue, and the condition variable.
 *
 * NOTE(review): truncated chunk — return-type line and braces not
 * visible.  The condvar is initialized by hand (the cv_init() call is
 * commented out and its two fields assigned directly); confirm struct
 * cv has no other fields needing initialization, and prefer cv_init()
 * if possible.
 */
143 vn_rele_async_init(void *arg)
146 mtx_init(&vn_rele_async_lock, "valock", NULL, MTX_DEF);
147 STAILQ_INIT(&vn_rele_async_list);
149 /* cv_init(&vn_rele_async_cv, "vacv"); */
150 vn_rele_async_cv.cv_description = "vacv";
151 vn_rele_async_cv.cv_waiters = 0;
/*
 * vn_rele_async_fini: tear down the async-vrele machinery.  Raises the
 * exit flag, wakes the cleaner, then waits on the condvar until the
 * cleaner acknowledges shutdown by clearing the flag back to 0 (see
 * vn_rele_async_cleaner's exit path), and finally destroys the mutex.
 *
 * NOTE(review): truncated chunk — return-type line and braces not
 * visible; no cv_destroy() appears in this chunk either (consistent
 * with the hand-rolled cv_init in vn_rele_async_init, but confirm).
 */
155 vn_rele_async_fini(void)
158 mtx_lock(&vn_rele_async_lock);
159 vn_rele_async_thread_exit = 1;
160 cv_signal(&vn_rele_async_cv);
/* Handshake: cleaner clears the flag and broadcasts when it exits. */
161 while (vn_rele_async_thread_exit != 0)
162 cv_wait(&vn_rele_async_cv, &vn_rele_async_lock);
163 mtx_unlock(&vn_rele_async_lock);
164 mtx_destroy(&vn_rele_async_lock);
/*
 * vn_rele_async_cleaner: main loop of the kproc that drains the queue
 * built by vn_rele_async().  Each pass grabs the whole pending list
 * into a private local list under the lock (STAILQ_CONCAT), drops the
 * lock, releases every vnode on the private list, then relocks and
 * sleeps (timed) when nothing new arrived.  On shutdown it clears the
 * exit flag and broadcasts, completing vn_rele_async_fini's handshake.
 *
 * NOTE(review): truncated chunk — return-type line, braces, the actual
 * vrele(curvnode) call inside the drain loop, and cv_timedwait's
 * timeout argument are all missing here; confirm against the full
 * file.
 */
169 vn_rele_async_cleaner(void)
171 STAILQ_HEAD(, vnode) vn_tmp_list;
172 struct vnode *curvnode;
174 STAILQ_INIT(&vn_tmp_list);
175 mtx_lock(&vn_rele_async_lock);
176 while (vn_rele_async_thread_exit == 0) {
/* Steal the entire pending list in O(1); drain it unlocked. */
177 STAILQ_CONCAT(&vn_tmp_list, &vn_rele_async_list);
178 vn_rele_list_length = 0;
179 mtx_unlock(&vn_rele_async_lock);
181 while (!STAILQ_EMPTY(&vn_tmp_list)) {
182 curvnode = STAILQ_FIRST(&vn_tmp_list);
/*
 * Open-coded STAILQ_REMOVE_HEAD, mirroring the insert in
 * vn_rele_async(): the linkage lives in the vnode's v_cstart field
 * via a vnode_link_t cast.
 */
184 /* STAILQ_REMOVE_HEAD */
185 STAILQ_FIRST(&vn_tmp_list) =
186 ((vnode_link_t *)&curvnode->v_cstart)->stqe_next;
187 if (STAILQ_FIRST(&vn_tmp_list) == NULL)
188 vn_tmp_list.stqh_last = &STAILQ_FIRST(&vn_tmp_list);
189 /***********************/
/* Nothing arrived while draining: wait (bounded) for a producer. */
192 mtx_lock(&vn_rele_async_lock);
193 if (vn_rele_list_length == 0)
194 cv_timedwait(&vn_rele_async_cv, &vn_rele_async_lock,
/* Shutdown acknowledgement: see vn_rele_async_fini's wait loop. */
198 vn_rele_async_thread_exit = 0;
199 cv_broadcast(&vn_rele_async_cv);
200 mtx_unlock(&vn_rele_async_lock);
/* Kernel process handle for the cleaner thread spawned below. */
204 static struct proc *vn_rele_async_proc;
/*
 * kproc descriptor handed to kproc_start by SYSINIT.
 * NOTE(review): truncated chunk — the descriptor's process-name string
 * field, the &vn_rele_async_proc member, and the closing "};" are not
 * visible here; confirm against the full file.
 */
205 static struct kproc_desc up_kp = {
207 vn_rele_async_cleaner,
/*
 * Boot-time wiring: start the cleaner kproc, and initialize the
 * async-vrele state (lock/queue/cv) at VFS setup.  vn_rele_async_init
 * must run before any vn_rele_async() caller.
 */
210 SYSINIT(vaclean, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp);
211 SYSINIT(vn_rele_async_setup, SI_SUB_VFS, SI_ORDER_FIRST, vn_rele_async_init, NULL);