/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2005 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/malloc.h>
34 #include <sys/mount.h>
35 #include <sys/rwlock.h>
36 #include <sys/vnode.h>
38 static MALLOC_DEFINE(M_VFS_HASH, "vfs_hash", "VFS hash table");
40 static LIST_HEAD(vfs_hash_head, vnode) *vfs_hash_tbl;
41 static LIST_HEAD(,vnode) vfs_hash_side;
42 static u_long vfs_hash_mask;
43 static struct rwlock __exclusive_cache_line vfs_hash_lock;
46 vfs_hashinit(void *dummy __unused)
49 vfs_hash_tbl = hashinit(desiredvnodes, M_VFS_HASH, &vfs_hash_mask);
50 rw_init(&vfs_hash_lock, "vfs hash");
51 LIST_INIT(&vfs_hash_side);
/* Must be SI_ORDER_SECOND so desiredvnodes is available */
SYSINIT(vfs_hash, SI_SUB_VFS, SI_ORDER_SECOND, vfs_hashinit, NULL);
58 vfs_hash_index(struct vnode *vp)
61 return (vp->v_hash + vp->v_mount->mnt_hashseed);
64 static struct vfs_hash_head *
65 vfs_hash_bucket(const struct mount *mp, u_int hash)
68 return (&vfs_hash_tbl[(hash + mp->mnt_hashseed) & vfs_hash_mask]);
72 vfs_hash_get(const struct mount *mp, u_int hash, int flags, struct thread *td,
73 struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg)
80 rw_rlock(&vfs_hash_lock);
81 LIST_FOREACH(vp, vfs_hash_bucket(mp, hash), v_hashlist) {
82 if (vp->v_hash != hash)
84 if (vp->v_mount != mp)
86 if (fn != NULL && fn(vp, arg))
89 rw_runlock(&vfs_hash_lock);
90 error = vget_finish(vp, flags, vs);
91 if (error == ENOENT && (flags & LK_NOWAIT) == 0)
95 if (vp->v_hash != hash ||
96 (fn != NULL && fn(vp, arg))) {
98 /* Restart the bucket walk. */
105 rw_runlock(&vfs_hash_lock);
113 vfs_hash_ref(const struct mount *mp, u_int hash, struct thread *td,
114 struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg)
119 rw_rlock(&vfs_hash_lock);
120 LIST_FOREACH(vp, vfs_hash_bucket(mp, hash), v_hashlist) {
121 if (vp->v_hash != hash)
123 if (vp->v_mount != mp)
125 if (fn != NULL && fn(vp, arg))
128 rw_runlock(&vfs_hash_lock);
135 rw_runlock(&vfs_hash_lock);
143 vfs_hash_remove(struct vnode *vp)
146 rw_wlock(&vfs_hash_lock);
147 LIST_REMOVE(vp, v_hashlist);
148 rw_wunlock(&vfs_hash_lock);
152 vfs_hash_insert(struct vnode *vp, u_int hash, int flags, struct thread *td,
153 struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg)
161 rw_wlock(&vfs_hash_lock);
163 vfs_hash_bucket(vp->v_mount, hash), v_hashlist) {
164 if (vp2->v_hash != hash)
166 if (vp2->v_mount != vp->v_mount)
168 if (fn != NULL && fn(vp2, arg))
171 rw_wunlock(&vfs_hash_lock);
172 error = vget_finish(vp2, flags, vs);
173 if (error == ENOENT && (flags & LK_NOWAIT) == 0)
175 rw_wlock(&vfs_hash_lock);
176 LIST_INSERT_HEAD(&vfs_hash_side, vp, v_hashlist);
177 rw_wunlock(&vfs_hash_lock);
188 LIST_INSERT_HEAD(vfs_hash_bucket(vp->v_mount, hash), vp, v_hashlist);
189 rw_wunlock(&vfs_hash_lock);
194 vfs_hash_rehash(struct vnode *vp, u_int hash)
196 ASSERT_VOP_ELOCKED(vp, "rehash requires excl lock");
198 rw_wlock(&vfs_hash_lock);
199 LIST_REMOVE(vp, v_hashlist);
200 LIST_INSERT_HEAD(vfs_hash_bucket(vp->v_mount, hash), vp, v_hashlist);
202 rw_wunlock(&vfs_hash_lock);
206 vfs_hash_changesize(u_long newmaxvnodes)
208 struct vfs_hash_head *vfs_hash_newtbl, *vfs_hash_oldtbl;
209 u_long vfs_hash_newmask, vfs_hash_oldmask;
213 vfs_hash_newtbl = hashinit(newmaxvnodes, M_VFS_HASH,
215 /* If same hash table size, nothing to do */
216 if (vfs_hash_mask == vfs_hash_newmask) {
217 free(vfs_hash_newtbl, M_VFS_HASH);
221 * Move everything from the old hash table to the new table.
222 * None of the vnodes in the table can be recycled because to
223 * do so, they have to be removed from the hash table.
225 rw_wlock(&vfs_hash_lock);
226 vfs_hash_oldtbl = vfs_hash_tbl;
227 vfs_hash_oldmask = vfs_hash_mask;
228 vfs_hash_tbl = vfs_hash_newtbl;
229 vfs_hash_mask = vfs_hash_newmask;
230 for (i = 0; i <= vfs_hash_oldmask; i++) {
231 while ((vp = LIST_FIRST(&vfs_hash_oldtbl[i])) != NULL) {
232 LIST_REMOVE(vp, v_hashlist);
234 vfs_hash_bucket(vp->v_mount, vp->v_hash),
238 rw_wunlock(&vfs_hash_lock);
239 free(vfs_hash_oldtbl, M_VFS_HASH);