/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/rwlock.h>
#include <sys/vnode.h>

static MALLOC_DEFINE(M_VFS_HASH, "vfs_hash", "VFS hash table");

static LIST_HEAD(vfs_hash_head, vnode)  *vfs_hash_tbl;
static LIST_HEAD(,vnode)                vfs_hash_side;
static u_long                           vfs_hash_mask;
static struct rwlock __exclusive_cache_line vfs_hash_lock;

static void
vfs_hashinit(void *dummy __unused)
{

        vfs_hash_tbl = hashinit(desiredvnodes, M_VFS_HASH, &vfs_hash_mask);
        rw_init(&vfs_hash_lock, "vfs hash");
        LIST_INIT(&vfs_hash_side);
}

/* Must be SI_ORDER_SECOND so desiredvnodes is available */
SYSINIT(vfs_hash, SI_SUB_VFS, SI_ORDER_SECOND, vfs_hashinit, NULL);

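/*
 * Return the hash index of a vnode: its v_hash value offset by the
 * per-mount hash seed.
 */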
u_int
vfs_hash_index(struct vnode *vp)
{

        return (vp->v_hash + vp->v_mount->mnt_hashseed);
}

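/*
 * Map a (mount point, hash) pair to its bucket in the hash table.
 */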
static struct vfs_hash_head *
vfs_hash_bucket(const struct mount *mp, u_int hash)
{

        return (&vfs_hash_tbl[(hash + mp->mnt_hashseed) & vfs_hash_mask]);
}

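/*
 * Look up a vnode by (mp, hash), using the optional "fn" callback to
 * reject false matches.  On a hit the vnode is returned in *vpp with a
 * reference and the lock state requested by "flags"; if no matching
 * vnode is found, *vpp is set to NULL and 0 is returned.  If the vnode
 * is reclaimed while we wait for its lock, the lookup is retried.
 */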
int
vfs_hash_get(const struct mount *mp, u_int hash, int flags, struct thread *td,
    struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg)
{
        struct vnode *vp;
        enum vgetstate vs;
        int error;

        while (1) {
                rw_rlock(&vfs_hash_lock);
                LIST_FOREACH(vp, vfs_hash_bucket(mp, hash), v_hashlist) {
                        if (vp->v_hash != hash)
                                continue;
                        if (vp->v_mount != mp)
                                continue;
                        if (fn != NULL && fn(vp, arg))
                                continue;
                        vs = vget_prep(vp);
                        rw_runlock(&vfs_hash_lock);
                        error = vget_finish(vp, flags, vs);
                        if (error == ENOENT && (flags & LK_NOWAIT) == 0)
                                break;
                        if (error)
                                return (error);
                        *vpp = vp;
                        return (0);
                }
                if (vp == NULL) {
                        rw_runlock(&vfs_hash_lock);
                        *vpp = NULL;
                        return (0);
                }
        }
}

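/*
 * Like vfs_hash_get(), but only acquire a reference on the matching
 * vnode: it is returned in *vpp referenced but unlocked, or *vpp is
 * set to NULL if no match is found.
 */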
void
vfs_hash_ref(const struct mount *mp, u_int hash, struct thread *td,
    struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg)
{
        struct vnode *vp;

        while (1) {
                rw_rlock(&vfs_hash_lock);
                LIST_FOREACH(vp, vfs_hash_bucket(mp, hash), v_hashlist) {
                        if (vp->v_hash != hash)
                                continue;
                        if (vp->v_mount != mp)
                                continue;
                        if (fn != NULL && fn(vp, arg))
                                continue;
                        vhold(vp);
                        rw_runlock(&vfs_hash_lock);
                        vref(vp);
                        vdrop(vp);
                        *vpp = vp;
                        return;
                }
                if (vp == NULL) {
                        rw_runlock(&vfs_hash_lock);
                        *vpp = NULL;
                        return;
                }
        }
}

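/*
 * Remove a vnode from the hash table.
 */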
void
vfs_hash_remove(struct vnode *vp)
{

        rw_wlock(&vfs_hash_lock);
        LIST_REMOVE(vp, v_hashlist);
        rw_wunlock(&vfs_hash_lock);
}

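/*
 * Insert the locked, referenced vnode "vp" into the hash table under
 * "hash".  If a matching vnode is already present (another thread won
 * the race), vp is moved to the side list, destroyed with vgone() and
 * released with vput(), and the pre-existing vnode is returned in
 * *vpp; otherwise vp is hashed and *vpp is set to NULL.
 *
 * A typical get-or-insert sequence in a filesystem's VFS_VGET()
 * implementation looks roughly like the sketch below; "ino" and the
 * inode allocation step are filesystem specific and only illustrative:
 *
 *      error = vfs_hash_get(mp, ino, LK_EXCLUSIVE, td, vpp, NULL, NULL);
 *      if (error != 0 || *vpp != NULL)
 *              return (error);
 *      (allocate and initialize the vnode and its inode, locked)
 *      error = vfs_hash_insert(vp, ino, LK_EXCLUSIVE, td, vpp, NULL, NULL);
 *      if (error != 0 || *vpp != NULL)
 *              return (error); (on a collision *vpp is the existing vnode)
 *      *vpp = vp;
 *      return (0);
 */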
int
vfs_hash_insert(struct vnode *vp, u_int hash, int flags, struct thread *td,
    struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg)
{
        struct vnode *vp2;
        enum vgetstate vs;
        int error;

        *vpp = NULL;
        while (1) {
                rw_wlock(&vfs_hash_lock);
                LIST_FOREACH(vp2,
                    vfs_hash_bucket(vp->v_mount, hash), v_hashlist) {
                        if (vp2->v_hash != hash)
                                continue;
                        if (vp2->v_mount != vp->v_mount)
                                continue;
                        if (fn != NULL && fn(vp2, arg))
                                continue;
                        vs = vget_prep(vp2);
                        rw_wunlock(&vfs_hash_lock);
                        error = vget_finish(vp2, flags, vs);
                        if (error == ENOENT && (flags & LK_NOWAIT) == 0)
                                break;
                        rw_wlock(&vfs_hash_lock);
                        LIST_INSERT_HEAD(&vfs_hash_side, vp, v_hashlist);
                        rw_wunlock(&vfs_hash_lock);
                        vgone(vp);
                        vput(vp);
                        if (!error)
                                *vpp = vp2;
                        return (error);
                }
                if (vp2 == NULL)
                        break;
        }
        vp->v_hash = hash;
        LIST_INSERT_HEAD(vfs_hash_bucket(vp->v_mount, hash), vp, v_hashlist);
        rw_wunlock(&vfs_hash_lock);
        return (0);
}

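/*
 * Move a vnode to the bucket for a new hash value and update v_hash.
 */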
void
vfs_hash_rehash(struct vnode *vp, u_int hash)
{

        rw_wlock(&vfs_hash_lock);
        LIST_REMOVE(vp, v_hashlist);
        LIST_INSERT_HEAD(vfs_hash_bucket(vp->v_mount, hash), vp, v_hashlist);
        vp->v_hash = hash;
        rw_wunlock(&vfs_hash_lock);
}

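/*
 * Resize the hash table for a new maximum vnode count, moving every
 * hashed vnode into its bucket in the new table.
 */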
void
vfs_hash_changesize(u_long newmaxvnodes)
{
        struct vfs_hash_head *vfs_hash_newtbl, *vfs_hash_oldtbl;
        u_long vfs_hash_newmask, vfs_hash_oldmask;
        struct vnode *vp;
        int i;

        vfs_hash_newtbl = hashinit(newmaxvnodes, M_VFS_HASH,
                &vfs_hash_newmask);
        /* If same hash table size, nothing to do */
        if (vfs_hash_mask == vfs_hash_newmask) {
                free(vfs_hash_newtbl, M_VFS_HASH);
                return;
        }
        /*
         * Move everything from the old hash table to the new table.
         * None of the vnodes in the table can be recycled because to
         * do so, they have to be removed from the hash table.
         */
        rw_wlock(&vfs_hash_lock);
        vfs_hash_oldtbl = vfs_hash_tbl;
        vfs_hash_oldmask = vfs_hash_mask;
        vfs_hash_tbl = vfs_hash_newtbl;
        vfs_hash_mask = vfs_hash_newmask;
        for (i = 0; i <= vfs_hash_oldmask; i++) {
                while ((vp = LIST_FIRST(&vfs_hash_oldtbl[i])) != NULL) {
                        LIST_REMOVE(vp, v_hashlist);
                        LIST_INSERT_HEAD(
                            vfs_hash_bucket(vp->v_mount, vp->v_hash),
                            vp, v_hashlist);
                }
        }
        rw_wunlock(&vfs_hash_lock);
        free(vfs_hash_oldtbl, M_VFS_HASH);
}