/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_itable.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_rw.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"
#include "xfs_refcache.h"

STATIC lock_t		xfs_refcache_lock;
STATIC xfs_inode_t	**xfs_refcache;
STATIC int		xfs_refcache_index;
STATIC int		xfs_refcache_busy;
STATIC int		xfs_refcache_count;
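
/*
 * The refcache is a fixed-size, circular array of inode pointers.
 * Each entry pins an extra vnode reference so that recently used
 * inodes stay in memory between uses, mainly for the benefit of NFS
 * servers, which drop their own reference after every request.  The
 * tunables xfs_refcache_size and xfs_refcache_purge_count are defined
 * outside this file (see xfs_refcache.h).
 */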

void
xfs_refcache_init(void)
{
	spinlock_init(&xfs_refcache_lock, "xfs_refcache");
}
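
/*
 * Only the lock is set up at init time; the cache array itself is
 * allocated lazily in xfs_refcache_insert() so that systems which
 * never use it don't pay for the memory.
 */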

/*
 * Insert the given inode into the reference cache.
 */
void
xfs_refcache_insert(
	xfs_inode_t	*ip)
{
	vnode_t		*vp;
	xfs_inode_t	*release_ip;
	xfs_inode_t	**refcache;

	ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE));

	/*
	 * If an unmount is busy blowing entries out of the cache,
	 * then don't bother.
	 */
	if (xfs_refcache_busy) {
		return;
	}

	/*
	 * If we tuned the refcache down to zero, don't do anything.
	 */
	if (!xfs_refcache_size) {
		return;
	}

	/*
	 * The inode is already in the refcache, so don't bother
	 * with it.
	 */
	if (ip->i_refcache != NULL) {
		return;
	}

	vp = XFS_ITOV(ip);
	/* ASSERT(vp->v_count > 0); */
	VN_HOLD(vp);
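
	/*
	 * The reference taken by VN_HOLD above is owned by the cache;
	 * it is dropped again (via VN_RELE) by whichever path later
	 * removes this entry, or deferred through the inserting
	 * caller's ip->i_release when the entry is evicted.
	 */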

	/*
	 * We allocate the reference cache on use so that we don't
	 * waste the memory on systems not being used as NFS servers.
	 */
	if (xfs_refcache == NULL) {
		refcache = (xfs_inode_t **)kmem_zalloc(XFS_REFCACHE_SIZE_MAX *
						       sizeof(xfs_inode_t *),
						       KM_SLEEP);
	} else {
		refcache = NULL;
	}
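
	/*
	 * The allocation above happens before taking the spinlock
	 * because kmem_zalloc(..., KM_SLEEP) may block, which is not
	 * allowed under a spinlock.  The unlocked xfs_refcache == NULL
	 * test is racy, so it is re-checked once the lock is held.
	 */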
	spin_lock(&xfs_refcache_lock);

	/*
	 * If we allocated memory for the refcache above and it still
	 * needs it, then use the memory we allocated.  Otherwise we'll
	 * free the memory below.
	 */
	if (refcache != NULL) {
		if (xfs_refcache == NULL) {
			xfs_refcache = refcache;
			refcache = NULL;
		}
	}

	/*
	 * If an unmount is busy clearing out the cache, don't add new
	 * entries to it.
	 */
	if (xfs_refcache_busy) {
		spin_unlock(&xfs_refcache_lock);
		VN_RELE(vp);
		/*
		 * If we allocated memory for the refcache above but someone
		 * else beat us to using it, then free the memory now.
		 */
		if (refcache != NULL) {
			kmem_free(refcache,
				  XFS_REFCACHE_SIZE_MAX * sizeof(xfs_inode_t *));
		}
		return;
	}
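
	/*
	 * The cache is used as a ring: the slot at xfs_refcache_index
	 * holds the oldest entry, so inserting here evicts that entry
	 * (release_ip) before the index advances, wrapping around at
	 * xfs_refcache_size.
	 */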
	release_ip = xfs_refcache[xfs_refcache_index];
	if (release_ip != NULL) {
		release_ip->i_refcache = NULL;
		xfs_refcache_count--;
		ASSERT(xfs_refcache_count >= 0);
	}
	xfs_refcache[xfs_refcache_index] = ip;
	ASSERT(ip->i_refcache == NULL);
	ip->i_refcache = &(xfs_refcache[xfs_refcache_index]);
	xfs_refcache_count++;
	ASSERT(xfs_refcache_count <= xfs_refcache_size);
	xfs_refcache_index++;
	if (xfs_refcache_index == xfs_refcache_size) {
		xfs_refcache_index = 0;
	}
	spin_unlock(&xfs_refcache_lock);

	/*
	 * Save the pointer to the inode to be released so that we can
	 * VN_RELE it once we've dropped our inode locks in xfs_rwunlock().
	 * The pointer may be NULL, but that's OK.
	 */
	ip->i_release = release_ip;

	/*
	 * If we allocated memory for the refcache above but someone
	 * else beat us to using it, then free the memory now.
	 */
	if (refcache != NULL) {
		kmem_free(refcache,
			  XFS_REFCACHE_SIZE_MAX * sizeof(xfs_inode_t *));
	}
}

/*
 * If the given inode is in the reference cache, purge its entry and
 * release the reference on the vnode.
 */
void
xfs_refcache_purge_ip(
	xfs_inode_t	*ip)
{
	vnode_t	*vp;
	int	error;

	/*
	 * If we're not pointing to our entry in the cache, then
	 * we must not be in the cache.
	 */
	if (ip->i_refcache == NULL) {
		return;
	}

	spin_lock(&xfs_refcache_lock);
	if (ip->i_refcache == NULL) {
		spin_unlock(&xfs_refcache_lock);
		return;
	}
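
	/*
	 * The unlocked i_refcache check above is just an optimization;
	 * the entry may have been purged between that check and taking
	 * the lock, hence the re-check here under xfs_refcache_lock.
	 */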

	/*
	 * Clear both our pointer to the cache entry and its pointer
	 * back to us.
	 */
	ASSERT(*(ip->i_refcache) == ip);
	*(ip->i_refcache) = NULL;
	ip->i_refcache = NULL;
	xfs_refcache_count--;
	ASSERT(xfs_refcache_count >= 0);
	spin_unlock(&xfs_refcache_lock);

	vp = XFS_ITOV(ip);
	/* ASSERT(vp->v_count > 1); */
	VOP_RELEASE(vp, error);
	VN_RELE(vp);
}

/*
 * This is called from the XFS unmount code to purge all entries for the
 * given mount from the cache.  It uses the refcache busy counter to
 * make sure that new entries are not added to the cache as we purge them.
 */
void
xfs_refcache_purge_mp(
	xfs_mount_t	*mp)
{
	vnode_t		*vp;
	int		error, i;
	xfs_inode_t	*ip;

	if (xfs_refcache == NULL) {
		return;
	}

	spin_lock(&xfs_refcache_lock);
	/*
	 * Bumping the busy counter keeps new entries from being added
	 * to the cache.  We use a counter since multiple unmounts could
	 * be in here simultaneously.
	 */
	xfs_refcache_busy++;

	for (i = 0; i < xfs_refcache_size; i++) {
		ip = xfs_refcache[i];
		if ((ip != NULL) && (ip->i_mount == mp)) {
			xfs_refcache[i] = NULL;
			ip->i_refcache = NULL;
			xfs_refcache_count--;
			ASSERT(xfs_refcache_count >= 0);
			spin_unlock(&xfs_refcache_lock);
			vp = XFS_ITOV(ip);
			VOP_RELEASE(vp, error);
			VN_RELE(vp);
			spin_lock(&xfs_refcache_lock);
		}
	}
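
	/*
	 * Note that the lock is dropped around VOP_RELEASE/VN_RELE in
	 * the loop above, since those calls can block.  The elevated
	 * busy counter is what keeps new entries from appearing while
	 * the lock is not held.
	 */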

	xfs_refcache_busy--;
	ASSERT(xfs_refcache_busy >= 0);
	spin_unlock(&xfs_refcache_lock);
}

/*
 * This is called from the XFS sync code to ensure that the refcache
 * is emptied out over time.  We purge a small number of entries with
 * each call.
 */
void
xfs_refcache_purge_some(xfs_mount_t *mp)
{
	int		error, i;
	xfs_inode_t	*ip;
	int		iplist_index;
	xfs_inode_t	**iplist;

	if ((xfs_refcache == NULL) || (xfs_refcache_count == 0)) {
		return;
	}

	iplist_index = 0;
	iplist = (xfs_inode_t **)kmem_zalloc(xfs_refcache_purge_count *
					  sizeof(xfs_inode_t *), KM_SLEEP);

	spin_lock(&xfs_refcache_lock);

	/*
	 * Store any inodes we find in the next several entries
	 * into the iplist array to be released after dropping
	 * the spinlock.  We always start looking from the currently
	 * oldest place in the cache.  We move the refcache index
	 * forward as we go so that we are sure to eventually clear
	 * out the entire cache when the system goes idle.
	 */
	for (i = 0; i < xfs_refcache_purge_count; i++) {
		ip = xfs_refcache[xfs_refcache_index];
		if (ip != NULL) {
			xfs_refcache[xfs_refcache_index] = NULL;
			ip->i_refcache = NULL;
			xfs_refcache_count--;
			ASSERT(xfs_refcache_count >= 0);
			iplist[iplist_index] = ip;
			iplist_index++;
		}
		xfs_refcache_index++;
		if (xfs_refcache_index == xfs_refcache_size) {
			xfs_refcache_index = 0;
		}
	}

	spin_unlock(&xfs_refcache_lock);

	/*
	 * Now drop the inodes we collected.
	 */
	for (i = 0; i < iplist_index; i++) {
		VOP_RELEASE(XFS_ITOV(iplist[i]), error);
		VN_RELE(XFS_ITOV(iplist[i]));
	}

	kmem_free(iplist, xfs_refcache_purge_count *
		  sizeof(xfs_inode_t *));
}

/*
 * This is called when the refcache is dynamically resized
 * via the proc interface.
 *
 * If the new size is smaller than the old size, purge all
 * entries in slots greater than the new size, and move
 * the index if necessary.
 *
 * If the refcache hasn't even been allocated yet, or the
 * new size is larger than the old size, just set the value
 * of xfs_refcache_size.
 */
void
xfs_refcache_resize(int xfs_refcache_new_size)
{
	int		i;
	xfs_inode_t	*ip;
	int		iplist_index = 0;
	xfs_inode_t	**iplist;
	int		error;

	/*
	 * If the new size is smaller than the current size,
	 * purge entries to create smaller cache, and
	 * reposition index if necessary.
	 * Don't bother if no refcache yet.
	 */
	if (xfs_refcache && (xfs_refcache_new_size < xfs_refcache_size)) {

		iplist = (xfs_inode_t **)kmem_zalloc(XFS_REFCACHE_SIZE_MAX *
				sizeof(xfs_inode_t *), KM_SLEEP);

		spin_lock(&xfs_refcache_lock);

		for (i = xfs_refcache_new_size; i < xfs_refcache_size; i++) {
			ip = xfs_refcache[i];
			if (ip != NULL) {
				xfs_refcache[i] = NULL;
				ip->i_refcache = NULL;
				xfs_refcache_count--;
				ASSERT(xfs_refcache_count >= 0);
				iplist[iplist_index] = ip;
				iplist_index++;
			}
		}

		xfs_refcache_size = xfs_refcache_new_size;

		/*
		 * Move index to beginning of cache if it's now past the end
		 */
		if (xfs_refcache_index >= xfs_refcache_new_size)
			xfs_refcache_index = 0;

		spin_unlock(&xfs_refcache_lock);

		/*
		 * Now drop the inodes we collected.
		 */
		for (i = 0; i < iplist_index; i++) {
			VOP_RELEASE(XFS_ITOV(iplist[i]), error);
			VN_RELE(XFS_ITOV(iplist[i]));
		}

		kmem_free(iplist, XFS_REFCACHE_SIZE_MAX *
			  sizeof(xfs_inode_t *));
	} else {
		spin_lock(&xfs_refcache_lock);
		xfs_refcache_size = xfs_refcache_new_size;
		spin_unlock(&xfs_refcache_lock);
	}
}
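
/*
 * Deferred release: xfs_refcache_insert() stashes the inode it evicts
 * in ip->i_release rather than dropping it on the spot, because the
 * caller still holds inode locks at that point.  This helper drops
 * that saved reference once the locks have been released.
 */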
void
xfs_refcache_iunlock(
	xfs_inode_t	*ip,
	uint		lock_flags)
{
	xfs_inode_t	*release_ip;
	int		error;

	release_ip = ip->i_release;
	ip->i_release = NULL;

	xfs_iunlock(ip, lock_flags);

	if (release_ip != NULL) {
		VOP_RELEASE(XFS_ITOV(release_ip), error);
		VN_RELE(XFS_ITOV(release_ip));
	}
}

void
xfs_refcache_destroy(void)
{
	if (xfs_refcache) {
		kmem_free(xfs_refcache,
			XFS_REFCACHE_SIZE_MAX * sizeof(xfs_inode_t *));
		xfs_refcache = NULL;
	}
	spinlock_destroy(&xfs_refcache_lock);
}