2 * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it would be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
12 * Further, this software is distributed without any warranty that it is
13 * free of the rightful claim of any third person regarding infringement
14 * or the like. Any license provided herein, whether implied or
15 * otherwise, applies only to this software file. Patent licenses, if
16 * any, provided herein do not apply to combinations of this program with
17 * other software, or any other product whatsoever.
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
23 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24 * Mountain View, CA 94043, or:
28 * For further information regarding this notice, see:
30 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
34 #include "xfs_macros.h"
35 #include "xfs_types.h"
38 #include "xfs_trans.h"
43 #include "xfs_dmapi.h"
44 #include "xfs_mount.h"
45 #include "xfs_alloc_btree.h"
46 #include "xfs_bmap_btree.h"
47 #include "xfs_ialloc_btree.h"
48 #include "xfs_itable.h"
49 #include "xfs_btree.h"
50 #include "xfs_alloc.h"
51 #include "xfs_ialloc.h"
53 #include "xfs_attr_sf.h"
54 #include "xfs_dir_sf.h"
55 #include "xfs_dir2_sf.h"
56 #include "xfs_dinode.h"
57 #include "xfs_inode_item.h"
58 #include "xfs_inode.h"
60 #include "xfs_error.h"
61 #include "xfs_buf_item.h"
62 #include "xfs_refcache.h"
/*
 * File-scope state for the XFS reference cache.  Every field below is
 * read and written only while holding xfs_refcache_lock (all accessors
 * in this file take spin_lock(&xfs_refcache_lock) first).
 */
/* Spinlock guarding all of the refcache state below. */
64 STATIC lock_t xfs_refcache_lock;
/* Lazily allocated array of inode pointers — the cache itself. */
65 STATIC xfs_inode_t **xfs_refcache;
/* Next slot to fill; wraps back to 0 at xfs_refcache_size. */
66 STATIC int xfs_refcache_index;
/* Count of in-progress unmount purges; nonzero blocks new insertions. */
67 STATIC int xfs_refcache_busy;
/* Number of inodes currently held in the cache. */
68 STATIC int xfs_refcache_count;
/*
 * One-time module initialization: set up the spinlock that protects all
 * of the file-scope refcache state.  Paired with xfs_refcache_destroy().
 * NOTE(review): return type and braces are elided from this extract.
 */
71 xfs_refcache_init(void)
73 	spinlock_init(&xfs_refcache_lock, "xfs_refcache");
/*
 * xfs_refcache_insert() — presumably; the signature line is elided from
 * this extract.  Insert inode "ip" into the reference cache so that an
 * extra reference is held on its vnode across calls.  The inode evicted
 * from the target slot (if any) is stashed in ip->i_release so the
 * caller can release it after dropping its inode locks (see the
 * xfs_rwunlock() comment below).
 *
 * NOTE(review): many original lines are missing between the numbered
 * fragments below; comments describe only what is visible here.
 */
76 * Insert the given inode into the reference cache.
83 	xfs_inode_t *release_ip;
84 	xfs_inode_t **refcache;
	/* Caller must hold the inode's iolock in update (exclusive) mode. */
86 	ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE));
	/* Unlocked early-out checks; each is re-validated under the lock below. */
89 	* If an unmount is busy blowing entries out of the cache,
92 	if (xfs_refcache_busy) {
97 	* If we tuned the refcache down to zero, don't do anything.
99 	if (!xfs_refcache_size) {
104 	* The inode is already in the refcache, so don't bother
107 	if (ip->i_refcache != NULL) {
112 	/* ASSERT(vp->v_count > 0); */
116 	* We allocate the reference cache on use so that we don't
117 	* waste the memory on systems not being used as NFS servers.
	/*
	 * Speculatively allocate the cache array BEFORE taking the
	 * spinlock (kmem_zalloc may sleep); install or free it under
	 * the lock below depending on who got there first.
	 */
119 	if (xfs_refcache == NULL) {
120 		refcache = (xfs_inode_t **)kmem_zalloc(XFS_REFCACHE_SIZE_MAX *
121 						      sizeof(xfs_inode_t *),
127 	spin_lock(&xfs_refcache_lock);
130 	* If we allocated memory for the refcache above and it still
131 	* needs it, then use the memory we allocated. Otherwise we'll
132 	* free the memory below.
134 	if (refcache != NULL) {
135 		if (xfs_refcache == NULL) {
136 			xfs_refcache = refcache;
142 	* If an unmount is busy clearing out the cache, don't add new
	/* Re-check the busy flag now that we hold the lock. */
145 	if (xfs_refcache_busy) {
146 		spin_unlock(&xfs_refcache_lock);
149 	* If we allocated memory for the refcache above but someone
150 	* else beat us to using it, then free the memory now.
152 		if (refcache != NULL) {
154 				XFS_REFCACHE_SIZE_MAX * sizeof(xfs_inode_t *));
	/*
	 * Evict the current occupant of the target slot (if any) and
	 * take its place; advance the index, wrapping at the cache size.
	 */
158 	release_ip = xfs_refcache[xfs_refcache_index];
159 	if (release_ip != NULL) {
160 		release_ip->i_refcache = NULL;
161 		xfs_refcache_count--;
162 		ASSERT(xfs_refcache_count >= 0);
164 	xfs_refcache[xfs_refcache_index] = ip;
165 	ASSERT(ip->i_refcache == NULL);
	/* i_refcache back-points at our slot so purge can find us in O(1). */
166 	ip->i_refcache = &(xfs_refcache[xfs_refcache_index]);
167 	xfs_refcache_count++;
168 	ASSERT(xfs_refcache_count <= xfs_refcache_size);
169 	xfs_refcache_index++;
170 	if (xfs_refcache_index == xfs_refcache_size) {
171 		xfs_refcache_index = 0;
173 	spin_unlock(&xfs_refcache_lock);
176 	* Save the pointer to the inode to be released so that we can
177 	* VN_RELE it once we've dropped our inode locks in xfs_rwunlock().
178 	* The pointer may be NULL, but that's OK.
180 	ip->i_release = release_ip;
183 	* If we allocated memory for the refcache above but someone
184 	* else beat us to using it, then free the memory now.
186 	if (refcache != NULL) {
188 			XFS_REFCACHE_SIZE_MAX * sizeof(xfs_inode_t *));
194 * If the given inode is in the reference cache, purge its entry and
195 * release the reference on the vnode.
/*
 * NOTE(review): parameter list and several body lines are elided from
 * this extract; comments describe only the visible fragments.
 */
198 xfs_refcache_purge_ip(
	/*
	 * Unlocked fast-path check: an inode not pointing at a cache
	 * slot cannot be in the cache.  Re-checked under the lock below
	 * in case we raced with another purger.
	 */
205 	* If we're not pointing to our entry in the cache, then
206 	* we must not be in the cache.
208 	if (ip->i_refcache == NULL) {
212 	spin_lock(&xfs_refcache_lock);
213 	if (ip->i_refcache == NULL) {
214 		spin_unlock(&xfs_refcache_lock);
219 	* Clear both our pointer to the cache entry and its pointer
	/* The slot's back-pointer must agree with the inode's own. */
222 	ASSERT(*(ip->i_refcache) == ip);
223 	*(ip->i_refcache) = NULL;
224 	ip->i_refcache = NULL;
225 	xfs_refcache_count--;
226 	ASSERT(xfs_refcache_count >= 0);
227 	spin_unlock(&xfs_refcache_lock);
	/* Drop the cache's vnode reference only after releasing the spinlock. */
230 	/* ASSERT(vp->v_count > 1); */
231 	VOP_RELEASE(vp, error);
237 * This is called from the XFS unmount code to purge all entries for the
238 * given mount from the cache. It uses the refcache busy counter to
239 * make sure that new entries are not added to the cache as we purge them.
/*
 * NOTE(review): parameter list, locals, and some body lines (including
 * the busy-counter increment/decrement themselves) are elided from this
 * extract; only the visible fragments are annotated below.
 */
242 xfs_refcache_purge_mp(
	/* Nothing to do if the cache was never allocated. */
249 	if (xfs_refcache == NULL) {
253 	spin_lock(&xfs_refcache_lock);
255 	* Bumping the busy counter keeps new entries from being added
256 	* to the cache. We use a counter since multiple unmounts could
257 	* be in here simultaneously.
	/*
	 * Scan every slot; detach entries belonging to this mount.  The
	 * spinlock is dropped around each VOP_RELEASE (which may block)
	 * and retaken — safe because the busy counter blocks insertions.
	 */
261 	for (i = 0; i < xfs_refcache_size; i++) {
262 		ip = xfs_refcache[i];
263 		if ((ip != NULL) && (ip->i_mount == mp)) {
264 			xfs_refcache[i] = NULL;
265 			ip->i_refcache = NULL;
266 			xfs_refcache_count--;
267 			ASSERT(xfs_refcache_count >= 0);
268 			spin_unlock(&xfs_refcache_lock);
270 			VOP_RELEASE(vp, error);
272 			spin_lock(&xfs_refcache_lock);
277 	ASSERT(xfs_refcache_busy >= 0);
278 	spin_unlock(&xfs_refcache_lock);
283 * This is called from the XFS sync code to ensure that the refcache
284 * is emptied out over time. We purge a small number of entries with
/*
 * NOTE(review): return type, some locals, and loop-body lines (e.g. the
 * NULL check on each slot and the iplist_index increment) are elided
 * from this extract; only the visible fragments are annotated.
 */
288 xfs_refcache_purge_some(xfs_mount_t *mp)
293 	xfs_inode_t **iplist;
	/* Nothing to purge if the cache is unallocated or empty. */
295 	if ((xfs_refcache == NULL) || (xfs_refcache_count == 0)) {
	/*
	 * Temporary array for the inodes we detach; allocated before the
	 * spinlock is taken since kmem_zalloc(KM_SLEEP) may block.
	 */
300 	iplist = (xfs_inode_t **)kmem_zalloc(xfs_refcache_purge_count *
301 					sizeof(xfs_inode_t *), KM_SLEEP);
303 	spin_lock(&xfs_refcache_lock);
306 	* Store any inodes we find in the next several entries
307 	* into the iplist array to be released after dropping
308 	* the spinlock. We always start looking from the currently
309 	* oldest place in the cache. We move the refcache index
310 	* forward as we go so that we are sure to eventually clear
311 	* out the entire cache when the system goes idle.
313 	for (i = 0; i < xfs_refcache_purge_count; i++) {
314 		ip = xfs_refcache[xfs_refcache_index];
316 			xfs_refcache[xfs_refcache_index] = NULL;
317 			ip->i_refcache = NULL;
318 			xfs_refcache_count--;
319 			ASSERT(xfs_refcache_count >= 0);
320 			iplist[iplist_index] = ip;
		/* Advance the sweep position, wrapping at the cache size. */
323 		xfs_refcache_index++;
324 		if (xfs_refcache_index == xfs_refcache_size) {
325 			xfs_refcache_index = 0;
329 	spin_unlock(&xfs_refcache_lock);
332 	* Now drop the inodes we collected.
	/* References are dropped outside the spinlock; both calls may block. */
334 	for (i = 0; i < iplist_index; i++) {
335 		VOP_RELEASE(XFS_ITOV(iplist[i]), error);
336 		VN_RELE(XFS_ITOV(iplist[i]));
339 	kmem_free(iplist, xfs_refcache_purge_count *
340 			sizeof(xfs_inode_t *));
344 * This is called when the refcache is dynamically resized
347 * If the new size is smaller than the old size, purge all
348 * entries in slots greater than the new size, and move
349 * the index if necessary.
351 * If the refcache hasn't even been allocated yet, or the
352 * new size is larger than the old size, just set the value
353 * of xfs_refcache_size.
/*
 * NOTE(review): return type, some locals, and loop-body lines (e.g. the
 * NULL check on each slot and the iplist_index increment) are elided
 * from this extract; only the visible fragments are annotated.
 */
357 xfs_refcache_resize(int xfs_refcache_new_size)
361 	int iplist_index = 0;
362 	xfs_inode_t **iplist;
366 	* If the new size is smaller than the current size,
367 	* purge entries to create smaller cache, and
368 	* reposition index if necessary.
369 	* Don't bother if no refcache yet.
371 	if (xfs_refcache && (xfs_refcache_new_size < xfs_refcache_size)) {
	/* Scratch array for detached inodes; allocated before locking. */
373 		iplist = (xfs_inode_t **)kmem_zalloc(XFS_REFCACHE_SIZE_MAX *
374 				sizeof(xfs_inode_t *), KM_SLEEP);
376 		spin_lock(&xfs_refcache_lock);
		/* Detach everything in slots beyond the new size. */
378 		for (i = xfs_refcache_new_size; i < xfs_refcache_size; i++) {
379 			ip = xfs_refcache[i];
381 				xfs_refcache[i] = NULL;
382 				ip->i_refcache = NULL;
383 				xfs_refcache_count--;
384 				ASSERT(xfs_refcache_count >= 0);
385 				iplist[iplist_index] = ip;
390 		xfs_refcache_size = xfs_refcache_new_size;
393 		* Move index to beginning of cache if it's now past the end
395 		if (xfs_refcache_index >= xfs_refcache_new_size)
396 			xfs_refcache_index = 0;
398 		spin_unlock(&xfs_refcache_lock);
401 		* Now drop the inodes we collected.
		/* Drop references outside the spinlock; both calls may block. */
403 		for (i = 0; i < iplist_index; i++) {
404 			VOP_RELEASE(XFS_ITOV(iplist[i]), error);
405 			VN_RELE(XFS_ITOV(iplist[i]));
408 		kmem_free(iplist, XFS_REFCACHE_SIZE_MAX *
409 			sizeof(xfs_inode_t *));
	/*
	 * Growing (or not-yet-allocated) case: just record the new size
	 * under the lock.
	 */
411 	spin_lock(&xfs_refcache_lock);
412 	xfs_refcache_size = xfs_refcache_new_size;
413 	spin_unlock(&xfs_refcache_lock);
/*
 * Unlock the inode and then drop the deferred reference (if any) that
 * xfs_refcache insertion stashed in ip->i_release.  Releasing AFTER
 * xfs_iunlock() means VOP_RELEASE/VN_RELE never run while this inode's
 * locks are held.
 * NOTE(review): parameter list and braces are elided from this extract.
 */
418 xfs_refcache_iunlock(
422 	xfs_inode_t *release_ip;
	/* Take ownership of the deferred-release pointer before unlocking. */
425 	release_ip = ip->i_release;
426 	ip->i_release = NULL;
428 	xfs_iunlock(ip, lock_flags);
430 	if (release_ip != NULL) {
431 		VOP_RELEASE(XFS_ITOV(release_ip), error);
432 		VN_RELE(XFS_ITOV(release_ip));
/*
 * Module teardown: free the cache array (sized at its maximum capacity,
 * matching the kmem_zalloc in the insert path) and destroy the spinlock
 * created by xfs_refcache_init().
 * NOTE(review): braces and the guard around kmem_free (presumably a
 * NULL/allocated check) are elided from this extract — confirm before
 * relying on the exact teardown order.
 */
437 xfs_refcache_destroy(void)
440 		kmem_free(xfs_refcache,
441 			XFS_REFCACHE_SIZE_MAX * sizeof(xfs_inode_t *));
444 	spinlock_destroy(&xfs_refcache_lock);