/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA  94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */

#include "xfs.h"
#include "xfs_macros.h"
#include "xfs_types.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_alloc_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_itable.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_attr.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode_item.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"
#include "xfs_refcache.h"

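/*
 * The reference cache is a fixed-size, circular array of inode
 * pointers protected by a single spinlock.  xfs_refcache_index is
 * the next (oldest) slot to reuse, xfs_refcache_count tracks the
 * number of live entries, and xfs_refcache_busy is a counter that
 * unmounts bump to keep new entries from being added while they
 * purge the cache.
 */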
STATIC lock_t           xfs_refcache_lock;
STATIC xfs_inode_t      **xfs_refcache;
STATIC int              xfs_refcache_index;
STATIC int              xfs_refcache_busy;
STATIC int              xfs_refcache_count;

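/*
 * Initialize the lock protecting the reference cache.  The cache
 * array itself is allocated lazily by xfs_refcache_insert() so that
 * systems not acting as NFS servers don't pay for the memory.
 */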
void
xfs_refcache_init(void)
{
        spinlock_init(&xfs_refcache_lock, "xfs_refcache");
}

/*
 * Insert the given inode into the reference cache.
 */
void
xfs_refcache_insert(
        xfs_inode_t     *ip)
{
        vnode_t         *vp;
        xfs_inode_t     *release_ip;
        xfs_inode_t     **refcache;

        ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE));

        /*
         * If an unmount is busy blowing entries out of the cache,
         * then don't bother.
         */
        if (xfs_refcache_busy) {
                return;
        }

        /*
         * If we tuned the refcache down to zero, don't do anything.
         */
        if (!xfs_refcache_size) {
                return;
        }

        /*
         * If the inode is already in the refcache, don't bother
         * with it.
         */
        if (ip->i_refcache != NULL) {
                return;
        }

        vp = XFS_ITOV(ip);
        /* ASSERT(vp->v_count > 0); */
        VN_HOLD(vp);

        /*
         * We allocate the reference cache on use so that we don't
         * waste the memory on systems not being used as NFS servers.
         */
        if (xfs_refcache == NULL) {
                refcache = (xfs_inode_t **)kmem_zalloc(XFS_REFCACHE_SIZE_MAX *
                                                       sizeof(xfs_inode_t *),
                                                       KM_SLEEP);
        } else {
                refcache = NULL;
        }

        spin_lock(&xfs_refcache_lock);

        /*
         * If we allocated memory for the refcache above and it still
         * needs it, then use the memory we allocated.  Otherwise we'll
         * free the memory below.
         */
        if (refcache != NULL) {
                if (xfs_refcache == NULL) {
                        xfs_refcache = refcache;
                        refcache = NULL;
                }
        }

        /*
         * If an unmount is busy clearing out the cache, don't add new
         * entries to it.
         */
        if (xfs_refcache_busy) {
                spin_unlock(&xfs_refcache_lock);
                VN_RELE(vp);
                /*
                 * If we allocated memory for the refcache above but someone
                 * else beat us to using it, then free the memory now.
                 */
                if (refcache != NULL) {
                        kmem_free(refcache,
                                  XFS_REFCACHE_SIZE_MAX * sizeof(xfs_inode_t *));
                }
                return;
        }
        release_ip = xfs_refcache[xfs_refcache_index];
        if (release_ip != NULL) {
                release_ip->i_refcache = NULL;
                xfs_refcache_count--;
                ASSERT(xfs_refcache_count >= 0);
        }
        xfs_refcache[xfs_refcache_index] = ip;
        ASSERT(ip->i_refcache == NULL);
        ip->i_refcache = &(xfs_refcache[xfs_refcache_index]);
        xfs_refcache_count++;
        ASSERT(xfs_refcache_count <= xfs_refcache_size);
        xfs_refcache_index++;
        if (xfs_refcache_index == xfs_refcache_size) {
                xfs_refcache_index = 0;
        }
        spin_unlock(&xfs_refcache_lock);

        /*
         * Save the pointer to the inode to be released so that we can
         * VN_RELE it once we've dropped our inode locks in xfs_rwunlock().
         * The pointer may be NULL, but that's OK.
         */
        ip->i_release = release_ip;

        /*
         * If we allocated memory for the refcache above but someone
         * else beat us to using it, then free the memory now.
         */
        if (refcache != NULL) {
                kmem_free(refcache,
                          XFS_REFCACHE_SIZE_MAX * sizeof(xfs_inode_t *));
        }
}

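/*
 * A minimal, hypothetical sketch of the expected caller pattern (the
 * real call sites live in the XFS I/O path, not in this file): the
 * inode is inserted while its iolock is held in update mode, and the
 * displaced inode saved in ip->i_release is dropped only after our
 * own locks are released, via xfs_refcache_iunlock() below.  Compiled
 * out; the function name and lock flag are illustrative.
 */
#if 0
STATIC void
example_serve_and_unlock(xfs_inode_t *ip)
{
        xfs_refcache_insert(ip);        /* takes an extra vnode hold */
        /* ... serve the request ... */
        xfs_refcache_iunlock(ip, XFS_IOLOCK_EXCL);  /* unlock, then release */
}
#endif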

/*
 * If the given inode is in the reference cache, purge its entry and
 * release the reference on the vnode.
 */
void
xfs_refcache_purge_ip(
        xfs_inode_t     *ip)
{
        vnode_t *vp;
        int     error;

        /*
         * If we're not pointing to our entry in the cache, then
         * we must not be in the cache.
         */
        if (ip->i_refcache == NULL) {
                return;
        }

        spin_lock(&xfs_refcache_lock);
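        /*
         * Recheck under the lock: another thread may have purged
         * the entry between the unlocked check above and here.
         */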
        if (ip->i_refcache == NULL) {
                spin_unlock(&xfs_refcache_lock);
                return;
        }

        /*
         * Clear both our pointer to the cache entry and its pointer
         * back to us.
         */
        ASSERT(*(ip->i_refcache) == ip);
        *(ip->i_refcache) = NULL;
        ip->i_refcache = NULL;
        xfs_refcache_count--;
        ASSERT(xfs_refcache_count >= 0);
        spin_unlock(&xfs_refcache_lock);

        vp = XFS_ITOV(ip);
        /* ASSERT(vp->v_count > 1); */
        VOP_RELEASE(vp, error);
        VN_RELE(vp);
}


/*
 * This is called from the XFS unmount code to purge all entries for the
 * given mount from the cache.  It uses the refcache busy counter to
 * make sure that new entries are not added to the cache as we purge them.
 */
void
xfs_refcache_purge_mp(
        xfs_mount_t     *mp)
{
        vnode_t         *vp;
        int             error, i;
        xfs_inode_t     *ip;

        if (xfs_refcache == NULL) {
                return;
        }

        spin_lock(&xfs_refcache_lock);
        /*
         * Bumping the busy counter keeps new entries from being added
         * to the cache.  We use a counter since multiple unmounts could
         * be in here simultaneously.
         */
        xfs_refcache_busy++;

        for (i = 0; i < xfs_refcache_size; i++) {
                ip = xfs_refcache[i];
                if ((ip != NULL) && (ip->i_mount == mp)) {
                        xfs_refcache[i] = NULL;
                        ip->i_refcache = NULL;
                        xfs_refcache_count--;
                        ASSERT(xfs_refcache_count >= 0);
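                        /*
                         * Drop the spinlock around the vnode release,
                         * which may sleep; the busy counter bumped
                         * above keeps new entries from going in while
                         * the lock is dropped.
                         */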
                        spin_unlock(&xfs_refcache_lock);
                        vp = XFS_ITOV(ip);
                        VOP_RELEASE(vp, error);
                        VN_RELE(vp);
                        spin_lock(&xfs_refcache_lock);
                }
        }

        xfs_refcache_busy--;
        ASSERT(xfs_refcache_busy >= 0);
        spin_unlock(&xfs_refcache_lock);
}


/*
 * This is called from the XFS sync code to ensure that the refcache
 * is emptied out over time.  We purge a small number of entries with
 * each call.
 */
void
xfs_refcache_purge_some(xfs_mount_t *mp)
{
        int             error, i;
        xfs_inode_t     *ip;
        int             iplist_index;
        xfs_inode_t     **iplist;

        if ((xfs_refcache == NULL) || (xfs_refcache_count == 0)) {
                return;
        }

        iplist_index = 0;
        iplist = (xfs_inode_t **)kmem_zalloc(xfs_refcache_purge_count *
                                          sizeof(xfs_inode_t *), KM_SLEEP);

        spin_lock(&xfs_refcache_lock);

        /*
         * Store any inodes we find in the next several entries
         * into the iplist array to be released after dropping
         * the spinlock.  We always start looking from the currently
         * oldest place in the cache.  We move the refcache index
         * forward as we go so that we are sure to eventually clear
         * out the entire cache when the system goes idle.
         */
        for (i = 0; i < xfs_refcache_purge_count; i++) {
                ip = xfs_refcache[xfs_refcache_index];
                if (ip != NULL) {
                        xfs_refcache[xfs_refcache_index] = NULL;
                        ip->i_refcache = NULL;
                        xfs_refcache_count--;
                        ASSERT(xfs_refcache_count >= 0);
                        iplist[iplist_index] = ip;
                        iplist_index++;
                }
                xfs_refcache_index++;
                if (xfs_refcache_index == xfs_refcache_size) {
                        xfs_refcache_index = 0;
                }
        }

        spin_unlock(&xfs_refcache_lock);

        /*
         * Now drop the inodes we collected.
         */
        for (i = 0; i < iplist_index; i++) {
                VOP_RELEASE(XFS_ITOV(iplist[i]), error);
                VN_RELE(XFS_ITOV(iplist[i]));
        }

        kmem_free(iplist, xfs_refcache_purge_count *
                          sizeof(xfs_inode_t *));
}

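/*
 * A hypothetical sketch of the periodic caller, assuming an
 * xfs_sync()-style pass (the comment above says this is driven from
 * the XFS sync code; the function below is illustrative, not the
 * real call site).  Note that the mount pointer is accepted but not
 * used: each pass retires up to xfs_refcache_purge_count slots
 * regardless of which mount owns them.
 */
#if 0
STATIC void
example_sync_pass(xfs_mount_t *mp)
{
        xfs_refcache_purge_some(mp);
}
#endif
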
/*
 * This is called when the refcache is dynamically resized
 * via a sysctl.
 *
 * If the new size is smaller than the old size, purge all
 * entries in slots greater than the new size, and move
 * the index if necessary.
 *
 * If the refcache hasn't even been allocated yet, or the
 * new size is larger than the old size, just set the value
 * of xfs_refcache_size.
 */
void
xfs_refcache_resize(int xfs_refcache_new_size)
{
        int             i;
        xfs_inode_t     *ip;
        int             iplist_index = 0;
        xfs_inode_t     **iplist;
        int             error;

        /*
         * If the new size is smaller than the current size,
         * purge entries to create the smaller cache, and
         * reposition the index if necessary.
         * Don't bother if there is no refcache yet.
         */
        if (xfs_refcache && (xfs_refcache_new_size < xfs_refcache_size)) {

                iplist = (xfs_inode_t **)kmem_zalloc(XFS_REFCACHE_SIZE_MAX *
                                sizeof(xfs_inode_t *), KM_SLEEP);

                spin_lock(&xfs_refcache_lock);

                for (i = xfs_refcache_new_size; i < xfs_refcache_size; i++) {
                        ip = xfs_refcache[i];
                        if (ip != NULL) {
                                xfs_refcache[i] = NULL;
                                ip->i_refcache = NULL;
                                xfs_refcache_count--;
                                ASSERT(xfs_refcache_count >= 0);
                                iplist[iplist_index] = ip;
                                iplist_index++;
                        }
                }

                xfs_refcache_size = xfs_refcache_new_size;

                /*
                 * Move the index to the beginning of the cache if
                 * it's now past the end.
                 */
                if (xfs_refcache_index >= xfs_refcache_new_size)
                        xfs_refcache_index = 0;

                spin_unlock(&xfs_refcache_lock);

                /*
                 * Now drop the inodes we collected.
                 */
                for (i = 0; i < iplist_index; i++) {
                        VOP_RELEASE(XFS_ITOV(iplist[i]), error);
                        VN_RELE(XFS_ITOV(iplist[i]));
                }

                kmem_free(iplist, XFS_REFCACHE_SIZE_MAX *
                                  sizeof(xfs_inode_t *));
        } else {
                spin_lock(&xfs_refcache_lock);
                xfs_refcache_size = xfs_refcache_new_size;
                spin_unlock(&xfs_refcache_lock);
        }
}

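/*
 * A hypothetical sketch of the sysctl glue that might drive the
 * resize, assuming FreeBSD's sysctl_handle_int() convention.  The
 * handler name and wiring are illustrative, not the actual FreeBSD
 * integration.  Compiled out.
 */
#if 0
static int
xfs_refcache_size_sysctl(SYSCTL_HANDLER_ARGS)
{
        int     size, error;

        size = xfs_refcache_size;
        error = sysctl_handle_int(oidp, &size, 0, req);
        if (error || req->newptr == NULL)
                return (error);
        if (size < 0 || size > XFS_REFCACHE_SIZE_MAX)
                return (EINVAL);
        xfs_refcache_resize(size);
        return (0);
}
#endif

/*
 * Drop the given inode locks, then release the inode that this one
 * displaced from the cache (saved in ip->i_release by
 * xfs_refcache_insert()).  The release is deferred until after our
 * own locks are dropped.
 */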
void
xfs_refcache_iunlock(
        xfs_inode_t     *ip,
        uint            lock_flags)
{
        xfs_inode_t     *release_ip;
        int             error;

        release_ip = ip->i_release;
        ip->i_release = NULL;

        xfs_iunlock(ip, lock_flags);

        if (release_ip != NULL) {
                VOP_RELEASE(XFS_ITOV(release_ip), error);
                VN_RELE(XFS_ITOV(release_ip));
        }
}

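/*
 * Free the cache array, if it was ever allocated, and destroy the
 * lock protecting it.
 */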
void
xfs_refcache_destroy(void)
{
        if (xfs_refcache) {
                kmem_free(xfs_refcache,
                        XFS_REFCACHE_SIZE_MAX * sizeof(xfs_inode_t *));
                xfs_refcache = NULL;
        }
        spinlock_destroy(&xfs_refcache_lock);
}