/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_itable.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"
#include "xfs_refcache.h"

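/*
 * The reference cache itself: a circular array of inode pointers
 * protected by xfs_refcache_lock.  xfs_refcache_index is the next
 * slot to (re)use, xfs_refcache_count is the number of live entries,
 * and xfs_refcache_busy counts unmount purges in progress (new
 * entries are refused while it is nonzero).
 */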
STATIC lock_t           xfs_refcache_lock;
STATIC xfs_inode_t      **xfs_refcache;
STATIC int              xfs_refcache_index;
STATIC int              xfs_refcache_busy;
STATIC int              xfs_refcache_count;

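/*
 * Initialize the spinlock protecting the reference cache structures.
 */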
void
xfs_refcache_init(void)
{
        spinlock_init(&xfs_refcache_lock, "xfs_refcache");
}

/*
 * Insert the given inode into the reference cache.
 */
void
xfs_refcache_insert(
        xfs_inode_t     *ip)
{
        vnode_t         *vp;
        xfs_inode_t     *release_ip;
        xfs_inode_t     **refcache;

        ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE));

        /*
         * If an unmount is busy blowing entries out of the cache,
         * then don't bother.
         */
        if (xfs_refcache_busy) {
                return;
        }

        /*
         * If we tuned the refcache down to zero, don't do anything.
         */
        if (!xfs_refcache_size) {
                return;
        }

        /*
         * The inode is already in the refcache, so don't bother
         * with it.
         */
        if (ip->i_refcache != NULL) {
                return;
        }

        vp = XFS_ITOV(ip);
        /* ASSERT(vp->v_count > 0); */
        VN_HOLD(vp);

        /*
         * We allocate the reference cache on use so that we don't
         * waste the memory on systems not being used as NFS servers.
         */
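        /*
         * The KM_SLEEP allocation must happen before we take the
         * spinlock below, since it may block.  If another thread wins
         * the race to install the array, we free our copy afterwards.
         */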
        if (xfs_refcache == NULL) {
                refcache = (xfs_inode_t **)kmem_zalloc(XFS_REFCACHE_SIZE_MAX *
                                                       sizeof(xfs_inode_t *),
                                                       KM_SLEEP);
        } else {
                refcache = NULL;
        }

        spin_lock(&xfs_refcache_lock);

        /*
         * If we allocated memory for the refcache above and it still
         * needs it, then use the memory we allocated.  Otherwise we'll
         * free the memory below.
         */
        if (refcache != NULL) {
                if (xfs_refcache == NULL) {
                        xfs_refcache = refcache;
                        refcache = NULL;
                }
        }

        /*
         * If an unmount is busy clearing out the cache, don't add new
         * entries to it.
         */
        if (xfs_refcache_busy) {
                spin_unlock(&xfs_refcache_lock);
                VN_RELE(vp);
                /*
                 * If we allocated memory for the refcache above but someone
                 * else beat us to using it, then free the memory now.
                 */
                if (refcache != NULL) {
                        kmem_free(refcache,
                                  XFS_REFCACHE_SIZE_MAX * sizeof(xfs_inode_t *));
                }
                return;
        }
        release_ip = xfs_refcache[xfs_refcache_index];
        if (release_ip != NULL) {
                release_ip->i_refcache = NULL;
                xfs_refcache_count--;
                ASSERT(xfs_refcache_count >= 0);
        }
        xfs_refcache[xfs_refcache_index] = ip;
        ASSERT(ip->i_refcache == NULL);
        ip->i_refcache = &(xfs_refcache[xfs_refcache_index]);
        xfs_refcache_count++;
        ASSERT(xfs_refcache_count <= xfs_refcache_size);
        xfs_refcache_index++;
        if (xfs_refcache_index == xfs_refcache_size) {
                xfs_refcache_index = 0;
        }
        spin_unlock(&xfs_refcache_lock);

        /*
         * Save the pointer to the inode to be released so that we can
         * VN_RELE it once we've dropped our inode locks in xfs_rwunlock().
         * The pointer may be NULL, but that's OK.
         */
        ip->i_release = release_ip;

        /*
         * If we allocated memory for the refcache above but someone
         * else beat us to using it, then free the memory now.
         */
        if (refcache != NULL) {
                kmem_free(refcache,
                          XFS_REFCACHE_SIZE_MAX * sizeof(xfs_inode_t *));
        }
}
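
/*
 * Sketch of the intended usage from the NFS-driven read/write paths
 * (an assumption based on the comments above, not code in this file):
 * the caller holds the exclusive iolock, inserts the inode, and later
 * unlocks through xfs_refcache_iunlock(), which performs the deferred
 * VN_RELE() of the displaced entry:
 *
 *      xfs_ilock(ip, XFS_IOLOCK_EXCL);
 *      ... serve the request ...
 *      xfs_refcache_insert(ip);
 *      xfs_refcache_iunlock(ip, XFS_IOLOCK_EXCL);
 */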


/*
 * If the given inode is in the reference cache, purge its entry and
 * release the reference on the vnode.
 */
void
xfs_refcache_purge_ip(
        xfs_inode_t     *ip)
{
        vnode_t *vp;
        int     error;

        /*
         * If we're not pointing to our entry in the cache, then
         * we must not be in the cache.
         */
        if (ip->i_refcache == NULL) {
                return;
        }

        spin_lock(&xfs_refcache_lock);
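        /*
         * Re-check under the lock: another thread may have purged
         * our entry between the unlocked check above and here.
         */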
        if (ip->i_refcache == NULL) {
                spin_unlock(&xfs_refcache_lock);
                return;
        }

        /*
         * Clear both our pointer to the cache entry and its pointer
         * back to us.
         */
        ASSERT(*(ip->i_refcache) == ip);
        *(ip->i_refcache) = NULL;
        ip->i_refcache = NULL;
        xfs_refcache_count--;
        ASSERT(xfs_refcache_count >= 0);
        spin_unlock(&xfs_refcache_lock);

        vp = XFS_ITOV(ip);
        /* ASSERT(vp->v_count > 1); */
        VOP_RELEASE(vp, error);
        VN_RELE(vp);
}


/*
 * This is called from the XFS unmount code to purge all entries for the
 * given mount from the cache.  It uses the refcache busy counter to
 * make sure that new entries are not added to the cache as we purge them.
 */
void
xfs_refcache_purge_mp(
        xfs_mount_t     *mp)
{
        vnode_t         *vp;
        int             error, i;
        xfs_inode_t     *ip;

        if (xfs_refcache == NULL) {
                return;
        }

        spin_lock(&xfs_refcache_lock);
        /*
         * Bumping the busy counter keeps new entries from being added
         * to the cache.  We use a counter since multiple unmounts could
         * be in here simultaneously.
         */
        xfs_refcache_busy++;

        for (i = 0; i < xfs_refcache_size; i++) {
                ip = xfs_refcache[i];
                if ((ip != NULL) && (ip->i_mount == mp)) {
                        xfs_refcache[i] = NULL;
                        ip->i_refcache = NULL;
                        xfs_refcache_count--;
                        ASSERT(xfs_refcache_count >= 0);
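                        /*
                         * Drop the spinlock around the release, since
                         * VOP_RELEASE()/VN_RELE() may block; the busy
                         * counter keeps new entries out of the cache
                         * in the meantime.
                         */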
                        spin_unlock(&xfs_refcache_lock);
                        vp = XFS_ITOV(ip);
                        VOP_RELEASE(vp, error);
                        VN_RELE(vp);
                        spin_lock(&xfs_refcache_lock);
                }
        }

        xfs_refcache_busy--;
        ASSERT(xfs_refcache_busy >= 0);
        spin_unlock(&xfs_refcache_lock);
}


/*
 * This is called from the XFS sync code to ensure that the refcache
 * is emptied out over time.  We purge a small number of entries with
 * each call.
 */
void
xfs_refcache_purge_some(xfs_mount_t *mp)
{
        int             error, i;
        xfs_inode_t     *ip;
        int             iplist_index;
        xfs_inode_t     **iplist;

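        /*
         * Note that the mp argument is unused here: each call purges
         * the oldest few entries regardless of which mount they
         * belong to.
         */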
        if ((xfs_refcache == NULL) || (xfs_refcache_count == 0)) {
                return;
        }

        iplist_index = 0;
        iplist = (xfs_inode_t **)kmem_zalloc(xfs_refcache_purge_count *
                                          sizeof(xfs_inode_t *), KM_SLEEP);

        spin_lock(&xfs_refcache_lock);

        /*
         * Store any inodes we find in the next several entries
         * into the iplist array to be released after dropping
         * the spinlock.  We always start looking from the currently
         * oldest place in the cache.  We move the refcache index
         * forward as we go so that we are sure to eventually clear
         * out the entire cache when the system goes idle.
         */
        for (i = 0; i < xfs_refcache_purge_count; i++) {
                ip = xfs_refcache[xfs_refcache_index];
                if (ip != NULL) {
                        xfs_refcache[xfs_refcache_index] = NULL;
                        ip->i_refcache = NULL;
                        xfs_refcache_count--;
                        ASSERT(xfs_refcache_count >= 0);
                        iplist[iplist_index] = ip;
                        iplist_index++;
                }
                xfs_refcache_index++;
                if (xfs_refcache_index == xfs_refcache_size) {
                        xfs_refcache_index = 0;
                }
        }

        spin_unlock(&xfs_refcache_lock);

        /*
         * Now drop the inodes we collected.
         */
        for (i = 0; i < iplist_index; i++) {
                VOP_RELEASE(XFS_ITOV(iplist[i]), error);
                VN_RELE(XFS_ITOV(iplist[i]));
        }

        kmem_free(iplist, xfs_refcache_purge_count *
                          sizeof(xfs_inode_t *));
}

/*
 * This is called when the refcache is dynamically resized
 * via a sysctl.
 *
 * If the new size is smaller than the old size, purge all
 * entries in slots at or beyond the new size, and move
 * the index if necessary.
 *
 * If the refcache hasn't even been allocated yet, or the
 * new size is larger than the old size, just set the value
 * of xfs_refcache_size.
 */

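/*
 * A hypothetical FreeBSD sysctl handler wiring this up might look
 * like the sketch below; the names are illustrative only and the
 * actual handler lives elsewhere in the port:
 *
 *      static int
 *      xfs_refcache_size_sysctl(SYSCTL_HANDLER_ARGS)
 *      {
 *              int error, size = xfs_refcache_size;
 *
 *              error = sysctl_handle_int(oidp, &size, 0, req);
 *              if (error != 0 || req->newptr == NULL)
 *                      return (error);
 *              if (size < 0 || size > XFS_REFCACHE_SIZE_MAX)
 *                      return (EINVAL);
 *              xfs_refcache_resize(size);
 *              return (0);
 *      }
 */
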
void
xfs_refcache_resize(int xfs_refcache_new_size)
{
        int             i;
        xfs_inode_t     *ip;
        int             iplist_index = 0;
        xfs_inode_t     **iplist;
        int             error;

        /*
         * If the new size is smaller than the current size,
         * purge entries to shrink the cache, and reposition
         * the index if necessary.
         * Don't bother if there is no refcache yet.
         */
        if (xfs_refcache && (xfs_refcache_new_size < xfs_refcache_size)) {

                iplist = (xfs_inode_t **)kmem_zalloc(XFS_REFCACHE_SIZE_MAX *
                                sizeof(xfs_inode_t *), KM_SLEEP);

                spin_lock(&xfs_refcache_lock);

                for (i = xfs_refcache_new_size; i < xfs_refcache_size; i++) {
                        ip = xfs_refcache[i];
                        if (ip != NULL) {
                                xfs_refcache[i] = NULL;
                                ip->i_refcache = NULL;
                                xfs_refcache_count--;
                                ASSERT(xfs_refcache_count >= 0);
                                iplist[iplist_index] = ip;
                                iplist_index++;
                        }
                }

                xfs_refcache_size = xfs_refcache_new_size;

                /*
                 * Move the index to the beginning of the cache if it's
                 * now past the end.
                 */
                if (xfs_refcache_index >= xfs_refcache_new_size)
                        xfs_refcache_index = 0;

                spin_unlock(&xfs_refcache_lock);

                /*
                 * Now drop the inodes we collected.
                 */
                for (i = 0; i < iplist_index; i++) {
                        VOP_RELEASE(XFS_ITOV(iplist[i]), error);
                        VN_RELE(XFS_ITOV(iplist[i]));
                }

                kmem_free(iplist, XFS_REFCACHE_SIZE_MAX *
                                  sizeof(xfs_inode_t *));
        } else {
                spin_lock(&xfs_refcache_lock);
                xfs_refcache_size = xfs_refcache_new_size;
                spin_unlock(&xfs_refcache_lock);
        }
}

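/*
 * Unlock the inode, then release the deferred vnode reference saved
 * by xfs_refcache_insert(), if any.  The VN_RELE() is done here,
 * after xfs_iunlock(), so the displaced inode is never released
 * while we still hold locks on the current one.
 */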
void
xfs_refcache_iunlock(
        xfs_inode_t     *ip,
        uint            lock_flags)
{
        xfs_inode_t     *release_ip;
        int             error;

        release_ip = ip->i_release;
        ip->i_release = NULL;

        xfs_iunlock(ip, lock_flags);

        if (release_ip != NULL) {
                VOP_RELEASE(XFS_ITOV(release_ip), error);
                VN_RELE(XFS_ITOV(release_ip));
        }
}

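/*
 * Free the cache array and tear down the spinlock at unload time.
 * Callers are expected to have purged any remaining entries first
 * (e.g. via xfs_refcache_purge_mp()); otherwise the vnode references
 * still held by the cache would be leaked.
 */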
void
xfs_refcache_destroy(void)
{
        if (xfs_refcache) {
                kmem_free(xfs_refcache,
                        XFS_REFCACHE_SIZE_MAX * sizeof(xfs_inode_t *));
                xfs_refcache = NULL;
        }
        spinlock_destroy(&xfs_refcache_lock);
}