/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_types.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
/*
 * Initialize the inode hash table for the newly mounted file system.
 * Choose an initial table size based on user specified value, else
 * use a simple algorithm using the maximum number of inodes as an
 * indicator for table size, and clamp it between one and some large
 * number of pages.
 */
void
xfs_ihash_init(xfs_mount_t *mp)
{
	__uint64_t	icount;
	uint		i, flags = KM_SLEEP | KM_MAYFAIL;

	if (!mp->m_ihsize) {
		icount = mp->m_maxicount ? mp->m_maxicount :
			 (mp->m_sb.sb_dblocks << mp->m_sb.sb_inopblog);
		mp->m_ihsize = 1 << max_t(uint, 8,
					(xfs_highbit64(icount) + 1) / 2);
		mp->m_ihsize = min_t(uint, mp->m_ihsize,
					(64 * NBPP) / sizeof(xfs_ihash_t));
	}

	while (!(mp->m_ihash = (xfs_ihash_t *)kmem_zalloc(mp->m_ihsize *
						sizeof(xfs_ihash_t), flags))) {
		if ((mp->m_ihsize >>= 1) <= NBPP)
			flags = KM_SLEEP;
	}
	for (i = 0; i < mp->m_ihsize; i++) {
		rwlock_init(&(mp->m_ihash[i].ih_lock));
	}
}
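
/*
 * Worked example (illustrative, not part of the original source): for
 * a hypothetical file system whose maximum inode count is 2^24 (about
 * 16 million), xfs_highbit64(icount) is 24, so the table size is
 *
 *	1 << max(8, (24 + 1) / 2) == 1 << 12 == 4096 buckets,
 *
 * which the min_t() above then clamps so the bucket array never
 * exceeds 64 pages worth of xfs_ihash_t structures.
 */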
/*
 * Free up structures allocated by xfs_ihash_init, at unmount time.
 */
void
xfs_ihash_free(xfs_mount_t *mp)
{
	kmem_free(mp->m_ihash, mp->m_ihsize * sizeof(xfs_ihash_t));
	mp->m_ihash = NULL;
}
/*
 * Initialize the inode cluster hash table for the newly mounted file system.
 * Its size is derived from the ihash table size.
 */
void
xfs_chash_init(xfs_mount_t *mp)
{
	uint	i;

	mp->m_chsize = max_t(uint, 1, mp->m_ihsize /
			 (XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog));
	mp->m_chsize = min_t(uint, mp->m_chsize, mp->m_ihsize);
	mp->m_chash = (xfs_chash_t *)kmem_zalloc(mp->m_chsize
						 * sizeof(xfs_chash_t),
						 KM_SLEEP);
	for (i = 0; i < mp->m_chsize; i++) {
		spinlock_init(&mp->m_chash[i].ch_lock, "xfshash");
	}
}

/*
 * Free up structures allocated by xfs_chash_init, at unmount time.
 */
void
xfs_chash_free(xfs_mount_t *mp)
{
	uint	i;

	for (i = 0; i < mp->m_chsize; i++) {
		spinlock_destroy(&mp->m_chash[i].ch_lock);
	}
	kmem_free(mp->m_chash, mp->m_chsize * sizeof(xfs_chash_t));
	mp->m_chash = NULL;
}
/*
 * Try to move an inode to the front of its hash list if possible
 * (and if it's not there already).  Called right after obtaining
 * the list version number and then dropping the read_lock on the
 * hash list in question (which is done right after looking up the
 * inode in question...).
 */
STATIC void
xfs_ihash_promote(
	xfs_ihash_t	*ih,
	xfs_inode_t	*ip,
	ulong		version)
{
	xfs_inode_t	*iq;

	if ((ip->i_prevp != &ih->ih_next) && write_trylock(&ih->ih_lock)) {
		if (likely(version == ih->ih_version)) {
			/* remove from list */
			if ((iq = ip->i_next)) {
				iq->i_prevp = ip->i_prevp;
			}
			*ip->i_prevp = iq;

			/* insert at list head */
			iq = ih->ih_next;
			iq->i_prevp = &ip->i_next;
			ip->i_next = iq;
			ip->i_prevp = &ih->ih_next;
			ih->ih_next = ip;
			ih->ih_version++;
		}
		write_unlock(&ih->ih_lock);
	}
}
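
/*
 * Illustrative sketch (not part of the original source): the lookup
 * pattern this promote optimisation is designed for.  A caller samples
 * ih_version under the read lock, drops the lock, and then lets
 * xfs_ihash_promote() reorder the chain only if no writer has touched
 * it in the meantime:
 *
 *	read_lock(&ih->ih_lock);
 *	... find ip on the chain ...
 *	version = ih->ih_version;
 *	read_unlock(&ih->ih_lock);
 *	xfs_ihash_promote(ih, ip, version);
 */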
/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the hash table for the file system
 * represented by the mount point parameter mp.  Each bucket of
 * the hash table is guarded by an individual semaphore.
 *
 * If the inode is found in the hash table, its corresponding vnode
 * is obtained with a call to vn_get().  This call takes care of
 * coordination with the reclamation of the inode and vnode.  Note
 * that the vmap structure is filled in while holding the hash lock.
 * This gives us the state of the inode/vnode when we found it and
 * is used for coordination in vn_get().
 *
 * If it is not in core, read it in from the file system's device and
 * add the inode into the hash table.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *               for xfs_ilock() for a list of valid values.
 * bno -- the block number starting the buffer containing the inode,
 *        if known (as by bulkstat), else 0.
 */
STATIC int
xfs_iget_core(
	xfs_vnode_t	*vp,
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno)
{
	xfs_ihash_t	*ih;
	xfs_inode_t	*ip;
	xfs_inode_t	*iq;
	xfs_vnode_t	*inode_vp;
	ulong		version;
	int		error;
	/* REFERENCED */
	xfs_chash_t	*ch;
	xfs_chashlist_t	*chl, *chlnew;
	SPLDECL(s);

	ih = XFS_IHASH(mp, ino);
again:
	read_lock(&ih->ih_lock);

	for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) {
		if (ip->i_ino == ino) {
			/*
			 * If INEW is set this inode is being set up;
			 * we need to pause and try again.
			 */
			if (ip->i_flags & XFS_INEW) {
				read_unlock(&ih->ih_lock);
				delay(1);
				XFS_STATS_INC(xs_ig_frecycle);
				goto again;
			}

			inode_vp = XFS_ITOV_NULL(ip);
			if (inode_vp == NULL) {
				/*
				 * If IRECLAIM is set this inode is
				 * on its way out of the system;
				 * we need to pause and try again.
				 */
				if (ip->i_flags & XFS_IRECLAIM) {
					read_unlock(&ih->ih_lock);
					delay(1);
					XFS_STATS_INC(xs_ig_frecycle);
					goto again;
				}

				vn_trace_exit(vp, "xfs_iget.alloc",
					(inst_t *)__return_address);

				XFS_STATS_INC(xs_ig_found);

				ip->i_flags &= ~XFS_IRECLAIMABLE;
				version = ih->ih_version;
				read_unlock(&ih->ih_lock);
				xfs_ihash_promote(ih, ip, version);

				XFS_MOUNT_ILOCK(mp);
				list_del_init(&ip->i_reclaim);
				XFS_MOUNT_IUNLOCK(mp);

				goto finish_inode;

			} else if (vp != inode_vp) {
				struct inode *inode = vn_to_inode(inode_vp);

				/* The inode is being torn down, pause and
				 * try again.
				 */
				if (inode->i_state & (I_FREEING | I_CLEAR)) {
					read_unlock(&ih->ih_lock);
					delay(1);
					XFS_STATS_INC(xs_ig_frecycle);
					goto again;
				}
			/* Chances are the other vnode (the one in the inode) is being torn
			 * down right now, and we landed on top of it. Question is, what do
			 * we do? Unhook the old inode and hook up the new one?
			 */
				cmn_err(CE_PANIC,
			"xfs_iget_core: ambiguous vns: vp/0x%p, invp/0x%p",
						inode_vp, vp);
			}

			/*
			 * Inode cache hit: if ip is not at the front of
			 * its hash chain, move it there now.
			 * Do this with the lock held for update, but
			 * do statistics after releasing the lock.
			 */
			version = ih->ih_version;
			read_unlock(&ih->ih_lock);
			xfs_ihash_promote(ih, ip, version);
			XFS_STATS_INC(xs_ig_found);

finish_inode:
			if (ip->i_d.di_mode == 0) {
				if (!(flags & IGET_CREATE))
					return ENOENT;
				xfs_iocore_inode_reinit(ip);
			}

			if (lock_flags != 0)
				xfs_ilock(ip, lock_flags);

			ip->i_flags &= ~XFS_ISTALE;

			vn_trace_exit(vp, "xfs_iget.found",
				(inst_t *)__return_address);
			goto return_ip;
		}
	}
	/*
	 * Inode cache miss: save the hash chain version stamp and unlock
	 * the chain, so we don't deadlock in vn_alloc.
	 */
	XFS_STATS_INC(xs_ig_missed);

	version = ih->ih_version;

	read_unlock(&ih->ih_lock);

	/*
	 * Read the disk inode attributes into a new inode structure and get
	 * a new vnode for it.  This should also initialize i_ino and i_mount.
	 */
	error = xfs_iread(mp, tp, ino, &ip, bno);
	if (error)
		return error;

	vn_trace_exit(vp, "xfs_iget.alloc", (inst_t *)__return_address);

	xfs_inode_lock_init(ip, vp);
	xfs_iocore_inode_init(ip);

	if (lock_flags != 0) {
		xfs_ilock(ip, lock_flags);
	}

	if ((ip->i_d.di_mode == 0) && !(flags & IGET_CREATE)) {
		xfs_idestroy(ip);
		return ENOENT;
	}
	/*
	 * Put ip on its hash chain, unless someone else hashed a duplicate
	 * after we released the hash lock.
	 */
	write_lock(&ih->ih_lock);

	if (ih->ih_version != version) {
		for (iq = ih->ih_next; iq != NULL; iq = iq->i_next) {
			if (iq->i_ino == ino) {
				write_unlock(&ih->ih_lock);
				xfs_idestroy(ip);

				XFS_STATS_INC(xs_ig_dup);
				goto again;
			}
		}
	}

	/*
	 * These values _must_ be set before releasing ihlock!
	 */
	ip->i_hash = ih;
	if ((iq = ih->ih_next)) {
		iq->i_prevp = &ip->i_next;
	}
	ip->i_next = iq;
	ip->i_prevp = &ih->ih_next;
	ih->ih_next = ip;
	ip->i_udquot = ip->i_gdquot = NULL;
	ih->ih_version++;
	ip->i_flags |= XFS_INEW;

	write_unlock(&ih->ih_lock);
	/*
	 * put ip on its cluster's hash chain
	 */
	ASSERT(ip->i_chash == NULL && ip->i_cprev == NULL &&
	       ip->i_cnext == NULL);

	chlnew = NULL;
	ch = XFS_CHASH(mp, ip->i_blkno);
 chlredo:
	s = mutex_spinlock(&ch->ch_lock);
	for (chl = ch->ch_list; chl != NULL; chl = chl->chl_next) {
		if (chl->chl_blkno == ip->i_blkno) {

			/* insert this inode into the doubly-linked list
			 * where chl points */
			if ((iq = chl->chl_ip)) {
				ip->i_cprev = iq->i_cprev;
				iq->i_cprev->i_cnext = ip;
				iq->i_cprev = ip;
				ip->i_cnext = iq;
			} else {
				ip->i_cnext = ip;
				ip->i_cprev = ip;
			}
			chl->chl_ip = ip;
			ip->i_chash = chl;
			break;
		}
	}

	/* no hash list found for this block; add a new hash list */
	if (chl == NULL) {
		if (chlnew == NULL) {
			mutex_spinunlock(&ch->ch_lock, s);
			ASSERT(xfs_chashlist_zone != NULL);
			chlnew = (xfs_chashlist_t *)
					kmem_zone_alloc(xfs_chashlist_zone,
						KM_SLEEP);
			ASSERT(chlnew != NULL);
			goto chlredo;
		} else {
			ip->i_cnext = ip;
			ip->i_cprev = ip;
			ip->i_chash = chlnew;
			chlnew->chl_ip = ip;
			chlnew->chl_blkno = ip->i_blkno;
			if (ch->ch_list)
				ch->ch_list->chl_prev = chlnew;
			chlnew->chl_next = ch->ch_list;
			chlnew->chl_prev = NULL;
			ch->ch_list = chlnew;
			chlnew = NULL;
		}
	} else {
		if (chlnew != NULL) {
			kmem_zone_free(xfs_chashlist_zone, chlnew);
		}
	}

	mutex_spinunlock(&ch->ch_lock, s);
	/*
	 * Link ip to its mount and thread it on the mount's inode list.
	 */
	XFS_MOUNT_ILOCK(mp);
	if ((iq = mp->m_inodes)) {
		ASSERT(iq->i_mprev->i_mnext == iq);
		ip->i_mprev = iq->i_mprev;
		iq->i_mprev->i_mnext = ip;
		iq->i_mprev = ip;
		ip->i_mnext = iq;
	} else {
		ip->i_mnext = ip;
		ip->i_mprev = ip;
	}
	mp->m_inodes = ip;

	XFS_MOUNT_IUNLOCK(mp);

 return_ip:
	ASSERT(ip->i_df.if_ext_max ==
	       XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));

	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
	       ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	XVFS_INIT_VNODE(XFS_MTOVFS(mp), vp, XFS_ITOBHV(ip), 1);

	return 0;
}
/*
 * The 'normal' internal xfs_iget; if needed it will
 * 'allocate', or 'get', the vnode.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno)
{
	struct inode	*inode;
	xfs_vnode_t	*vp = NULL;
	int		error;

	XFS_STATS_INC(xs_ig_attempts);

retry:
	if ((inode = VFS_GET_INODE(XFS_MTOVFS(mp), ino, 0))) {
		xfs_inode_t	*ip;

		vp = vn_from_inode(inode);
		if (inode->i_state & I_NEW) {
			vn_initialize(inode);
			error = xfs_iget_core(vp, mp, tp, ino, flags,
					lock_flags, ipp, bno);
			if (error) {
				vn_mark_bad(vp);
				if (inode->i_state & I_NEW)
					unlock_new_inode(inode);
				iput(inode);
			}
		} else {
			/*
			 * If the inode is not fully constructed due to
			 * filehandle mismatches wait for the inode to go
			 * away and try again.
			 *
			 * iget_locked will call __wait_on_freeing_inode
			 * to wait for the inode to go away.
			 */
			if (is_bad_inode(inode) ||
			    ((ip = xfs_vtoi(vp)) == NULL)) {
				iput(inode);
				delay(1);
				goto retry;
			}

			if (lock_flags != 0)
				xfs_ilock(ip, lock_flags);
			XFS_STATS_INC(xs_ig_found);
			*ipp = ip;
			error = 0;
		}
	} else
		error = ENOMEM;	/* If we got no inode we are out of memory */

	return error;
}
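
/*
 * Illustrative sketch (not part of the original source): a typical
 * caller pairs xfs_iget() with xfs_iput(), passing bno == 0 when the
 * inode's buffer location is unknown.  Error handling is elided:
 *
 *	xfs_inode_t	*ip;
 *
 *	if (!xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0)) {
 *		... examine ip->i_d ...
 *		xfs_iput(ip, XFS_ILOCK_SHARED);
 *	}
 */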
/*
 * Do the setup for the various locks within the incore inode.
 */
void
xfs_inode_lock_init(
	xfs_inode_t	*ip,
	xfs_vnode_t	*vp)
{
	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", (long)vp->v_number);
	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", vp->v_number);
	init_waitqueue_head(&ip->i_ipin_wait);
	atomic_set(&ip->i_pincount, 0);
	init_sema(&ip->i_flock, 1, "xfsfino", vp->v_number);
}
/*
 * Look for the inode corresponding to the given ino in the hash table.
 * If it is there and its i_transp pointer matches tp, return it.
 * Otherwise, return NULL.
 */
xfs_inode_t *
xfs_inode_incore(xfs_mount_t	*mp,
		 xfs_ino_t	ino,
		 xfs_trans_t	*tp)
{
	xfs_ihash_t	*ih;
	xfs_inode_t	*ip;
	ulong		version;

	ih = XFS_IHASH(mp, ino);
	read_lock(&ih->ih_lock);
	for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) {
		if (ip->i_ino == ino) {
			/*
			 * If we find it and tp matches, return it.
			 * Also move it to the front of the hash list
			 * if we find it and it is not already there.
			 * Otherwise break from the loop and return
			 * NULL.
			 */
			if (ip->i_transp == tp) {
				version = ih->ih_version;
				read_unlock(&ih->ih_lock);
				xfs_ihash_promote(ih, ip, version);
				return (ip);
			}
			break;
		}
	}
	read_unlock(&ih->ih_lock);
	return (NULL);
}
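
/*
 * Illustrative sketch (not part of the original source): transaction
 * code can ask whether an inode is already joined to the transaction
 * before paying for a full xfs_iget() lookup:
 *
 *	xfs_inode_t *ip = xfs_inode_incore(mp, ino, tp);
 *
 *	if (ip == NULL) {
 *		... not attached to this transaction; do the full lookup ...
 *	}
 */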
/*
 * Decrement reference count of an inode structure and unlock it.
 *
 * ip -- the inode being released
 * lock_flags -- this parameter indicates the inode's locks to be
 *       released.  See the comment on xfs_iunlock() for a list
 *       of valid values.
 */
void
xfs_iput(xfs_inode_t	*ip,
	 uint		lock_flags)
{
	xfs_vnode_t	*vp = XFS_ITOV(ip);

	vn_trace_entry(vp, "xfs_iput", (inst_t *)__return_address);
	xfs_iunlock(ip, lock_flags);
	VN_RELE(vp);
}
/* in xfs_freebsd_iget.c
 * Special iput for brand-new inodes that are still locked
 */
void
xfs_iput_new(xfs_inode_t	*ip,
	     uint		lock_flags)
{
	xfs_vnode_t	*vp = XFS_ITOV(ip);
	struct inode	*inode = vn_to_inode(vp);

	vn_trace_entry(vp, "xfs_iput_new", (inst_t *)__return_address);

	if (ip->i_d.di_mode == 0) {
		ASSERT(!(ip->i_flags & XFS_IRECLAIMABLE));
		vn_mark_bad(vp);
	}
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
	VN_RELE(vp);
}
/*
 * This routine embodies the part of the reclaim code that pulls
 * the inode from the inode hash table and the mount structure's
 * inode list.
 * This should only be called from xfs_reclaim().
 */
void
xfs_ireclaim(xfs_inode_t *ip)
{
	xfs_vnode_t	*vp;

	/*
	 * Remove from old hash list and mount list.
	 */
	XFS_STATS_INC(xs_ig_reclaims);

	xfs_iextract(ip);

	/*
	 * Here we do a spurious inode lock in order to coordinate with
	 * xfs_sync().  This is because xfs_sync() references the inodes
	 * in the mount list without taking references on the corresponding
	 * vnodes.  We make that OK here by ensuring that we wait until
	 * the inode is unlocked in xfs_sync() before we go ahead and
	 * free it.  We get both the regular lock and the io lock because
	 * the xfs_sync() code may need to drop the regular one but will
	 * still hold the io lock.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	/*
	 * Release dquots (and their references) if any. An inode may escape
	 * xfs_inactive and get here via vn_alloc->vn_reclaim path.
	 */
	XFS_QM_DQDETACH(ip->i_mount, ip);

	/*
	 * Pull our behavior descriptor from the vnode chain.
	 */
	vp = XFS_ITOV_NULL(ip);
	if (vp) {
		vn_bhv_remove(VN_BHV_HEAD(vp), XFS_ITOBHV(ip));
	}

	/*
	 * Free all memory associated with the inode.
	 */
	xfs_idestroy(ip);
}
/*
 * This routine removes an about-to-be-destroyed inode from
 * all of the lists in which it is located with the exception
 * of the behavior chain.
 */
void
xfs_iextract(
	xfs_inode_t	*ip)
{
	xfs_ihash_t	*ih;
	xfs_inode_t	*iq;
	xfs_mount_t	*mp;
	xfs_chash_t	*ch;
	xfs_chashlist_t	*chl, *chm;
	SPLDECL(s);

	ih = ip->i_hash;
	write_lock(&ih->ih_lock);
	if ((iq = ip->i_next)) {
		iq->i_prevp = ip->i_prevp;
	}
	*ip->i_prevp = iq;
	ih->ih_version++;
	write_unlock(&ih->ih_lock);

	/*
	 * Remove from cluster hash list
	 *   1) delete the chashlist if this is the last inode on the chashlist
	 *   2) unchain from list of inodes
	 *   3) point chashlist->chl_ip to 'chl_next' if to this inode.
	 */
	mp = ip->i_mount;
	ch = XFS_CHASH(mp, ip->i_blkno);
	s = mutex_spinlock(&ch->ch_lock);

	if (ip->i_cnext == ip) {
		/* Last inode on chashlist */
		ASSERT(ip->i_cnext == ip && ip->i_cprev == ip);
		ASSERT(ip->i_chash != NULL);
		chm = NULL;
		chl = ip->i_chash;
		if (chl->chl_prev)
			chl->chl_prev->chl_next = chl->chl_next;
		else
			ch->ch_list = chl->chl_next;
		if (chl->chl_next)
			chl->chl_next->chl_prev = chl->chl_prev;
		kmem_zone_free(xfs_chashlist_zone, chl);
	} else {
		/* delete one inode from a non-empty list */
		iq = ip->i_cnext;
		iq->i_cprev = ip->i_cprev;
		ip->i_cprev->i_cnext = iq;
		if (ip->i_chash->chl_ip == ip) {
			ip->i_chash->chl_ip = iq;
		}
		ip->i_chash = __return_address;
		ip->i_cprev = __return_address;
		ip->i_cnext = __return_address;
	}
	mutex_spinunlock(&ch->ch_lock, s);

	/*
	 * Remove from mount's inode list.
	 */
	XFS_MOUNT_ILOCK(mp);
	ASSERT((ip->i_mnext != NULL) && (ip->i_mprev != NULL));
	iq = ip->i_mnext;
	iq->i_mprev = ip->i_mprev;
	ip->i_mprev->i_mnext = iq;

	/*
	 * Fix up the head pointer if it points to the inode being deleted.
	 */
	if (mp->m_inodes == ip) {
		if (ip == iq) {
			mp->m_inodes = NULL;
		} else {
			mp->m_inodes = iq;
		}
	}

	/* Deal with the deleted inodes list */
	list_del_init(&ip->i_reclaim);

	mp->m_ireclaims++;
	XFS_MOUNT_IUNLOCK(mp);
}
/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}

/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}
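
/*
 * Illustrative sketch (not part of the original source): callers that
 * only need to read the extent list pair the two routines, handing
 * back whatever mode was actually granted:
 *
 *	uint lock_mode = xfs_ilock_map_shared(ip);
 *
 *	... walk the in-core extent list in ip->i_df ...
 *
 *	xfs_iunlock_map_shared(ip, lock_mode);
 */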
/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(xfs_inode_t	*ip,
	  uint		lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~XFS_LOCK_MASK) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		mrupdate(&ip->i_iolock);
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		mraccess(&ip->i_iolock);
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		mrupdate(&ip->i_lock);
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		mraccess(&ip->i_lock);
	}
	xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
}
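
/*
 * Illustrative sketch (not part of the original source): taking both
 * locks in one call respects the IO-lock-before-inode-lock ordering
 * rule automatically, and the same flags are later handed back to
 * xfs_iunlock():
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	... modify the inode ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */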
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       locked.  See the comment for xfs_ilock() for a list
 *       of valid values.
 */
int
xfs_ilock_nowait(xfs_inode_t	*ip,
		 uint		lock_flags)
{
	int	iolocked;
	int	ilocked;

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~XFS_LOCK_MASK) == 0);

	iolocked = 0;
	if (lock_flags & XFS_IOLOCK_EXCL) {
		iolocked = mrtryupdate(&ip->i_iolock);
		if (!iolocked)
			return 0;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		iolocked = mrtryaccess(&ip->i_iolock);
		if (!iolocked)
			return 0;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		ilocked = mrtryupdate(&ip->i_lock);
		if (!ilocked) {
			if (iolocked)
				mrunlock(&ip->i_iolock);
			return 0;
		}
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		ilocked = mrtryaccess(&ip->i_lock);
		if (!ilocked) {
			if (iolocked)
				mrunlock(&ip->i_iolock);
			return 0;
		}
	}
	xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address);
	return 1;
}
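
/*
 * Illustrative sketch (not part of the original source): lock-ordering
 * constraints sometimes force a caller to try for the lock and back
 * off rather than block, e.g.:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
 *		... drop conflicting resources, then take the blocking path ...
 *		xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	}
 */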
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       unlocked.  See the comment for xfs_ilock() for a list
 *       of valid values for this parameter.
 */
void
xfs_iunlock(xfs_inode_t	*ip,
	    uint	lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) {
		ASSERT(!(lock_flags & XFS_IOLOCK_SHARED) ||
		       (ismrlocked(&ip->i_iolock, MR_ACCESS)));
		ASSERT(!(lock_flags & XFS_IOLOCK_EXCL) ||
		       (ismrlocked(&ip->i_iolock, MR_UPDATE)));
		mrunlock(&ip->i_iolock);
	}

	if (lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) {
		ASSERT(!(lock_flags & XFS_ILOCK_SHARED) ||
		       (ismrlocked(&ip->i_lock, MR_ACCESS)));
		ASSERT(!(lock_flags & XFS_ILOCK_EXCL) ||
		       (ismrlocked(&ip->i_lock, MR_UPDATE)));
		mrunlock(&ip->i_lock);

		/*
		 * Let the AIL know that this item has been unlocked in case
		 * it is in the AIL and anyone is waiting on it.  Don't do
		 * this if the caller has asked us not to.
		 */
		if (!(lock_flags & XFS_IUNLOCK_NONOTIFY) &&
		     ip->i_itemp != NULL) {
			xfs_trans_unlocked_item(ip->i_mount,
						(xfs_log_item_t*)(ip->i_itemp));
		}
	}
	xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
}
/*
 * give up write locks.  the i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(xfs_inode_t	*ip,
		 uint		lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL) {
		ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
		mrdemote(&ip->i_lock);
	}
	if (lock_flags & XFS_IOLOCK_EXCL) {
		ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
		mrdemote(&ip->i_iolock);
	}
}
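
/*
 * Illustrative sketch (not part of the original source): a writer can
 * set state up under the exclusive lock and then demote to shared so
 * that readers may proceed while it finishes:
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	... exclusive-phase setup ...
 *	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 *	... shared-phase work ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */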
/*
 * The following three routines simply manage the i_flock
 * semaphore embedded in the inode.  This semaphore synchronizes
 * processes attempting to flush the in-core inode back to disk.
 */
void
xfs_iflock(xfs_inode_t *ip)
{
	psema(&(ip->i_flock), PINOD|PLTWAIT);
}

int
xfs_iflock_nowait(xfs_inode_t *ip)
{
	return (cpsema(&(ip->i_flock)));
}

void
xfs_ifunlock(xfs_inode_t *ip)
{
	ASSERT(valusema(&(ip->i_flock)) <= 0);
	vsema(&(ip->i_flock));
}
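
/*
 * Illustrative sketch (not part of the original source): flushers
 * typically take the inode lock shared, then try for the flush lock;
 * the flush lock is released by xfs_ifunlock() once the inode has
 * been written back:
 *
 *	xfs_ilock(ip, XFS_ILOCK_SHARED);
 *	if (xfs_iflock_nowait(ip)) {
 *		... write the inode back; completion calls xfs_ifunlock(ip) ...
 *	}
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */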