/*-
 * SPDX-License-Identifier: (BSD-2-Clause-FreeBSD AND BSD-3-Clause)
 *
 * Copyright (c) 2002, 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)ufs_readwrite.c	8.11 (Berkeley) 5/8/95
 * from: $FreeBSD: .../ufs/ufs_readwrite.c,v 1.96 2002/08/12 09:22:11 phk ...
 *	@(#)ffs_vnops.c	8.15 (Berkeley) 5/14/95
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_directio.h"
#include "opt_ffs.h"
#include "opt_ufs.h"
#include <sys/param.h>
#include <sys/bio.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/extattr.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/ufs/dir.h>
#ifdef UFS_DIRHASH
#include <ufs/ufs/dirhash.h>
#endif

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
#define	ALIGNED_TO(ptr, s)	\
	(((uintptr_t)(ptr) & (_Alignof(s) - 1)) == 0)
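
/*
 * Example (added commentary, not in the original source): ALIGNED_TO()
 * is non-zero exactly when a pointer satisfies the alignment of the
 * given type, e.g.
 *
 *	char buf[64] __aligned(8);
 *
 *	KASSERT(ALIGNED_TO(buf, struct extattr), ("unaligned"));
 *
 * It is used below in KASSERTs before casting raw extended-attribute
 * buffers to struct extattr.
 */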

#ifdef DIRECTIO
extern int	ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);
#endif
static vop_fdatasync_t	ffs_fdatasync;
static vop_fsync_t	ffs_fsync;
static vop_getpages_t	ffs_getpages;
static vop_getpages_async_t	ffs_getpages_async;
static vop_lock1_t	ffs_lock;
#ifdef INVARIANTS
static vop_unlock_t	ffs_unlock_debug;
#endif
static vop_read_t	ffs_read;
static vop_write_t	ffs_write;
static int	ffs_extread(struct vnode *vp, struct uio *uio, int ioflag);
static int	ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag,
		    struct ucred *cred);
static vop_strategy_t	ffsext_strategy;
static vop_closeextattr_t	ffs_closeextattr;
static vop_deleteextattr_t	ffs_deleteextattr;
static vop_getextattr_t	ffs_getextattr;
static vop_listextattr_t	ffs_listextattr;
static vop_openextattr_t	ffs_openextattr;
static vop_setextattr_t	ffs_setextattr;
static vop_vptofh_t	ffs_vptofh;
static vop_vput_pair_t	ffs_vput_pair;

/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops1 = {
	.vop_default =		&ufs_vnodeops,
	.vop_fsync =		ffs_fsync,
	.vop_fdatasync =	ffs_fdatasync,
	.vop_getpages =		ffs_getpages,
	.vop_getpages_async =	ffs_getpages_async,
	.vop_lock1 =		ffs_lock,
#ifdef INVARIANTS
	.vop_unlock =		ffs_unlock_debug,
#endif
	.vop_read =		ffs_read,
	.vop_reallocblks =	ffs_reallocblks,
	.vop_write =		ffs_write,
	.vop_vptofh =		ffs_vptofh,
	.vop_vput_pair =	ffs_vput_pair,
};
VFS_VOP_VECTOR_REGISTER(ffs_vnodeops1);

struct vop_vector ffs_fifoops1 = {
	.vop_default =		&ufs_fifoops,
	.vop_fsync =		ffs_fsync,
	.vop_fdatasync =	ffs_fdatasync,
	.vop_lock1 =		ffs_lock,
#ifdef INVARIANTS
	.vop_unlock =		ffs_unlock_debug,
#endif
	.vop_vptofh =		ffs_vptofh,
};
VFS_VOP_VECTOR_REGISTER(ffs_fifoops1);

/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops2 = {
	.vop_default =		&ufs_vnodeops,
	.vop_fsync =		ffs_fsync,
	.vop_fdatasync =	ffs_fdatasync,
	.vop_getpages =		ffs_getpages,
	.vop_getpages_async =	ffs_getpages_async,
	.vop_lock1 =		ffs_lock,
#ifdef INVARIANTS
	.vop_unlock =		ffs_unlock_debug,
#endif
	.vop_read =		ffs_read,
	.vop_reallocblks =	ffs_reallocblks,
	.vop_write =		ffs_write,
	.vop_closeextattr =	ffs_closeextattr,
	.vop_deleteextattr =	ffs_deleteextattr,
	.vop_getextattr =	ffs_getextattr,
	.vop_listextattr =	ffs_listextattr,
	.vop_openextattr =	ffs_openextattr,
	.vop_setextattr =	ffs_setextattr,
	.vop_vptofh =		ffs_vptofh,
	.vop_vput_pair =	ffs_vput_pair,
};
VFS_VOP_VECTOR_REGISTER(ffs_vnodeops2);

struct vop_vector ffs_fifoops2 = {
	.vop_default =		&ufs_fifoops,
	.vop_fsync =		ffs_fsync,
	.vop_fdatasync =	ffs_fdatasync,
	.vop_lock1 =		ffs_lock,
#ifdef INVARIANTS
	.vop_unlock =		ffs_unlock_debug,
#endif
	.vop_reallocblks =	ffs_reallocblks,
	.vop_strategy =		ffsext_strategy,
	.vop_closeextattr =	ffs_closeextattr,
	.vop_deleteextattr =	ffs_deleteextattr,
	.vop_getextattr =	ffs_getextattr,
	.vop_listextattr =	ffs_listextattr,
	.vop_openextattr =	ffs_openextattr,
	.vop_setextattr =	ffs_setextattr,
	.vop_vptofh =		ffs_vptofh,
};
VFS_VOP_VECTOR_REGISTER(ffs_fifoops2);
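
/*
 * Note (added commentary, not in the original source): the "1" vectors
 * are expected to serve UFS1 vnodes and the "2" vectors UFS2 vnodes,
 * since only UFS2 inodes carry the extended attribute area used by the
 * extattr operations and ffsext_strategy above.  The selection is
 * assumed to happen when the vnode is instantiated in ffs_vfsops.c,
 * roughly:
 *
 *	vp->v_op = fs->fs_magic == FS_UFS2_MAGIC ?
 *	    &ffs_vnodeops2 : &ffs_vnodeops1;
 */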

/*
 * Synch an open file.
 */
/* ARGSUSED */
static int
ffs_fsync(struct vop_fsync_args *ap)
{
	struct vnode *vp;
	struct bufobj *bo;
	int error;

	vp = ap->a_vp;
	bo = &vp->v_bufobj;
retry:
	error = ffs_syncvnode(vp, ap->a_waitfor, 0);
	if (error)
		return (error);
	if (ap->a_waitfor == MNT_WAIT && DOINGSOFTDEP(vp)) {
		error = softdep_fsync(vp);
		if (error)
			return (error);

		/*
		 * The softdep_fsync() function may drop vp lock,
		 * allowing for dirty buffers to reappear on the
		 * bo_dirty list. Recheck and resync as needed.
		 */
		BO_LOCK(bo);
		if ((vp->v_type == VREG || vp->v_type == VDIR) &&
		    (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)) {
			BO_UNLOCK(bo);
			goto retry;
		}
		BO_UNLOCK(bo);
	}
	if (ffs_fsfail_cleanup(VFSTOUFS(vp->v_mount), 0))
		return (ENXIO);
	return (error);
}

int
ffs_syncvnode(struct vnode *vp, int waitfor, int flags)
{
	struct inode *ip;
	struct bufobj *bo;
	struct ufsmount *ump;
	struct buf *bp, *nbp;
	ufs_lbn_t lbn;
	int error, passes;
	bool still_dirty, unlocked, wait;

	ip = VTOI(vp);
	bo = &vp->v_bufobj;
	ump = VFSTOUFS(vp->v_mount);

	/*
	 * When doing MNT_WAIT we must first flush all dependencies
	 * on the inode.
	 */
	if (DOINGSOFTDEP(vp) && waitfor == MNT_WAIT &&
	    (error = softdep_sync_metadata(vp)) != 0) {
		if (ffs_fsfail_cleanup(ump, error))
			error = 0;
		return (error);
	}

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
	error = 0;
	passes = 0;
	wait = false;	/* Always do an async pass first. */
	unlocked = false;
	lbn = lblkno(ITOFS(ip), (ip->i_size + ITOFS(ip)->fs_bsize - 1));
	BO_LOCK(bo);
loop:
	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
		bp->b_vflags &= ~BV_SCANNED;
	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
		/*
		 * Reasons to skip this buffer: it has already been considered
		 * on this pass, the buffer has dependencies that will cause
		 * it to be redirtied and it has not already been deferred,
		 * or it is already being written.
		 */
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		/*
		 * Flush indirects in order, if requested.
		 *
		 * Note that if only datasync is requested, we can
		 * skip indirect blocks when softupdates are not
		 * active.  Otherwise we must flush them with data,
		 * since dependencies prevent data block writes.
		 */
		if (waitfor == MNT_WAIT && bp->b_lblkno <= -UFS_NDADDR &&
		    (lbn_level(bp->b_lblkno) >= passes ||
		    ((flags & DATA_ONLY) != 0 && !DOINGSOFTDEP(vp))))
			continue;
		if (bp->b_lblkno > lbn)
			panic("ffs_syncvnode: syncing truncated data.");
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) {
			BO_UNLOCK(bo);
		} else if (wait) {
			if (BUF_LOCK(bp,
			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
			    BO_LOCKPTR(bo)) != 0) {
				bp->b_vflags &= ~BV_SCANNED;
				goto next;
			}
		} else
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("ffs_fsync: not dirty");
		/*
		 * Check for dependencies and potentially complete them.
		 */
		if (!LIST_EMPTY(&bp->b_dep) &&
		    (error = softdep_sync_buf(vp, bp,
		    wait ? MNT_WAIT : MNT_NOWAIT)) != 0) {
			/*
			 * Lock order conflict, buffer was already unlocked,
			 * and vnode possibly unlocked.
			 */
			if (error == ERELOOKUP) {
				if (vp->v_data == NULL)
					return (EBADF);
				unlocked = true;
				if (DOINGSOFTDEP(vp) && waitfor == MNT_WAIT &&
				    (error = softdep_sync_metadata(vp)) != 0) {
					if (ffs_fsfail_cleanup(ump, error))
						error = 0;
					return (unlocked && error == 0 ?
					    ERELOOKUP : error);
				}
				/* Re-evaluate inode size */
				lbn = lblkno(ITOFS(ip), (ip->i_size +
				    ITOFS(ip)->fs_bsize - 1));
				goto next;
			}
			/* I/O error. */
			if (error != EBUSY) {
				BUF_UNLOCK(bp);
				return (error);
			}
			/* If we deferred once, don't defer again. */
			if ((bp->b_flags & B_DEFERRED) == 0) {
				bp->b_flags |= B_DEFERRED;
				BUF_UNLOCK(bp);
				goto next;
			}
		}
		if (wait) {
			bremfree(bp);
			error = bwrite(bp);
			if (ffs_fsfail_cleanup(ump, error))
				error = 0;
			if (error != 0)
				return (error);
		} else if ((bp->b_flags & B_CLUSTEROK)) {
			(void) vfs_bio_awrite(bp);
		} else {
			bremfree(bp);
			(void) bawrite(bp);
		}
next:
		/*
		 * Since we may have slept during the I/O, we need
		 * to start from a known point.
		 */
		BO_LOCK(bo);
		nbp = TAILQ_FIRST(&bo->bo_dirty.bv_hd);
	}
	if (waitfor != MNT_WAIT) {
		BO_UNLOCK(bo);
		if ((flags & NO_INO_UPDT) != 0)
			return (unlocked ? ERELOOKUP : 0);
		error = ffs_update(vp, 0);
		if (error == 0 && unlocked)
			error = ERELOOKUP;
		return (error);
	}
	/* Drain IO to see if we're done. */
	bufobj_wwait(bo, 0, 0);
	/*
	 * Block devices associated with filesystems may have new I/O
	 * requests posted for them even if the vnode is locked, so no
	 * amount of trying will get them clean.  We make several passes
	 * as a best effort.
	 *
	 * Regular files may need multiple passes to flush all dependency
	 * work as it is possible that we must write once per indirect
	 * level, once for the leaf, and once for the inode and each of
	 * these will be done with one sync and one async pass.
	 */
	if (bo->bo_dirty.bv_cnt > 0) {
		if ((flags & DATA_ONLY) == 0) {
			still_dirty = true;
		} else {
			/*
			 * For data-only sync, dirty indirect buffers
			 * are ignored.
			 */
			still_dirty = false;
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
				if (bp->b_lblkno > -UFS_NDADDR) {
					still_dirty = true;
					break;
				}
			}
		}

		if (still_dirty) {
			/* Write the inode after sync passes to flush deps. */
			if (wait && DOINGSOFTDEP(vp) &&
			    (flags & NO_INO_UPDT) == 0) {
				BO_UNLOCK(bo);
				ffs_update(vp, 1);
				BO_LOCK(bo);
			}
			/* switch between sync/async. */
			wait = !wait;
			if (wait || ++passes < UFS_NIADDR + 2)
				goto loop;
		}
	}
	BO_UNLOCK(bo);
	if ((flags & DATA_ONLY) == 0) {
		if ((flags & NO_INO_UPDT) == 0)
			error = ffs_update(vp, 1);
		if (DOINGSUJ(vp))
			softdep_journal_fsync(VTOI(vp));
	} else if ((ip->i_flag & (IN_SIZEMOD | IN_IBLKDATA)) != 0) {
		error = ffs_update(vp, 1);
	}
	if (error == 0 && unlocked)
		error = ERELOOKUP;
	if (error == 0)
		ip->i_flag &= ~IN_NEEDSYNC;
	return (error);
}

static int
ffs_fdatasync(struct vop_fdatasync_args *ap)
{

	return (ffs_syncvnode(ap->a_vp, MNT_WAIT, DATA_ONLY));
}
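
/*
 * Usage sketch (added commentary, not in the original source):
 * fdatasync(2) reaches this function through VOP_FDATASYNC(9) with the
 * vnode locked, e.g. from a filesystem-independent caller:
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = VOP_FDATASYNC(vp, td);
 *	VOP_UNLOCK(vp);
 *
 * DATA_ONLY lets ffs_syncvnode() skip flushing indirect blocks and the
 * inode when only the file data must be made durable.
 */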

static int
ffs_lock(
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		char *file;
		int line;
	} */ *ap)
{
#if !defined(NO_FFS_SNAPSHOT) || defined(DIAGNOSTIC)
	struct vnode *vp = ap->a_vp;
#endif	/* !NO_FFS_SNAPSHOT || DIAGNOSTIC */
#ifdef DIAGNOSTIC
	struct inode *ip;
#endif	/* DIAGNOSTIC */
	int result;
#ifndef NO_FFS_SNAPSHOT
	int flags;
	struct lock *lkp;

	/*
	 * Adaptive spinning mixed with SU leads to trouble. use a giant hammer
	 * and only use it when LK_NODDLKTREAT is set. Currently this means it
	 * is only used during path lookup.
	 */
	if ((ap->a_flags & LK_NODDLKTREAT) != 0)
		ap->a_flags |= LK_ADAPTIVE;
	switch (ap->a_flags & LK_TYPE_MASK) {
	case LK_SHARED:
	case LK_UPGRADE:
	case LK_EXCLUSIVE:
		flags = ap->a_flags;
		for (;;) {
#ifdef DEBUG_VFS_LOCKS
			VNPASS(vp->v_holdcnt != 0, vp);
#endif	/* DEBUG_VFS_LOCKS */
			lkp = vp->v_vnlock;
			result = lockmgr_lock_flags(lkp, flags,
			    &VI_MTX(vp)->lock_object, ap->a_file, ap->a_line);
			if (lkp == vp->v_vnlock || result != 0)
				break;
			/*
			 * Apparent success, except that the vnode
			 * mutated between snapshot file vnode and
			 * regular file vnode while this process
			 * slept.  The lock currently held is not the
			 * right lock.  Release it, and try to get the
			 * new lock.
			 */
			lockmgr_unlock(lkp);
			if ((flags & (LK_INTERLOCK | LK_NOWAIT)) ==
			    (LK_INTERLOCK | LK_NOWAIT))
				return (EBUSY);
			if ((flags & LK_TYPE_MASK) == LK_UPGRADE)
				flags = (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE;
			flags &= ~LK_INTERLOCK;
		}
#ifdef DIAGNOSTIC
		switch (ap->a_flags & LK_TYPE_MASK) {
		case LK_UPGRADE:
		case LK_EXCLUSIVE:
			if (result == 0 && vp->v_vnlock->lk_recurse == 0) {
				ip = VTOI(vp);
				if (ip != NULL)
					ip->i_lock_gen++;
			}
		}
#endif	/* DIAGNOSTIC */
		break;
	default:
#ifdef DIAGNOSTIC
		if ((ap->a_flags & LK_TYPE_MASK) == LK_DOWNGRADE) {
			ip = VTOI(vp);
			if (ip != NULL)
				ufs_unlock_tracker(ip);
		}
#endif	/* DIAGNOSTIC */
		result = VOP_LOCK1_APV(&ufs_vnodeops, ap);
		break;
	}
#else	/* NO_FFS_SNAPSHOT */
	/*
	 * See above for an explanation.
	 */
	if ((ap->a_flags & LK_NODDLKTREAT) != 0)
		ap->a_flags |= LK_ADAPTIVE;
#ifdef DIAGNOSTIC
	if ((ap->a_flags & LK_TYPE_MASK) == LK_DOWNGRADE) {
		ip = VTOI(vp);
		if (ip != NULL)
			ufs_unlock_tracker(ip);
	}
#endif	/* DIAGNOSTIC */
	result = VOP_LOCK1_APV(&ufs_vnodeops, ap);
#endif	/* NO_FFS_SNAPSHOT */
#ifdef DIAGNOSTIC
	switch (ap->a_flags & LK_TYPE_MASK) {
	case LK_UPGRADE:
	case LK_EXCLUSIVE:
		if (result == 0 && vp->v_vnlock->lk_recurse == 0) {
			ip = VTOI(vp);
			if (ip != NULL)
				ip->i_lock_gen++;
		}
	}
#endif	/* DIAGNOSTIC */
	return (result);
}

#ifdef INVARIANTS
static int
ffs_unlock_debug(struct vop_unlock_args *ap)
{
	struct vnode *vp;
	struct inode *ip;

	vp = ap->a_vp;
	ip = VTOI(vp);
	if (ip->i_flag & UFS_INODE_FLAG_LAZY_MASK_ASSERTABLE) {
		if ((vp->v_mflag & VMP_LAZYLIST) == 0) {
			VI_LOCK(vp);
			VNASSERT((vp->v_mflag & VMP_LAZYLIST), vp,
			    ("%s: modified vnode (%x) not on lazy list",
			    __func__, ip->i_flag));
			VI_UNLOCK(vp);
		}
	}
	KASSERT(vp->v_type != VDIR || vp->v_vnlock->lk_recurse != 0 ||
	    (ip->i_flag & IN_ENDOFF) == 0,
	    ("ufs dir vp %p ip %p flags %#x", vp, ip, ip->i_flag));
#ifdef DIAGNOSTIC
	if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE && ip != NULL &&
	    vp->v_vnlock->lk_recurse == 0)
		ufs_unlock_tracker(ip);
#endif
	return (VOP_UNLOCK_APV(&ufs_vnodeops, ap));
}
#endif

static int
ffs_read_hole(struct uio *uio, long xfersize, long *size)
{
	ssize_t saved_resid, tlen;
	int error;

	while (xfersize > 0) {
		tlen = min(xfersize, ZERO_REGION_SIZE);
		saved_resid = uio->uio_resid;
		error = vn_io_fault_uiomove(__DECONST(void *, zero_region),
		    tlen, uio);
		if (error != 0)
			return (error);
		tlen = saved_resid - uio->uio_resid;
		xfersize -= tlen;
		*size -= tlen;
	}
	return (0);
}
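
/*
 * Worked example (added commentary, not in the original source): with a
 * hypothetical ZERO_REGION_SIZE of 64K, a 150K hole is served by three
 * uiomove passes of 64K, 64K and 22K out of the shared pre-zeroed
 * region, so no buffer has to be allocated or cleared for unallocated
 * blocks.
 */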

/*
 * Vnode op for reading.
 */
static int
ffs_read(
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap)
{
	struct vnode *vp;
	struct inode *ip;
	struct uio *uio;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	ssize_t orig_resid;
	int bflag, error, ioflag, seqcount;

	vp = ap->a_vp;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
	if (ap->a_ioflag & IO_EXT)
#ifdef notyet
		return (ffs_extread(vp, uio, ioflag));
#else
		panic("ffs_read+IO_EXT");
#endif
#ifdef DIRECTIO
	if ((ioflag & IO_DIRECT) != 0) {
		int workdone;

		error = ffs_rawread(vp, uio, &workdone);
		if (error != 0 || workdone != 0)
			return (error);
	}
#endif

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	ip = VTOI(vp);

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_READ)
		panic("ffs_read: mode");

	if (vp->v_type == VLNK) {
		if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
			panic("ffs_read: short symlink");
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("ffs_read: type %d", vp->v_type);
#endif
	orig_resid = uio->uio_resid;
	KASSERT(orig_resid >= 0, ("ffs_read: uio->uio_resid < 0"));
	if (orig_resid == 0)
		return (0);
	KASSERT(uio->uio_offset >= 0, ("ffs_read: uio->uio_offset < 0"));
	fs = ITOFS(ip);
	if (uio->uio_offset < ip->i_size &&
	    uio->uio_offset >= fs->fs_maxfilesize)
		return (EOVERFLOW);

	bflag = GB_UNMAPPED | (uio->uio_segflg == UIO_NOCOPY ? 0 : GB_NOSPARSE);
	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;

		/*
		 * size of buffer.  The buffer representing the
		 * end of the file is rounded up to the size of
		 * the block type ( fragment or full block,
		 * depending).
		 */
		size = blksize(fs, ip, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		/*
		 * The amount we want to transfer in this iteration is
		 * one FS block less the amount of the data before
		 * our startpoint (duh!)
		 */
		xfersize = fs->fs_bsize - blkoffset;

		/*
		 * But if we actually want less than the block,
		 * or the file doesn't have a whole block more of data,
		 * then use the lesser number.
		 */
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= ip->i_size) {
			/*
			 * Don't do readahead if this is the end of the file.
			 */
			error = bread_gb(vp, lbn, size, NOCRED, bflag, &bp);
		} else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
			/*
			 * Otherwise if we are allowed to cluster,
			 * grab as much as we can.
			 *
			 * XXX  This may not be a win if we are not
			 * doing sequential access.
			 */
			error = cluster_read(vp, ip->i_size, lbn,
			    size, NOCRED, blkoffset + uio->uio_resid,
			    seqcount, bflag, &bp);
		} else if (seqcount > 1) {
			/*
			 * If we are NOT allowed to cluster, then
			 * if we appear to be acting sequentially,
			 * fire off a request for a readahead
			 * as well as a read. Note that the 4th and 5th
			 * arguments point to arrays of the size specified in
			 * the 6th argument.
			 */
			u_int nextsize = blksize(fs, ip, nextlbn);
			error = breadn_flags(vp, lbn, lbn, size, &nextlbn,
			    &nextsize, 1, NOCRED, bflag, NULL, &bp);
		} else {
			/*
			 * Failing all of the above, just read what the
			 * user asked for. Interestingly, the same as
			 * the first option above.
			 */
			error = bread_gb(vp, lbn, size, NOCRED, bflag, &bp);
		}
		if (error == EJUSTRETURN) {
			error = ffs_read_hole(uio, xfersize, &size);
			if (error == 0)
				continue;
		}
		if (error != 0) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}

		if (buf_mapped(bp)) {
			error = vn_io_fault_uiomove((char *)bp->b_data +
			    blkoffset, (int)xfersize, uio);
		} else {
			error = vn_io_fault_pgmove(bp->b_pages, blkoffset,
			    (int)xfersize, uio);
		}
		if (error)
			break;

		vfs_bio_brelse(bp, ioflag);
	}

	/*
	 * This can only happen in the case of an error
	 * because the loop above resets bp to NULL on each iteration
	 * and on normal completion has not set a new value into it.
	 * so it must have come from a 'break' statement
	 */
	if (bp != NULL)
		vfs_bio_brelse(bp, ioflag);

	if ((error == 0 || uio->uio_resid != orig_resid) &&
	    (vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0)
		UFS_INODE_SET_FLAG_SHARED(ip, IN_ACCESS);
	return (error);
}
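
/*
 * Usage sketch (added commentary, not in the original source): in-kernel
 * callers normally reach ffs_read() through VOP_READ(9) or the vn_rdwr(9)
 * convenience wrapper, e.g.
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, len, offset, UIO_SYSSPACE,
 *	    IO_NODELOCKED, cred, NOCRED, &resid, td);
 *
 * with the vnode locked; the upper bits of the ioflag carry the
 * sequential-access heuristic consumed above via IO_SEQSHIFT.
 */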

/*
 * Vnode op for writing.
 */
static int
ffs_write(
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap)
{
	struct vnode *vp;
	struct uio *uio;
	struct inode *ip;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	off_t osize;
	ssize_t resid;
	int seqcount;
	int blkoffset, error, flags, ioflag, size, xfersize;

	vp = ap->a_vp;
	if (DOINGSUJ(vp))
		softdep_prealloc(vp, MNT_WAIT);
	if (vp->v_data == NULL)
		return (EBADF);

	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
	if (ap->a_ioflag & IO_EXT)
#ifdef notyet
		return (ffs_extwrite(vp, uio, ioflag, ap->a_cred));
#else
		panic("ffs_write+IO_EXT");
#endif

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	ip = VTOI(vp);

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_WRITE)
		panic("ffs_write: mode");
#endif

	switch (vp->v_type) {
	case VREG:
		if (ioflag & IO_APPEND)
			uio->uio_offset = ip->i_size;
		if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
			return (EPERM);
		/* FALLTHROUGH */
	case VLNK:
		break;
	case VDIR:
		panic("ffs_write: dir write");
		break;
	default:
		panic("ffs_write: type %p %d (%d,%d)", vp, (int)vp->v_type,
		    (int)uio->uio_offset,
		    (int)uio->uio_resid);
	}

	KASSERT(uio->uio_resid >= 0, ("ffs_write: uio->uio_resid < 0"));
	KASSERT(uio->uio_offset >= 0, ("ffs_write: uio->uio_offset < 0"));
	fs = ITOFS(ip);
	if ((uoff_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize)
		return (EFBIG);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (vn_rlimit_fsize(vp, uio, uio->uio_td))
		return (EFBIG);

	resid = uio->uio_resid;
	osize = ip->i_size;
	if (seqcount > BA_SEQMAX)
		flags = BA_SEQMAX << BA_SEQSHIFT;
	else
		flags = seqcount << BA_SEQSHIFT;
	if (ioflag & IO_SYNC)
		flags |= IO_SYNC;
	flags |= BA_UNMAPPED;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (uio->uio_offset + xfersize > ip->i_size)
			vnode_pager_setsize(vp, uio->uio_offset + xfersize);

		/*
		 * We must perform a read-before-write if the transfer size
		 * does not cover the entire buffer.
		 */
		if (fs->fs_bsize > xfersize)
			flags |= BA_CLRBUF;
		else
			flags &= ~BA_CLRBUF;
/* XXX is uio->uio_offset the right thing here? */
		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
		    ap->a_cred, flags, &bp);
		if (error != 0) {
			vnode_pager_setsize(vp, ip->i_size);
			break;
		}
		if ((ioflag & (IO_SYNC|IO_INVAL)) == (IO_SYNC|IO_INVAL))
			bp->b_flags |= B_NOCACHE;

		if (uio->uio_offset + xfersize > ip->i_size) {
			ip->i_size = uio->uio_offset + xfersize;
			DIP_SET(ip, i_size, ip->i_size);
			UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE);
		}

		size = blksize(fs, ip, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		if (buf_mapped(bp)) {
			error = vn_io_fault_uiomove((char *)bp->b_data +
			    blkoffset, (int)xfersize, uio);
		} else {
			error = vn_io_fault_pgmove(bp->b_pages, blkoffset,
			    (int)xfersize, uio);
		}
		/*
		 * If the buffer is not already filled and we encounter an
		 * error while trying to fill it, we have to clear out any
		 * garbage data from the pages instantiated for the buffer.
		 * If we do not, a failed uiomove() during a write can leave
		 * the prior contents of the pages exposed to a userland mmap.
		 *
		 * Note that we need only clear buffers with a transfer size
		 * equal to the block size because buffers with a shorter
		 * transfer size were cleared above by the call to UFS_BALLOC()
		 * with the BA_CLRBUF flag set.
		 *
		 * If the source region for uiomove identically mmaps the
		 * buffer, uiomove() performed the NOP copy, and the buffer
		 * content remains valid because the page fault handler
		 * validated the pages.
		 */
		if (error != 0 && (bp->b_flags & B_CACHE) == 0 &&
		    fs->fs_bsize == xfersize)
			vfs_bio_clrbuf(bp);

		vfs_bio_set_flags(bp, ioflag);

		/*
		 * If IO_SYNC each buffer is written synchronously.  Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously.  Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
			    buf_dirty_count_severe() ||
			    (ioflag & IO_ASYNC)) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else if (xfersize + blkoffset == fs->fs_bsize) {
			if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
				bp->b_flags |= B_CLUSTEROK;
				cluster_write(vp, &ip->i_clusterw, bp,
				    ip->i_size, seqcount, GB_UNMAPPED);
			} else {
				bawrite(bp);
			}
		} else if (ioflag & IO_DIRECT) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else {
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
		if (error || xfersize == 0)
			break;
		UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
	}
	/*
	 * If we successfully wrote any data, and we are not the superuser
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid &&
	    ap->a_cred) {
		if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID)) {
			vn_seqc_write_begin(vp);
			UFS_INODE_SET_MODE(ip, ip->i_mode & ~(ISUID | ISGID));
			DIP_SET(ip, i_mode, ip->i_mode);
			vn_seqc_write_end(vp);
		}
	}
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)ffs_truncate(vp, osize,
			    IO_NORMAL | (ioflag & IO_SYNC), ap->a_cred);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC)) {
		if (!(ioflag & IO_DATASYNC) ||
		    (ip->i_flag & (IN_SIZEMOD | IN_IBLKDATA)))
			error = ffs_update(vp, 1);
		if (ffs_fsfail_cleanup(VFSTOUFS(vp->v_mount), error))
			error = ENXIO;
	}
	return (error);
}
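
/*
 * Note (added commentary, not in the original source): IO_UNIT gives
 * all-or-nothing semantics on error: the file is truncated back to its
 * size at entry (osize) and the uio is rewound, so a failed write(2)
 * does not leave a partially written tail, e.g.
 *
 *	error = vn_rdwr(UIO_WRITE, vp, buf, len, offset, UIO_SYSSPACE,
 *	    IO_UNIT | IO_NODELOCKED, cred, NOCRED, &resid, td);
 */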

/*
 * Extended attribute area reading.
 */
static int
ffs_extread(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	ssize_t orig_resid;
	int error;

	ip = VTOI(vp);
	fs = ITOFS(ip);
	dp = ip->i_din2;

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_READ || fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_extread: mode");
#endif
	orig_resid = uio->uio_resid;
	KASSERT(orig_resid >= 0, ("ffs_extread: uio->uio_resid < 0"));
	if (orig_resid == 0)
		return (0);
	KASSERT(uio->uio_offset >= 0, ("ffs_extread: uio->uio_offset < 0"));

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = dp->di_extsize - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;

		/*
		 * size of buffer.  The buffer representing the
		 * end of the file is rounded up to the size of
		 * the block type ( fragment or full block,
		 * depending).
		 */
		size = sblksize(fs, dp->di_extsize, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		/*
		 * The amount we want to transfer in this iteration is
		 * one FS block less the amount of the data before
		 * our startpoint (duh!)
		 */
		xfersize = fs->fs_bsize - blkoffset;

		/*
		 * But if we actually want less than the block,
		 * or the file doesn't have a whole block more of data,
		 * then use the lesser number.
		 */
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= dp->di_extsize) {
			/*
			 * Don't do readahead if this is the end of the info.
			 */
			error = bread(vp, -1 - lbn, size, NOCRED, &bp);
		} else {
			/*
			 * If we have a second block, then
			 * fire off a request for a readahead
			 * as well as a read. Note that the 4th and 5th
			 * arguments point to arrays of the size specified in
			 * the 6th argument.
			 */
			u_int nextsize = sblksize(fs, dp->di_extsize, nextlbn);

			nextlbn = -1 - nextlbn;
			error = breadn(vp, -1 - lbn,
			    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
		}
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}

		error = uiomove((char *)bp->b_data + blkoffset,
		    (int)xfersize, uio);
		if (error)
			break;
		vfs_bio_brelse(bp, ioflag);
	}

	/*
	 * This can only happen in the case of an error
	 * because the loop above resets bp to NULL on each iteration
	 * and on normal completion has not set a new value into it.
	 * so it must have come from a 'break' statement
	 */
	if (bp != NULL)
		vfs_bio_brelse(bp, ioflag);
	return (error);
}

/*
 * Extended attribute area writing.
 */
static int
ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *ucred)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	off_t osize;
	ssize_t resid;
	int blkoffset, error, flags, size, xfersize;

	ip = VTOI(vp);
	fs = ITOFS(ip);
	dp = ip->i_din2;

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_WRITE || fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_extwrite: mode");
#endif

	if (ioflag & IO_APPEND)
		uio->uio_offset = dp->di_extsize;
	KASSERT(uio->uio_offset >= 0, ("ffs_extwrite: uio->uio_offset < 0"));
	KASSERT(uio->uio_resid >= 0, ("ffs_extwrite: uio->uio_resid < 0"));
	if ((uoff_t)uio->uio_offset + uio->uio_resid >
	    UFS_NXADDR * fs->fs_bsize)
		return (EFBIG);

	resid = uio->uio_resid;
	osize = dp->di_extsize;
	flags = IO_EXT;
	if (ioflag & IO_SYNC)
		flags |= IO_SYNC;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;

		/*
		 * We must perform a read-before-write if the transfer size
		 * does not cover the entire buffer.
		 */
		if (fs->fs_bsize > xfersize)
			flags |= BA_CLRBUF;
		else
			flags &= ~BA_CLRBUF;
		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
		    ucred, flags, &bp);
		if (error != 0)
			break;
		/*
		 * If the buffer is not valid we have to clear out any
		 * garbage data from the pages instantiated for the buffer.
		 * If we do not, a failed uiomove() during a write can leave
		 * the prior contents of the pages exposed to a userland
		 * mmap().  XXX deal with uiomove() errors a better way.
		 */
		if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
			vfs_bio_clrbuf(bp);

		if (uio->uio_offset + xfersize > dp->di_extsize) {
			dp->di_extsize = uio->uio_offset + xfersize;
			UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE);
		}

		size = sblksize(fs, dp->di_extsize, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		error =
		    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);

		vfs_bio_set_flags(bp, ioflag);

		/*
		 * If IO_SYNC each buffer is written synchronously.  Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously.  Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
			    buf_dirty_count_severe() ||
			    xfersize + blkoffset == fs->fs_bsize ||
			    (ioflag & (IO_ASYNC | IO_DIRECT)))
			bawrite(bp);
		else
			bdwrite(bp);
		if (error || xfersize == 0)
			break;
		UFS_INODE_SET_FLAG(ip, IN_CHANGE);
	}
	/*
	 * If we successfully wrote any data, and we are not the superuser
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid && ucred) {
		if (priv_check_cred(ucred, PRIV_VFS_RETAINSUGID)) {
			vn_seqc_write_begin(vp);
			UFS_INODE_SET_MODE(ip, ip->i_mode & ~(ISUID | ISGID));
			dp->di_mode = ip->i_mode;
			vn_seqc_write_end(vp);
		}
	}
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)ffs_truncate(vp, osize,
			    IO_EXT | (ioflag&IO_SYNC), ucred);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
		error = ffs_update(vp, 1);
	return (error);
}

/*
 * Vnode operation to retrieve a named extended attribute.
 *
 * Locate a particular EA (nspace:name) in the area (ptr:length), and return
 * the length of the EA, and possibly the pointer to the entry and to the data.
 */
static int
ffs_findextattr(u_char *ptr, u_int length, int nspace, const char *name,
    struct extattr **eapp, u_char **eac)
{
	struct extattr *eap, *eaend;
	size_t nlen;

	nlen = strlen(name);
	KASSERT(ALIGNED_TO(ptr, struct extattr), ("unaligned"));
	eap = (struct extattr *)ptr;
	eaend = (struct extattr *)(ptr + length);
	for (; eap < eaend; eap = EXTATTR_NEXT(eap)) {
		KASSERT(EXTATTR_NEXT(eap) <= eaend,
		    ("extattr next %p beyond %p", EXTATTR_NEXT(eap), eaend));
		if (eap->ea_namespace != nspace || eap->ea_namelength != nlen
		    || memcmp(eap->ea_name, name, nlen) != 0)
			continue;
		if (eapp != NULL)
			*eapp = eap;
		if (eac != NULL)
			*eac = EXTATTR_CONTENT(eap);
		return (EXTATTR_CONTENT_SIZE(eap));
	}
	return (-1);
}
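
/*
 * Layout sketch (added commentary, not in the original source): each
 * record in the extended attribute area is laid out roughly as
 *
 *	struct extattr {
 *		uint32_t ea_length;	   total record size, 8-byte aligned
 *		uint8_t  ea_namespace;
 *		uint8_t  ea_contentpadlen; padding after the content
 *		uint8_t  ea_namelength;
 *		char	 ea_name[];	   padded name, then content
 *	};
 *
 * so EXTATTR_NEXT() simply advances by ea_length, and
 * EXTATTR_CONTENT_SIZE() subtracts the header, name padding and
 * ea_contentpadlen from ea_length (see <ufs/ufs/extattr.h>).
 */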

static int
ffs_rdextattr(u_char **p, struct vnode *vp, struct thread *td)
{
	const struct extattr *eap, *eaend, *eapnext;
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct uio luio;
	struct iovec liovec;
	u_int easize;
	int error;
	u_char *eae;

	ip = VTOI(vp);
	fs = ITOFS(ip);
	dp = ip->i_din2;
	easize = dp->di_extsize;
	if ((uoff_t)easize > UFS_NXADDR * fs->fs_bsize)
		return (EFBIG);

	eae = malloc(easize, M_TEMP, M_WAITOK);

	liovec.iov_base = eae;
	liovec.iov_len = easize;
	luio.uio_iov = &liovec;
	luio.uio_iovcnt = 1;
	luio.uio_offset = 0;
	luio.uio_resid = easize;
	luio.uio_segflg = UIO_SYSSPACE;
	luio.uio_rw = UIO_READ;
	luio.uio_td = td;

	error = ffs_extread(vp, &luio, IO_EXT | IO_SYNC);
	if (error) {
		free(eae, M_TEMP);
		return (error);
	}
	/* Validate disk xattrfile contents. */
	for (eap = (void *)eae, eaend = (void *)(eae + easize); eap < eaend;
	    eap = eapnext) {
		eapnext = EXTATTR_NEXT(eap);
		/* Bogusly short entry or bogusly long entry. */
		if (eap->ea_length < sizeof(*eap) || eapnext > eaend) {
			free(eae, M_TEMP);
			return (EINTEGRITY);
		}
	}
	*p = eae;
	return (0);
}

static void
ffs_lock_ea(struct vnode *vp)
{
	struct inode *ip;

	ip = VTOI(vp);
	VI_LOCK(vp);
	while (ip->i_flag & IN_EA_LOCKED) {
		UFS_INODE_SET_FLAG(ip, IN_EA_LOCKWAIT);
		msleep(&ip->i_ea_refs, &vp->v_interlock, PINOD + 2, "ufs_ea",
		    0);
	}
	UFS_INODE_SET_FLAG(ip, IN_EA_LOCKED);
	VI_UNLOCK(vp);
}

static void
ffs_unlock_ea(struct vnode *vp)
{
	struct inode *ip;

	ip = VTOI(vp);
	VI_LOCK(vp);
	if (ip->i_flag & IN_EA_LOCKWAIT)
		wakeup(&ip->i_ea_refs);
	ip->i_flag &= ~(IN_EA_LOCKED | IN_EA_LOCKWAIT);
	VI_UNLOCK(vp);
}
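
/*
 * Usage sketch (added commentary, not in the original source): this pair
 * serializes access to the in-memory EA snapshot, typically as
 *
 *	ffs_lock_ea(vp);
 *	... inspect or replace ip->i_ea_area / ip->i_ea_len ...
 *	ffs_unlock_ea(vp);
 *
 * The reference count i_ea_refs, managed by ffs_open_ea() and
 * ffs_close_ea() below, lets nested opens share one cached copy.
 */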

static int
ffs_open_ea(struct vnode *vp, struct ucred *cred, struct thread *td)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	int error;

	ip = VTOI(vp);

	ffs_lock_ea(vp);
	if (ip->i_ea_area != NULL) {
		ip->i_ea_refs++;
		ffs_unlock_ea(vp);
		return (0);
	}
	dp = ip->i_din2;
	error = ffs_rdextattr(&ip->i_ea_area, vp, td);
	if (error) {
		ffs_unlock_ea(vp);
		return (error);
	}
	ip->i_ea_len = dp->di_extsize;
	ip->i_ea_error = 0;
	ip->i_ea_refs++;
	ffs_unlock_ea(vp);
	return (0);
}

/*
 * Vnode extattr transaction commit/abort
 */
static int
ffs_close_ea(struct vnode *vp, int commit, struct ucred *cred, struct thread *td)
{
	struct inode *ip;
	struct uio luio;
	struct iovec *liovec;
	struct ufs2_dinode *dp;
	size_t ea_len, tlen;
	int error, i, lcnt;

	ip = VTOI(vp);

	ffs_lock_ea(vp);
	if (ip->i_ea_area == NULL) {
		ffs_unlock_ea(vp);
		return (EINVAL);
	}
	dp = ip->i_din2;
	error = ip->i_ea_error;
	if (commit && error == 0) {
		ASSERT_VOP_ELOCKED(vp, "ffs_close_ea commit");
		if (cred == NOCRED)
			cred = vp->v_mount->mnt_cred;

		ea_len = MAX(ip->i_ea_len, dp->di_extsize);
		for (lcnt = 1, tlen = ea_len - ip->i_ea_len; tlen > 0;) {
			tlen -= MIN(ZERO_REGION_SIZE, tlen);
			lcnt++;
		}

		liovec = __builtin_alloca(lcnt * sizeof(struct iovec));
		luio.uio_iovcnt = lcnt;

		liovec[0].iov_base = ip->i_ea_area;
		liovec[0].iov_len = ip->i_ea_len;
		for (i = 1, tlen = ea_len - ip->i_ea_len; i < lcnt; i++) {
			liovec[i].iov_base = __DECONST(void *, zero_region);
			liovec[i].iov_len = MIN(ZERO_REGION_SIZE, tlen);
			tlen -= liovec[i].iov_len;
		}
		MPASS(tlen == 0);

		luio.uio_iov = liovec;
		luio.uio_offset = 0;
		luio.uio_resid = ea_len;
		luio.uio_segflg = UIO_SYSSPACE;
		luio.uio_rw = UIO_WRITE;
		luio.uio_td = td;
		error = ffs_extwrite(vp, &luio, IO_EXT | IO_SYNC, cred);
	}
	if (--ip->i_ea_refs == 0) {
		free(ip->i_ea_area, M_TEMP);
		ip->i_ea_area = NULL;
		ip->i_ea_len = 0;
		ip->i_ea_error = 0;
	}
	ffs_unlock_ea(vp);

	if (commit && error == 0 && ip->i_ea_len == 0)
		ffs_truncate(vp, 0, IO_EXT, cred);
	return (error);
}

/*
 * Vnode extattr strategy routine for fifos.
 *
 * We need to check for a read or write of the external attributes.
 * Otherwise we just fall through and do the usual thing.
 */
static int
ffsext_strategy(struct vop_strategy_args *ap)
/*
struct vop_strategy_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	struct buf *a_bp;
};
*/
{
	struct vnode *vp;
	daddr_t lbn;

	vp = ap->a_vp;
	lbn = ap->a_bp->b_lblkno;
	if (I_IS_UFS2(VTOI(vp)) && lbn < 0 && lbn >= -UFS_NXADDR)
		return (VOP_STRATEGY_APV(&ufs_vnodeops, ap));
	if (vp->v_type == VFIFO)
		return (VOP_STRATEGY_APV(&ufs_fifoops, ap));
	panic("spec nodes went here");
}

/*
 * Vnode extattr transaction commit/abort
 */
static int
ffs_openextattr(struct vop_openextattr_args *ap)
/*
struct vop_openextattr_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	return (ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td));
}

/*
 * Vnode extattr transaction commit/abort
 */
static int
ffs_closeextattr(struct vop_closeextattr_args *ap)
/*
struct vop_closeextattr_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	int a_commit;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct vnode *vp;

	vp = ap->a_vp;
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		return (EOPNOTSUPP);
	if (ap->a_commit && (vp->v_mount->mnt_flag & MNT_RDONLY) != 0)
		return (EROFS);

	return (ffs_close_ea(vp, ap->a_commit, ap->a_cred, ap->a_td));
}

/*
 * Vnode operation to remove a named attribute.
 */
static int
ffs_deleteextattr(struct vop_deleteextattr_args *ap)
/*
vop_deleteextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct vnode *vp;
	struct inode *ip;
	struct extattr *eap;
	uint32_t ul;
	int olen, error, i, easize;
	u_char *eae;
	void *tmp;

	vp = ap->a_vp;
	ip = VTOI(vp);

	if (vp->v_type == VCHR || vp->v_type == VBLK)
		return (EOPNOTSUPP);
	if (strlen(ap->a_name) == 0)
		return (EINVAL);
	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return (EROFS);

	error = extattr_check_cred(vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VWRITE);
	if (error) {
		/*
		 * ffs_lock_ea is not needed there, because the vnode
		 * must be exclusively locked.
		 */
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}

	error = ffs_open_ea(vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);

	/* CEM: delete could be done in-place instead */
	eae = malloc(ip->i_ea_len, M_TEMP, M_WAITOK);
	bcopy(ip->i_ea_area, eae, ip->i_ea_len);
	easize = ip->i_ea_len;

	olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
	    &eap, NULL);
	if (olen == -1) {
		/* delete but nonexistent */
		free(eae, M_TEMP);
		ffs_close_ea(vp, 0, ap->a_cred, ap->a_td);
		return (ENOATTR);
	}
	ul = eap->ea_length;
	i = (u_char *)EXTATTR_NEXT(eap) - eae;
	bcopy(EXTATTR_NEXT(eap), eap, easize - i);
	easize -= ul;

	tmp = ip->i_ea_area;
	ip->i_ea_area = eae;
	ip->i_ea_len = easize;
	free(tmp, M_TEMP);
	error = ffs_close_ea(vp, 1, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to retrieve a named extended attribute.
 */
static int
ffs_getextattr(struct vop_getextattr_args *ap)
/*
vop_getextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	INOUT struct uio *a_uio;
	OUT size_t *a_size;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	u_char *eae, *p;
	unsigned easize;
	int error, ealen;

	ip = VTOI(ap->a_vp);

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VREAD);
	if (error)
		return (error);

	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);

	eae = ip->i_ea_area;
	easize = ip->i_ea_len;

	ealen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
	    NULL, &p);
	if (ealen >= 0) {
		error = 0;
		if (ap->a_size != NULL)
			*ap->a_size = ealen;
		else if (ap->a_uio != NULL)
			error = uiomove(p, ealen, ap->a_uio);
	} else
		error = ENOATTR;

	ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to retrieve extended attributes on a vnode.
 */
static int
ffs_listextattr(struct vop_listextattr_args *ap)
/*
vop_listextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	INOUT struct uio *a_uio;
	OUT size_t *a_size;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct extattr *eap, *eaend;
	int error, ealen;

	ip = VTOI(ap->a_vp);

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VREAD);
	if (error)
		return (error);

	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);

	error = 0;
	if (ap->a_size != NULL)
		*ap->a_size = 0;

	KASSERT(ALIGNED_TO(ip->i_ea_area, struct extattr), ("unaligned"));
	eap = (struct extattr *)ip->i_ea_area;
	eaend = (struct extattr *)(ip->i_ea_area + ip->i_ea_len);
	for (; error == 0 && eap < eaend; eap = EXTATTR_NEXT(eap)) {
		KASSERT(EXTATTR_NEXT(eap) <= eaend,
		    ("extattr next %p beyond %p", EXTATTR_NEXT(eap), eaend));
		if (eap->ea_namespace != ap->a_attrnamespace)
			continue;

		ealen = eap->ea_namelength;
		if (ap->a_size != NULL)
			*ap->a_size += ealen + 1;
		else if (ap->a_uio != NULL)
			error = uiomove(&eap->ea_namelength, ealen + 1,
			    ap->a_uio);
	}

	ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to set a named attribute.
 */
static int
ffs_setextattr(struct vop_setextattr_args *ap)
/*
vop_setextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	INOUT struct uio *a_uio;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct vnode *vp;
	struct inode *ip;
	struct fs *fs;
	struct extattr *eap;
	uint32_t ealength, ul;
	ssize_t ealen;
	int olen, eapad1, eapad2, error, i, easize;
	u_char *eae;
	void *tmp;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ITOFS(ip);

	if (vp->v_type == VCHR || vp->v_type == VBLK)
		return (EOPNOTSUPP);
	if (strlen(ap->a_name) == 0)
		return (EINVAL);

	/* XXX Now unsupported API to delete EAs using NULL uio. */
	if (ap->a_uio == NULL)
		return (EOPNOTSUPP);

	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return (EROFS);

	ealen = ap->a_uio->uio_resid;
	if (ealen < 0 || ealen > lblktosize(fs, UFS_NXADDR))
		return (EINVAL);

	error = extattr_check_cred(vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VWRITE);
	if (error) {
		/*
		 * ffs_lock_ea is not needed there, because the vnode
		 * must be exclusively locked.
		 */
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}

	error = ffs_open_ea(vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);

	ealength = sizeof(uint32_t) + 3 + strlen(ap->a_name);
	eapad1 = roundup2(ealength, 8) - ealength;
	eapad2 = roundup2(ealen, 8) - ealen;
	ealength += eapad1 + ealen + eapad2;
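	/*
	 * Worked example (added commentary, not in the original source):
	 * for name "test" (4 bytes) the header is 4 + 3 + 4 = 11 bytes,
	 * so eapad1 = 5; a 5-byte value gets eapad2 = 3, giving
	 * ealength = 11 + 5 + 5 + 3 = 24, a multiple of 8 as required by
	 * the on-disk record alignment.
	 */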

	/*
	 * CEM: rewrites of the same size or smaller could be done in-place
	 * instead.  (We don't acquire any fine-grained locks in here either,
	 * so we could also do bigger writes in-place.)
	 */
	eae = malloc(ip->i_ea_len + ealength, M_TEMP, M_WAITOK);
	bcopy(ip->i_ea_area, eae, ip->i_ea_len);
	easize = ip->i_ea_len;

	olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
	    &eap, NULL);
	if (olen == -1) {
		/* new, append at end */
		KASSERT(ALIGNED_TO(eae + easize, struct extattr),
		    ("unaligned"));
		eap = (struct extattr *)(eae + easize);
		easize += ealength;
	} else {
		ul = eap->ea_length;
		i = (u_char *)EXTATTR_NEXT(eap) - eae;
		if (ul != ealength) {
			bcopy(EXTATTR_NEXT(eap), (u_char *)eap + ealength,
			    easize - i);
			easize += (ealength - ul);
		}
	}
	if (easize > lblktosize(fs, UFS_NXADDR)) {
		free(eae, M_TEMP);
		ffs_close_ea(vp, 0, ap->a_cred, ap->a_td);
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = ENOSPC;
		return (ENOSPC);
	}
	eap->ea_length = ealength;
	eap->ea_namespace = ap->a_attrnamespace;
	eap->ea_contentpadlen = eapad2;
	eap->ea_namelength = strlen(ap->a_name);
	memcpy(eap->ea_name, ap->a_name, strlen(ap->a_name));
	bzero(&eap->ea_name[strlen(ap->a_name)], eapad1);
	error = uiomove(EXTATTR_CONTENT(eap), ealen, ap->a_uio);
	if (error) {
		free(eae, M_TEMP);
		ffs_close_ea(vp, 0, ap->a_cred, ap->a_td);
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}
	bzero((u_char *)EXTATTR_CONTENT(eap) + ealen, eapad2);

	tmp = ip->i_ea_area;
	ip->i_ea_area = eae;
	ip->i_ea_len = easize;
	free(tmp, M_TEMP);
	error = ffs_close_ea(vp, 1, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode pointer to File handle
 */
static int
ffs_vptofh(struct vop_vptofh_args *ap)
/*
vop_vptofh {
	IN struct vnode *a_vp;
	IN struct fid *a_fhp;
};
*/
{
	struct inode *ip;
	struct ufid *ufhp;

	ip = VTOI(ap->a_vp);
	ufhp = (struct ufid *)ap->a_fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_gen;
	return (0);
}

SYSCTL_DECL(_vfs_ffs);
static int use_buf_pager = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, use_buf_pager, CTLFLAG_RWTUN, &use_buf_pager, 0,
    "Always use buffer pager instead of bmap");

static daddr_t
ffs_gbp_getblkno(struct vnode *vp, vm_ooffset_t off)
{

	return (lblkno(VFSTOUFS(vp->v_mount)->um_fs, off));
}

static int
ffs_gbp_getblksz(struct vnode *vp, daddr_t lbn)
{

	return (blksize(VFSTOUFS(vp->v_mount)->um_fs, VTOI(vp), lbn));
}

static int
ffs_getpages(struct vop_getpages_args *ap)
{
	struct vnode *vp;
	struct ufsmount *um;

	vp = ap->a_vp;
	um = VFSTOUFS(vp->v_mount);

	if (!use_buf_pager && um->um_devvp->v_bufobj.bo_bsize <= PAGE_SIZE)
		return (vnode_pager_generic_getpages(vp, ap->a_m, ap->a_count,
		    ap->a_rbehind, ap->a_rahead, NULL, NULL));
	return (vfs_bio_getpages(vp, ap->a_m, ap->a_count, ap->a_rbehind,
	    ap->a_rahead, ffs_gbp_getblkno, ffs_gbp_getblksz));
}
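
/*
 * Note (added commentary, not in the original source): the choice above
 * can be flipped at runtime or from loader.conf because the sysctl is
 * CTLFLAG_RWTUN, e.g.
 *
 *	sysctl vfs.ffs.use_buf_pager=0
 *
 * which re-enables the bmap-based generic pager on devices whose block
 * size does not exceed the page size.
 */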

static int
ffs_getpages_async(struct vop_getpages_async_args *ap)
{
	struct vnode *vp;
	struct ufsmount *um;
	bool do_iodone;
	int error;

	vp = ap->a_vp;
	um = VFSTOUFS(vp->v_mount);
	do_iodone = true;

	if (um->um_devvp->v_bufobj.bo_bsize <= PAGE_SIZE) {
		error = vnode_pager_generic_getpages(vp, ap->a_m, ap->a_count,
		    ap->a_rbehind, ap->a_rahead, ap->a_iodone, ap->a_arg);
		if (error == 0)
			do_iodone = false;
	} else {
		error = vfs_bio_getpages(vp, ap->a_m, ap->a_count,
		    ap->a_rbehind, ap->a_rahead, ffs_gbp_getblkno,
		    ffs_gbp_getblksz);
	}
	if (do_iodone && ap->a_iodone != NULL)
		ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);

	return (error);
}

static int
ffs_vput_pair(struct vop_vput_pair_args *ap)
{
	struct mount *mp;
	struct vnode *dvp, *vp, *vp1, **vpp;
	struct inode *dp, *ip;
	ino_t ip_ino;
	u_int64_t ip_gen;
	off_t old_size;
	int error, vp_locked;

	dvp = ap->a_dvp;
	dp = VTOI(dvp);
	vpp = ap->a_vpp;
	vp = vpp != NULL ? *vpp : NULL;

	if ((dp->i_flag & (IN_NEEDSYNC | IN_ENDOFF)) == 0) {
		vput(dvp);
		if (vp != NULL && ap->a_unlock_vp)
			vput(vp);
		return (0);
	}

	mp = dvp->v_mount;
	if (vp != NULL) {
		if (ap->a_unlock_vp) {
			vput(vp);
		} else {
			MPASS(vp->v_type != VNON);
			vp_locked = VOP_ISLOCKED(vp);
			ip = VTOI(vp);
			ip_ino = ip->i_number;
			ip_gen = ip->i_gen;
			VOP_UNLOCK(vp);
		}
	}

	/*
	 * If compaction or fsync was requested do it in ffs_vput_pair()
	 * now that other locks are no longer held.
	 */
	if ((dp->i_flag & IN_ENDOFF) != 0) {
		VNASSERT(I_ENDOFF(dp) != 0 && I_ENDOFF(dp) < dp->i_size, dvp,
		    ("IN_ENDOFF set but I_ENDOFF() is not"));
		dp->i_flag &= ~IN_ENDOFF;
		old_size = dp->i_size;
		error = UFS_TRUNCATE(dvp, (off_t)I_ENDOFF(dp), IO_NORMAL |
		    (DOINGASYNC(dvp) ? 0 : IO_SYNC), curthread->td_ucred);
		if (error != 0 && error != ERELOOKUP) {
			if (!ffs_fsfail_cleanup(VFSTOUFS(mp), error)) {
				vn_printf(dvp,
				    "IN_ENDOFF: failed to truncate, "
				    "error %d\n", error);
			}
#ifdef UFS_DIRHASH
			ufsdirhash_free(dp);
#endif
		}
		SET_I_ENDOFF(dp, 0);
	}
	if ((dp->i_flag & IN_NEEDSYNC) != 0) {
		do {
			error = ffs_syncvnode(dvp, MNT_WAIT, 0);
		} while (error == ERELOOKUP);
	}

	vput(dvp);

	if (vp == NULL || ap->a_unlock_vp)
		return (0);
	MPASS(mp != NULL);

	/*
	 * It is possible that vp is reclaimed at this point. Only
	 * routines that call us with a_unlock_vp == false can find
	 * that their vp has been reclaimed. There are three areas
	 * that are affected:
	 * 1) vn_open_cred() - later VOPs could fail, but
	 *    dead_open() returns 0 to simulate successful open.
	 * 2) ffs_snapshot() - creation of snapshot fails with EBADF.
	 * 3) NFS server (several places) - code is prepared to detect
	 *    and respond to dead vnodes by returning ESTALE.
	 */
	VOP_LOCK(vp, vp_locked | LK_RETRY);
	if (!VN_IS_DOOMED(vp))
		return (0);

	/*
	 * Try harder to recover from reclaimed vp if reclaim was not
	 * because underlying inode was cleared.  We saved inode
	 * number and inode generation, so we can try to reinstantiate
	 * exactly same version of inode.  If this fails, return
	 * original doomed vnode and let caller to handle
	 * consequences.
	 *
	 * Note that callers must keep write started around
	 * VOP_VPUT_PAIR() calls, so it is safe to use mp without
	 * vfs_busy().
	 */
	error = ffs_inotovp(mp, ip_ino, ip_gen, LK_EXCLUSIVE, &vp1,
	    FFSV_REPLACE_DOOMED);
	if (error != 0) {
		VOP_LOCK(vp, vp_locked | LK_RETRY);