2 * Copyright (c) 2002, 2003 Networks Associates Technology, Inc.
5 * This software was developed for the FreeBSD Project by Marshall
6 * Kirk McKusick and Network Associates Laboratories, the Security
7 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
8 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * Copyright (c) 1982, 1986, 1989, 1993
33 * The Regents of the University of California. All rights reserved.
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 * 4. Neither the name of the University nor the names of its contributors
44 * may be used to endorse or promote products derived from this software
45 * without specific prior written permission.
47 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
48 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * from: @(#)ufs_readwrite.c 8.11 (Berkeley) 5/8/95
60 * from: $FreeBSD: .../ufs/ufs_readwrite.c,v 1.96 2002/08/12 09:22:11 phk ...
61 * @(#)ffs_vnops.c 8.15 (Berkeley) 5/14/95
64 #include <sys/cdefs.h>
65 __FBSDID("$FreeBSD$");
67 #include <sys/param.h>
69 #include <sys/systm.h>
72 #include <sys/extattr.h>
73 #include <sys/kernel.h>
74 #include <sys/limits.h>
75 #include <sys/malloc.h>
76 #include <sys/mount.h>
79 #include <sys/vmmeter.h>
80 #include <sys/vnode.h>
83 #include <vm/vm_extern.h>
84 #include <vm/vm_object.h>
85 #include <vm/vm_page.h>
86 #include <vm/vm_pager.h>
87 #include <vm/vnode_pager.h>
89 #include <ufs/ufs/extattr.h>
90 #include <ufs/ufs/quota.h>
91 #include <ufs/ufs/inode.h>
92 #include <ufs/ufs/ufs_extern.h>
93 #include <ufs/ufs/ufsmount.h>
95 #include <ufs/ffs/fs.h>
96 #include <ufs/ffs/ffs_extern.h>
97 #include "opt_directio.h"
101 extern int ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);
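/*
 * ffs_rawread() provides the uncached direct-read path taken by ffs_read()
 * when IO_DIRECT is requested; it is typically compiled in only when the
 * kernel is built with "options DIRECTIO" (hence the "opt_directio.h"
 * include above).
 */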
103 static vop_fsync_t ffs_fsync;
104 static vop_lock1_t ffs_lock;
105 static vop_getpages_t ffs_getpages;
106 static vop_read_t ffs_read;
107 static vop_write_t ffs_write;
108 static int ffs_extread(struct vnode *vp, struct uio *uio, int ioflag);
109 static int ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag,
111 static vop_strategy_t ffsext_strategy;
112 static vop_closeextattr_t ffs_closeextattr;
113 static vop_deleteextattr_t ffs_deleteextattr;
114 static vop_getextattr_t ffs_getextattr;
115 static vop_listextattr_t ffs_listextattr;
116 static vop_openextattr_t ffs_openextattr;
117 static vop_setextattr_t ffs_setextattr;
118 static vop_vptofh_t ffs_vptofh;
121 /* Global vfs data structures for ufs. */
122 struct vop_vector ffs_vnodeops1 = {
123 .vop_default = &ufs_vnodeops,
124 .vop_fsync = ffs_fsync,
125 .vop_getpages = ffs_getpages,
126 .vop_lock1 = ffs_lock,
127 .vop_read = ffs_read,
128 .vop_reallocblks = ffs_reallocblks,
129 .vop_write = ffs_write,
130 .vop_vptofh = ffs_vptofh,
133 struct vop_vector ffs_fifoops1 = {
134 .vop_default = &ufs_fifoops,
135 .vop_fsync = ffs_fsync,
136 .vop_reallocblks = ffs_reallocblks, /* XXX: really ??? */
137 .vop_vptofh = ffs_vptofh,
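/*
 * The "...1" vectors above serve UFS1 file systems; the "...2" vectors
 * below add the extended attribute operations and the extattr-aware
 * strategy routine available on UFS2. The mount/vget code elsewhere in
 * FFS is expected to select the appropriate vector for each vnode.
 */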
140 /* Global vfs data structures for ufs. */
141 struct vop_vector ffs_vnodeops2 = {
142 .vop_default = &ufs_vnodeops,
143 .vop_fsync = ffs_fsync,
144 .vop_getpages = ffs_getpages,
145 .vop_lock1 = ffs_lock,
146 .vop_read = ffs_read,
147 .vop_reallocblks = ffs_reallocblks,
148 .vop_write = ffs_write,
149 .vop_closeextattr = ffs_closeextattr,
150 .vop_deleteextattr = ffs_deleteextattr,
151 .vop_getextattr = ffs_getextattr,
152 .vop_listextattr = ffs_listextattr,
153 .vop_openextattr = ffs_openextattr,
154 .vop_setextattr = ffs_setextattr,
155 .vop_vptofh = ffs_vptofh,
158 struct vop_vector ffs_fifoops2 = {
159 .vop_default = &ufs_fifoops,
160 .vop_fsync = ffs_fsync,
161 .vop_lock1 = ffs_lock,
162 .vop_reallocblks = ffs_reallocblks,
163 .vop_strategy = ffsext_strategy,
164 .vop_closeextattr = ffs_closeextattr,
165 .vop_deleteextattr = ffs_deleteextattr,
166 .vop_getextattr = ffs_getextattr,
167 .vop_listextattr = ffs_listextattr,
168 .vop_openextattr = ffs_openextattr,
169 .vop_setextattr = ffs_setextattr,
170 .vop_vptofh = ffs_vptofh,
174 * Synch an open file.
178 ffs_fsync(struct vop_fsync_args *ap)
187 error = ffs_syncvnode(vp, ap->a_waitfor, 0);
190 if (ap->a_waitfor == MNT_WAIT && DOINGSOFTDEP(vp)) {
191 error = softdep_fsync(vp);
196 * The softdep_fsync() function may drop vp lock,
197 * allowing for dirty buffers to reappear on the
198 * bo_dirty list. Recheck and resync as needed.
201 if (vp->v_type == VREG && (bo->bo_numoutput > 0 ||
202 bo->bo_dirty.bv_cnt > 0)) {
212 ffs_syncvnode(struct vnode *vp, int waitfor, int flags)
219 int error, wait, passes;
222 ip->i_flag &= ~IN_NEEDSYNC;
226 * When doing MNT_WAIT we must first flush all dependencies
229 if (DOINGSOFTDEP(vp) && waitfor == MNT_WAIT &&
230 (error = softdep_sync_metadata(vp)) != 0)
234 * Flush all dirty buffers associated with a vnode.
238 wait = 0; /* Always do an async pass first. */
239 lbn = lblkno(ip->i_fs, (ip->i_size + ip->i_fs->fs_bsize - 1));
242 TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
243 bp->b_vflags &= ~BV_SCANNED;
244 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
246 * Reasons to skip this buffer: it has already been considered
247 * on this pass, the buffer has dependencies that will cause
248 * it to be redirtied and it has not already been deferred,
249 * or it is already being written.
251 if ((bp->b_vflags & BV_SCANNED) != 0)
253 bp->b_vflags |= BV_SCANNED;
254 /* Flush indirects in order. */
255 if (waitfor == MNT_WAIT && bp->b_lblkno <= -NDADDR &&
256 lbn_level(bp->b_lblkno) >= passes)
258 if (bp->b_lblkno > lbn)
259 panic("ffs_syncvnode: syncing truncated data.");
260 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
263 if ((bp->b_flags & B_DELWRI) == 0)
264 panic("ffs_fsync: not dirty");
266 * Check for dependencies and potentially complete them.
268 if (!LIST_EMPTY(&bp->b_dep) &&
269 (error = softdep_sync_buf(vp, bp,
270 wait ? MNT_WAIT : MNT_NOWAIT)) != 0) {
272 if (error != EBUSY) {
276 /* If we deferred once, don't defer again. */
277 if ((bp->b_flags & B_DEFERRED) == 0) {
278 bp->b_flags |= B_DEFERRED;
285 if ((error = bwrite(bp)) != 0)
287 } else if ((bp->b_flags & B_CLUSTEROK)) {
288 (void) vfs_bio_awrite(bp);
295 * Since we may have slept during the I/O, we need
296 * to start from a known point.
299 nbp = TAILQ_FIRST(&bo->bo_dirty.bv_hd);
301 if (waitfor != MNT_WAIT) {
303 if ((flags & NO_INO_UPDT) != 0)
306 return (ffs_update(vp, 0));
308 /* Drain IO to see if we're done. */
309 bufobj_wwait(bo, 0, 0);
311 * Block devices associated with filesystems may have new I/O
312 * requests posted for them even if the vnode is locked, so no
313 * amount of trying will get them clean. We make several passes
316 * Regular files may need multiple passes to flush all dependency
317 * work, as it is possible that we must write once per indirect
318 * level, once for the leaf, and once for the inode, and each of
319 * these will be done with one sync and one async pass.
321 if (bo->bo_dirty.bv_cnt > 0) {
322 /* Write the inode after sync passes to flush deps. */
323 if (wait && DOINGSOFTDEP(vp) && (flags & NO_INO_UPDT) == 0) {
328 /* switch between sync/async. */
330 if (wait == 1 || ++passes < NIADDR + 2)
333 if (!vn_isdisk(vp, NULL))
334 vprint("ffs_fsync: dirty", vp);
339 if ((flags & NO_INO_UPDT) == 0)
340 error = ffs_update(vp, 1);
342 softdep_journal_fsync(VTOI(vp));
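/*
 * Rough summary of the flush strategy above: the first scan of bo_dirty
 * is asynchronous (wait == 0); if dirty buffers remain after draining
 * I/O, further passes alternate sync/async, bounded by NIADDR + 2 passes
 * so that each level of indirect block, the leaf data, and the inode
 * itself get a chance to be written.
 */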
348 struct vop_lock1_args /* {
356 #ifndef NO_FFS_SNAPSHOT
362 switch (ap->a_flags & LK_TYPE_MASK) {
369 #ifdef DEBUG_VFS_LOCKS
370 KASSERT(vp->v_holdcnt != 0,
371 ("ffs_lock %p: zero hold count", vp));
374 result = _lockmgr_args(lkp, flags, VI_MTX(vp),
375 LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
376 ap->a_file, ap->a_line);
377 if (lkp == vp->v_vnlock || result != 0)
380 * Apparent success, except that the vnode
381 * mutated between snapshot file vnode and
382 * regular file vnode while this process
383 * slept. The lock currently held is not the
384 * right lock. Release it, and try to get the
387 (void) _lockmgr_args(lkp, LK_RELEASE, NULL,
388 LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
389 ap->a_file, ap->a_line);
390 if ((flags & (LK_INTERLOCK | LK_NOWAIT)) ==
391 (LK_INTERLOCK | LK_NOWAIT))
393 if ((flags & LK_TYPE_MASK) == LK_UPGRADE)
394 flags = (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE;
395 flags &= ~LK_INTERLOCK;
399 result = VOP_LOCK1_APV(&ufs_vnodeops, ap);
403 return (VOP_LOCK1_APV(&ufs_vnodeops, ap));
408 * Vnode op for reading.
412 struct vop_read_args /* {
416 struct ucred *a_cred;
424 ufs_lbn_t lbn, nextlbn;
426 long size, xfersize, blkoffset;
434 ioflag = ap->a_ioflag;
435 if (ap->a_ioflag & IO_EXT)
437 return (ffs_extread(vp, uio, ioflag));
439 panic("ffs_read+IO_EXT");
442 if ((ioflag & IO_DIRECT) != 0) {
445 error = ffs_rawread(vp, uio, &workdone);
446 if (error != 0 || workdone != 0)
451 seqcount = ap->a_ioflag >> IO_SEQSHIFT;
455 if (uio->uio_rw != UIO_READ)
456 panic("ffs_read: mode");
458 if (vp->v_type == VLNK) {
459 if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
460 panic("ffs_read: short symlink");
461 } else if (vp->v_type != VREG && vp->v_type != VDIR)
462 panic("ffs_read: type %d", vp->v_type);
464 orig_resid = uio->uio_resid;
465 KASSERT(orig_resid >= 0, ("ffs_read: uio->uio_resid < 0"));
468 KASSERT(uio->uio_offset >= 0, ("ffs_read: uio->uio_offset < 0"));
470 if (uio->uio_offset < ip->i_size &&
471 uio->uio_offset >= fs->fs_maxfilesize)
474 for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
475 if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
477 lbn = lblkno(fs, uio->uio_offset);
481 * size of buffer. The buffer representing the
482 * end of the file is rounded up to the size of
483 * the block type (fragment or full block,
486 size = blksize(fs, ip, lbn);
487 blkoffset = blkoff(fs, uio->uio_offset);
490 * The amount we want to transfer in this iteration is
491 * one FS block less the amount of the data before
492 * our startpoint (duh!)
494 xfersize = fs->fs_bsize - blkoffset;
497 * But if we actually want less than the block,
498 * or the file doesn't have a whole block more of data,
499 * then use the lesser number.
501 if (uio->uio_resid < xfersize)
502 xfersize = uio->uio_resid;
503 if (bytesinfile < xfersize)
504 xfersize = bytesinfile;
506 if (lblktosize(fs, nextlbn) >= ip->i_size) {
508 * Don't do readahead if this is the end of the file.
510 error = bread(vp, lbn, size, NOCRED, &bp);
511 } else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
513 * Otherwise if we are allowed to cluster,
514 * grab as much as we can.
516 * XXX This may not be a win if we are not
517 * doing sequential access.
519 error = cluster_read(vp, ip->i_size, lbn,
520 size, NOCRED, blkoffset + uio->uio_resid, seqcount, &bp);
521 } else if (seqcount > 1) {
523 * If we are NOT allowed to cluster, then
524 * if we appear to be acting sequentially,
525 * fire off a request for a readahead
526 * as well as a read. Note that the 4th and 5th
527 * arguments point to arrays of the size specified in
530 int nextsize = blksize(fs, ip, nextlbn);
531 error = breadn(vp, lbn,
532 size, &nextlbn, &nextsize, 1, NOCRED, &bp);
535 * Failing all of the above, just read what the
536 * user asked for. Interestingly, the same as
537 * the first option above.
539 error = bread(vp, lbn, size, NOCRED, &bp);
548 * If IO_DIRECT then set B_DIRECT for the buffer. This
549 * will cause us to attempt to release the buffer later on
550 * and will cause the buffer cache to attempt to free the
553 if (ioflag & IO_DIRECT)
554 bp->b_flags |= B_DIRECT;
557 * We should only get non-zero b_resid when an I/O error
558 * has occurred, which should cause us to break above.
559 * However, if the short read did not cause an error,
560 * then we want to ensure that we do not uiomove bad
561 * or uninitialized data.
564 if (size < xfersize) {
570 error = vn_io_fault_uiomove((char *)bp->b_data + blkoffset,
575 if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
576 (LIST_EMPTY(&bp->b_dep))) {
578 * If there are no dependencies, and it's VMIO,
579 * then we don't need the buf, mark it available
580 * for freeing. For non-direct VMIO reads, the VM
583 bp->b_flags |= B_RELBUF;
587 * Otherwise let whoever
588 * made the request take care of
589 * freeing it. We just queue
590 * it onto another list.
597 * This can only happen in the case of an error
598 * because the loop above resets bp to NULL on each iteration
599 * and on normal completion has not set a new value into it,
600 * so it must have come from a 'break' statement.
603 if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
604 (LIST_EMPTY(&bp->b_dep))) {
605 bp->b_flags |= B_RELBUF;
612 if ((error == 0 || uio->uio_resid != orig_resid) &&
613 (vp->v_mount->mnt_flag & MNT_NOATIME) == 0 &&
614 (ip->i_flag & IN_ACCESS) == 0) {
616 ip->i_flag |= IN_ACCESS;
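/*
 * Setting IN_ACCESS only marks the inode; the actual access-time update
 * is deferred to the generic inode-timestamp code rather than forcing an
 * inode write on every read.
 */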
623 * Vnode op for writing.
627 struct vop_write_args /* {
631 struct ucred *a_cred;
643 int blkoffset, error, flags, ioflag, size, xfersize;
647 ioflag = ap->a_ioflag;
648 if (ap->a_ioflag & IO_EXT)
650 return (ffs_extwrite(vp, uio, ioflag, ap->a_cred));
652 panic("ffs_write+IO_EXT");
655 seqcount = ap->a_ioflag >> IO_SEQSHIFT;
659 if (uio->uio_rw != UIO_WRITE)
660 panic("ffs_write: mode");
663 switch (vp->v_type) {
665 if (ioflag & IO_APPEND)
666 uio->uio_offset = ip->i_size;
667 if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
673 panic("ffs_write: dir write");
676 panic("ffs_write: type %p %d (%d,%d)", vp, (int)vp->v_type,
677 (int)uio->uio_offset,
682 KASSERT(uio->uio_resid >= 0, ("ffs_write: uio->uio_resid < 0"));
683 KASSERT(uio->uio_offset >= 0, ("ffs_write: uio->uio_offset < 0"));
685 if ((uoff_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize)
688 * Maybe this should be above the vnode op call, but so long as
689 * file servers have no limits, I don't think it matters.
691 if (vn_rlimit_fsize(vp, uio, uio->uio_td))
694 resid = uio->uio_resid;
696 if (seqcount > BA_SEQMAX)
697 flags = BA_SEQMAX << BA_SEQSHIFT;
699 flags = seqcount << BA_SEQSHIFT;
700 if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
703 for (error = 0; uio->uio_resid > 0;) {
704 lbn = lblkno(fs, uio->uio_offset);
705 blkoffset = blkoff(fs, uio->uio_offset);
706 xfersize = fs->fs_bsize - blkoffset;
707 if (uio->uio_resid < xfersize)
708 xfersize = uio->uio_resid;
709 if (uio->uio_offset + xfersize > ip->i_size)
710 vnode_pager_setsize(vp, uio->uio_offset + xfersize);
713 * We must perform a read-before-write if the transfer size
714 * does not cover the entire buffer.
716 if (fs->fs_bsize > xfersize)
720 /* XXX is uio->uio_offset the right thing here? */
721 error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
722 ap->a_cred, flags, &bp);
724 vnode_pager_setsize(vp, ip->i_size);
727 if (ioflag & IO_DIRECT)
728 bp->b_flags |= B_DIRECT;
729 if ((ioflag & (IO_SYNC|IO_INVAL)) == (IO_SYNC|IO_INVAL))
730 bp->b_flags |= B_NOCACHE;
732 if (uio->uio_offset + xfersize > ip->i_size) {
733 ip->i_size = uio->uio_offset + xfersize;
734 DIP_SET(ip, i_size, ip->i_size);
737 size = blksize(fs, ip, lbn) - bp->b_resid;
741 error = vn_io_fault_uiomove((char *)bp->b_data + blkoffset,
744 * If the buffer is not already filled and we encounter an
745 * error while trying to fill it, we have to clear out any
746 * garbage data from the pages instantiated for the buffer.
747 * If we do not, a failed uiomove() during a write can leave
748 * the prior contents of the pages exposed to a userland mmap.
750 * Note that we need only clear buffers with a transfer size
751 * equal to the block size because buffers with a shorter
752 * transfer size were cleared above by the call to UFS_BALLOC()
753 * with the BA_CLRBUF flag set.
755 * If the source region for uiomove identically mmaps the
756 * buffer, uiomove() performed the NOP copy, and the buffer
757 * content remains valid because the page fault handler
758 * validated the pages.
760 if (error != 0 && (bp->b_flags & B_CACHE) == 0 &&
761 fs->fs_bsize == xfersize)
763 if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
764 (LIST_EMPTY(&bp->b_dep))) {
765 bp->b_flags |= B_RELBUF;
769 * If IO_SYNC each buffer is written synchronously. Otherwise
770 * if we have a severe page deficiency write the buffer
771 * asynchronously. Otherwise try to cluster, and if that
772 * doesn't do it then either do an async write (if O_DIRECT),
773 * or a delayed write (if not).
775 if (ioflag & IO_SYNC) {
777 } else if (vm_page_count_severe() ||
778 buf_dirty_count_severe() ||
779 (ioflag & IO_ASYNC)) {
780 bp->b_flags |= B_CLUSTEROK;
782 } else if (xfersize + blkoffset == fs->fs_bsize) {
783 if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
784 bp->b_flags |= B_CLUSTEROK;
785 cluster_write(vp, bp, ip->i_size, seqcount);
789 } else if (ioflag & IO_DIRECT) {
790 bp->b_flags |= B_CLUSTEROK;
793 bp->b_flags |= B_CLUSTEROK;
796 if (error || xfersize == 0)
798 ip->i_flag |= IN_CHANGE | IN_UPDATE;
801 * If we successfully wrote any data, and we are not the superuser
802 * we clear the setuid and setgid bits as a precaution against
805 if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid &&
807 if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0)) {
808 ip->i_mode &= ~(ISUID | ISGID);
809 DIP_SET(ip, i_mode, ip->i_mode);
813 if (ioflag & IO_UNIT) {
814 (void)ffs_truncate(vp, osize,
815 IO_NORMAL | (ioflag & IO_SYNC),
816 ap->a_cred, uio->uio_td);
817 uio->uio_offset -= resid - uio->uio_resid;
818 uio->uio_resid = resid;
820 } else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
821 error = ffs_update(vp, 1);
830 struct vop_getpages_args *ap;
836 pcount = round_page(ap->a_count) / PAGE_SIZE;
837 mreq = ap->a_m[ap->a_reqpage];
840 * if ANY DEV_BSIZE blocks are valid on a large filesystem block,
841 * then the entire page is valid. Since the page may be mapped,
842 * user programs might reference data beyond the actual end of file
843 * occurring within the page. We have to zero that data.
845 VM_OBJECT_LOCK(mreq->object);
847 if (mreq->valid != VM_PAGE_BITS_ALL)
848 vm_page_zero_invalid(mreq, TRUE);
849 for (i = 0; i < pcount; i++) {
850 if (i != ap->a_reqpage) {
851 vm_page_lock(ap->a_m[i]);
852 vm_page_free(ap->a_m[i]);
853 vm_page_unlock(ap->a_m[i]);
856 VM_OBJECT_UNLOCK(mreq->object);
859 VM_OBJECT_UNLOCK(mreq->object);
861 return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
868 * Extended attribute area reading.
871 ffs_extread(struct vnode *vp, struct uio *uio, int ioflag)
874 struct ufs2_dinode *dp;
877 ufs_lbn_t lbn, nextlbn;
879 long size, xfersize, blkoffset;
888 if (uio->uio_rw != UIO_READ || fs->fs_magic != FS_UFS2_MAGIC)
889 panic("ffs_extread: mode");
892 orig_resid = uio->uio_resid;
893 KASSERT(orig_resid >= 0, ("ffs_extread: uio->uio_resid < 0"));
896 KASSERT(uio->uio_offset >= 0, ("ffs_extread: uio->uio_offset < 0"));
898 for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
899 if ((bytesinfile = dp->di_extsize - uio->uio_offset) <= 0)
901 lbn = lblkno(fs, uio->uio_offset);
905 * size of buffer. The buffer representing the
906 * end of the file is rounded up to the size of
907 * the block type (fragment or full block,
910 size = sblksize(fs, dp->di_extsize, lbn);
911 blkoffset = blkoff(fs, uio->uio_offset);
914 * The amount we want to transfer in this iteration is
915 * one FS block less the amount of the data before
916 * our startpoint (duh!)
918 xfersize = fs->fs_bsize - blkoffset;
921 * But if we actually want less than the block,
922 * or the file doesn't have a whole block more of data,
923 * then use the lesser number.
925 if (uio->uio_resid < xfersize)
926 xfersize = uio->uio_resid;
927 if (bytesinfile < xfersize)
928 xfersize = bytesinfile;
930 if (lblktosize(fs, nextlbn) >= dp->di_extsize) {
932 * Don't do readahead if this is the end of the info.
934 error = bread(vp, -1 - lbn, size, NOCRED, &bp);
937 * If we have a second block, then
938 * fire off a request for a readahead
939 * as well as a read. Note that the 4th and 5th
940 * arguments point to arrays of the size specified in
943 int nextsize = sblksize(fs, dp->di_extsize, nextlbn);
945 nextlbn = -1 - nextlbn;
946 error = breadn(vp, -1 - lbn,
947 size, &nextlbn, &nextsize, 1, NOCRED, &bp);
956 * If IO_DIRECT then set B_DIRECT for the buffer. This
957 * will cause us to attempt to release the buffer later on
958 * and will cause the buffer cache to attempt to free the
961 if (ioflag & IO_DIRECT)
962 bp->b_flags |= B_DIRECT;
965 * We should only get non-zero b_resid when an I/O error
966 * has occurred, which should cause us to break above.
967 * However, if the short read did not cause an error,
968 * then we want to ensure that we do not uiomove bad
969 * or uninitialized data.
972 if (size < xfersize) {
978 error = uiomove((char *)bp->b_data + blkoffset,
983 if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
984 (LIST_EMPTY(&bp->b_dep))) {
986 * If there are no dependencies, and it's VMIO,
987 * then we don't need the buf, mark it available
988 * for freeing. For non-direct VMIO reads, the VM
991 bp->b_flags |= B_RELBUF;
995 * Otherwise let whoever
996 * made the request take care of
997 * freeing it. We just queue
998 * it onto another list.
1005 * This can only happen in the case of an error
1006 * because the loop above resets bp to NULL on each iteration
1007 * and on normal completion has not set a new value into it,
1008 * so it must have come from a 'break' statement.
1011 if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
1012 (LIST_EMPTY(&bp->b_dep))) {
1013 bp->b_flags |= B_RELBUF;
1023 * Extended attribute area writing.
1026 ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *ucred)
1029 struct ufs2_dinode *dp;
1035 int blkoffset, error, flags, size, xfersize;
1042 if (uio->uio_rw != UIO_WRITE || fs->fs_magic != FS_UFS2_MAGIC)
1043 panic("ffs_extwrite: mode");
1046 if (ioflag & IO_APPEND)
1047 uio->uio_offset = dp->di_extsize;
1048 KASSERT(uio->uio_offset >= 0, ("ffs_extwrite: uio->uio_offset < 0"));
1049 KASSERT(uio->uio_resid >= 0, ("ffs_extwrite: uio->uio_resid < 0"));
1050 if ((uoff_t)uio->uio_offset + uio->uio_resid > NXADDR * fs->fs_bsize)
1053 resid = uio->uio_resid;
1054 osize = dp->di_extsize;
1056 if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
1059 for (error = 0; uio->uio_resid > 0;) {
1060 lbn = lblkno(fs, uio->uio_offset);
1061 blkoffset = blkoff(fs, uio->uio_offset);
1062 xfersize = fs->fs_bsize - blkoffset;
1063 if (uio->uio_resid < xfersize)
1064 xfersize = uio->uio_resid;
1067 * We must perform a read-before-write if the transfer size
1068 * does not cover the entire buffer.
1070 if (fs->fs_bsize > xfersize)
1073 flags &= ~BA_CLRBUF;
1074 error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
1079 * If the buffer is not valid we have to clear out any
1080 * garbage data from the pages instantiated for the buffer.
1081 * If we do not, a failed uiomove() during a write can leave
1082 * the prior contents of the pages exposed to a userland
1083 * mmap(). XXX deal with uiomove() errors a better way.
1085 if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
1087 if (ioflag & IO_DIRECT)
1088 bp->b_flags |= B_DIRECT;
1090 if (uio->uio_offset + xfersize > dp->di_extsize)
1091 dp->di_extsize = uio->uio_offset + xfersize;
1093 size = sblksize(fs, dp->di_extsize, lbn) - bp->b_resid;
1094 if (size < xfersize)
1098 uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
1099 if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
1100 (LIST_EMPTY(&bp->b_dep))) {
1101 bp->b_flags |= B_RELBUF;
1105 * If IO_SYNC each buffer is written synchronously. Otherwise
1106 * if we have a severe page deficiency write the buffer
1107 * asynchronously. Otherwise try to cluster, and if that
1108 * doesn't do it then either do an async write (if O_DIRECT),
1109 * or a delayed write (if not).
1111 if (ioflag & IO_SYNC) {
1113 } else if (vm_page_count_severe() ||
1114 buf_dirty_count_severe() ||
1115 xfersize + blkoffset == fs->fs_bsize ||
1116 (ioflag & (IO_ASYNC | IO_DIRECT)))
1120 if (error || xfersize == 0)
1122 ip->i_flag |= IN_CHANGE;
1125 * If we successfully wrote any data, and we are not the superuser
1126 * we clear the setuid and setgid bits as a precaution against
1129 if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid && ucred) {
1130 if (priv_check_cred(ucred, PRIV_VFS_RETAINSUGID, 0)) {
1131 ip->i_mode &= ~(ISUID | ISGID);
1132 dp->di_mode = ip->i_mode;
1136 if (ioflag & IO_UNIT) {
1137 (void)ffs_truncate(vp, osize,
1138 IO_EXT | (ioflag&IO_SYNC), ucred, uio->uio_td);
1139 uio->uio_offset -= resid - uio->uio_resid;
1140 uio->uio_resid = resid;
1142 } else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
1143 error = ffs_update(vp, 1);
1149 * Vnode operation to retrieve a named extended attribute.
1151 * Locate a particular EA (nspace:name) in the area (ptr:length), and return
1152 * the length of the EA, and possibly the pointer to the entry and to the data.
1155 ffs_findextattr(u_char *ptr, u_int length, int nspace, const char *name, u_char **eap, u_char **eac)
1157 u_char *p, *pe, *pn, *p0;
1158 int eapad1, eapad2, ealength, ealen, nlen;
1162 nlen = strlen(name);
1164 for (p = ptr; p < pe; p = pn) {
1166 bcopy(p, &ul, sizeof(ul));
1168 /* make sure this entry is complete */
1171 p += sizeof(uint32_t);
1179 if (bcmp(p, name, nlen))
1181 ealength = sizeof(uint32_t) + 3 + nlen;
1182 eapad1 = 8 - (ealength % 8);
1186 ealen = ul - ealength - eapad2;
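/*
 * On-disk extended attribute records, as manipulated above and in
 * ffs_setextattr() below, appear to be laid out as:
 *   uint32_t  total record length (a multiple of 8)
 *   u_char    attribute namespace
 *   u_char    content pad length (eapad2)
 *   u_char    name length
 *   name bytes, padded to an 8-byte boundary (eapad1)
 *   attribute data of ealen bytes, followed by eapad2 pad bytes
 * which is why ealength starts as sizeof(uint32_t) + 3 + nlen.
 */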
1198 ffs_rdextattr(u_char **p, struct vnode *vp, struct thread *td, int extra)
1201 struct ufs2_dinode *dp;
1204 struct iovec liovec;
1211 easize = dp->di_extsize;
1212 if ((uoff_t)easize + extra > NXADDR * fs->fs_bsize)
1215 eae = malloc(easize + extra, M_TEMP, M_WAITOK);
1217 liovec.iov_base = eae;
1218 liovec.iov_len = easize;
1219 luio.uio_iov = &liovec;
1220 luio.uio_iovcnt = 1;
1221 luio.uio_offset = 0;
1222 luio.uio_resid = easize;
1223 luio.uio_segflg = UIO_SYSSPACE;
1224 luio.uio_rw = UIO_READ;
1227 error = ffs_extread(vp, &luio, IO_EXT | IO_SYNC);
1237 ffs_lock_ea(struct vnode *vp)
1243 while (ip->i_flag & IN_EA_LOCKED) {
1244 ip->i_flag |= IN_EA_LOCKWAIT;
1245 msleep(&ip->i_ea_refs, &vp->v_interlock, PINOD + 2, "ufs_ea",
1248 ip->i_flag |= IN_EA_LOCKED;
1253 ffs_unlock_ea(struct vnode *vp)
1259 if (ip->i_flag & IN_EA_LOCKWAIT)
1260 wakeup(&ip->i_ea_refs);
1261 ip->i_flag &= ~(IN_EA_LOCKED | IN_EA_LOCKWAIT);
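/*
 * ffs_lock_ea()/ffs_unlock_ea() implement a simple sleep lock over the
 * in-memory extended attribute area: IN_EA_LOCKED marks the holder,
 * IN_EA_LOCKWAIT records that someone is sleeping on i_ea_refs, and the
 * vnode interlock protects the flag manipulation.
 */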
1266 ffs_open_ea(struct vnode *vp, struct ucred *cred, struct thread *td)
1269 struct ufs2_dinode *dp;
1275 if (ip->i_ea_area != NULL) {
1281 error = ffs_rdextattr(&ip->i_ea_area, vp, td, 0);
1286 ip->i_ea_len = dp->di_extsize;
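/*
 * ffs_open_ea() snapshots the whole on-disk extattr area into the
 * malloc'ed i_ea_area buffer (via ffs_rdextattr()) and takes a reference
 * (i_ea_refs); the get/set/delete/list operations below edit that copy,
 * and ffs_close_ea() either writes it back (commit) or discards it when
 * the last reference goes away.
 */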
1294 * Vnode extattr transaction commit/abort
1297 ffs_close_ea(struct vnode *vp, int commit, struct ucred *cred, struct thread *td)
1301 struct iovec liovec;
1303 struct ufs2_dinode *dp;
1308 if (ip->i_ea_area == NULL) {
1313 error = ip->i_ea_error;
1314 if (commit && error == 0) {
1315 ASSERT_VOP_ELOCKED(vp, "ffs_close_ea commit");
1317 cred = vp->v_mount->mnt_cred;
1318 liovec.iov_base = ip->i_ea_area;
1319 liovec.iov_len = ip->i_ea_len;
1320 luio.uio_iov = &liovec;
1321 luio.uio_iovcnt = 1;
1322 luio.uio_offset = 0;
1323 luio.uio_resid = ip->i_ea_len;
1324 luio.uio_segflg = UIO_SYSSPACE;
1325 luio.uio_rw = UIO_WRITE;
1327 /* XXX: I'm not happy about truncating to zero size */
1328 if (ip->i_ea_len < dp->di_extsize)
1329 error = ffs_truncate(vp, 0, IO_EXT, cred, td);
1330 error = ffs_extwrite(vp, &luio, IO_EXT | IO_SYNC, cred);
1332 if (--ip->i_ea_refs == 0) {
1333 free(ip->i_ea_area, M_TEMP);
1334 ip->i_ea_area = NULL;
1343 * Vnode extattr strategy routine for fifos.
1345 * We need to check for a read or write of the external attributes.
1346 * Otherwise we just fall through and do the usual thing.
1349 ffsext_strategy(struct vop_strategy_args *ap)
1351 struct vop_strategy_args {
1352 struct vnodeop_desc *a_desc;
1362 lbn = ap->a_bp->b_lblkno;
1363 if (VTOI(vp)->i_fs->fs_magic == FS_UFS2_MAGIC &&
1364 lbn < 0 && lbn >= -NXADDR)
1365 return (VOP_STRATEGY_APV(&ufs_vnodeops, ap));
1366 if (vp->v_type == VFIFO)
1367 return (VOP_STRATEGY_APV(&ufs_fifoops, ap));
1368 panic("spec nodes went here");
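/*
 * Note on the lbn test above: extended attribute data is addressed with
 * negative logical block numbers in the range [-NXADDR, -1] (ffs_extread()
 * reads block "-1 - lbn"), so a UFS2 buffer with such an lbn is routed to
 * the regular ufs strategy code rather than the fifo one.
 */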
1372 * Vnode operation to open an extended attribute transaction.
1375 ffs_openextattr(struct vop_openextattr_args *ap)
1377 struct vop_openextattr_args {
1378 struct vnodeop_desc *a_desc;
1380 IN struct ucred *a_cred;
1381 IN struct thread *a_td;
1388 ip = VTOI(ap->a_vp);
1391 if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
1392 return (EOPNOTSUPP);
1394 return (ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td));
1399 * Vnode extattr transaction commit/abort
1402 ffs_closeextattr(struct vop_closeextattr_args *ap)
1404 struct vop_closeextattr_args {
1405 struct vnodeop_desc *a_desc;
1408 IN struct ucred *a_cred;
1409 IN struct thread *a_td;
1416 ip = VTOI(ap->a_vp);
1419 if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
1420 return (EOPNOTSUPP);
1422 if (ap->a_commit && (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY))
1425 return (ffs_close_ea(ap->a_vp, ap->a_commit, ap->a_cred, ap->a_td));
1429 * Vnode operation to remove a named attribute.
1432 ffs_deleteextattr(struct vop_deleteextattr_args *ap)
1435 IN struct vnode *a_vp;
1436 IN int a_attrnamespace;
1437 IN const char *a_name;
1438 IN struct ucred *a_cred;
1439 IN struct thread *a_td;
1445 uint32_t ealength, ul;
1446 int ealen, olen, eapad1, eapad2, error, i, easize;
1449 ip = VTOI(ap->a_vp);
1452 if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
1453 return (EOPNOTSUPP);
1455 if (strlen(ap->a_name) == 0)
1458 if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
1461 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1462 ap->a_cred, ap->a_td, VWRITE);
1466 * ffs_lock_ea is not needed there, because the vnode
1467 * must be exclusively locked.
1469 if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
1470 ip->i_ea_error = error;
1474 error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
1478 ealength = eapad1 = ealen = eapad2 = 0;
1480 eae = malloc(ip->i_ea_len, M_TEMP, M_WAITOK);
1481 bcopy(ip->i_ea_area, eae, ip->i_ea_len);
1482 easize = ip->i_ea_len;
1484 olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
1487 /* delete but nonexistent */
1489 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
1492 bcopy(p, &ul, sizeof ul);
1494 if (ul != ealength) {
1495 bcopy(p + ul, p + ealength, easize - i);
1496 easize += (ealength - ul);
1498 if (easize > NXADDR * fs->fs_bsize) {
1500 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
1501 if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
1502 ip->i_ea_error = ENOSPC;
1506 ip->i_ea_area = eae;
1507 ip->i_ea_len = easize;
1509 error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
1514 * Vnode operation to retrieve a named extended attribute.
1517 ffs_getextattr(struct vop_getextattr_args *ap)
1520 IN struct vnode *a_vp;
1521 IN int a_attrnamespace;
1522 IN const char *a_name;
1523 INOUT struct uio *a_uio;
1525 IN struct ucred *a_cred;
1526 IN struct thread *a_td;
1536 ip = VTOI(ap->a_vp);
1539 if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
1540 return (EOPNOTSUPP);
1542 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1543 ap->a_cred, ap->a_td, VREAD);
1547 error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
1551 eae = ip->i_ea_area;
1552 easize = ip->i_ea_len;
1554 ealen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
1558 if (ap->a_size != NULL)
1559 *ap->a_size = ealen;
1560 else if (ap->a_uio != NULL)
1561 error = uiomove(p, ealen, ap->a_uio);
1565 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
1570 * Vnode operation to retrieve extended attributes on a vnode.
1573 ffs_listextattr(struct vop_listextattr_args *ap)
1576 IN struct vnode *a_vp;
1577 IN int a_attrnamespace;
1578 INOUT struct uio *a_uio;
1580 IN struct ucred *a_cred;
1581 IN struct thread *a_td;
1587 u_char *eae, *p, *pe, *pn;
1592 ip = VTOI(ap->a_vp);
1595 if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
1596 return (EOPNOTSUPP);
1598 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1599 ap->a_cred, ap->a_td, VREAD);
1603 error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
1606 eae = ip->i_ea_area;
1607 easize = ip->i_ea_len;
1610 if (ap->a_size != NULL)
1613 for(p = eae; error == 0 && p < pe; p = pn) {
1614 bcopy(p, &ul, sizeof(ul));
1619 if (*p++ != ap->a_attrnamespace)
1623 if (ap->a_size != NULL) {
1624 *ap->a_size += ealen + 1;
1625 } else if (ap->a_uio != NULL) {
1626 error = uiomove(p, ealen + 1, ap->a_uio);
1629 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
1634 * Vnode operation to set a named attribute.
1637 ffs_setextattr(struct vop_setextattr_args *ap)
1640 IN struct vnode *a_vp;
1641 IN int a_attrnamespace;
1642 IN const char *a_name;
1643 INOUT struct uio *a_uio;
1644 IN struct ucred *a_cred;
1645 IN struct thread *a_td;
1651 uint32_t ealength, ul;
1653 int olen, eapad1, eapad2, error, i, easize;
1656 ip = VTOI(ap->a_vp);
1659 if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
1660 return (EOPNOTSUPP);
1662 if (strlen(ap->a_name) == 0)
1665 /* XXX Now unsupported API to delete EAs using NULL uio. */
1666 if (ap->a_uio == NULL)
1667 return (EOPNOTSUPP);
1669 if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
1672 ealen = ap->a_uio->uio_resid;
1673 if (ealen < 0 || ealen > lblktosize(fs, NXADDR))
1676 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1677 ap->a_cred, ap->a_td, VWRITE);
1681 * ffs_lock_ea is not needed there, because the vnode
1682 * must be exclusively locked.
1684 if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
1685 ip->i_ea_error = error;
1689 error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
1693 ealength = sizeof(uint32_t) + 3 + strlen(ap->a_name);
1694 eapad1 = 8 - (ealength % 8);
1697 eapad2 = 8 - (ealen % 8);
1700 ealength += eapad1 + ealen + eapad2;
1702 eae = malloc(ip->i_ea_len + ealength, M_TEMP, M_WAITOK);
1703 bcopy(ip->i_ea_area, eae, ip->i_ea_len);
1704 easize = ip->i_ea_len;
1706 olen = ffs_findextattr(eae, easize,
1707 ap->a_attrnamespace, ap->a_name, &p, NULL);
1709 /* new, append at end */
1713 bcopy(p, &ul, sizeof ul);
1715 if (ul != ealength) {
1716 bcopy(p + ul, p + ealength, easize - i);
1717 easize += (ealength - ul);
1720 if (easize > lblktosize(fs, NXADDR)) {
1722 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
1723 if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
1724 ip->i_ea_error = ENOSPC;
1727 bcopy(&ealength, p, sizeof(ealength));
1728 p += sizeof(ealength);
1729 *p++ = ap->a_attrnamespace;
1731 *p++ = strlen(ap->a_name);
1732 strcpy(p, ap->a_name);
1733 p += strlen(ap->a_name);
1736 error = uiomove(p, ealen, ap->a_uio);
1739 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
1740 if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
1741 ip->i_ea_error = error;
1748 ip->i_ea_area = eae;
1749 ip->i_ea_len = easize;
1751 error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
1756 * Vnode pointer to file handle
1759 ffs_vptofh(struct vop_vptofh_args *ap)
1762 IN struct vnode *a_vp;
1763 IN struct fid *a_fhp;
1770 ip = VTOI(ap->a_vp);
1771 ufhp = (struct ufid *)ap->a_fhp;
1772 ufhp->ufid_len = sizeof(struct ufid);
1773 ufhp->ufid_ino = ip->i_number;
1774 ufhp->ufid_gen = ip->i_gen;
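/*
 * The ufid filled in here (length, inode number, generation) is what
 * file-handle consumers such as the NFS server present back to the
 * filesystem later; the generation number lets a stale handle be
 * detected after an inode number has been recycled.
 */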