2 * modified for Lites 1.1
4 * Aug 1995, Godmar Back (gback@cs.utah.edu)
5 * University of Utah, Department of Computer Science
9 * The Regents of the University of California. All rights reserved.
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * @(#)ufs_readwrite.c 8.7 (Berkeley) 1/21/94
/*
 * Compatibility aliases: this file follows the BSD ufs_readwrite.c
 * "template" convention, where the generic names READ/WRITE/FS/BLKSIZE
 * are bound to filesystem-specific definitions.  READ_S and WRITE_S
 * are the corresponding function-name strings used in panic() messages
 * below.  The XXX comment asks for these to be expanded in place, as
 * was done in ffs_vnops.c -- but every later use of READ_S/WRITE_S/etc.
 * in this file depends on them, so they must be removed together.
 */
39 /* XXX TODO: remove these obfuscations (as in ffs_vnops.c). */
40 #define BLKSIZE(a, b, c) blksize(a, b, c)
41 #define FS struct m_ext2fs
43 #define READ ext2_read
44 #define READ_S "ext2_read"
45 #define WRITE ext2_write
46 #define WRITE_S "ext2_write"
/*
 * ext2_read -- VOP_READ implementation for ext2 (bound via the READ
 * macro above): copies file data from filesystem blocks into the
 * caller's uio, one logical block per loop iteration.
 *
 * NOTE(review): this chunk is fragmentary -- each line carries a
 * residual line number from the original file and many intervening
 * lines (the function signature, some declarations, closing braces)
 * are missing from this view.  Comments below describe only the
 * visible code.
 */
49 * Vnode op for reading.
53 struct vop_read_args /* {
/* Per-iteration transfer bookkeeping. */
67 long size, xfersize, blkoffset;
68 int error, orig_resid, seqcount;
/* The high bits of a_ioflag carry the caller's sequential-access hint. */
69 seqcount = ap->a_ioflag >> IO_SEQSHIFT;
/* Sanity: this vnode op must only be entered for UIO_READ requests. */
78 if (uio->uio_rw != UIO_READ)
79 panic("%s: mode", READ_S);
/*
 * Only VREG, VDIR and VLNK vnodes may be read here.  A symlink whose
 * size is below mnt_maxsymlinklen is presumably stored inline in the
 * inode and should never reach this block-read path -- TODO confirm
 * against the ext2 readlink implementation.
 */
81 if (vp->v_type == VLNK) {
82 if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
83 panic("%s: short symlink", READ_S);
84 } else if (vp->v_type != VREG && vp->v_type != VDIR)
85 panic("%s: type %d", READ_S, vp->v_type);
/* Remember the starting residual so we can tell later if data moved. */
87 orig_resid = uio->uio_resid;
88 KASSERT(orig_resid >= 0, ("ext2_read: uio->uio_resid < 0"));
91 KASSERT(uio->uio_offset >= 0, ("ext2_read: uio->uio_offset < 0"));
/*
 * Reject an in-file offset at or beyond the filesystem's maximum file
 * size.  (The statement taken on this condition is not visible in
 * this chunk -- presumably an overflow-error return; verify.)
 */
93 if (uio->uio_offset < ip->i_size && uio->uio_offset >= fs->e2fs_maxfilesize)
/*
 * Main transfer loop: one filesystem block per iteration until the
 * request is satisfied or EOF is hit.  bp is reset each iteration.
 */
95 for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
96 if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
98 lbn = lblkno(fs, uio->uio_offset);
100 size = BLKSIZE(fs, ip, lbn);
101 blkoffset = blkoff(fs, uio->uio_offset);
/* Clamp the transfer size to block end, request size, and EOF. */
103 xfersize = fs->e2fs_fsize - blkoffset;
104 if (uio->uio_resid < xfersize)
105 xfersize = uio->uio_resid;
106 if (bytesinfile < xfersize)
107 xfersize = bytesinfile;
/*
 * Choose a read strategy: plain bread() when the next logical block
 * is past EOF; cluster_read() when read-clustering is permitted on
 * this mount; read-ahead via breadn() for sequential readers;
 * otherwise a plain bread().
 */
109 if (lblktosize(fs, nextlbn) >= ip->i_size)
110 error = bread(vp, lbn, size, NOCRED, &bp);
111 else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0)
112 error = cluster_read(vp, ip->i_size, lbn, size,
113 NOCRED, blkoffset + uio->uio_resid, seqcount, &bp);
114 else if (seqcount > 1) {
115 int nextsize = BLKSIZE(fs, ip, nextlbn);
116 error = breadn(vp, lbn,
117 size, &nextlbn, &nextsize, 1, NOCRED, &bp);
119 error = bread(vp, lbn, size, NOCRED, &bp);
127 * We should only get non-zero b_resid when an I/O error
128 * has occurred, which should cause us to break above.
129 * However, if the short read did not cause an error,
130 * then we want to ensure that we do not uiomove bad
131 * or uninitialized data.
/* Shrink the transfer if the buffer came back short (see above). */
134 if (size < xfersize) {
/* Copy the valid bytes out of the buffer into the caller's uio. */
139 error = uiomove((char *)bp->b_data + blkoffset,
/*
 * If no error occurred, or any data was actually transferred, and the
 * mount is not noatime, schedule an access-time update on the inode.
 */
148 if ((error == 0 || uio->uio_resid != orig_resid) &&
149 (vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
150 ip->i_flag |= IN_ACCESS;
/*
 * ext2_write -- VOP_WRITE implementation for ext2 (bound via the WRITE
 * macro above): allocates/extends blocks as needed and copies data from
 * the caller's uio into them, one logical block per loop iteration.
 *
 * NOTE(review): this chunk is fragmentary -- each line carries a
 * residual line number from the original file and many intervening
 * lines (the signature, case labels, break/return statements, the
 * bwrite/bdwrite arms, closing braces) are missing from this view.
 * Comments below describe only the visible code.
 */
155 * Vnode op for writing.
159 struct vop_write_args /* {
163 struct ucred *a_cred;
173 int blkoffset, error, flags, ioflag, resid, size, seqcount, xfersize;
175 ioflag = ap->a_ioflag;
/* High bits of the ioflag carry the sequential-access hint. */
176 seqcount = ioflag >> IO_SEQSHIFT;
/* Sanity: this vnode op must only be entered for UIO_WRITE requests. */
182 if (uio->uio_rw != UIO_WRITE)
183 panic("%s: mode", WRITE_S);
/*
 * Per-type checks (only fragments of the switch are visible):
 * regular files honor O_APPEND and refuse non-append writes to
 * append-only inodes; directory writes must be synchronous (they
 * come from ext2_mkdir()); any other vnode type panics.
 */
186 switch (vp->v_type) {
188 if (ioflag & IO_APPEND)
189 uio->uio_offset = ip->i_size;
190 if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
196 /* XXX differs from ffs -- this is called from ext2_mkdir(). */
197 if ((ioflag & IO_SYNC) == 0)
198 panic("ext2_write: nonsync dir write");
201 panic("ext2_write: type %p %d (%jd,%jd)", (void *)vp,
202 vp->v_type, (intmax_t)uio->uio_offset,
203 (intmax_t)uio->uio_resid);
206 KASSERT(uio->uio_resid >= 0, ("ext2_write: uio->uio_resid < 0"));
207 KASSERT(uio->uio_offset >= 0, ("ext2_write: uio->uio_offset < 0"));
/*
 * Refuse writes that would grow the file past the filesystem maximum.
 * (The statement taken on this condition is not visible here --
 * presumably an EFBIG return; verify.)
 */
209 if ((uoff_t)uio->uio_offset + uio->uio_resid > fs->e2fs_maxfilesize)
/* Enforce the process file-size resource limit (RLIMIT_FSIZE). */
212 * Maybe this should be above the vnode op call, but so long as
213 * file servers have no limits, I don't think it matters.
215 if (vn_rlimit_fsize(vp, uio, uio->uio_td))
/* Remember the starting residual for rollback and i-flag decisions. */
218 resid = uio->uio_resid;
220 flags = ioflag & IO_SYNC ? B_SYNC : 0;
/* Main transfer loop: one filesystem block per iteration. */
222 for (error = 0; uio->uio_resid > 0;) {
223 lbn = lblkno(fs, uio->uio_offset);
224 blkoffset = blkoff(fs, uio->uio_offset);
/* Clamp the transfer to the end of the current block / the request. */
225 xfersize = fs->e2fs_fsize - blkoffset;
226 if (uio->uio_resid < xfersize)
227 xfersize = uio->uio_resid;
/* Tell the VM pager about the new size before growing the file. */
228 if (uio->uio_offset + xfersize > ip->i_size)
229 vnode_pager_setsize(vp, uio->uio_offset + xfersize);
232 * Avoid a data-consistency race between write() and mmap()
233 * by ensuring that newly allocated blocks are zeroed. The
234 * race can occur even in the case where the write covers
/* Allocate (or fetch) the block backing this range of the file. */
238 error = ext2_balloc(ip, lbn, blkoffset + xfersize,
239 ap->a_cred, &bp, flags);
/* Extend the in-core file size now that the block is allocated. */
242 if (uio->uio_offset + xfersize > ip->i_size)
243 ip->i_size = uio->uio_offset + xfersize;
244 size = BLKSIZE(fs, ip, lbn) - bp->b_resid;
/* Copy the caller's data into the buffer. */
249 uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
250 if ((ioflag & IO_VMIO) &&
251 LIST_FIRST(&bp->b_dep) == NULL) /* in ext2fs? */
252 bp->b_flags |= B_RELBUF;
/*
 * Flush policy (arms partially missing from this view): synchronous
 * writes presumably go straight to disk; a fully written block is
 * handed to cluster_write() when write-clustering is allowed, else
 * marked cluster-eligible for a delayed write -- verify.
 */
254 if (ioflag & IO_SYNC) {
256 } else if (xfersize + blkoffset == fs->e2fs_fsize) {
257 if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
258 bp->b_flags |= B_CLUSTEROK;
259 cluster_write(vp, bp, ip->i_size, seqcount);
264 bp->b_flags |= B_CLUSTEROK;
267 if (error || xfersize == 0)
271 * If we successfully wrote any data, and we are not the superuser
272 * we clear the setuid and setgid bits as a precaution against
274 * XXX too late, the tamperer may have opened the file while we
275 * were writing the data (or before).
276 * XXX too early, if (error && ioflag & IO_UNIT) then we will
279 if (resid > uio->uio_resid && ap->a_cred && ap->a_cred->cr_uid != 0)
280 ip->i_mode &= ~(ISUID | ISGID);
/*
 * Error rollback for IO_UNIT writes: restore the previous size and
 * rewind the uio so the whole request appears untried.  (The guard
 * testing `error` is among the missing lines -- verify.)
 */
283 * XXX should truncate to the last successfully written
284 * data if the uiomove() failed.
286 if (ioflag & IO_UNIT) {
287 (void)ext2_truncate(vp, osize,
288 ioflag & IO_SYNC, ap->a_cred, uio->uio_td);
289 uio->uio_offset -= resid - uio->uio_resid;
290 uio->uio_resid = resid;
/* If any data was written, mark the inode changed; sync writes push
 * the inode to disk immediately via ext2_update(). */
293 if (uio->uio_resid != resid) {
294 ip->i_flag |= IN_CHANGE | IN_UPDATE;
295 if (ioflag & IO_SYNC)
296 error = ext2_update(vp, 1);