/*
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_readwrite.c	8.11 (Berkeley) 5/8/95
 */
#define	BLKSIZE(a, b, c)	blksize(a, b, c)
#define	READ			ffs_read
#define	READ_S			"ffs_read"
#define	WRITE			ffs_write
#define	WRITE_S			"ffs_write"
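/*
 * Note (an assumption, not stated in this excerpt): the READ/WRITE
 * macros above let this one source file be compiled as the FFS vnode
 * operations, so the panics below report "ffs_read"/"ffs_write" while
 * the code itself stays filesystem-neutral.
 */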
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <sys/event.h>
#include <sys/vmmeter.h>
/*
 * Vnode op for reading.
 */
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct vnode *vp;
	register struct inode *ip;
	register struct uio *uio;
	ufs_daddr_t lbn, nextlbn;
	long size, xfersize, blkoffset;
	int error, orig_resid;
	vp = ap->a_vp;
	ip = VTOI(vp);
	uio = ap->a_uio;
	seqcount = ap->a_ioflag >> 16;
	ioflag = ap->a_ioflag;

	if (uio->uio_rw != UIO_READ)
		panic("%s: mode", READ_S);
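	/*
	 * Illustrative note (not from the original source): the upper 16
	 * bits of a_ioflag carry the sequential-access hint, so an
	 * a_ioflag of, say, (8 << 16) | IO_SYNC yields seqcount == 8
	 * while the low bits still test as ordinary IO_* flags.
	 */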
	if (vp->v_type == VLNK) {
		if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
			panic("%s: short symlink", READ_S);
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("%s: type %d", READ_S, vp->v_type);
	if ((u_int64_t)uio->uio_offset > fs->fs_maxfilesize)
		return (EFBIG);

	orig_resid = uio->uio_resid;
	object = vp->v_object;

	bytesinfile = ip->i_size - uio->uio_offset;
	if (bytesinfile <= 0) {
		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
			ip->i_flag |= IN_ACCESS;
		return 0;
	}
	if (object)
		vm_object_reference(object);
#ifdef ENABLE_VFS_IOOPT
	/*
	 * If IO optimisation is turned on,
	 * and we are NOT a VM based IO request,
	 * (i.e. not headed for the buffer cache)
	 * but there IS a vm object associated with it.
	 */
	if ((ioflag & IO_VMIO) == 0 && (vfs_ioopt > 1) && object) {
		int nread, toread;

		toread = uio->uio_resid;
		if (toread > bytesinfile)
			toread = bytesinfile;
		if (toread >= PAGE_SIZE) {
			/*
			 * Then if it's at least a page in size, try to
			 * get the data from the object using vm tricks.
			 */
			error = uioread(toread, uio, object, &nread);
			if ((uio->uio_resid == 0) || (error != 0)) {
				/*
				 * If we finished or there was an error
				 * then finish up (the reference previously
				 * obtained on object must be released).
				 */
				if ((error == 0 ||
				    uio->uio_resid != orig_resid) &&
				    (vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
					ip->i_flag |= IN_ACCESS;
				if (object)
					vm_object_vndeallocate(object);
				return error;
			}
		}
	}
#endif
	/*
	 * OK, so we couldn't do it all in one vm trick;
	 * cycle around trying smaller bites.
	 */
	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
			break;
#ifdef ENABLE_VFS_IOOPT
		if ((ioflag & IO_VMIO) == 0 && (vfs_ioopt > 1) && object) {
			/*
			 * Obviously we didn't finish above, but we
			 * didn't get an error either.  Try the same
			 * trick again, but this time we are looping.
			 */
			int nread, toread;

			toread = uio->uio_resid;
			if (toread > bytesinfile)
				toread = bytesinfile;

			/*
			 * Once again, if there isn't enough for a
			 * whole page, don't try optimising.
			 */
			if (toread >= PAGE_SIZE) {
				error = uioread(toread, uio, object, &nread);
				if ((uio->uio_resid == 0) || (error != 0)) {
					/*
					 * If we finished or there was an
					 * error then finish up (the reference
					 * previously obtained on object must
					 * be released).
					 */
					if ((error == 0 ||
					    uio->uio_resid != orig_resid) &&
					    (vp->v_mount->mnt_flag &
					    MNT_NOATIME) == 0)
						ip->i_flag |= IN_ACCESS;
					if (object)
						vm_object_vndeallocate(object);
					return error;
				}
				/*
				 * To get here we didn't finish or err.
				 * If we did get some data,
				 * loop to try another bite.
				 */
				if (nread > 0)
					continue;
			}
		}
#endif
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;

		/*
		 * size of buffer.  The buffer representing the
		 * end of the file is rounded up to the size of
		 * the block type (fragment or full block,
		 * depending).
		 */
		size = BLKSIZE(fs, ip, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);
		/*
		 * The amount we want to transfer in this iteration is
		 * one FS block, less the amount of the data before
		 * our start point (duh!).
		 */
		xfersize = fs->fs_bsize - blkoffset;

		/*
		 * But if we actually want less than the block,
		 * or the file doesn't have a whole block more of data,
		 * then use the lesser number.
		 */
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;
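		/*
		 * Worked example (illustrative, not part of the original
		 * source): with fs_bsize == 8192 and uio_offset == 10000,
		 * lblkno() gives lbn 1 and blkoff() gives blkoffset
		 * 10000 - 8192 = 1808, so xfersize starts out as
		 * 8192 - 1808 = 6384 and is then clamped against both
		 * uio_resid and bytesinfile above.
		 */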
		if (lblktosize(fs, nextlbn) >= ip->i_size) {
			/*
			 * Don't do readahead if this is the end of the file.
			 */
			error = bread(vp, lbn, size, NOCRED, &bp);
		} else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
			/*
			 * Otherwise if we are allowed to cluster,
			 * grab as much as we can.
			 *
			 * XXX  This may not be a win if we are not
			 * doing sequential access.
			 */
			error = cluster_read(vp, ip->i_size, lbn,
			    size, NOCRED, uio->uio_resid, seqcount, &bp);
		} else if (seqcount > 1) {
			/*
			 * If we are NOT allowed to cluster, then
			 * if we appear to be acting sequentially,
			 * fire off a request for a readahead
			 * as well as a read.  Note that the 4th and 5th
			 * arguments point to arrays of the size specified
			 * in the 6th argument.
			 */
			int nextsize = BLKSIZE(fs, ip, nextlbn);
			error = breadn(vp, lbn,
			    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
		} else {
			/*
			 * Failing all of the above, just read what the
			 * user asked for.  Interestingly, the same as
			 * the first option above.
			 */
			error = bread(vp, lbn, size, NOCRED, &bp);
		}
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}
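		/*
		 * Summary of the choice made above (illustrative, not
		 * from the original source):
		 *
		 *	next block at/past EOF	-> bread(), no readahead
		 *	clustering permitted	-> cluster_read()
		 *	no clustering, but
		 *	seqcount > 1		-> breadn(), one-block readahead
		 *	otherwise		-> bread(), no readahead
		 */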
		/*
		 * If IO_DIRECT then set B_DIRECT for the buffer.  This
		 * will cause us to attempt to release the buffer later on
		 * and will cause the buffer cache to attempt to free the
		 * underlying pages.
		 */
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;
		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error, we
		 * must not uiomove bad or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}
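		/*
		 * Illustrative example (not in the original source): a
		 * bread() of an 8192-byte block that returns b_resid ==
		 * 4096 leaves only the first 4096 bytes valid, so
		 * xfersize is clamped to 4096 rather than copying
		 * uninitialized buffer memory out to the user.
		 */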
#ifdef ENABLE_VFS_IOOPT
		if (vfs_ioopt && object &&
		    (bp->b_flags & B_VMIO) &&
		    ((blkoffset & PAGE_MASK) == 0) &&
		    ((xfersize & PAGE_MASK) == 0)) {
			/*
			 * If VFS IO optimisation is turned on,
			 * and it's an exact page multiple
			 * and a normal VM based op,
			 * then use uiomoveco().
			 */
			error = uiomoveco((char *)bp->b_data + blkoffset,
			    (int)xfersize, uio, object);
		} else
#endif
		{
			/*
			 * otherwise use the general form.
			 */
			error = uiomove((char *)bp->b_data + blkoffset,
			    (int)xfersize, uio);
		}
		if (error)
			break;
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_FIRST(&bp->b_dep) == NULL)) {
			/*
			 * If there are no dependencies, and it's VMIO,
			 * then we don't need the buf, mark it available
			 * for freeing.  The VM has the data.
			 */
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			/*
			 * Otherwise let whoever
			 * made the request take care of
			 * freeing it.  We just queue
			 * it onto another list.
			 */
			bqrelse(bp);
		}
	}
	/*
	 * This can only happen in the case of an error, because the loop
	 * above resets bp to NULL on each iteration and on normal completion
	 * has not set a new value into it; it must have come from a 'break'.
	 */
	if (bp != NULL) {
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_FIRST(&bp->b_dep) == NULL)) {
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			bqrelse(bp);
		}
	}
	if (object)
		vm_object_vndeallocate(object);
	if ((error == 0 || uio->uio_resid != orig_resid) &&
	    (vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
		ip->i_flag |= IN_ACCESS;
	return (error);
}

/*
 * Vnode op for writing.
 */
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct vnode *vp;
	register struct uio *uio;
	register struct inode *ip;
	int blkoffset, error, extended, flags, ioflag, resid, size, xfersize;
	seqcount = ap->a_ioflag >> 16;
	ioflag = ap->a_ioflag;
	uio = ap->a_uio;
	vp = ap->a_vp;
	ip = VTOI(vp);
	object = vp->v_object;
	if (object)
		vm_object_reference(object);
	if (uio->uio_rw != UIO_WRITE)
		panic("%s: mode", WRITE_S);

	switch (vp->v_type) {
	case VREG:
		if (ioflag & IO_APPEND)
			uio->uio_offset = ip->i_size;
		if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size) {
			if (object)
				vm_object_vndeallocate(object);
			return (EPERM);
		}
		break;
	case VDIR:
		panic("%s: dir write", WRITE_S);
	default:
		panic("%s: type %p %d (%d,%d)", WRITE_S, vp, (int)vp->v_type,
		    (int)uio->uio_offset,
		    (int)uio->uio_resid);
	}
	if (uio->uio_offset < 0 ||
	    (u_int64_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize) {
		if (object)
			vm_object_vndeallocate(object);
		return (EFBIG);
	}
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	td = uio->uio_td;
	if (vp->v_type == VREG && td &&
	    uio->uio_offset + uio->uio_resid >
	    td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		PROC_LOCK(td->td_proc);
		psignal(td->td_proc, SIGXFSZ);
		PROC_UNLOCK(td->td_proc);
		if (object)
			vm_object_vndeallocate(object);
		return (EFBIG);
	}

	resid = uio->uio_resid;
	osize = ip->i_size;
	flags = 0;
	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
		flags = B_SYNC;
#ifdef ENABLE_VFS_IOOPT
	if (object && (object->flags & OBJ_OPT)) {
		vm_freeze_copyopts(object,
		    OFF_TO_IDX(uio->uio_offset),
		    OFF_TO_IDX(uio->uio_offset + uio->uio_resid + PAGE_MASK));
	}
#endif
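	/*
	 * Illustrative note (not in the original source): OFF_TO_IDX()
	 * converts a byte offset to a page index (off >> PAGE_SHIFT), so
	 * adding PAGE_MASK to the end offset above rounds the range up
	 * to include a partially covered final page.
	 */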
	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;

		if (uio->uio_offset + xfersize > ip->i_size)
			vnode_pager_setsize(vp, uio->uio_offset + xfersize);
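		/*
		 * Illustrative note (an assumption, not from the original
		 * source): vnode_pager_setsize() grows the VM object's
		 * idea of the file size before the data is copied in, so
		 * the pager and any concurrent mappings see the new end
		 * of file rather than faulting on stale bounds.
		 */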
		/*
		 * Avoid a data-consistency race between write() and mmap()
		 * by ensuring that newly allocated blocks are zeroed.  The
		 * race can occur even in the case where the write covers
		 * the entire block.
		 */
		if (fs->fs_bsize > xfersize)
			flags |= B_CLRBUF;
		/* XXX is uio->uio_offset the right thing here? */
		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
		    ap->a_cred, flags, &bp);
		if (error != 0)
			break;
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;
		if (ioflag & IO_NOWDRAIN)
			bp->b_flags |= B_NOWDRAIN;
		if (uio->uio_offset + xfersize > ip->i_size) {
			ip->i_size = uio->uio_offset + xfersize;
			extended = 1;
		}
		size = BLKSIZE(fs, ip, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;
		error =
		    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_FIRST(&bp->b_dep) == NULL)) {
			bp->b_flags |= B_RELBUF;
		}
		/*
		 * If IO_SYNC each buffer is written synchronously.  Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously.  Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
		    buf_dirty_count_severe() ||
		    (ioflag & IO_ASYNC)) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else if (xfersize + blkoffset == fs->fs_bsize) {
			if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
				bp->b_flags |= B_CLUSTEROK;
				cluster_write(bp, ip->i_size, seqcount);
			} else {
				bawrite(bp);
			}
		} else if (ioflag & IO_DIRECT) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else {
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
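		/*
		 * Summary of the write-out policy above (illustrative,
		 * not from the original source):
		 *
		 *	IO_SYNC			-> bwrite():  synchronous
		 *	page/buffer shortage,
		 *	or IO_ASYNC		-> bawrite(): asynchronous
		 *	full block		-> cluster_write() if allowed,
		 *				   else bawrite()
		 *	IO_DIRECT		-> bawrite(): asynchronous
		 *	otherwise		-> bdwrite(): delayed write
		 */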
		if (error || xfersize == 0)
			break;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * If we successfully wrote any data, and we are not the superuser,
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if (resid > uio->uio_resid && ap->a_cred &&
	    suser_cred(ap->a_cred, PRISON_ROOT))
		ip->i_mode &= ~(ISUID | ISGID);
	if (resid > uio->uio_resid)
		VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)UFS_TRUNCATE(vp, osize,
			    ioflag & IO_SYNC, ap->a_cred, uio->uio_td);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
		error = UFS_UPDATE(vp, 1);
	if (object)
		vm_object_vndeallocate(object);
	return (error);
}

static int
ffs_getpages(ap)
	struct vop_getpages_args *ap;
{
	off_t foff, physoffset;
	struct vnode *dp, *vp;
	vm_object_t obj;
	vm_pindex_t pindex, firstindex;
	vm_page_t mreq;
	int bbackwards, bforwards;
	int pbackwards, pforwards;
	pcount = round_page(ap->a_count) / PAGE_SIZE;
	mreq = ap->a_m[ap->a_reqpage];
	firstindex = ap->a_m[0]->pindex;
	/*
	 * if ANY DEV_BSIZE blocks are valid on a large filesystem block,
	 * then the entire page is valid.  Since the page may be mapped,
	 * user programs might reference data beyond the actual end of file
	 * occurring within the page.  We have to zero that data.
	 */
	if (mreq->valid) {
		if (mreq->valid != VM_PAGE_BITS_ALL)
			vm_page_zero_invalid(mreq, TRUE);
		for (i = 0; i < pcount; i++) {
			if (i != ap->a_reqpage) {
				vm_page_free(ap->a_m[i]);
			}
		}
		return VM_PAGER_OK;
	}
	vp = ap->a_vp;
	obj = vp->v_object;
	bsize = vp->v_mount->mnt_stat.f_iosize;
	pindex = mreq->pindex;
	foff = IDX_TO_OFF(pindex) /* + ap->a_offset should be zero */;
	if (bsize < PAGE_SIZE)
		return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
		    ap->a_count, ap->a_reqpage);
	/*
	 * foff is the file offset of the required page.
	 * reqlblkno is the logical block that contains the page.
	 * poff is the index of the page into the logical block.
	 */
	reqlblkno = foff / bsize;
	poff = (foff % bsize) / PAGE_SIZE;
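	/*
	 * Worked example (illustrative, not part of the original
	 * source): with bsize == 8192, PAGE_SIZE == 4096 and foff ==
	 * 20480, reqlblkno is 20480 / 8192 = 2 and poff is
	 * (20480 % 8192) / 4096 = 1, i.e. the second page of logical
	 * block 2.
	 */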
	dp = VTOI(vp)->i_devvp;
	if (ufs_bmaparray(vp, reqlblkno, &reqblkno, &bforwards, &bbackwards)
	    || (reqblkno == -1)) {
		for (i = 0; i < pcount; i++) {
			if (i != ap->a_reqpage)
				vm_page_free(ap->a_m[i]);
		}
		if (reqblkno == -1) {
			if ((mreq->flags & PG_ZERO) == 0)
				vm_page_zero_fill(mreq);
			vm_page_undirty(mreq);
			mreq->valid = VM_PAGE_BITS_ALL;
			return VM_PAGER_OK;
		} else {
			return VM_PAGER_ERROR;
		}
	}
	physoffset = (off_t)reqblkno * DEV_BSIZE + poff * PAGE_SIZE;
	pagesperblock = bsize / PAGE_SIZE;
	/*
	 * find the first page that is contiguous...
	 * note that pbackwards is the number of pages that are contiguous
	 * backwards.
	 */
	firstpage = 0;
	pbackwards = poff + bbackwards * pagesperblock;
	if (ap->a_reqpage > pbackwards) {
		firstpage = ap->a_reqpage - pbackwards;
		for (i = 0; i < firstpage; i++)
			vm_page_free(ap->a_m[i]);
	}
	/*
	 * pforwards is the number of pages that are contiguous
	 * after the current page.
	 */
	pforwards = (pagesperblock - (poff + 1)) +
	    bforwards * pagesperblock;
	if (pforwards < (pcount - (ap->a_reqpage + 1))) {
		for (i = ap->a_reqpage + pforwards + 1; i < pcount; i++)
			vm_page_free(ap->a_m[i]);
		pcount = ap->a_reqpage + pforwards + 1;
	}

	/*
	 * number of pages for I/O corrected for the non-contig pages at
	 * the beginning of the array.
	 */
	pcount -= firstpage;
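	/*
	 * Worked example (illustrative, not part of the original
	 * source): with pagesperblock == 2, poff == 1 and bforwards ==
	 * 3 (three more contiguous blocks on disk), pforwards is
	 * (2 - 2) + 3 * 2 = 6, so up to six trailing pages can ride
	 * along in the same physical I/O; anything beyond that is freed
	 * and pcount is trimmed to match.
	 */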
	/*
	 * calculate the size of the transfer.
	 */
	size = pcount * PAGE_SIZE;
	if ((IDX_TO_OFF(ap->a_m[firstpage]->pindex) + size) >
	    obj->un_pager.vnp.vnp_size)
		size = obj->un_pager.vnp.vnp_size -
		    IDX_TO_OFF(ap->a_m[firstpage]->pindex);
	rtval = VOP_GETPAGES(dp, &ap->a_m[firstpage], size,
	    (ap->a_reqpage - firstpage), physoffset);