2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include "opt_kdtrace.h"
40 #include <sys/param.h>
41 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/mount.h>
46 #include <sys/rwlock.h>
47 #include <sys/vmmeter.h>
48 #include <sys/vnode.h>
51 #include <vm/vm_param.h>
52 #include <vm/vm_extern.h>
53 #include <vm/vm_page.h>
54 #include <vm/vm_object.h>
55 #include <vm/vm_pager.h>
56 #include <vm/vnode_pager.h>
58 #include <fs/nfs/nfsport.h>
59 #include <fs/nfsclient/nfsmount.h>
60 #include <fs/nfsclient/nfs.h>
61 #include <fs/nfsclient/nfsnode.h>
62 #include <fs/nfsclient/nfs_kdtrace.h>
64 extern int newnfs_directio_allow_mmap;
65 extern struct nfsstats newnfsstats;
66 extern struct mtx ncl_iod_mutex;
67 extern int ncl_numasync;
68 extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
69 extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];
70 extern int newnfs_directio_enable;
71 extern int nfs_keep_dirty_on_error;
73 int ncl_pbuf_freecnt = -1; /* start out unlimited */
75 static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
77 static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
78 struct ucred *cred, int ioflag);
81 * Vnode op for VM getpages.
84 ncl_getpages(struct vop_getpages_args *ap)
86 int i, error, nextoff, size, toff, count, npages;
101 td = curthread; /* XXX */
102 cred = curthread->td_ucred; /* XXX */
103 nmp = VFSTONFS(vp->v_mount);
107 if ((object = vp->v_object) == NULL) {
108 ncl_printf("nfs_getpages: called with non-merged cache vnode??\n");
109 return (VM_PAGER_ERROR);
112 if (newnfs_directio_enable && !newnfs_directio_allow_mmap) {
113 mtx_lock(&np->n_mtx);
114 if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
115 mtx_unlock(&np->n_mtx);
116 ncl_printf("nfs_getpages: called on non-cacheable vnode??\n");
117 return (VM_PAGER_ERROR);
119 mtx_unlock(&np->n_mtx);
122 mtx_lock(&nmp->nm_mtx);
123 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
124 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
125 mtx_unlock(&nmp->nm_mtx);
126 /* We'll never get here for v4, because we always have fsinfo */
127 (void)ncl_fsinfo(nmp, vp, cred, td);
129 mtx_unlock(&nmp->nm_mtx);
131 npages = btoc(count);
134 * If the requested page is partially valid, just return it and
135 * allow the pager to zero-out the blanks. Partially valid pages
136 * can only occur at the file EOF.
138 VM_OBJECT_WLOCK(object);
139 if (pages[ap->a_reqpage]->valid != 0) {
140 for (i = 0; i < npages; ++i) {
141 if (i != ap->a_reqpage) {
142 vm_page_lock(pages[i]);
143 vm_page_free(pages[i]);
144 vm_page_unlock(pages[i]);
147 VM_OBJECT_WUNLOCK(object);
150 VM_OBJECT_WUNLOCK(object);
153 * We use only the kva address for the buffer, but this is extremely
154 * convienient and fast.
156 bp = getpbuf(&ncl_pbuf_freecnt);
158 kva = (vm_offset_t) bp->b_data;
159 pmap_qenter(kva, pages, npages);
160 PCPU_INC(cnt.v_vnodein);
161 PCPU_ADD(cnt.v_vnodepgsin, npages);
163 iov.iov_base = (caddr_t) kva;
167 uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
168 uio.uio_resid = count;
169 uio.uio_segflg = UIO_SYSSPACE;
170 uio.uio_rw = UIO_READ;
173 error = ncl_readrpc(vp, &uio, cred);
174 pmap_qremove(kva, npages);
176 relpbuf(bp, &ncl_pbuf_freecnt);
178 if (error && (uio.uio_resid == count)) {
179 ncl_printf("nfs_getpages: error %d\n", error);
180 VM_OBJECT_WLOCK(object);
181 for (i = 0; i < npages; ++i) {
182 if (i != ap->a_reqpage) {
183 vm_page_lock(pages[i]);
184 vm_page_free(pages[i]);
185 vm_page_unlock(pages[i]);
188 VM_OBJECT_WUNLOCK(object);
189 return (VM_PAGER_ERROR);
193 * Calculate the number of bytes read and validate only that number
194 * of bytes. Note that due to pending writes, size may be 0. This
195 * does not mean that the remaining data is invalid!
198 size = count - uio.uio_resid;
199 VM_OBJECT_WLOCK(object);
200 for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
202 nextoff = toff + PAGE_SIZE;
205 if (nextoff <= size) {
207 * Read operation filled an entire page
209 m->valid = VM_PAGE_BITS_ALL;
210 KASSERT(m->dirty == 0,
211 ("nfs_getpages: page %p is dirty", m));
212 } else if (size > toff) {
214 * Read operation filled a partial page.
217 vm_page_set_valid_range(m, 0, size - toff);
218 KASSERT(m->dirty == 0,
219 ("nfs_getpages: page %p is dirty", m));
222 * Read operation was short. If no error
223 * occured we may have hit a zero-fill
224 * section. We leave valid set to 0, and page
225 * is freed by vm_page_readahead_finish() if
226 * its index is not equal to requested, or
227 * page is zeroed and set valid by
228 * vm_pager_get_pages() for requested page.
232 if (i != ap->a_reqpage)
233 vm_page_readahead_finish(m);
235 VM_OBJECT_WUNLOCK(object);
240 * Vnode op for VM putpages.
243 ncl_putpages(struct vop_putpages_args *ap)
249 int iomode, must_commit, i, error, npages, count;
255 struct nfsmount *nmp;
261 td = curthread; /* XXX */
262 /* Set the cred to n_writecred for the write rpcs. */
263 if (np->n_writecred != NULL)
264 cred = crhold(np->n_writecred);
266 cred = crhold(curthread->td_ucred); /* XXX */
267 nmp = VFSTONFS(vp->v_mount);
270 rtvals = ap->a_rtvals;
271 npages = btoc(count);
272 offset = IDX_TO_OFF(pages[0]->pindex);
274 mtx_lock(&nmp->nm_mtx);
275 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
276 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
277 mtx_unlock(&nmp->nm_mtx);
278 (void)ncl_fsinfo(nmp, vp, cred, td);
280 mtx_unlock(&nmp->nm_mtx);
282 mtx_lock(&np->n_mtx);
283 if (newnfs_directio_enable && !newnfs_directio_allow_mmap &&
284 (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
285 mtx_unlock(&np->n_mtx);
286 ncl_printf("ncl_putpages: called on noncache-able vnode??\n");
287 mtx_lock(&np->n_mtx);
290 for (i = 0; i < npages; i++)
291 rtvals[i] = VM_PAGER_ERROR;
294 * When putting pages, do not extend file past EOF.
296 if (offset + count > np->n_size) {
297 count = np->n_size - offset;
301 mtx_unlock(&np->n_mtx);
304 * We use only the kva address for the buffer, but this is extremely
305 * convienient and fast.
307 bp = getpbuf(&ncl_pbuf_freecnt);
309 kva = (vm_offset_t) bp->b_data;
310 pmap_qenter(kva, pages, npages);
311 PCPU_INC(cnt.v_vnodeout);
312 PCPU_ADD(cnt.v_vnodepgsout, count);
314 iov.iov_base = (caddr_t) kva;
318 uio.uio_offset = offset;
319 uio.uio_resid = count;
320 uio.uio_segflg = UIO_SYSSPACE;
321 uio.uio_rw = UIO_WRITE;
324 if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
325 iomode = NFSWRITE_UNSTABLE;
327 iomode = NFSWRITE_FILESYNC;
329 error = ncl_writerpc(vp, &uio, cred, &iomode, &must_commit, 0);
332 pmap_qremove(kva, npages);
333 relpbuf(bp, &ncl_pbuf_freecnt);
335 if (error == 0 || !nfs_keep_dirty_on_error) {
336 vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
338 ncl_clearcommit(vp->v_mount);
344 * For nfs, cache consistency can only be maintained approximately.
345 * Although RFC1094 does not specify the criteria, the following is
346 * believed to be compatible with the reference port.
348 * If the file's modify time on the server has changed since the
349 * last read rpc or you have written to the file,
350 * you may have lost data cache consistency with the
351 * server, so flush all of the file's data out of the cache.
352 * Then force a getattr rpc to ensure that you have up to date
354 * NB: This implies that cache data can be read when up to
355 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
356 * attributes this could be forced by setting n_attrstamp to 0 before
357 * the VOP_GETATTR() call.
360 nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred)
364 struct nfsnode *np = VTONFS(vp);
368 * Grab the exclusive lock before checking whether the cache is
370 * XXX - We can make this cheaper later (by acquiring cheaper locks).
371 * But for now, this suffices.
373 old_lock = ncl_upgrade_vnlock(vp);
374 if (vp->v_iflag & VI_DOOMED) {
375 ncl_downgrade_vnlock(vp, old_lock);
379 mtx_lock(&np->n_mtx);
380 if (np->n_flag & NMODIFIED) {
381 mtx_unlock(&np->n_mtx);
382 if (vp->v_type != VREG) {
383 if (vp->v_type != VDIR)
384 panic("nfs: bioread, not dir");
386 error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
391 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
392 error = VOP_GETATTR(vp, &vattr, cred);
395 mtx_lock(&np->n_mtx);
396 np->n_mtime = vattr.va_mtime;
397 mtx_unlock(&np->n_mtx);
399 mtx_unlock(&np->n_mtx);
400 error = VOP_GETATTR(vp, &vattr, cred);
403 mtx_lock(&np->n_mtx);
404 if ((np->n_flag & NSIZECHANGED)
405 || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
406 mtx_unlock(&np->n_mtx);
407 if (vp->v_type == VDIR)
409 error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
412 mtx_lock(&np->n_mtx);
413 np->n_mtime = vattr.va_mtime;
414 np->n_flag &= ~NSIZECHANGED;
416 mtx_unlock(&np->n_mtx);
419 ncl_downgrade_vnlock(vp, old_lock);
424 * Vnode op for read using bio
427 ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
429 struct nfsnode *np = VTONFS(vp);
431 struct buf *bp, *rabp;
433 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
437 int nra, error = 0, n = 0, on = 0;
440 KASSERT(uio->uio_rw == UIO_READ, ("ncl_read mode"));
441 if (uio->uio_resid == 0)
443 if (uio->uio_offset < 0) /* XXX VDIR cookies can be negative */
447 mtx_lock(&nmp->nm_mtx);
448 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
449 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
450 mtx_unlock(&nmp->nm_mtx);
451 (void)ncl_fsinfo(nmp, vp, cred, td);
452 mtx_lock(&nmp->nm_mtx);
454 if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0)
455 (void) newnfs_iosize(nmp);
457 tmp_off = uio->uio_offset + uio->uio_resid;
458 if (vp->v_type != VDIR &&
459 (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)) {
460 mtx_unlock(&nmp->nm_mtx);
463 mtx_unlock(&nmp->nm_mtx);
465 if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
466 /* No caching/ no readaheads. Just read data into the user buffer */
467 return ncl_readrpc(vp, uio, cred);
469 biosize = vp->v_bufobj.bo_bsize;
470 seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);
472 error = nfs_bioread_check_cons(vp, td, cred);
479 mtx_lock(&np->n_mtx);
481 mtx_unlock(&np->n_mtx);
483 switch (vp->v_type) {
485 NFSINCRGLOBAL(newnfsstats.biocache_reads);
486 lbn = uio->uio_offset / biosize;
487 on = uio->uio_offset - (lbn * biosize);
490 * Start the read ahead(s), as required.
492 if (nmp->nm_readahead > 0) {
493 for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
494 (off_t)(lbn + 1 + nra) * biosize < nsize; nra++) {
495 rabn = lbn + 1 + nra;
496 if (incore(&vp->v_bufobj, rabn) == NULL) {
497 rabp = nfs_getcacheblk(vp, rabn, biosize, td);
499 error = newnfs_sigintr(nmp, td);
500 return (error ? error : EINTR);
502 if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
503 rabp->b_flags |= B_ASYNC;
504 rabp->b_iocmd = BIO_READ;
505 vfs_busy_pages(rabp, 0);
506 if (ncl_asyncio(nmp, rabp, cred, td)) {
507 rabp->b_flags |= B_INVAL;
508 rabp->b_ioflags |= BIO_ERROR;
509 vfs_unbusy_pages(rabp);
520 /* Note that bcount is *not* DEV_BSIZE aligned. */
522 if ((off_t)lbn * biosize >= nsize) {
524 } else if ((off_t)(lbn + 1) * biosize > nsize) {
525 bcount = nsize - (off_t)lbn * biosize;
527 bp = nfs_getcacheblk(vp, lbn, bcount, td);
530 error = newnfs_sigintr(nmp, td);
531 return (error ? error : EINTR);
535 * If B_CACHE is not set, we must issue the read. If this
536 * fails, we return an error.
539 if ((bp->b_flags & B_CACHE) == 0) {
540 bp->b_iocmd = BIO_READ;
541 vfs_busy_pages(bp, 0);
542 error = ncl_doio(vp, bp, cred, td, 0);
550 * on is the offset into the current bp. Figure out how many
551 * bytes we can copy out of the bp. Note that bcount is
552 * NOT DEV_BSIZE aligned.
554 * Then figure out how many bytes we can copy into the uio.
559 n = MIN((unsigned)(bcount - on), uio->uio_resid);
562 NFSINCRGLOBAL(newnfsstats.biocache_readlinks);
563 bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
565 error = newnfs_sigintr(nmp, td);
566 return (error ? error : EINTR);
568 if ((bp->b_flags & B_CACHE) == 0) {
569 bp->b_iocmd = BIO_READ;
570 vfs_busy_pages(bp, 0);
571 error = ncl_doio(vp, bp, cred, td, 0);
573 bp->b_ioflags |= BIO_ERROR;
578 n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
582 NFSINCRGLOBAL(newnfsstats.biocache_readdirs);
583 if (np->n_direofoffset
584 && uio->uio_offset >= np->n_direofoffset) {
587 lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
588 on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
589 bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
591 error = newnfs_sigintr(nmp, td);
592 return (error ? error : EINTR);
594 if ((bp->b_flags & B_CACHE) == 0) {
595 bp->b_iocmd = BIO_READ;
596 vfs_busy_pages(bp, 0);
597 error = ncl_doio(vp, bp, cred, td, 0);
601 while (error == NFSERR_BAD_COOKIE) {
603 error = ncl_vinvalbuf(vp, 0, td, 1);
605 * Yuck! The directory has been modified on the
606 * server. The only way to get the block is by
607 * reading from the beginning to get all the
610 * Leave the last bp intact unless there is an error.
611 * Loop back up to the while if the error is another
612 * NFSERR_BAD_COOKIE (double yuch!).
614 for (i = 0; i <= lbn && !error; i++) {
615 if (np->n_direofoffset
616 && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
618 bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
620 error = newnfs_sigintr(nmp, td);
621 return (error ? error : EINTR);
623 if ((bp->b_flags & B_CACHE) == 0) {
624 bp->b_iocmd = BIO_READ;
625 vfs_busy_pages(bp, 0);
626 error = ncl_doio(vp, bp, cred, td, 0);
628 * no error + B_INVAL == directory EOF,
631 if (error == 0 && (bp->b_flags & B_INVAL))
635 * An error will throw away the block and the
636 * for loop will break out. If no error and this
637 * is not the block we want, we throw away the
638 * block and go for the next one via the for loop.
640 if (error || i < lbn)
645 * The above while is repeated if we hit another cookie
646 * error. If we hit an error and it wasn't a cookie error,
654 * If not eof and read aheads are enabled, start one.
655 * (You need the current block first, so that you have the
656 * directory offset cookie of the next block.)
658 if (nmp->nm_readahead > 0 &&
659 (bp->b_flags & B_INVAL) == 0 &&
660 (np->n_direofoffset == 0 ||
661 (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
662 incore(&vp->v_bufobj, lbn + 1) == NULL) {
663 rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
665 if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
666 rabp->b_flags |= B_ASYNC;
667 rabp->b_iocmd = BIO_READ;
668 vfs_busy_pages(rabp, 0);
669 if (ncl_asyncio(nmp, rabp, cred, td)) {
670 rabp->b_flags |= B_INVAL;
671 rabp->b_ioflags |= BIO_ERROR;
672 vfs_unbusy_pages(rabp);
681 * Unlike VREG files, whos buffer size ( bp->b_bcount ) is
682 * chopped for the EOF condition, we cannot tell how large
683 * NFS directories are going to be until we hit EOF. So
684 * an NFS directory buffer is *not* chopped to its EOF. Now,
685 * it just so happens that b_resid will effectively chop it
686 * to EOF. *BUT* this information is lost if the buffer goes
687 * away and is reconstituted into a B_CACHE state ( due to
688 * being VMIO ) later. So we keep track of the directory eof
689 * in np->n_direofoffset and chop it off as an extra step
692 n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
693 if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
694 n = np->n_direofoffset - uio->uio_offset;
697 ncl_printf(" ncl_bioread: type %x unexpected\n", vp->v_type);
703 error = vn_io_fault_uiomove(bp->b_data + on, (int)n, uio);
705 if (vp->v_type == VLNK)
709 } while (error == 0 && uio->uio_resid > 0 && n > 0);
714 * The NFS write path cannot handle iovecs with len > 1. So we need to
715 * break up iovecs accordingly (restricting them to wsize).
716 * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
717 * For the ASYNC case, 2 copies are needed. The first a copy from the
718 * user buffer to a staging buffer and then a second copy from the staging
719 * buffer to mbufs. This can be optimized by copying from the user buffer
720 * directly into mbufs and passing the chain down, but that requires a
721 * fair amount of re-working of the relevant codepaths (and can be done
725 nfs_directio_write(vp, uiop, cred, ioflag)
732 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
733 struct thread *td = uiop->uio_td;
737 mtx_lock(&nmp->nm_mtx);
738 wsize = nmp->nm_wsize;
739 mtx_unlock(&nmp->nm_mtx);
740 if (ioflag & IO_SYNC) {
741 int iomode, must_commit;
745 while (uiop->uio_resid > 0) {
746 size = MIN(uiop->uio_resid, wsize);
747 size = MIN(uiop->uio_iov->iov_len, size);
748 iov.iov_base = uiop->uio_iov->iov_base;
752 uio.uio_offset = uiop->uio_offset;
753 uio.uio_resid = size;
754 uio.uio_segflg = UIO_USERSPACE;
755 uio.uio_rw = UIO_WRITE;
757 iomode = NFSWRITE_FILESYNC;
758 error = ncl_writerpc(vp, &uio, cred, &iomode,
760 KASSERT((must_commit == 0),
761 ("ncl_directio_write: Did not commit write"));
764 uiop->uio_offset += size;
765 uiop->uio_resid -= size;
766 if (uiop->uio_iov->iov_len <= size) {
770 uiop->uio_iov->iov_base =
771 (char *)uiop->uio_iov->iov_base + size;
772 uiop->uio_iov->iov_len -= size;
781 * Break up the write into blocksize chunks and hand these
782 * over to nfsiod's for write back.
783 * Unfortunately, this incurs a copy of the data. Since
784 * the user could modify the buffer before the write is
787 * The obvious optimization here is that one of the 2 copies
788 * in the async write path can be eliminated by copying the
789 * data here directly into mbufs and passing the mbuf chain
790 * down. But that will require a fair amount of re-working
791 * of the code and can be done if there's enough interest
792 * in NFS directio access.
794 while (uiop->uio_resid > 0) {
795 size = MIN(uiop->uio_resid, wsize);
796 size = MIN(uiop->uio_iov->iov_len, size);
797 bp = getpbuf(&ncl_pbuf_freecnt);
798 t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
799 t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
800 t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
801 t_iov->iov_len = size;
802 t_uio->uio_iov = t_iov;
803 t_uio->uio_iovcnt = 1;
804 t_uio->uio_offset = uiop->uio_offset;
805 t_uio->uio_resid = size;
806 t_uio->uio_segflg = UIO_SYSSPACE;
807 t_uio->uio_rw = UIO_WRITE;
809 KASSERT(uiop->uio_segflg == UIO_USERSPACE ||
810 uiop->uio_segflg == UIO_SYSSPACE,
811 ("nfs_directio_write: Bad uio_segflg"));
812 if (uiop->uio_segflg == UIO_USERSPACE) {
813 error = copyin(uiop->uio_iov->iov_base,
814 t_iov->iov_base, size);
819 * UIO_SYSSPACE may never happen, but handle
820 * it just in case it does.
822 bcopy(uiop->uio_iov->iov_base, t_iov->iov_base,
824 bp->b_flags |= B_DIRECT;
825 bp->b_iocmd = BIO_WRITE;
826 if (cred != NOCRED) {
830 bp->b_wcred = NOCRED;
831 bp->b_caller1 = (void *)t_uio;
833 error = ncl_asyncio(nmp, bp, NOCRED, td);
836 free(t_iov->iov_base, M_NFSDIRECTIO);
837 free(t_iov, M_NFSDIRECTIO);
838 free(t_uio, M_NFSDIRECTIO);
840 relpbuf(bp, &ncl_pbuf_freecnt);
845 uiop->uio_offset += size;
846 uiop->uio_resid -= size;
847 if (uiop->uio_iov->iov_len <= size) {
851 uiop->uio_iov->iov_base =
852 (char *)uiop->uio_iov->iov_base + size;
853 uiop->uio_iov->iov_len -= size;
861 * Vnode op for write using bio
864 ncl_write(struct vop_write_args *ap)
867 struct uio *uio = ap->a_uio;
868 struct thread *td = uio->uio_td;
869 struct vnode *vp = ap->a_vp;
870 struct nfsnode *np = VTONFS(vp);
871 struct ucred *cred = ap->a_cred;
872 int ioflag = ap->a_ioflag;
875 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
878 int bp_cached, n, on, error = 0, error1;
879 size_t orig_resid, local_resid;
880 off_t orig_size, tmp_off;
882 KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode"));
883 KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
885 if (vp->v_type != VREG)
887 mtx_lock(&np->n_mtx);
888 if (np->n_flag & NWRITEERR) {
889 np->n_flag &= ~NWRITEERR;
890 mtx_unlock(&np->n_mtx);
891 return (np->n_error);
893 mtx_unlock(&np->n_mtx);
894 mtx_lock(&nmp->nm_mtx);
895 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
896 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
897 mtx_unlock(&nmp->nm_mtx);
898 (void)ncl_fsinfo(nmp, vp, cred, td);
899 mtx_lock(&nmp->nm_mtx);
901 if (nmp->nm_wsize == 0)
902 (void) newnfs_iosize(nmp);
903 mtx_unlock(&nmp->nm_mtx);
906 * Synchronously flush pending buffers if we are in synchronous
907 * mode or if we are appending.
909 if (ioflag & (IO_APPEND | IO_SYNC)) {
910 mtx_lock(&np->n_mtx);
911 if (np->n_flag & NMODIFIED) {
912 mtx_unlock(&np->n_mtx);
913 #ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
915 * Require non-blocking, synchronous writes to
916 * dirty files to inform the program it needs
917 * to fsync(2) explicitly.
919 if (ioflag & IO_NDELAY)
924 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
925 error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
929 mtx_unlock(&np->n_mtx);
932 orig_resid = uio->uio_resid;
933 mtx_lock(&np->n_mtx);
934 orig_size = np->n_size;
935 mtx_unlock(&np->n_mtx);
938 * If IO_APPEND then load uio_offset. We restart here if we cannot
939 * get the append lock.
941 if (ioflag & IO_APPEND) {
943 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
944 error = VOP_GETATTR(vp, &vattr, cred);
947 mtx_lock(&np->n_mtx);
948 uio->uio_offset = np->n_size;
949 mtx_unlock(&np->n_mtx);
952 if (uio->uio_offset < 0)
954 tmp_off = uio->uio_offset + uio->uio_resid;
955 if (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)
957 if (uio->uio_resid == 0)
960 if (newnfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
961 return nfs_directio_write(vp, uio, cred, ioflag);
964 * Maybe this should be above the vnode op call, but so long as
965 * file servers have no limits, i don't think it matters
967 if (vn_rlimit_fsize(vp, uio, td))
970 biosize = vp->v_bufobj.bo_bsize;
972 * Find all of this file's B_NEEDCOMMIT buffers. If our writes
973 * would exceed the local maximum per-file write commit size when
974 * combined with those, we must decide whether to flush,
975 * go synchronous, or return error. We don't bother checking
976 * IO_UNIT -- we just make all writes atomic anyway, as there's
977 * no point optimizing for something that really won't ever happen.
979 if (!(ioflag & IO_SYNC)) {
982 mtx_lock(&np->n_mtx);
984 mtx_unlock(&np->n_mtx);
986 if (nmp->nm_wcommitsize < uio->uio_resid) {
988 * If this request could not possibly be completed
989 * without exceeding the maximum outstanding write
990 * commit size, see if we can convert it into a
991 * synchronous write operation.
993 if (ioflag & IO_NDELAY)
996 if (nflag & NMODIFIED)
998 } else if (nflag & NMODIFIED) {
1000 BO_LOCK(&vp->v_bufobj);
1001 if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
1002 TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
1004 if (bp->b_flags & B_NEEDCOMMIT)
1005 wouldcommit += bp->b_bcount;
1008 BO_UNLOCK(&vp->v_bufobj);
1010 * Since we're not operating synchronously and
1011 * bypassing the buffer cache, we are in a commit
1012 * and holding all of these buffers whether
1013 * transmitted or not. If not limited, this
1014 * will lead to the buffer cache deadlocking,
1015 * as no one else can flush our uncommitted buffers.
1017 wouldcommit += uio->uio_resid;
1019 * If we would initially exceed the maximum
1020 * outstanding write commit size, flush and restart.
1022 if (wouldcommit > nmp->nm_wcommitsize)
1026 goto flush_and_restart;
1030 NFSINCRGLOBAL(newnfsstats.biocache_writes);
1031 lbn = uio->uio_offset / biosize;
1032 on = uio->uio_offset - (lbn * biosize);
1033 n = MIN((unsigned)(biosize - on), uio->uio_resid);
1036 * Handle direct append and file extension cases, calculate
1037 * unaligned buffer size.
1039 mtx_lock(&np->n_mtx);
1040 if (uio->uio_offset == np->n_size && n) {
1041 mtx_unlock(&np->n_mtx);
1043 * Get the buffer (in its pre-append state to maintain
1044 * B_CACHE if it was previously set). Resize the
1045 * nfsnode after we have locked the buffer to prevent
1046 * readers from reading garbage.
1049 bp = nfs_getcacheblk(vp, lbn, bcount, td);
1054 mtx_lock(&np->n_mtx);
1055 np->n_size = uio->uio_offset + n;
1056 np->n_flag |= NMODIFIED;
1057 vnode_pager_setsize(vp, np->n_size);
1058 mtx_unlock(&np->n_mtx);
1060 save = bp->b_flags & B_CACHE;
1062 allocbuf(bp, bcount);
1063 bp->b_flags |= save;
1067 * Obtain the locked cache block first, and then
1068 * adjust the file's size as appropriate.
1071 if ((off_t)lbn * biosize + bcount < np->n_size) {
1072 if ((off_t)(lbn + 1) * biosize < np->n_size)
1075 bcount = np->n_size - (off_t)lbn * biosize;
1077 mtx_unlock(&np->n_mtx);
1078 bp = nfs_getcacheblk(vp, lbn, bcount, td);
1079 mtx_lock(&np->n_mtx);
1080 if (uio->uio_offset + n > np->n_size) {
1081 np->n_size = uio->uio_offset + n;
1082 np->n_flag |= NMODIFIED;
1083 vnode_pager_setsize(vp, np->n_size);
1085 mtx_unlock(&np->n_mtx);
1089 error = newnfs_sigintr(nmp, td);
1096 * Issue a READ if B_CACHE is not set. In special-append
1097 * mode, B_CACHE is based on the buffer prior to the write
1098 * op and is typically set, avoiding the read. If a read
1099 * is required in special append mode, the server will
1100 * probably send us a short-read since we extended the file
1101 * on our end, resulting in b_resid == 0 and, thusly,
1102 * B_CACHE getting set.
1104 * We can also avoid issuing the read if the write covers
1105 * the entire buffer. We have to make sure the buffer state
1106 * is reasonable in this case since we will not be initiating
1107 * I/O. See the comments in kern/vfs_bio.c's getblk() for
1110 * B_CACHE may also be set due to the buffer being cached
1115 if (on == 0 && n == bcount) {
1116 if ((bp->b_flags & B_CACHE) == 0)
1118 bp->b_flags |= B_CACHE;
1119 bp->b_flags &= ~B_INVAL;
1120 bp->b_ioflags &= ~BIO_ERROR;
1123 if ((bp->b_flags & B_CACHE) == 0) {
1124 bp->b_iocmd = BIO_READ;
1125 vfs_busy_pages(bp, 0);
1126 error = ncl_doio(vp, bp, cred, td, 0);
1132 if (bp->b_wcred == NOCRED)
1133 bp->b_wcred = crhold(cred);
1134 mtx_lock(&np->n_mtx);
1135 np->n_flag |= NMODIFIED;
1136 mtx_unlock(&np->n_mtx);
1139 * If dirtyend exceeds file size, chop it down. This should
1140 * not normally occur but there is an append race where it
1141 * might occur XXX, so we log it.
1143 * If the chopping creates a reverse-indexed or degenerate
1144 * situation with dirtyoff/end, we 0 both of them.
1147 if (bp->b_dirtyend > bcount) {
1148 ncl_printf("NFS append race @%lx:%d\n",
1149 (long)bp->b_blkno * DEV_BSIZE,
1150 bp->b_dirtyend - bcount);
1151 bp->b_dirtyend = bcount;
1154 if (bp->b_dirtyoff >= bp->b_dirtyend)
1155 bp->b_dirtyoff = bp->b_dirtyend = 0;
1158 * If the new write will leave a contiguous dirty
1159 * area, just update the b_dirtyoff and b_dirtyend,
1160 * otherwise force a write rpc of the old dirty area.
1162 * While it is possible to merge discontiguous writes due to
1163 * our having a B_CACHE buffer ( and thus valid read data
1164 * for the hole), we don't because it could lead to
1165 * significant cache coherency problems with multiple clients,
1166 * especially if locking is implemented later on.
1168 * As an optimization we could theoretically maintain
1169 * a linked list of discontinuous areas, but we would still
1170 * have to commit them separately so there isn't much
1171 * advantage to it except perhaps a bit of asynchronization.
1174 if (bp->b_dirtyend > 0 &&
1175 (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
1176 if (bwrite(bp) == EINTR) {
1183 local_resid = uio->uio_resid;
1184 error = vn_io_fault_uiomove((char *)bp->b_data + on, n, uio);
1186 if (error != 0 && !bp_cached) {
1188 * This block has no other content then what
1189 * possibly was written by the faulty uiomove.
1190 * Release it, forgetting the data pages, to
1191 * prevent the leak of uninitialized data to
1194 bp->b_ioflags |= BIO_ERROR;
1196 uio->uio_offset -= local_resid - uio->uio_resid;
1197 uio->uio_resid = local_resid;
1202 * Since this block is being modified, it must be written
1203 * again and not just committed. Since write clustering does
1204 * not work for the stage 1 data write, only the stage 2
1205 * commit rpc, we have to clear B_CLUSTEROK as well.
1207 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1210 * Get the partial update on the progress made from
1211 * uiomove, if an error occured.
1214 n = local_resid - uio->uio_resid;
1217 * Only update dirtyoff/dirtyend if not a degenerate
1221 if (bp->b_dirtyend > 0) {
1222 bp->b_dirtyoff = min(on, bp->b_dirtyoff);
1223 bp->b_dirtyend = max((on + n), bp->b_dirtyend);
1225 bp->b_dirtyoff = on;
1226 bp->b_dirtyend = on + n;
1228 vfs_bio_set_valid(bp, on, n);
1232 * If IO_SYNC do bwrite().
1234 * IO_INVAL appears to be unused. The idea appears to be
1235 * to turn off caching in this case. Very odd. XXX
1237 if ((ioflag & IO_SYNC)) {
1238 if (ioflag & IO_INVAL)
1239 bp->b_flags |= B_NOCACHE;
1240 error1 = bwrite(bp);
1246 } else if ((n + on) == biosize) {
1247 bp->b_flags |= B_ASYNC;
1248 (void) ncl_writebp(bp, 0, NULL);
1255 } while (uio->uio_resid > 0 && n > 0);
1258 if (ioflag & IO_UNIT) {
1260 vattr.va_size = orig_size;
1261 /* IO_SYNC is handled implicitely */
1262 (void)VOP_SETATTR(vp, &vattr, cred);
1263 uio->uio_offset -= orig_resid - uio->uio_resid;
1264 uio->uio_resid = orig_resid;
1272 * Get an nfs cache block.
1274 * Allocate a new one if the block isn't currently in the cache
1275 * and return the block marked busy. If the calling process is
1276 * interrupted by a signal for an interruptible mount point, return
1279 * The caller must carefully deal with the possible B_INVAL state of
1280 * the buffer. ncl_doio() clears B_INVAL (and ncl_asyncio() clears it
1281 * indirectly), so synchronous reads can be issued without worrying about
1282 * the B_INVAL state. We have to be a little more careful when dealing
1283 * with writes (see comments in nfs_write()) when extending a file past
1287 nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
1291 struct nfsmount *nmp;
1296 if (nmp->nm_flag & NFSMNT_INT) {
1299 newnfs_set_sigmask(td, &oldset);
1300 bp = getblk(vp, bn, size, PCATCH, 0, 0);
1301 newnfs_restore_sigmask(td, &oldset);
1302 while (bp == NULL) {
1303 if (newnfs_sigintr(nmp, td))
1305 bp = getblk(vp, bn, size, 0, 2 * hz, 0);
1308 bp = getblk(vp, bn, size, 0, 0, 0);
1311 if (vp->v_type == VREG)
1312 bp->b_blkno = bn * (vp->v_bufobj.bo_bsize / DEV_BSIZE);
1317 * Flush and invalidate all dirty buffers. If another process is already
1318 * doing the flush, just wait for completion.
1321 ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
1323 struct nfsnode *np = VTONFS(vp);
1324 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1325 int error = 0, slpflag, slptimeo;
1328 ASSERT_VOP_LOCKED(vp, "ncl_vinvalbuf");
1330 if ((nmp->nm_flag & NFSMNT_INT) == 0)
1332 if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF))
1342 old_lock = ncl_upgrade_vnlock(vp);
1343 if (vp->v_iflag & VI_DOOMED) {
1345 * Since vgonel() uses the generic vinvalbuf() to flush
1346 * dirty buffers and it does not call this function, it
1347 * is safe to just return OK when VI_DOOMED is set.
1349 ncl_downgrade_vnlock(vp, old_lock);
1354 * Now, flush as required.
1356 if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
1357 VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
1358 vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
1359 VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
1361 * If the page clean was interrupted, fail the invalidation.
1362 * Not doing so, we run the risk of losing dirty pages in the
1363 * vinvalbuf() call below.
1365 if (intrflg && (error = newnfs_sigintr(nmp, td)))
1369 error = vinvalbuf(vp, flags, slpflag, 0);
1371 if (intrflg && (error = newnfs_sigintr(nmp, td)))
1373 error = vinvalbuf(vp, flags, 0, slptimeo);
1375 if (NFSHASPNFS(nmp)) {
1376 nfscl_layoutcommit(vp, td);
1378 * Invalidate the attribute cache, since writes to a DS
1379 * won't update the size attribute.
1381 mtx_lock(&np->n_mtx);
1382 np->n_attrstamp = 0;
1384 mtx_lock(&np->n_mtx);
1385 if (np->n_directio_asyncwr == 0)
1386 np->n_flag &= ~NMODIFIED;
1387 mtx_unlock(&np->n_mtx);
1389 ncl_downgrade_vnlock(vp, old_lock);
1394 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
1395 * This is mainly to avoid queueing async I/O requests when the nfsiods
1396 * are all hung on a dead server.
1398 * Note: ncl_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
1399 * is eventually dequeued by the async daemon, ncl_doio() *will*.
1402 ncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
1411 * Commits are usually short and sweet so lets save some cpu and
1412 * leave the async daemons for more important rpc's (such as reads
1415 * Readdirplus RPCs do vget()s to acquire the vnodes for entries
1416 * in the directory in order to update attributes. This can deadlock
1417 * with another thread that is waiting for async I/O to be done by
1418 * an nfsiod thread while holding a lock on one of these vnodes.
1419 * To avoid this deadlock, don't allow the async nfsiod threads to
1420 * perform Readdirplus RPCs.
1422 mtx_lock(&ncl_iod_mutex);
1423 if ((bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
1424 (nmp->nm_bufqiods > ncl_numasync / 2)) ||
1425 (bp->b_vp->v_type == VDIR && (nmp->nm_flag & NFSMNT_RDIRPLUS))) {
1426 mtx_unlock(&ncl_iod_mutex);
1430 if (nmp->nm_flag & NFSMNT_INT)
1435 * Find a free iod to process this request.
1437 for (iod = 0; iod < ncl_numasync; iod++)
1438 if (ncl_iodwant[iod] == NFSIOD_AVAILABLE) {
1444 * Try to create one if none are free.
1450 * Found one, so wake it up and tell it which
1453 NFS_DPF(ASYNCIO, ("ncl_asyncio: waking iod %d for mount %p\n",
1455 ncl_iodwant[iod] = NFSIOD_NOT_AVAILABLE;
1456 ncl_iodmount[iod] = nmp;
1458 wakeup(&ncl_iodwant[iod]);
1462 * If none are free, we may already have an iod working on this mount
1463 * point. If so, it will process our request.
1466 if (nmp->nm_bufqiods > 0) {
1468 ("ncl_asyncio: %d iods are already processing mount %p\n",
1469 nmp->nm_bufqiods, nmp));
1475 * If we have an iod which can process the request, then queue
1480 * Ensure that the queue never grows too large. We still want
1481 * to asynchronize so we block rather then return EIO.
1483 while (nmp->nm_bufqlen >= 2*ncl_numasync) {
1485 ("ncl_asyncio: waiting for mount %p queue to drain\n", nmp));
1486 nmp->nm_bufqwant = TRUE;
1487 error = newnfs_msleep(td, &nmp->nm_bufq,
1488 &ncl_iod_mutex, slpflag | PRIBIO, "nfsaio",
1491 error2 = newnfs_sigintr(nmp, td);
1493 mtx_unlock(&ncl_iod_mutex);
1496 if (slpflag == PCATCH) {
1502 * We might have lost our iod while sleeping,
1503 * so check and loop if nescessary.
1508 /* We might have lost our nfsiod */
1509 if (nmp->nm_bufqiods == 0) {
1511 ("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
1515 if (bp->b_iocmd == BIO_READ) {
1516 if (bp->b_rcred == NOCRED && cred != NOCRED)
1517 bp->b_rcred = crhold(cred);
1519 if (bp->b_wcred == NOCRED && cred != NOCRED)
1520 bp->b_wcred = crhold(cred);
1523 if (bp->b_flags & B_REMFREE)
1526 TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
1528 if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
1529 mtx_lock(&(VTONFS(bp->b_vp))->n_mtx);
1530 VTONFS(bp->b_vp)->n_flag |= NMODIFIED;
1531 VTONFS(bp->b_vp)->n_directio_asyncwr++;
1532 mtx_unlock(&(VTONFS(bp->b_vp))->n_mtx);
1534 mtx_unlock(&ncl_iod_mutex);
1538 mtx_unlock(&ncl_iod_mutex);
1541 * All the iods are busy on other mounts, so return EIO to
1542 * force the caller to process the i/o synchronously.
1544 NFS_DPF(ASYNCIO, ("ncl_asyncio: no iods available, i/o is synchronous\n"));
1549 ncl_doio_directwrite(struct buf *bp)
1551 int iomode, must_commit;
1552 struct uio *uiop = (struct uio *)bp->b_caller1;
1553 char *iov_base = uiop->uio_iov->iov_base;
1555 iomode = NFSWRITE_FILESYNC;
1556 uiop->uio_td = NULL; /* NULL since we're in nfsiod */
1557 ncl_writerpc(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit, 0);
1558 KASSERT((must_commit == 0), ("ncl_doio_directwrite: Did not commit write"));
1559 free(iov_base, M_NFSDIRECTIO);
1560 free(uiop->uio_iov, M_NFSDIRECTIO);
1561 free(uiop, M_NFSDIRECTIO);
1562 if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
1563 struct nfsnode *np = VTONFS(bp->b_vp);
1564 mtx_lock(&np->n_mtx);
1565 if (NFSHASPNFS(VFSTONFS(vnode_mount(bp->b_vp)))) {
1567 * Invalidate the attribute cache, since writes to a DS
1568 * won't update the size attribute.
1570 np->n_attrstamp = 0;
1572 np->n_directio_asyncwr--;
1573 if (np->n_directio_asyncwr == 0) {
1574 np->n_flag &= ~NMODIFIED;
1575 if ((np->n_flag & NFSYNCWAIT)) {
1576 np->n_flag &= ~NFSYNCWAIT;
1577 wakeup((caddr_t)&np->n_directio_asyncwr);
1580 mtx_unlock(&np->n_mtx);
1583 relpbuf(bp, &ncl_pbuf_freecnt);
1587 * Do an I/O operation to/from a cache block. This may be called
1588 * synchronously or from an nfsiod.
1591 ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td,
1592 int called_from_strategy)
1596 struct nfsmount *nmp;
1597 int error = 0, iomode, must_commit = 0;
1600 struct proc *p = td ? td->td_proc : NULL;
1604 nmp = VFSTONFS(vp->v_mount);
1606 uiop->uio_iov = &io;
1607 uiop->uio_iovcnt = 1;
1608 uiop->uio_segflg = UIO_SYSSPACE;
1612 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O. We
1613 * do this here so we do not have to do it in all the code that
1616 bp->b_flags &= ~B_INVAL;
1617 bp->b_ioflags &= ~BIO_ERROR;
1619 KASSERT(!(bp->b_flags & B_DONE), ("ncl_doio: bp %p already marked done", bp));
1620 iocmd = bp->b_iocmd;
1621 if (iocmd == BIO_READ) {
1622 io.iov_len = uiop->uio_resid = bp->b_bcount;
1623 io.iov_base = bp->b_data;
1624 uiop->uio_rw = UIO_READ;
1626 switch (vp->v_type) {
1628 uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
1629 NFSINCRGLOBAL(newnfsstats.read_bios);
1630 error = ncl_readrpc(vp, uiop, cr);
1633 if (uiop->uio_resid) {
1635 * If we had a short read with no error, we must have
1636 * hit a file hole. We should zero-fill the remainder.
1637 * This can also occur if the server hits the file EOF.
1639 * Holes used to be able to occur due to pending
1640 * writes, but that is not possible any longer.
1642 int nread = bp->b_bcount - uiop->uio_resid;
1643 ssize_t left = uiop->uio_resid;
1646 bzero((char *)bp->b_data + nread, left);
1647 uiop->uio_resid = 0;
1650 /* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */
1651 if (p && (vp->v_vflag & VV_TEXT)) {
1652 mtx_lock(&np->n_mtx);
1653 if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) {
1654 mtx_unlock(&np->n_mtx);
1656 killproc(p, "text file modification");
1659 mtx_unlock(&np->n_mtx);
1663 uiop->uio_offset = (off_t)0;
1664 NFSINCRGLOBAL(newnfsstats.readlink_bios);
1665 error = ncl_readlinkrpc(vp, uiop, cr);
1668 NFSINCRGLOBAL(newnfsstats.readdir_bios);
1669 uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
1670 if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
1671 error = ncl_readdirplusrpc(vp, uiop, cr, td);
1672 if (error == NFSERR_NOTSUPP)
1673 nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
1675 if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
1676 error = ncl_readdirrpc(vp, uiop, cr, td);
1678 * end-of-directory sets B_INVAL but does not generate an
1681 if (error == 0 && uiop->uio_resid == bp->b_bcount)
1682 bp->b_flags |= B_INVAL;
1685 ncl_printf("ncl_doio: type %x unexpected\n", vp->v_type);
1689 bp->b_ioflags |= BIO_ERROR;
1690 bp->b_error = error;
1694 * If we only need to commit, try to commit
1696 if (bp->b_flags & B_NEEDCOMMIT) {
1700 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
1701 retv = ncl_commit(vp, off, bp->b_dirtyend-bp->b_dirtyoff,
1704 bp->b_dirtyoff = bp->b_dirtyend = 0;
1705 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1710 if (retv == NFSERR_STALEWRITEVERF) {
1711 ncl_clearcommit(vp->v_mount);
1716 * Setup for actual write
1718 mtx_lock(&np->n_mtx);
1719 if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
1720 bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
1721 mtx_unlock(&np->n_mtx);
1723 if (bp->b_dirtyend > bp->b_dirtyoff) {
1724 io.iov_len = uiop->uio_resid = bp->b_dirtyend
1726 uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
1728 io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
1729 uiop->uio_rw = UIO_WRITE;
1730 NFSINCRGLOBAL(newnfsstats.write_bios);
1732 if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
1733 iomode = NFSWRITE_UNSTABLE;
1735 iomode = NFSWRITE_FILESYNC;
1737 error = ncl_writerpc(vp, uiop, cr, &iomode, &must_commit,
1738 called_from_strategy);
1741 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
1742 * to cluster the buffers needing commit. This will allow
1743 * the system to submit a single commit rpc for the whole
1744 * cluster. We can do this even if the buffer is not 100%
1745 * dirty (relative to the NFS blocksize), so we optimize the
1746 * append-to-file-case.
1748 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
1749 * cleared because write clustering only works for commit
1750 * rpc's, not for the data portion of the write).
1753 if (!error && iomode == NFSWRITE_UNSTABLE) {
1754 bp->b_flags |= B_NEEDCOMMIT;
1755 if (bp->b_dirtyoff == 0
1756 && bp->b_dirtyend == bp->b_bcount)
1757 bp->b_flags |= B_CLUSTEROK;
1759 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1763 * For an interrupted write, the buffer is still valid
1764 * and the write hasn't been pushed to the server yet,
1765 * so we can't set BIO_ERROR and report the interruption
1766 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
1767 * is not relevant, so the rpc attempt is essentially
1768 * a noop. For the case of a V3 write rpc not being
1769 * committed to stable storage, the block is still
1770 * dirty and requires either a commit rpc or another
1771 * write rpc with iomode == NFSV3WRITE_FILESYNC before
1772 * the block is reused. This is indicated by setting
1773 * the B_DELWRI and B_NEEDCOMMIT flags.
1775 * EIO is returned by ncl_writerpc() to indicate a recoverable
1776 * write error and is handled as above, except that
1777 * B_EINTR isn't set. One cause of this is a stale stateid
1778 * error for the RPC that indicates recovery is required,
1779 * when called with called_from_strategy != 0.
1781 * If the buffer is marked B_PAGING, it does not reside on
1782 * the vp's paging queues so we cannot call bdirty(). The
1783 * bp in this case is not an NFS cache block so we should
1786 * The logic below breaks up errors into recoverable and
1787 * unrecoverable. For the former, we clear B_INVAL|B_NOCACHE
1788 * and keep the buffer around for potential write retries.
1789 * For the latter (eg ESTALE), we toss the buffer away (B_INVAL)
1790 * and save the error in the nfsnode. This is less than ideal
1791 * but necessary. Keeping such buffers around could potentially
1792 * cause buffer exhaustion eventually (they can never be written
1793 * out, so will get constantly be re-dirtied). It also causes
1794 * all sorts of vfs panics. For non-recoverable write errors,
1795 * also invalidate the attrcache, so we'll be forced to go over
1796 * the wire for this object, returning an error to user on next
1797 * call (most of the time).
1799 if (error == EINTR || error == EIO || error == ETIMEDOUT
1800 || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
1804 bp->b_flags &= ~(B_INVAL|B_NOCACHE);
1805 if ((bp->b_flags & B_PAGING) == 0) {
1807 bp->b_flags &= ~B_DONE;
1809 if ((error == EINTR || error == ETIMEDOUT) &&
1810 (bp->b_flags & B_ASYNC) == 0)
1811 bp->b_flags |= B_EINTR;
1815 bp->b_ioflags |= BIO_ERROR;
1816 bp->b_flags |= B_INVAL;
1817 bp->b_error = np->n_error = error;
1818 mtx_lock(&np->n_mtx);
1819 np->n_flag |= NWRITEERR;
1820 np->n_attrstamp = 0;
1821 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
1822 mtx_unlock(&np->n_mtx);
1824 bp->b_dirtyoff = bp->b_dirtyend = 0;
1832 bp->b_resid = uiop->uio_resid;
1834 ncl_clearcommit(vp->v_mount);
1840 * Used to aid in handling ftruncate() operations on the NFS client side.
1841 * Truncation creates a number of special problems for NFS. We have to
1842 * throw away VM pages and buffer cache buffers that are beyond EOF, and
1843 * we have to properly handle VM pages or (potentially dirty) buffers
1844 * that straddle the truncation point.
1848 ncl_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
1850 struct nfsnode *np = VTONFS(vp);
1852 int biosize = vp->v_bufobj.bo_bsize;
1855 mtx_lock(&np->n_mtx);
1858 mtx_unlock(&np->n_mtx);
1860 if (nsize < tsize) {
1866 * vtruncbuf() doesn't get the buffer overlapping the
1867 * truncation point. We may have a B_DELWRI and/or B_CACHE
1868 * buffer that now needs to be truncated.
1870 error = vtruncbuf(vp, cred, nsize, biosize);
1871 lbn = nsize / biosize;
1872 bufsize = nsize - (lbn * biosize);
1873 bp = nfs_getcacheblk(vp, lbn, bufsize, td);
1876 if (bp->b_dirtyoff > bp->b_bcount)
1877 bp->b_dirtyoff = bp->b_bcount;
1878 if (bp->b_dirtyend > bp->b_bcount)
1879 bp->b_dirtyend = bp->b_bcount;
1880 bp->b_flags |= B_RELBUF; /* don't leave garbage around */
1883 vnode_pager_setsize(vp, nsize);