2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
39 * vnode op calls for Sun NFS version 2 and 3
44 #include <sys/param.h>
45 #include <sys/kernel.h>
46 #include <sys/systm.h>
47 #include <sys/resourcevar.h>
49 #include <sys/mount.h>
52 #include <sys/malloc.h>
54 #include <sys/namei.h>
55 #include <sys/socket.h>
56 #include <sys/vnode.h>
57 #include <sys/dirent.h>
58 #include <sys/fcntl.h>
59 #include <sys/lockf.h>
61 #include <sys/sysctl.h>
62 #include <sys/signalvar.h>
65 #include <vm/vm_object.h>
66 #include <vm/vm_extern.h>
67 #include <vm/vm_object.h>
69 #include <fs/fifofs/fifo.h>
71 #include <rpc/rpcclnt.h>
73 #include <nfs/rpcv2.h>
74 #include <nfs/nfsproto.h>
75 #include <nfsclient/nfs.h>
76 #include <nfsclient/nfsnode.h>
77 #include <nfsclient/nfsmount.h>
78 #include <nfsclient/nfs_lock.h>
79 #include <nfs/xdr_subs.h>
80 #include <nfsclient/nfsm_subs.h>
83 #include <netinet/in.h>
84 #include <netinet/in_var.h>
91 * Ifdef for FreeBSD-current merged buffer cache. It is unfortunate that these
92 * calls are not in getblk() and brelse() so that they would not be necessary
96 #define vfs_busy_pages(bp, f)
99 static vop_read_t nfsfifo_read;
100 static vop_write_t nfsfifo_write;
101 static vop_close_t nfsfifo_close;
102 static int nfs_flush(struct vnode *, int, struct thread *,
104 static int nfs_setattrrpc(struct vnode *, struct vattr *, struct ucred *,
106 static vop_lookup_t nfs_lookup;
107 static vop_create_t nfs_create;
108 static vop_mknod_t nfs_mknod;
109 static vop_open_t nfs_open;
110 static vop_close_t nfs_close;
111 static vop_access_t nfs_access;
112 static vop_getattr_t nfs_getattr;
113 static vop_setattr_t nfs_setattr;
114 static vop_read_t nfs_read;
115 static vop_fsync_t nfs_fsync;
116 static vop_remove_t nfs_remove;
117 static vop_link_t nfs_link;
118 static vop_rename_t nfs_rename;
119 static vop_mkdir_t nfs_mkdir;
120 static vop_rmdir_t nfs_rmdir;
121 static vop_symlink_t nfs_symlink;
122 static vop_readdir_t nfs_readdir;
123 static vop_strategy_t nfs_strategy;
124 static int nfs_lookitup(struct vnode *, const char *, int,
125 struct ucred *, struct thread *, struct nfsnode **);
126 static int nfs_sillyrename(struct vnode *, struct vnode *,
127 struct componentname *);
128 static vop_access_t nfsspec_access;
129 static vop_readlink_t nfs_readlink;
130 static vop_print_t nfs_print;
131 static vop_advlock_t nfs_advlock;
132 static vop_advlockasync_t nfs_advlockasync;
135 * Global vfs data structures for nfs
/*
 * Vnode-operation dispatch table for regular NFS vnodes.  Any
 * operation not listed here falls through to default_vnodeops.
 * NOTE(review): the terminating "};" is not visible in this excerpt.
 */
137 struct vop_vector nfs_vnodeops = {
138 .vop_default = &default_vnodeops,
139 .vop_access = nfs_access,
140 .vop_advlock = nfs_advlock,
141 .vop_advlockasync = nfs_advlockasync,
142 .vop_close = nfs_close,
143 .vop_create = nfs_create,
144 .vop_fsync = nfs_fsync,
145 .vop_getattr = nfs_getattr,
146 .vop_getpages = nfs_getpages,
147 .vop_putpages = nfs_putpages,
148 .vop_inactive = nfs_inactive,
149 .vop_lease = VOP_NULL,		/* leases not supported: no-op */
150 .vop_link = nfs_link,
151 .vop_lookup = nfs_lookup,
152 .vop_mkdir = nfs_mkdir,
153 .vop_mknod = nfs_mknod,
154 .vop_open = nfs_open,
155 .vop_print = nfs_print,
156 .vop_read = nfs_read,
157 .vop_readdir = nfs_readdir,
158 .vop_readlink = nfs_readlink,
159 .vop_reclaim = nfs_reclaim,
160 .vop_remove = nfs_remove,
161 .vop_rename = nfs_rename,
162 .vop_rmdir = nfs_rmdir,
163 .vop_setattr = nfs_setattr,
164 .vop_strategy = nfs_strategy,
165 .vop_symlink = nfs_symlink,
166 .vop_write = nfs_write,
/*
 * Vnode-operation table for fifos living on an NFS mount.  Defaults
 * to the generic fifofs operations (fifo_specops); only attribute,
 * open/close and read/write handling is NFS-specific.
 * NOTE(review): the terminating "};" is not visible in this excerpt.
 */
169 struct vop_vector nfs_fifoops = {
170 .vop_default = &fifo_specops,
171 .vop_access = nfsspec_access,
172 .vop_close = nfsfifo_close,
173 .vop_fsync = nfs_fsync,
174 .vop_getattr = nfs_getattr,
175 .vop_inactive = nfs_inactive,
176 .vop_print = nfs_print,
177 .vop_read = nfsfifo_read,
178 .vop_reclaim = nfs_reclaim,
179 .vop_setattr = nfs_setattr,
180 .vop_write = nfsfifo_write,
183 static int nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp,
184 struct componentname *cnp, struct vattr *vap);
185 static int nfs_removerpc(struct vnode *dvp, const char *name, int namelen,
186 struct ucred *cred, struct thread *td);
187 static int nfs_renamerpc(struct vnode *fdvp, const char *fnameptr,
188 int fnamelen, struct vnode *tdvp,
189 const char *tnameptr, int tnamelen,
190 struct ucred *cred, struct thread *td);
191 static int nfs_renameit(struct vnode *sdvp, struct componentname *scnp,
192 struct sillyrename *sp);
/* Global nfsiod (async I/O daemon) state; nfs_iod_mtx protects it
 * (see the lock-ordering comment above). */
197 struct mtx nfs_iod_mtx;
198 struct proc *nfs_iodwant[NFS_MAXASYNCDAEMON];	/* idle iods waiting for work */
199 struct nfsmount *nfs_iodmount[NFS_MAXASYNCDAEMON];	/* mount each iod serves */
200 int nfs_numasync = 0;	/* presumably the count of active iods — TODO confirm */
/* Pluggable hooks: advisory locking goes to nfs_dolock by default;
 * reclaim hook is unset unless something registers one. */
201 vop_advlock_t *nfs_advlock_p = nfs_dolock;
202 vop_reclaim_t *nfs_reclaim_p = NULL;
/* Size of a struct dirent without its name buffer. */
203 #define DIRHDSIZ (sizeof (struct dirent) - (MAXNAMLEN + 1))
205 SYSCTL_DECL(_vfs_nfs);
/* Lifetime of cached ACCESS results (seconds); 0 disables the cache. */
207 static int nfsaccess_cache_timeout = NFS_MAXATTRTIMO;
208 SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
209 &nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");
/* When set, nfs_close() also commits (not just writes) dirty buffers. */
211 static int nfsv3_commit_on_close = 0;
212 SYSCTL_INT(_vfs_nfs, OID_AUTO, nfsv3_commit_on_close, CTLFLAG_RW,
213 &nfsv3_commit_on_close, 0, "write+commit on close, else only write");
/* When set, nfs_close() pushes dirty mmap'ed pages out to the server. */
215 static int nfs_clean_pages_on_close = 1;
216 SYSCTL_INT(_vfs_nfs, OID_AUTO, clean_pages_on_close, CTLFLAG_RW,
217 &nfs_clean_pages_on_close, 0, "NFS clean dirty pages on close");
/* Master switch for O_DIRECT (uncached) I/O support. */
219 int nfs_directio_enable = 0;
220 SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_enable, CTLFLAG_RW,
221 &nfs_directio_enable, 0, "Enable NFS directio");
224 * This sysctl allows other processes to mmap a file that has been opened
225 * O_DIRECT by a process. In general, having processes mmap the file while
226 * Direct IO is in progress can lead to Data Inconsistencies. But, we allow
227 * this by default to prevent DoS attacks - to prevent a malicious user from
228 * opening up files O_DIRECT preventing other users from mmap'ing these
229 * files. "Protected" environments where stricter consistency guarantees are
230 * required can disable this knob. The process that opened the file O_DIRECT
231 * cannot mmap() the file, because mmap'ed IO on an O_DIRECT open() is not
234 int nfs_directio_allow_mmap = 1;
235 SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_allow_mmap, CTLFLAG_RW,
236 &nfs_directio_allow_mmap, 0, "Enable mmaped IO on file with O_DIRECT opens");
/* Read-only counters exported from nfsstats. */
239 SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_hits, CTLFLAG_RD,
240 &nfsstats.accesscache_hits, 0, "NFS ACCESS cache hit count");
242 SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_misses, CTLFLAG_RD,
243 &nfsstats.accesscache_misses, 0, "NFS ACCESS cache miss count");
/* Union of every NFSv3 ACCESS right; used to prefetch all rights at once. */
246 #define NFSV3ACCESS_ALL (NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY \
247 | NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE \
248 | NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP)
252 * The list of locks after the description of the lock is the ordering
253 * of other locks acquired with the lock held.
254 * np->n_mtx : Protects the fields in the nfsnode.
256 * VI_MTX (acquired indirectly)
257 * nmp->nm_mtx : Protects the fields in the nfsmount.
259 * nfs_iod_mtx : Global lock, protects shared nfsiod state.
260 * nfs_reqq_mtx : Global lock, protects the nfs_reqq list.
263 * rep->r_mtx : Protects the fields in an nfsreq.
/*
 * Issue an NFSv3 ACCESS RPC asking the server for the rights in the
 * "wmode" bit mask, then record the granted rights, the requesting
 * uid and a timestamp in the nfsnode so nfs_access() can answer from
 * cache until nfsaccess_cache_timeout expires.
 * NOTE(review): excerpt is elided here — the return-type line, the
 * "cred" parameter, reply teardown and the return are not visible.
 */
267 nfs3_access_otw(struct vnode *vp, int wmode, struct thread *td,
272 int error = 0, attrflag;
274 struct mbuf *mreq, *mrep, *md, *mb;
277 struct nfsnode *np = VTONFS(vp);
279 nfsstats.rpccnt[NFSPROC_ACCESS]++;
280 mreq = nfsm_reqhead(vp, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
282 bpos = mtod(mb, caddr_t);
/* Marshal the requested access mask in XDR form. */
284 tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED);
285 *tl = txdr_unsigned(wmode);
286 nfsm_request(vp, NFSPROC_ACCESS, td, cred);
287 nfsm_postop_attr(vp, attrflag);
/* Decode the rights mask the server actually granted. */
289 tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
290 rmode = fxdr_unsigned(u_int32_t, *tl);
/* Cache the result: valid only for this uid, stamped for expiry. */
291 mtx_lock(&np->n_mtx);
293 np->n_modeuid = cred->cr_uid;
294 np->n_modestamp = time_second;
295 mtx_unlock(&np->n_mtx);
/*
 * NOTE(review): excerpt is elided throughout this function — several
 * branches, returns and the closing brace are not visible.
 */
303 * nfs access vnode op.
304 * For nfs version 2, just return ok. File accesses may fail later.
305 * For nfs version 3, use the access rpc to check accessibility. If file modes
306 * are changed on the server, accesses might still fail later.
309 nfs_access(struct vop_access_args *ap)
311 struct vnode *vp = ap->a_vp;
313 u_int32_t mode, wmode;
314 int v3 = NFS_ISV3(vp);
315 struct nfsnode *np = VTONFS(vp);
318 * Disallow write attempts on filesystems mounted read-only;
319 * unless the file is a socket, fifo, or a block or character
320 * device resident on the filesystem.
322 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
323 switch (vp->v_type) {
333 * For nfs v3, check to see if we have done this recently, and if
334 * so return our cached result instead of making an ACCESS call.
335 * If not, do an access rpc, otherwise you are stuck emulating
336 * ufs_access() locally using the vattr. This may not be correct,
337 * since the server may apply other access criteria such as
338 * client uid-->server uid mapping that we do not know about.
/* Translate the VFS access mode (VREAD/VWRITE/VEXEC) into the
 * NFSv3 ACCESS bit mask; directories use LOOKUP/DELETE variants. */
341 if (ap->a_mode & VREAD)
342 mode = NFSV3ACCESS_READ;
345 if (vp->v_type != VDIR) {
346 if (ap->a_mode & VWRITE)
347 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
348 if (ap->a_mode & VEXEC)
349 mode |= NFSV3ACCESS_EXECUTE;
351 if (ap->a_mode & VWRITE)
352 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
354 if (ap->a_mode & VEXEC)
355 mode |= NFSV3ACCESS_LOOKUP;
357 /* XXX safety belt, only make blanket request if caching */
/* When caching is on, ask for every right at once so later checks
 * for other modes can also be answered from the cache. */
358 if (nfsaccess_cache_timeout > 0) {
359 wmode = NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY |
360 NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE |
361 NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP;
367 * Does our cached result allow us to give a definite yes to
/* Cache hit requires: not expired, same uid, and all requested
 * bits present in the cached granted mode. */
370 mtx_lock(&np->n_mtx);
371 if ((time_second < (np->n_modestamp + nfsaccess_cache_timeout)) &&
372 (ap->a_cred->cr_uid == np->n_modeuid) &&
373 ((np->n_mode & mode) == mode)) {
374 nfsstats.accesscache_hits++;
377 * Either a no, or a don't know. Go to the wire.
379 nfsstats.accesscache_misses++;
380 mtx_unlock(&np->n_mtx);
381 error = nfs3_access_otw(vp, wmode, ap->a_td,ap->a_cred);
382 mtx_lock(&np->n_mtx);
/* Server granted fewer rights than requested: access denied. */
384 if ((np->n_mode & mode) != mode) {
389 mtx_unlock(&np->n_mtx);
/* v2 path: fall back to local emulation via nfsspec_access(). */
392 if ((error = nfsspec_access(ap)) != 0) {
396 * Attempt to prevent a mapped root from accessing a file
397 * which it shouldn't. We try to read a byte from the file
398 * if the user is root and the file is not zero length.
399 * After calling nfsspec_access, we should have the correct
402 mtx_lock(&np->n_mtx);
403 if (ap->a_cred->cr_uid == 0 && (ap->a_mode & VREAD)
404 && VTONFS(vp)->n_size > 0) {
409 mtx_unlock(&np->n_mtx);
/* Probe read: one byte (or one dir block / the link target) done
 * with the caller's cred so the server makes the real decision. */
412 auio.uio_iov = &aiov;
416 auio.uio_segflg = UIO_SYSSPACE;
417 auio.uio_rw = UIO_READ;
418 auio.uio_td = ap->a_td;
420 if (vp->v_type == VREG)
421 error = nfs_readrpc(vp, &auio, ap->a_cred);
422 else if (vp->v_type == VDIR) {
424 bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK);
426 aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ;
427 error = nfs_readdirrpc(vp, &auio, ap->a_cred);
429 } else if (vp->v_type == VLNK)
430 error = nfs_readlinkrpc(vp, &auio, ap->a_cred);
434 mtx_unlock(&np->n_mtx);
439 int nfs_otw_getattr_avoid = 0;
/*
 * NOTE(review): excerpt is elided in this function — return type,
 * several returns/braces are not visible.
 */
443 * Check to see if the type is ok
444 * and that deletion is not in progress.
445 * For paged in text files, you will need to flush the page cache
446 * if consistency is lost.
450 nfs_open(struct vop_open_args *ap)
452 struct vnode *vp = ap->a_vp;
453 struct nfsnode *np = VTONFS(vp);
456 int fmode = ap->a_mode;
/* Only regular files, directories and symlinks may be opened here. */
458 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK)
462 * Get a valid lease. If cached data is stale, flush it.
/* Locally-modified node: push dirty data first, then refresh and
 * remember the server's mtime for close-to-open consistency. */
464 mtx_lock(&np->n_mtx);
465 if (np->n_flag & NMODIFIED) {
466 mtx_unlock(&np->n_mtx);
467 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
468 if (error == EINTR || error == EIO)
471 if (vp->v_type == VDIR)
472 np->n_direofoffset = 0;	/* cached EOF offset is now stale */
473 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_td);
476 mtx_lock(&np->n_mtx);
477 np->n_mtime = vattr.va_mtime;
478 mtx_unlock(&np->n_mtx);
480 struct thread *td = curthread;
/* Skip the otw GETATTR if this thread already fetched attributes
 * during this same syscall (syscall/tid/pid match). */
482 if (np->n_ac_ts_syscalls != td->td_syscalls ||
483 np->n_ac_ts_tid != td->td_tid ||
484 td->td_proc == NULL ||
485 np->n_ac_ts_pid != td->td_proc->p_pid) {
488 mtx_unlock(&np->n_mtx);
489 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_td);
492 mtx_lock(&np->n_mtx);
/* mtime changed on the server: our cached data is stale; dump it. */
493 if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
494 if (vp->v_type == VDIR)
495 np->n_direofoffset = 0;
496 mtx_unlock(&np->n_mtx);
497 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
498 if (error == EINTR || error == EIO) {
501 mtx_lock(&np->n_mtx);
502 np->n_mtime = vattr.va_mtime;
504 mtx_unlock(&np->n_mtx);
507 * If the object has >= 1 O_DIRECT active opens, we disable caching.
509 if (nfs_directio_enable && (fmode & O_DIRECT) && (vp->v_type == VREG)) {
510 if (np->n_directio_opens == 0) {
/* First O_DIRECT open: flush cached buffers, mark non-cacheable. */
511 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
514 mtx_lock(&np->n_mtx);
515 np->n_flag |= NNONCACHE;
516 mtx_unlock(&np->n_mtx);
518 np->n_directio_opens++;
520 vnode_create_vobject(vp, vattr.va_size, ap->a_td);
/*
 * NOTE(review): excerpt is elided in this function — return type,
 * some branches and the closing brace are not visible.
 */
526 * What an NFS client should do upon close after writing is a debatable issue.
527 * Most NFS clients push delayed writes to the server upon close, basically for
529 * 1 - So that any write errors may be reported back to the client process
530 * doing the close system call. By far the two most likely errors are
531 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
532 * 2 - To put a worst case upper bound on cache inconsistency between
533 * multiple clients for the file.
534 * There is also a consistency problem for Version 2 of the protocol w.r.t.
535 * not being able to tell if other clients are writing a file concurrently,
536 * since there is no way of knowing if the changed modify time in the reply
537 * is only due to the write for this client.
538 * (NFS Version 3 provides weak cache consistency data in the reply that
539 * should be sufficient to detect and handle this case.)
541 * The current code does the following:
542 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
543 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
544 * or commit them (this satisfies 1 and 2 except for the
545 * case where the server crashes after this close but
546 * before the commit RPC, which is felt to be "good
547 * enough". Changing the last argument to nfs_flush() to
548 * a 1 would force a commit operation, if it is felt a
549 * commit is necessary now.
553 nfs_close(struct vop_close_args *ap)
555 struct vnode *vp = ap->a_vp;
556 struct nfsnode *np = VTONFS(vp);
558 int fmode = ap->a_fflag;
560 if (vp->v_type == VREG) {
562 * Examine and clean dirty pages, regardless of NMODIFIED.
563 * This closes a major hole in close-to-open consistency.
564 * We want to push out all dirty pages (and buffers) on
565 * close, regardless of whether they were dirtied by
566 * mmap'ed writes or via write().
568 if (nfs_clean_pages_on_close && vp->v_object) {
569 VM_OBJECT_LOCK(vp->v_object);
570 vm_object_page_clean(vp->v_object, 0, 0, 0);
571 VM_OBJECT_UNLOCK(vp->v_object);
573 mtx_lock(&np->n_mtx);
574 if (np->n_flag & NMODIFIED) {
575 mtx_unlock(&np->n_mtx);
578 * Under NFSv3 we have dirty buffers to dispose of. We
579 * must flush them to the NFS server. We have the option
580 * of waiting all the way through the commit rpc or just
581 * waiting for the initial write. The default is to only
582 * wait through the initial write so the data is in the
583 * server's cache, which is roughly similar to the state
584 * a standard disk subsystem leaves the file in on close().
586 * We cannot clear the NMODIFIED bit in np->n_flag due to
587 * potential races with other processes, and certainly
588 * cannot clear it if we don't commit.
/* v3: flush (optionally commit, per the sysctl knob). */
590 int cm = nfsv3_commit_on_close ? 1 : 0;
591 error = nfs_flush(vp, MNT_WAIT, ap->a_td, cm);
592 /* np->n_flag &= ~NMODIFIED; */
/* v2 path: flush and invalidate everything. */
594 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
595 mtx_lock(&np->n_mtx);
598 * Invalidate the attribute cache in all cases.
599 * An open is going to fetch fresh attrs any way, other procs
600 * on this node that have file open will be forced to do an
601 * otw attr fetch, but this is safe.
/* Report any deferred async write error to this close() caller. */
604 if (np->n_flag & NWRITEERR) {
605 np->n_flag &= ~NWRITEERR;
608 mtx_unlock(&np->n_mtx);
610 if (nfs_directio_enable)
611 KASSERT((np->n_directio_asyncwr == 0),
612 ("nfs_close: dirty unflushed (%d) directio buffers\n",
613 np->n_directio_asyncwr));
/* Last O_DIRECT close re-enables caching on the vnode. */
614 if (nfs_directio_enable && (fmode & O_DIRECT) && (vp->v_type == VREG)) {
615 mtx_lock(&np->n_mtx);
616 KASSERT((np->n_directio_opens > 0),
617 ("nfs_close: unexpectedly value (0) of n_directio_opens\n"));
618 np->n_directio_opens--;
619 if (np->n_directio_opens == 0)
620 np->n_flag &= ~NNONCACHE;
621 mtx_unlock(&np->n_mtx);
/*
 * NOTE(review): excerpt is elided in this function — return type,
 * returns and reply teardown are not visible.
 */
627 * nfs getattr call from vfs.
630 nfs_getattr(struct vop_getattr_args *ap)
632 struct vnode *vp = ap->a_vp;
633 struct nfsnode *np = VTONFS(vp);
636 struct mbuf *mreq, *mrep, *md, *mb;
637 int v3 = NFS_ISV3(vp);
640 * Update local times for special files.
642 mtx_lock(&np->n_mtx);
643 if (np->n_flag & (NACC | NUPD))
645 mtx_unlock(&np->n_mtx);
647 * First look in the cache.
649 if (nfs_getattrcache(vp, ap->a_vap) == 0)
/* Cache miss on v3: piggyback a full ACCESS prefetch, which also
 * refreshes attributes, then retry the cache before going otw. */
651 if (v3 && nfsaccess_cache_timeout > 0) {
652 nfsstats.accesscache_misses++;
653 nfs3_access_otw(vp, NFSV3ACCESS_ALL, ap->a_td, ap->a_cred);
654 if (nfs_getattrcache(vp, ap->a_vap) == 0)
/* Fall back to an explicit GETATTR RPC. */
657 nfsstats.rpccnt[NFSPROC_GETATTR]++;
658 mreq = nfsm_reqhead(vp, NFSPROC_GETATTR, NFSX_FH(v3));
660 bpos = mtod(mb, caddr_t);
662 nfsm_request(vp, NFSPROC_GETATTR, ap->a_td, ap->a_cred);
664 nfsm_loadattr(vp, ap->a_vap);
/*
 * nfs setattr vnode op: validate the request locally (read-only
 * mount, unsupported attributes), handle truncation specially to
 * keep the buffer cache and pager size consistent, then send the
 * SETATTR RPC.  On RPC failure after a size change, roll the local
 * size back to "tsize".
 * NOTE(review): excerpt is elided in this function — return type,
 * some branches/returns and the closing brace are not visible.
 */
675 nfs_setattr(struct vop_setattr_args *ap)
677 struct vnode *vp = ap->a_vp;
678 struct nfsnode *np = VTONFS(vp);
679 struct vattr *vap = ap->a_vap;
688 * Setting of flags and marking of atimes are not supported.
690 if (vap->va_flags != VNOVAL || (vap->va_vaflags & VA_MARK_ATIME))
694 * Disallow write attempts if the filesystem is mounted read-only.
696 if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
697 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
698 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
699 (vp->v_mount->mnt_flag & MNT_RDONLY)) {
703 if (vap->va_size != VNOVAL) {
704 switch (vp->v_type) {
/* If size was the only attribute requested and it is dropped
 * (e.g. unsupported vnode type), there is nothing left to set. */
711 if (vap->va_mtime.tv_sec == VNOVAL &&
712 vap->va_atime.tv_sec == VNOVAL &&
713 vap->va_mode == (mode_t)VNOVAL &&
714 vap->va_uid == (uid_t)VNOVAL &&
715 vap->va_gid == (gid_t)VNOVAL)
717 vap->va_size = VNOVAL;
721 * Disallow write attempts if the filesystem is
724 if (vp->v_mount->mnt_flag & MNT_RDONLY)
727 * We run vnode_pager_setsize() early (why?),
728 * we must set np->n_size now to avoid vinvalbuf
729 * V_SAVE races that might setsize a lower
732 mtx_lock(&np->n_mtx);
734 mtx_unlock(&np->n_mtx);
735 error = nfs_meta_setsize(vp, ap->a_cred,
736 ap->a_td, vap->va_size);
737 mtx_lock(&np->n_mtx);
738 if (np->n_flag & NMODIFIED) {
740 mtx_unlock(&np->n_mtx);
/* Truncate-to-zero need not save dirty data; otherwise flush. */
741 if (vap->va_size == 0)
742 error = nfs_vinvalbuf(vp, 0, ap->a_td, 1);
744 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
746 vnode_pager_setsize(vp, tsize);	/* undo on flush failure */
750 mtx_unlock(&np->n_mtx);
752 * np->n_size has already been set to vap->va_size
753 * in nfs_meta_setsize(). We must set it again since
754 * nfs_loadattrcache() could be called through
755 * nfs_meta_setsize() and could modify np->n_size.
757 mtx_lock(&np->n_mtx);
758 np->n_vattr.va_size = np->n_size = vap->va_size;
759 mtx_unlock(&np->n_mtx);
/* Flush dirty data before a time change so the server's mtime
 * update is not clobbered by our subsequent writes. */
762 mtx_lock(&np->n_mtx);
763 if ((vap->va_mtime.tv_sec != VNOVAL || vap->va_atime.tv_sec != VNOVAL) &&
764 (np->n_flag & NMODIFIED) && vp->v_type == VREG) {
765 mtx_unlock(&np->n_mtx);
766 if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_td, 1)) != 0 &&
767 (error == EINTR || error == EIO))
770 mtx_unlock(&np->n_mtx);
772 error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_td);
/* RPC failed after a local size change: restore the old size. */
773 if (error && vap->va_size != VNOVAL) {
774 mtx_lock(&np->n_mtx);
775 np->n_size = np->n_vattr.va_size = tsize;
776 vnode_pager_setsize(vp, tsize);
777 mtx_unlock(&np->n_mtx);
/*
 * NOTE(review): excerpt is elided in this function — return type,
 * conditionals around the v2/v3 marshalling and the return are not
 * visible.
 */
784 * Do an nfs setattr rpc.
787 nfs_setattrrpc(struct vnode *vp, struct vattr *vap, struct ucred *cred,
790 struct nfsv2_sattr *sp;
791 struct nfsnode *np = VTONFS(vp);
794 int error = 0, wccflag = NFSV3_WCCRATTR;
795 struct mbuf *mreq, *mrep, *md, *mb;
796 int v3 = NFS_ISV3(vp);
798 nfsstats.rpccnt[NFSPROC_SETATTR]++;
799 mreq = nfsm_reqhead(vp, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
801 bpos = mtod(mb, caddr_t);
/* v3 marshalling: sattr3 with a guard ("check mtime") argument. */
804 nfsm_v3attrbuild(vap, TRUE);
805 tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED);
/* v2 marshalling: fixed sattr record; VNOVAL maps to xdr -1
 * meaning "do not change this attribute". */
808 sp = nfsm_build(struct nfsv2_sattr *, NFSX_V2SATTR);
809 if (vap->va_mode == (mode_t)VNOVAL)
810 sp->sa_mode = nfs_xdrneg1;
812 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
813 if (vap->va_uid == (uid_t)VNOVAL)
814 sp->sa_uid = nfs_xdrneg1;
816 sp->sa_uid = txdr_unsigned(vap->va_uid);
817 if (vap->va_gid == (gid_t)VNOVAL)
818 sp->sa_gid = nfs_xdrneg1;
820 sp->sa_gid = txdr_unsigned(vap->va_gid);
821 sp->sa_size = txdr_unsigned(vap->va_size);
822 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
823 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
825 nfsm_request(vp, NFSPROC_SETATTR, td, cred);
/* v3 replies carry weak cache consistency data; v2 full attrs. */
828 nfsm_wcc_data(vp, wccflag);
830 nfsm_loadattr(vp, NULL);
/*
 * NOTE(review): excerpt is heavily elided in this function — many
 * branches, error unwinding and the closing brace are not visible.
 */
837 * nfs lookup call, one step at a time...
838 * First look in cache
839 * If not found, unlock the directory nfsnode and do the rpc
842 nfs_lookup(struct vop_lookup_args *ap)
844 struct componentname *cnp = ap->a_cnp;
845 struct vnode *dvp = ap->a_dvp;
846 struct vnode **vpp = ap->a_vpp;
847 int flags = cnp->cn_flags;
849 struct nfsmount *nmp;
851 struct mbuf *mreq, *mrep, *md, *mb;
855 int error = 0, attrflag, fhsize;
856 int v3 = NFS_ISV3(dvp);
857 struct thread *td = cnp->cn_thread;
/* Refuse DELETE/RENAME of the last component on a read-only mount. */
860 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
861 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
863 if (dvp->v_type != VDIR)
865 nmp = VFSTONFS(dvp->v_mount);
867 if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td)) != 0) {
/* Name-cache probe first; a hit is validated against the server's
 * ctime before being trusted. */
871 if ((error = cache_lookup(dvp, vpp, cnp)) && error != ENOENT) {
875 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, td)
876 && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
877 nfsstats.lookupcache_hits++;
878 if (cnp->cn_nameiop != LOOKUP &&
880 cnp->cn_flags |= SAVENAME;
/* Cache miss: go over the wire with a LOOKUP RPC. */
892 nfsstats.lookupcache_misses++;
893 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
894 len = cnp->cn_namelen;
895 mreq = nfsm_reqhead(dvp, NFSPROC_LOOKUP,
896 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
898 bpos = mtod(mb, caddr_t);
900 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
901 nfsm_request(dvp, NFSPROC_LOOKUP, cnp->cn_thread, cnp->cn_cred);
904 nfsm_postop_attr(dvp, attrflag);
909 nfsm_getfh(fhp, fhsize, v3);
912 * Handle RENAME case...
914 if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
/* Renaming something onto itself (same file handle) is a no-op. */
915 if (NFS_CMPFH(np, fhp, fhsize)) {
919 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np, LK_EXCLUSIVE);
926 nfsm_postop_attr(newvp, attrflag);
927 nfsm_postop_attr(dvp, attrflag);
929 nfsm_loadattr(newvp, NULL);
932 cnp->cn_flags |= SAVENAME;
/* ".." lookup: drop the parent's lock around nfs_nget to avoid a
 * lock-order reversal, then re-lock. */
936 if (flags & ISDOTDOT) {
938 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np, cnp->cn_lkflags);
939 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
943 } else if (NFS_CMPFH(np, fhp, fhsize)) {
947 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np, cnp->cn_lkflags);
955 nfsm_postop_attr(newvp, attrflag);
956 nfsm_postop_attr(dvp, attrflag);
958 nfsm_loadattr(newvp, NULL);
959 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
960 cnp->cn_flags |= SAVENAME;
/* Enter a positive name-cache entry, recording ctime so a later
 * hit can be validated (see cache-hit path above). */
961 if ((cnp->cn_flags & MAKEENTRY) &&
962 (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) {
963 np->n_ctime = np->n_vattr.va_ctime.tv_sec;
964 cache_enter(dvp, newvp, cnp);
970 if (newvp != NULLVP) {
/* ENOENT on the last component of CREATE/RENAME is success-ish:
 * the caller will create the entry (EJUSTRETURN convention). */
974 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
975 (flags & ISLASTCN) && error == ENOENT) {
976 if (dvp->v_mount->mnt_flag & MNT_RDONLY)
981 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
982 cnp->cn_flags |= SAVENAME;
/*
 * NOTE(review): excerpt is elided — the switch cases other than VREG
 * and the closing brace are not visible.
 */
989 * Just call nfs_bioread() to do the work.
992 nfs_read(struct vop_read_args *ap)
994 struct vnode *vp = ap->a_vp;
996 switch (vp->v_type) {
/* Regular files go through the buffer cache via nfs_bioread(). */
998 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred));
1002 return (EOPNOTSUPP);
/*
 * nfs readlink vnode op: only valid on symlinks; reads the link
 * target through the buffer cache via nfs_bioread().
 * NOTE(review): the error return for the non-VLNK case is elided.
 */
1010 nfs_readlink(struct vop_readlink_args *ap)
1012 struct vnode *vp = ap->a_vp;
1014 if (vp->v_type != VLNK)
1016 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred));
/*
 * NOTE(review): excerpt is elided — return type, reply teardown and
 * the return are not visible.
 */
1020 * Do a readlink rpc.
1021 * Called by nfs_doio() from below the buffer cache.
1024 nfs_readlinkrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
1027 int error = 0, len, attrflag;
1028 struct mbuf *mreq, *mrep, *md, *mb;
1029 int v3 = NFS_ISV3(vp);
1031 nfsstats.rpccnt[NFSPROC_READLINK]++;
1032 mreq = nfsm_reqhead(vp, NFSPROC_READLINK, NFSX_FH(v3));
1034 bpos = mtod(mb, caddr_t);
1036 nfsm_request(vp, NFSPROC_READLINK, uiop->uio_td, cred);
1038 nfsm_postop_attr(vp, attrflag);
1040 nfsm_strsiz(len, NFS_MAXPATHLEN);
/* Server returned a max-length target: if we know the real size is
 * smaller (from cached attrs), trim to it. */
1041 if (len == NFS_MAXPATHLEN) {
1042 struct nfsnode *np = VTONFS(vp);
1043 mtx_lock(&np->n_mtx);
1044 if (np->n_size && np->n_size < NFS_MAXPATHLEN)
1046 mtx_unlock(&np->n_mtx);
1048 nfsm_mtouio(uiop, len);	/* copy link target into the caller's uio */
/*
 * Do a READ RPC, looping until uiop->uio_resid is satisfied, EOF is
 * reached, or a short read ends the transfer.  Each iteration reads
 * at most the mount's rsize.
 * NOTE(review): excerpt is elided — return type, the loop construct,
 * error unwinding and the return are not visible.
 */
1060 nfs_readrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
1064 struct mbuf *mreq, *mrep, *md, *mb;
1065 struct nfsmount *nmp;
1066 int error = 0, len, retlen, tsiz, eof, attrflag;
1067 int v3 = NFS_ISV3(vp);
1073 nmp = VFSTONFS(vp->v_mount);
1074 tsiz = uiop->uio_resid;
/* Reject reads extending past the mount's maximum file size. */
1075 mtx_lock(&nmp->nm_mtx);
1076 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize) {
1077 mtx_unlock(&nmp->nm_mtx);
1080 rsize = nmp->nm_rsize;
1081 mtx_unlock(&nmp->nm_mtx);
1083 nfsstats.rpccnt[NFSPROC_READ]++;
1084 len = (tsiz > rsize) ? rsize : tsiz;	/* clamp to rsize per RPC */
1085 mreq = nfsm_reqhead(vp, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
1087 bpos = mtod(mb, caddr_t);
1089 tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED * 3);
/* v3 uses a 64-bit offset; v2 a 32-bit one. */
1091 txdr_hyper(uiop->uio_offset, tl);
1092 *(tl + 2) = txdr_unsigned(len);
1094 *tl++ = txdr_unsigned(uiop->uio_offset);
1095 *tl++ = txdr_unsigned(len);
1098 nfsm_request(vp, NFSPROC_READ, uiop->uio_td, cred);
1100 nfsm_postop_attr(vp, attrflag);
1105 tl = nfsm_dissect(u_int32_t *, 2 * NFSX_UNSIGNED);
1106 eof = fxdr_unsigned(int, *(tl + 1));	/* v3 reply carries an eof flag */
1108 nfsm_loadattr(vp, NULL);
1110 nfsm_strsiz(retlen, rsize);
1111 nfsm_mtouio(uiop, retlen);	/* copy returned data to the caller */
/* EOF, empty read, or short read all terminate the loop. */
1115 if (eof || retlen == 0) {
1118 } else if (retlen < len) {
/*
 * Do a WRITE RPC, looping until all of uiop->uio_resid is written in
 * chunks of at most the mount's wsize.  *iomode is the requested v3
 * stability level in and the lowest level actually obtained out;
 * *must_commit is set when the server's write verifier changes.
 * NOTE(review): excerpt is elided — return type, the loop construct,
 * error unwinding and the return are not visible.
 */
1130 nfs_writerpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
1131 int *iomode, int *must_commit)
1136 struct mbuf *mreq, *mrep, *md, *mb;
1137 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1138 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR, rlen, commit;
1139 int v3 = NFS_ISV3(vp), committed = NFSV3WRITE_FILESYNC;
1143 if (uiop->uio_iovcnt != 1)
1144 panic("nfs: writerpc iovcnt > 1");
1147 tsiz = uiop->uio_resid;
/* Reject writes extending past the mount's maximum file size. */
1148 mtx_lock(&nmp->nm_mtx);
1149 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize) {
1150 mtx_unlock(&nmp->nm_mtx);
1153 wsize = nmp->nm_wsize;
1154 mtx_unlock(&nmp->nm_mtx);
1156 nfsstats.rpccnt[NFSPROC_WRITE]++;
1157 len = (tsiz > wsize) ? wsize : tsiz;	/* clamp to wsize per RPC */
1158 mreq = nfsm_reqhead(vp, NFSPROC_WRITE,
1159 NFSX_FH(v3) + 5 * NFSX_UNSIGNED + nfsm_rndup(len));
1161 bpos = mtod(mb, caddr_t);
/* v3 header: 64-bit offset, count, stability mode, count again. */
1164 tl = nfsm_build(u_int32_t *, 5 * NFSX_UNSIGNED);
1165 txdr_hyper(uiop->uio_offset, tl);
1167 *tl++ = txdr_unsigned(len);
1168 *tl++ = txdr_unsigned(*iomode);
1169 *tl = txdr_unsigned(len);
/* v2 header: the begin/current offset fields are ignored by
 * servers but must not be garbage. */
1173 tl = nfsm_build(u_int32_t *, 4 * NFSX_UNSIGNED);
1174 /* Set both "begin" and "current" to non-garbage. */
1175 x = txdr_unsigned((u_int32_t)uiop->uio_offset);
1176 *tl++ = x; /* "begin offset" */
1177 *tl++ = x; /* "current offset" */
1178 x = txdr_unsigned(len);
1179 *tl++ = x; /* total to this offset */
1180 *tl = x; /* size of this write */
1182 nfsm_uiotom(uiop, len);	/* append the data itself */
1183 nfsm_request(vp, NFSPROC_WRITE, uiop->uio_td, cred);
1185 wccflag = NFSV3_WCCCHK;
1186 nfsm_wcc_data(vp, wccflag);
1188 tl = nfsm_dissect(u_int32_t *, 2 * NFSX_UNSIGNED
1189 + NFSX_V3WRITEVERF);
1190 rlen = fxdr_unsigned(int, *tl++);
/* Short write: back the uio up by the unwritten tail so the next
 * loop iteration resends it. */
1195 } else if (rlen < len) {
1196 backup = len - rlen;
1197 uiop->uio_iov->iov_base =
1198 (char *)uiop->uio_iov->iov_base -
1200 uiop->uio_iov->iov_len += backup;
1201 uiop->uio_offset -= backup;
1202 uiop->uio_resid += backup;
1205 commit = fxdr_unsigned(int, *tl++);
1208 * Return the lowest committment level
1209 * obtained by any of the RPCs.
1211 if (committed == NFSV3WRITE_FILESYNC)
1213 else if (committed == NFSV3WRITE_DATASYNC &&
1214 commit == NFSV3WRITE_UNSTABLE)
/* Track the server's write verifier; a change means the server
 * rebooted and unstable writes must be re-committed. */
1216 mtx_lock(&nmp->nm_mtx);
1217 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0){
1218 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
1220 nmp->nm_state |= NFSSTA_HASWRITEVERF;
1221 } else if (bcmp((caddr_t)tl,
1222 (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF)) {
1224 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
1227 mtx_unlock(&nmp->nm_mtx);
1230 nfsm_loadattr(vp, NULL);
1233 mtx_lock(&(VTONFS(vp))->n_mtx);
1234 VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr.va_mtime;
1235 mtx_unlock(&(VTONFS(vp))->n_mtx);
/* Async mounts report FILESYNC regardless of the actual level. */
1243 if (vp->v_mount->mnt_kern_flag & MNTK_ASYNC)
1244 committed = NFSV3WRITE_FILESYNC;
1245 *iomode = committed;
1247 uiop->uio_resid = tsiz;
/*
 * NOTE(review): excerpt is elided in this function — return type,
 * several branches/returns and the closing brace are not visible.
 */
1253 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1254 * mode set to specify the file type and the size field for rdev.
1257 nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
1260 struct nfsv2_sattr *sp;
1262 struct vnode *newvp = NULL;
1263 struct nfsnode *np = NULL;
1266 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1267 struct mbuf *mreq, *mrep, *md, *mb;
1269 int v3 = NFS_ISV3(dvp);
/* Only devices, fifos and sockets are valid here. */
1271 if (vap->va_type == VCHR || vap->va_type == VBLK)
1272 rdev = txdr_unsigned(vap->va_rdev);
1273 else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
1276 return (EOPNOTSUPP);
1278 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_thread)) != 0) {
1281 nfsstats.rpccnt[NFSPROC_MKNOD]++;
1282 mreq = nfsm_reqhead(dvp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
1283 + nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1285 bpos = mtod(mb, caddr_t);
1286 nfsm_fhtom(dvp, v3);
1287 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
/* v3: real MKNOD arguments — type, sattr3, and major/minor for
 * device nodes. */
1289 tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED);
1290 *tl++ = vtonfsv3_type(vap->va_type);
1291 nfsm_v3attrbuild(vap, FALSE);
1292 if (vap->va_type == VCHR || vap->va_type == VBLK) {
1293 tl = nfsm_build(u_int32_t *, 2 * NFSX_UNSIGNED);
1294 *tl++ = txdr_unsigned(umajor(vap->va_rdev));
1295 *tl = txdr_unsigned(uminor(vap->va_rdev));
/* v2 kludge: type encoded in sa_mode, rdev in sa_size. */
1298 sp = nfsm_build(struct nfsv2_sattr *, NFSX_V2SATTR);
1299 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1300 sp->sa_uid = nfs_xdrneg1;
1301 sp->sa_gid = nfs_xdrneg1;
1303 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1304 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1306 nfsm_request(dvp, NFSPROC_MKNOD, cnp->cn_thread, cnp->cn_cred);
1308 nfsm_mtofh(dvp, newvp, v3, gotvp);
/* No vnode in the reply: look the new name up explicitly. */
1314 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1315 cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread, &np);
1321 nfsm_wcc_data(dvp, wccflag);
1328 if (cnp->cn_flags & MAKEENTRY)
1329 cache_enter(dvp, newvp, cnp);
/* The directory changed: mark it modified and zap its cached
 * attributes so the next getattr goes to the server. */
1332 mtx_lock(&(VTONFS(dvp))->n_mtx);
1333 VTONFS(dvp)->n_flag |= NMODIFIED;
1335 VTONFS(dvp)->n_attrstamp = 0;
1336 mtx_unlock(&(VTONFS(dvp))->n_mtx);
1342 * just call nfs_mknodrpc() to do the work.
/* VOP_MKNOD entry point: a thin wrapper that forwards to nfs_mknodrpc(). */
1346 nfs_mknod(struct vop_mknod_args *ap)
1348 return (nfs_mknodrpc(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap));
/* Monotonic half of the exclusive-create verifier (see nfs_create below). */
1351 static u_long create_verf;
1353 * nfs file create call
/*
 * VOP_CREATE for regular files.  VSOCK is redirected to nfs_mknodrpc().
 * For NFSv3 exclusive creates (O_EXCL) a verifier is built from the host's
 * primary IP address plus the create_verf counter; per the v3 spec a
 * follow-up SETATTR is issued to establish real attributes, since the
 * server may have stored the verifier in the file's atime.
 */
1356 nfs_create(struct vop_create_args *ap)
1358 struct vnode *dvp = ap->a_dvp;
1359 struct vattr *vap = ap->a_vap;
1360 struct componentname *cnp = ap->a_cnp;
1361 struct nfsv2_sattr *sp;
1363 struct nfsnode *np = NULL;
1364 struct vnode *newvp = NULL;
1366 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0;
1367 struct mbuf *mreq, *mrep, *md, *mb;
1369 int v3 = NFS_ISV3(dvp);
1372 * Oops, not for me..
1374 if (vap->va_type == VSOCK)
1375 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1377 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_thread)) != 0) {
1380 if (vap->va_vaflags & VA_EXCLUSIVE)
1383 nfsstats.rpccnt[NFSPROC_CREATE]++;
1384 mreq = nfsm_reqhead(dvp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
1385 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1387 bpos = mtod(mb, caddr_t);
1388 nfsm_fhtom(dvp, v3);
1389 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1391 tl = nfsm_build(u_int32_t *, NFSX_UNSIGNED);
/* v3 exclusive create: send a verifier instead of attributes. */
1392 if (fmode & O_EXCL) {
1393 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
1394 tl = nfsm_build(u_int32_t *, NFSX_V3CREATEVERF);
/* First verifier word: our primary interface address, if any. */
1396 if (!TAILQ_EMPTY(&in_ifaddrhead))
1397 *tl++ = IA_SIN(TAILQ_FIRST(&in_ifaddrhead))->sin_addr.s_addr;
1400 *tl++ = create_verf;
1401 *tl = ++create_verf;
1403 *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED);
1404 nfsm_v3attrbuild(vap, FALSE);
/* v2 path: attributes in an nfsv2_sattr; uid/gid left as -1. */
1407 sp = nfsm_build(struct nfsv2_sattr *, NFSX_V2SATTR);
1408 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1409 sp->sa_uid = nfs_xdrneg1;
1410 sp->sa_gid = nfs_xdrneg1;
1412 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1413 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1415 nfsm_request(dvp, NFSPROC_CREATE, cnp->cn_thread, cnp->cn_cred);
1417 nfsm_mtofh(dvp, newvp, v3, gotvp);
/* No file handle in the reply: look the new name up to get the vnode. */
1423 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1424 cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread, &np);
1430 nfsm_wcc_data(dvp, wccflag);
/* Server lacks exclusive-create support: caller retries unchecked. */
1434 if (v3 && (fmode & O_EXCL) && error == NFSERR_NOTSUPP) {
1440 } else if (v3 && (fmode & O_EXCL)) {
1442 * We are normally called with only a partially initialized
1443 * VAP. Since the NFSv3 spec says that server may use the
1444 * file attributes to store the verifier, the spec requires
1445 * us to do a SETATTR RPC. FreeBSD servers store the verifier
1446 * in atime, but we can't really assume that all servers will
1447 * so we ensure that our SETATTR sets both atime and mtime.
1449 if (vap->va_mtime.tv_sec == VNOVAL)
1450 vfs_timestamp(&vap->va_mtime);
1451 if (vap->va_atime.tv_sec == VNOVAL)
1452 vap->va_atime = vap->va_mtime;
1453 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_thread);
1458 if (cnp->cn_flags & MAKEENTRY)
1459 cache_enter(dvp, newvp, cnp);
/* Parent dir changed: flag it modified and invalidate its cached attrs. */
1462 mtx_lock(&(VTONFS(dvp))->n_mtx);
1463 VTONFS(dvp)->n_flag |= NMODIFIED;
1465 VTONFS(dvp)->n_attrstamp = 0;
1466 mtx_unlock(&(VTONFS(dvp))->n_mtx);
1471 * nfs file remove call
1472 * To try and make nfs semantics closer to ufs semantics, a file that has
1473 * other processes using the vnode is renamed instead of removed and then
1474 * removed later on the last close.
1475 * - If v_usecount > 1
1476 * If a rename is not already in the works
1477 * call nfs_sillyrename() to set it up
/*
 * VOP_REMOVE.  Removes the file outright when it is not otherwise in use
 * (or is already sillyrenamed with extra links); otherwise defers the
 * removal by sillyrenaming it, so open file descriptors keep working.
 */
1482 nfs_remove(struct vop_remove_args *ap)
1484 struct vnode *vp = ap->a_vp;
1485 struct vnode *dvp = ap->a_dvp;
1486 struct componentname *cnp = ap->a_cnp;
1487 struct nfsnode *np = VTONFS(vp);
/* Sanity checks: the lookup must have left us a name and a held vnode. */
1492 if ((cnp->cn_flags & HASBUF) == 0)
1493 panic("nfs_remove: no name");
1494 if (vrefcnt(vp) < 1)
1495 panic("nfs_remove: bad v_usecount");
1497 if (vp->v_type == VDIR)
1499 else if (vrefcnt(vp) == 1 || (np->n_sillyrename &&
1500 VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_thread) == 0 &&
1501 vattr.va_nlink > 1)) {
1503 * Purge the name cache so that the chance of a lookup for
1504 * the name succeeding while the remove is in progress is
1505 * minimized. Without node locking it can still happen, such
1506 * that an I/O op returns ESTALE, but since you get this if
1507 * another host removes the file..
1511 * throw away biocache buffers, mainly to avoid
1512 * unnecessary delayed writes later.
1514 error = nfs_vinvalbuf(vp, 0, cnp->cn_thread, 1);
/* Do the REMOVE RPC unless buffer invalidation was interrupted/failed hard. */
1516 if (error != EINTR && error != EIO)
1517 error = nfs_removerpc(dvp, cnp->cn_nameptr,
1518 cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread);
1520 * Kludge City: If the first reply to the remove rpc is lost..
1521 * the reply to the retransmitted request will be ENOENT
1522 * since the file was in fact removed
1523 * Therefore, we cheat and return success.
1525 if (error == ENOENT)
1527 } else if (!np->n_sillyrename)
1528 error = nfs_sillyrename(dvp, vp, cnp);
/* The file's attributes are stale either way. */
1529 np->n_attrstamp = 0;
1534 * nfs file remove rpc called from nfs_inactive
/*
 * Remove the sillyrenamed placeholder file described by *sp.
 * Bails out if the parent directory vnode has gone bad (forced unmount).
 */
1537 nfs_removeit(struct sillyrename *sp)
1540 * Make sure that the directory vnode is still valid.
1541 * XXX we should lock sp->s_dvp here.
1543 if (sp->s_dvp->v_type == VBAD)
1545 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
1550 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
/*
 * Issue an NFSPROC_REMOVE RPC for `name` in directory `dvp`.
 * On v3, wcc data from the reply refreshes the directory attributes;
 * the directory nfsnode is marked NMODIFIED and its attr cache cleared.
 */
1553 nfs_removerpc(struct vnode *dvp, const char *name, int namelen,
1554 struct ucred *cred, struct thread *td)
1557 int error = 0, wccflag = NFSV3_WCCRATTR;
1558 struct mbuf *mreq, *mrep, *md, *mb;
1559 int v3 = NFS_ISV3(dvp);
1561 nfsstats.rpccnt[NFSPROC_REMOVE]++;
1562 mreq = nfsm_reqhead(dvp, NFSPROC_REMOVE,
1563 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
1565 bpos = mtod(mb, caddr_t);
1566 nfsm_fhtom(dvp, v3);
1567 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
1568 nfsm_request(dvp, NFSPROC_REMOVE, td, cred);
1570 nfsm_wcc_data(dvp, wccflag);
1573 mtx_lock(&(VTONFS(dvp))->n_mtx);
1574 VTONFS(dvp)->n_flag |= NMODIFIED;
1576 VTONFS(dvp)->n_attrstamp = 0;
1577 mtx_unlock(&(VTONFS(dvp))->n_mtx);
1582 * nfs file rename call
/*
 * VOP_RENAME.  Flushes dirty buffers on the source (and target) first so
 * delayed writes cannot land on a stale handle, sillyrenames a busy
 * target, then performs the RENAME RPC via nfs_renamerpc().
 */
1585 nfs_rename(struct vop_rename_args *ap)
1587 struct vnode *fvp = ap->a_fvp;
1588 struct vnode *tvp = ap->a_tvp;
1589 struct vnode *fdvp = ap->a_fdvp;
1590 struct vnode *tdvp = ap->a_tdvp;
1591 struct componentname *tcnp = ap->a_tcnp;
1592 struct componentname *fcnp = ap->a_fcnp;
1596 if ((tcnp->cn_flags & HASBUF) == 0 ||
1597 (fcnp->cn_flags & HASBUF) == 0)
1598 panic("nfs_rename: no name");
1600 /* Check for cross-device rename */
1601 if ((fvp->v_mount != tdvp->v_mount) ||
1602 (tvp && (fvp->v_mount != tvp->v_mount))) {
1608 nfs_printf("nfs_rename: fvp == tvp (can't happen)\n");
1612 if ((error = vn_lock(fvp, LK_EXCLUSIVE)) != 0)
1616 * We have to flush B_DELWRI data prior to renaming
1617 * the file. If we don't, the delayed-write buffers
1618 * can be flushed out later after the file has gone stale
1619 * under NFSV3. NFSV2 does not have this problem because
1620 * ( as far as I can tell ) it flushes dirty buffers more
1623 * Skip the rename operation if the fsync fails, this can happen
1624 * due to the server's volume being full, when we pushed out data
1625 * that was written back to our cache earlier. Not checking for
1626 * this condition can result in potential (silent) data loss.
1628 error = VOP_FSYNC(fvp, MNT_WAIT, fcnp->cn_thread);
1631 error = VOP_FSYNC(tvp, MNT_WAIT, tcnp->cn_thread);
1636 * If the tvp exists and is in use, sillyrename it before doing the
1637 * rename of the new file over it.
1638 * XXX Can't sillyrename a directory.
1640 if (tvp && vrefcnt(tvp) > 1 && !VTONFS(tvp)->n_sillyrename &&
1641 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
1646 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1647 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
/* Directories need extra name-cache purging after a rename. */
1650 if (fvp->v_type == VDIR) {
1651 if (tvp != NULL && tvp->v_type == VDIR)
1666 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1668 if (error == ENOENT)
1674 * nfs file rename rpc called from nfs_remove() above
/* Rename a file to its sillyrename placeholder name within the same dir. */
1677 nfs_renameit(struct vnode *sdvp, struct componentname *scnp,
1678 struct sillyrename *sp)
1681 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen, sdvp,
1682 sp->s_name, sp->s_namlen, scnp->cn_cred, scnp->cn_thread));
1686 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
/*
 * Marshal and send NFSPROC_RENAME: (from-dir fh, from-name, to-dir fh,
 * to-name).  On v3 the reply carries wcc data for both directories; both
 * directory nfsnodes are marked NMODIFIED and their attr caches cleared.
 */
1689 nfs_renamerpc(struct vnode *fdvp, const char *fnameptr, int fnamelen,
1690 struct vnode *tdvp, const char *tnameptr, int tnamelen, struct ucred *cred,
1694 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
1695 struct mbuf *mreq, *mrep, *md, *mb;
1696 int v3 = NFS_ISV3(fdvp);
1698 nfsstats.rpccnt[NFSPROC_RENAME]++;
1699 mreq = nfsm_reqhead(fdvp, NFSPROC_RENAME,
1700 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
1701 nfsm_rndup(tnamelen));
1703 bpos = mtod(mb, caddr_t);
1704 nfsm_fhtom(fdvp, v3);
1705 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
1706 nfsm_fhtom(tdvp, v3);
1707 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
1708 nfsm_request(fdvp, NFSPROC_RENAME, td, cred);
1710 nfsm_wcc_data(fdvp, fwccflag);
1711 nfsm_wcc_data(tdvp, twccflag);
1715 mtx_lock(&(VTONFS(fdvp))->n_mtx);
1716 VTONFS(fdvp)->n_flag |= NMODIFIED;
1717 mtx_unlock(&(VTONFS(fdvp))->n_mtx);
1718 mtx_lock(&(VTONFS(tdvp))->n_mtx);
1719 VTONFS(tdvp)->n_flag |= NMODIFIED;
1720 mtx_unlock(&(VTONFS(tdvp))->n_mtx);
1722 VTONFS(fdvp)->n_attrstamp = 0;
1724 VTONFS(tdvp)->n_attrstamp = 0;
1729 * nfs hard link create call
/*
 * VOP_LINK.  Refuses cross-mount links, fsyncs the source vnode so the
 * server's attributes stay coherent, then issues NFSPROC_LINK.  Both the
 * source file and the target directory get their attr caches invalidated.
 */
1732 nfs_link(struct vop_link_args *ap)
1734 struct vnode *vp = ap->a_vp;
1735 struct vnode *tdvp = ap->a_tdvp;
1736 struct componentname *cnp = ap->a_cnp;
1738 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
1739 struct mbuf *mreq, *mrep, *md, *mb;
1742 if (vp->v_mount != tdvp->v_mount) {
1747 * Push all writes to the server, so that the attribute cache
1748 * doesn't get "out of sync" with the server.
1749 * XXX There should be a better way!
1751 VOP_FSYNC(vp, MNT_WAIT, cnp->cn_thread);
1754 nfsstats.rpccnt[NFSPROC_LINK]++;
1755 mreq = nfsm_reqhead(vp, NFSPROC_LINK,
1756 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
1758 bpos = mtod(mb, caddr_t);
1760 nfsm_fhtom(tdvp, v3);
1761 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1762 nfsm_request(vp, NFSPROC_LINK, cnp->cn_thread, cnp->cn_cred);
1764 nfsm_postop_attr(vp, attrflag);
1765 nfsm_wcc_data(tdvp, wccflag);
1769 mtx_lock(&(VTONFS(tdvp))->n_mtx);
1770 VTONFS(tdvp)->n_flag |= NMODIFIED;
1771 mtx_unlock(&(VTONFS(tdvp))->n_mtx);
1773 VTONFS(vp)->n_attrstamp = 0;
1775 VTONFS(tdvp)->n_attrstamp = 0;
1780 * nfs symbolic link create call
/*
 * VOP_SYMLINK.  Marshals an NFSPROC_SYMLINK request (name + attributes +
 * link target).  Only v3 replies can return the new file handle directly;
 * for v2 (or when no handle came back) a follow-up lookup produces newvp.
 */
1783 nfs_symlink(struct vop_symlink_args *ap)
1785 struct vnode *dvp = ap->a_dvp;
1786 struct vattr *vap = ap->a_vap;
1787 struct componentname *cnp = ap->a_cnp;
1788 struct nfsv2_sattr *sp;
1790 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
1791 struct mbuf *mreq, *mrep, *md, *mb;
1792 struct vnode *newvp = NULL;
1793 int v3 = NFS_ISV3(dvp);
1795 nfsstats.rpccnt[NFSPROC_SYMLINK]++;
1796 slen = strlen(ap->a_target);
1797 mreq = nfsm_reqhead(dvp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
1798 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
1800 bpos = mtod(mb, caddr_t);
1801 nfsm_fhtom(dvp, v3);
1802 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1804 nfsm_v3attrbuild(vap, FALSE);
1806 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
/* v2 attribute block; uid/gid/size left as -1 ("don't set"). */
1808 sp = nfsm_build(struct nfsv2_sattr *, NFSX_V2SATTR);
1809 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
1810 sp->sa_uid = nfs_xdrneg1;
1811 sp->sa_gid = nfs_xdrneg1;
1812 sp->sa_size = nfs_xdrneg1;
1813 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1814 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1818 * Issue the NFS request and get the rpc response.
1820 * Only NFSv3 responses returning an error of 0 actually return
1821 * a file handle that can be converted into newvp without having
1822 * to do an extra lookup rpc.
1824 nfsm_request(dvp, NFSPROC_SYMLINK, cnp->cn_thread, cnp->cn_cred);
1827 nfsm_mtofh(dvp, newvp, v3, gotvp);
1828 nfsm_wcc_data(dvp, wccflag);
1832 * out code jumps -> here, mrep is also freed.
1839 * If we do not have an error and we could not extract the newvp from
1840 * the response due to the request being NFSv2, we have to do a
1841 * lookup in order to obtain a newvp to return.
1843 if (error == 0 && newvp == NULL) {
1844 struct nfsnode *np = NULL;
1846 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
1847 cnp->cn_cred, cnp->cn_thread, &np);
/* Parent dir changed: flag it modified and invalidate its cached attrs. */
1857 mtx_lock(&(VTONFS(dvp))->n_mtx);
1858 VTONFS(dvp)->n_flag |= NMODIFIED;
1859 mtx_unlock(&(VTONFS(dvp))->n_mtx);
1861 VTONFS(dvp)->n_attrstamp = 0;
/*
 * VOP_MKDIR.  Marshals and sends NFSPROC_MKDIR; if the reply carried no
 * file handle (v2), looks the new name up to obtain the vnode, and
 * sanity-checks that what came back is actually a directory.
 */
1869 nfs_mkdir(struct vop_mkdir_args *ap)
1871 struct vnode *dvp = ap->a_dvp;
1872 struct vattr *vap = ap->a_vap;
1873 struct componentname *cnp = ap->a_cnp;
1874 struct nfsv2_sattr *sp;
1876 struct nfsnode *np = NULL;
1877 struct vnode *newvp = NULL;
1879 int error = 0, wccflag = NFSV3_WCCRATTR;
1881 struct mbuf *mreq, *mrep, *md, *mb;
1883 int v3 = NFS_ISV3(dvp);
1885 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_thread)) != 0) {
1888 len = cnp->cn_namelen;
1889 nfsstats.rpccnt[NFSPROC_MKDIR]++;
1890 mreq = nfsm_reqhead(dvp, NFSPROC_MKDIR,
1891 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
1893 bpos = mtod(mb, caddr_t);
1894 nfsm_fhtom(dvp, v3);
1895 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
1897 nfsm_v3attrbuild(vap, FALSE);
/* v2 attribute block; uid/gid/size left as -1 ("don't set"). */
1899 sp = nfsm_build(struct nfsv2_sattr *, NFSX_V2SATTR);
1900 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
1901 sp->sa_uid = nfs_xdrneg1;
1902 sp->sa_gid = nfs_xdrneg1;
1903 sp->sa_size = nfs_xdrneg1;
1904 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1905 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1907 nfsm_request(dvp, NFSPROC_MKDIR, cnp->cn_thread, cnp->cn_cred);
1909 nfsm_mtofh(dvp, newvp, v3, gotvp);
1911 nfsm_wcc_data(dvp, wccflag);
1914 mtx_lock(&(VTONFS(dvp))->n_mtx);
1915 VTONFS(dvp)->n_flag |= NMODIFIED;
1916 mtx_unlock(&(VTONFS(dvp))->n_mtx);
1918 VTONFS(dvp)->n_attrstamp = 0;
/* No handle in the reply: do an explicit lookup for the new directory. */
1919 if (error == 0 && newvp == NULL) {
1920 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
1921 cnp->cn_thread, &np);
1924 if (newvp->v_type != VDIR)
1937 * nfs remove directory call
/*
 * VOP_RMDIR.  Issues NFSPROC_RMDIR for the name in dvp, marks the parent
 * modified and invalidates its attr cache.  ENOENT from a retransmitted
 * request is mapped to success (the first reply was likely lost).
 */
1940 nfs_rmdir(struct vop_rmdir_args *ap)
1942 struct vnode *vp = ap->a_vp;
1943 struct vnode *dvp = ap->a_dvp;
1944 struct componentname *cnp = ap->a_cnp;
1946 int error = 0, wccflag = NFSV3_WCCRATTR;
1947 struct mbuf *mreq, *mrep, *md, *mb;
1948 int v3 = NFS_ISV3(dvp);
1952 nfsstats.rpccnt[NFSPROC_RMDIR]++;
1953 mreq = nfsm_reqhead(dvp, NFSPROC_RMDIR,
1954 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
1956 bpos = mtod(mb, caddr_t);
1957 nfsm_fhtom(dvp, v3);
1958 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1959 nfsm_request(dvp, NFSPROC_RMDIR, cnp->cn_thread, cnp->cn_cred);
1961 nfsm_wcc_data(dvp, wccflag);
1964 mtx_lock(&(VTONFS(dvp))->n_mtx);
1965 VTONFS(dvp)->n_flag |= NMODIFIED;
1966 mtx_unlock(&(VTONFS(dvp))->n_mtx);
1968 VTONFS(dvp)->n_attrstamp = 0;
1972 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
1974 if (error == ENOENT)
/*
 * VOP_READDIR.  First consults the cached EOF offset: if the caller is at
 * or past it, the directory is unmodified, and the server mtime still
 * matches, the read is satisfied as an EOF cache hit without any RPC.
 * Otherwise delegates to nfs_bioread(); a read that consumed no bytes
 * while returning no error counts as a cache miss at EOF.
 */
1983 nfs_readdir(struct vop_readdir_args *ap)
1985 struct vnode *vp = ap->a_vp;
1986 struct nfsnode *np = VTONFS(vp);
1987 struct uio *uio = ap->a_uio;
1988 int tresid, error = 0;
1991 if (vp->v_type != VDIR)
1995 * First, check for hit on the EOF offset cache
1997 if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
1998 (np->n_flag & NMODIFIED) == 0) {
1999 if (VOP_GETATTR(vp, &vattr, ap->a_cred, uio->uio_td) == 0) {
2000 mtx_lock(&np->n_mtx);
2001 if (!NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
2002 mtx_unlock(&np->n_mtx);
2003 nfsstats.direofcache_hits++;
2006 mtx_unlock(&np->n_mtx);
2011 * Call nfs_bioread() to do the real work.
2013 tresid = uio->uio_resid;
2014 error = nfs_bioread(vp, uio, 0, ap->a_cred);
2016 if (!error && uio->uio_resid == tresid) {
2017 nfsstats.direofcache_misses++;
2025 * Called from below the buffer cache by nfs_doio().
/*
 * READDIR RPC engine.  Loops issuing nm_readdirsize-sized READDIR requests
 * (resuming from the cookie matching uiop->uio_offset) and converts each
 * wire entry into a 4.4BSD struct dirent in the caller's uio, padding
 * records out to DIRBLKSIZ boundaries.  Stops at server EOF or when the
 * uio buffer cannot hold the next entry; records the EOF offset and the
 * next resume cookie in the directory's nfsnode.
 */
2028 nfs_readdirrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
2031 struct dirent *dp = NULL;
2036 struct mbuf *mreq, *mrep, *md, *mb;
2038 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2039 struct nfsnode *dnp = VTONFS(vp);
2041 int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1;
2043 int v3 = NFS_ISV3(vp);
/* The buffer cache hands us whole DIRBLKSIZ-aligned blocks only. */
2046 if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
2047 (uiop->uio_resid & (DIRBLKSIZ - 1)))
2048 panic("nfs readdirrpc bad uio");
2052 * If there is no cookie, assume directory was stale.
2054 nfs_dircookie_lock(dnp);
2055 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
2058 nfs_dircookie_unlock(dnp);
2060 nfs_dircookie_unlock(dnp);
2061 return (NFSERR_BAD_COOKIE);
2065 * Loop around doing readdir rpc's of size nm_readdirsize
2066 * truncated to a multiple of DIRBLKSIZ.
2067 * The stopping criteria is EOF or buffer full.
2069 while (more_dirs && bigenough) {
2070 nfsstats.rpccnt[NFSPROC_READDIR]++;
2071 mreq = nfsm_reqhead(vp, NFSPROC_READDIR, NFSX_FH(v3) +
2074 bpos = mtod(mb, caddr_t);
/* v3 request: 64-bit cookie + cookie verifier from the nfsnode. */
2077 tl = nfsm_build(u_int32_t *, 5 * NFSX_UNSIGNED);
2078 *tl++ = cookie.nfsuquad[0];
2079 *tl++ = cookie.nfsuquad[1];
2080 mtx_lock(&dnp->n_mtx);
2081 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2082 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2083 mtx_unlock(&dnp->n_mtx);
/* v2 request: 32-bit cookie only. */
2085 tl = nfsm_build(u_int32_t *, 2 * NFSX_UNSIGNED);
2086 *tl++ = cookie.nfsuquad[0];
2088 *tl = txdr_unsigned(nmp->nm_readdirsize);
2089 nfsm_request(vp, NFSPROC_READDIR, uiop->uio_td, cred);
2091 nfsm_postop_attr(vp, attrflag);
/* v3 reply: stash the new cookie verifier for subsequent calls. */
2093 tl = nfsm_dissect(u_int32_t *,
2095 mtx_lock(&dnp->n_mtx);
2096 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2097 dnp->n_cookieverf.nfsuquad[1] = *tl;
2098 mtx_unlock(&dnp->n_mtx);
2104 tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
2105 more_dirs = fxdr_unsigned(int, *tl);
2107 /* loop thru the dir entries, doctoring them to 4bsd form */
2108 while (more_dirs && bigenough) {
2110 tl = nfsm_dissect(u_int32_t *,
2112 fileno = fxdr_hyper(tl);
2113 len = fxdr_unsigned(int, *(tl + 2));
2115 tl = nfsm_dissect(u_int32_t *,
2117 fileno = fxdr_unsigned(u_quad_t, *tl++);
2118 len = fxdr_unsigned(int, *tl);
/* Reject names the server should never send. */
2120 if (len <= 0 || len > NFS_MAXNAMLEN) {
2125 tlen = nfsm_rndup(len);
2127 tlen += 4; /* To ensure null termination */
/* If the entry won't fit in the current DIRBLKSIZ block, pad the
 * previous record out to the block boundary and start a new block. */
2128 left = DIRBLKSIZ - blksiz;
2129 if ((tlen + DIRHDSIZ) > left) {
2130 dp->d_reclen += left;
2131 uiop->uio_iov->iov_base =
2132 (char *)uiop->uio_iov->iov_base + left;
2133 uiop->uio_iov->iov_len -= left;
2134 uiop->uio_offset += left;
2135 uiop->uio_resid -= left;
2138 if ((tlen + DIRHDSIZ) > uiop->uio_resid)
/* Build the dirent header in place, then copy the name after it. */
2141 dp = (struct dirent *)uiop->uio_iov->iov_base;
2142 dp->d_fileno = (int)fileno;
2144 dp->d_reclen = tlen + DIRHDSIZ;
2145 dp->d_type = DT_UNKNOWN;
2146 blksiz += dp->d_reclen;
2147 if (blksiz == DIRBLKSIZ)
2149 uiop->uio_offset += DIRHDSIZ;
2150 uiop->uio_resid -= DIRHDSIZ;
2151 uiop->uio_iov->iov_base =
2152 (char *)uiop->uio_iov->iov_base + DIRHDSIZ;
2153 uiop->uio_iov->iov_len -= DIRHDSIZ;
2154 nfsm_mtouio(uiop, len);
2155 cp = uiop->uio_iov->iov_base;
2157 *cp = '\0'; /* null terminate */
2158 uiop->uio_iov->iov_base =
2159 (char *)uiop->uio_iov->iov_base + tlen;
2160 uiop->uio_iov->iov_len -= tlen;
2161 uiop->uio_offset += tlen;
2162 uiop->uio_resid -= tlen;
/* Entry didn't fit: just advance past its name in the reply. */
2164 nfsm_adv(nfsm_rndup(len));
2166 tl = nfsm_dissect(u_int32_t *,
2169 tl = nfsm_dissect(u_int32_t *,
2173 cookie.nfsuquad[0] = *tl++;
2175 cookie.nfsuquad[1] = *tl++;
2180 more_dirs = fxdr_unsigned(int, *tl);
2183 * If at end of rpc data, get the eof boolean
2186 tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
2187 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2192 * Fill last record, iff any, out to a multiple of DIRBLKSIZ
2193 * by increasing d_reclen for the last record.
2196 left = DIRBLKSIZ - blksiz;
2197 dp->d_reclen += left;
2198 uiop->uio_iov->iov_base =
2199 (char *)uiop->uio_iov->iov_base + left;
2200 uiop->uio_iov->iov_len -= left;
2201 uiop->uio_offset += left;
2202 uiop->uio_resid -= left;
2206 * We are now either at the end of the directory or have filled the
2210 dnp->n_direofoffset = uiop->uio_offset;
2212 if (uiop->uio_resid > 0)
2213 nfs_printf("EEK! readdirrpc resid > 0\n");
/* Remember where to resume: store the last cookie at this offset. */
2214 nfs_dircookie_lock(dnp);
2215 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
2217 nfs_dircookie_unlock(dnp);
2224 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
/*
 * READDIRPLUS RPC engine (v3 only).  Like nfs_readdirrpc() but each wire
 * entry may also carry post-op attributes and a file handle; when both
 * are present the entry's nfsnode is instantiated, its attributes loaded
 * (so d_type can be filled in), and the name is entered into the name
 * cache.  ".." is deliberately skipped to avoid a lock-order deadlock
 * with lookups in the parent (see the comment below).
 */
2227 nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
2233 struct vnode *newvp;
2235 caddr_t bpos, dpos, dpossav1, dpossav2;
2236 struct mbuf *mreq, *mrep, *md, *mb, *mdsav1, *mdsav2;
2237 struct nameidata nami, *ndp = &nami;
2238 struct componentname *cnp = &ndp->ni_cnd;
2240 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2241 struct nfsnode *dnp = VTONFS(vp), *np;
2244 int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
2245 int attrflag, fhsize;
/* The buffer cache hands us whole DIRBLKSIZ-aligned blocks only. */
2251 if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
2252 (uiop->uio_resid & (DIRBLKSIZ - 1)))
2253 panic("nfs readdirplusrpc bad uio");
2259 * If there is no cookie, assume directory was stale.
2261 nfs_dircookie_lock(dnp);
2262 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
2265 nfs_dircookie_unlock(dnp);
2267 nfs_dircookie_unlock(dnp);
2268 return (NFSERR_BAD_COOKIE);
2271 * Loop around doing readdir rpc's of size nm_readdirsize
2272 * truncated to a multiple of DIRBLKSIZ.
2273 * The stopping criteria is EOF or buffer full.
2275 while (more_dirs && bigenough) {
2276 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
2277 mreq = nfsm_reqhead(vp, NFSPROC_READDIRPLUS,
2278 NFSX_FH(1) + 6 * NFSX_UNSIGNED);
2280 bpos = mtod(mb, caddr_t);
/* Request: cookie, cookie verifier, dircount and maxcount limits. */
2282 tl = nfsm_build(u_int32_t *, 6 * NFSX_UNSIGNED);
2283 *tl++ = cookie.nfsuquad[0];
2284 *tl++ = cookie.nfsuquad[1];
2285 mtx_lock(&dnp->n_mtx);
2286 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2287 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2288 mtx_unlock(&dnp->n_mtx);
2289 *tl++ = txdr_unsigned(nmp->nm_readdirsize);
2290 *tl = txdr_unsigned(nmp->nm_rsize);
2291 nfsm_request(vp, NFSPROC_READDIRPLUS, uiop->uio_td, cred);
2292 nfsm_postop_attr(vp, attrflag);
/* Stash the new cookie verifier for subsequent calls. */
2297 tl = nfsm_dissect(u_int32_t *, 3 * NFSX_UNSIGNED);
2298 mtx_lock(&dnp->n_mtx);
2299 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2300 dnp->n_cookieverf.nfsuquad[1] = *tl++;
2301 mtx_unlock(&dnp->n_mtx);
2302 more_dirs = fxdr_unsigned(int, *tl);
2304 /* loop thru the dir entries, doctoring them to 4bsd form */
2305 while (more_dirs && bigenough) {
2306 tl = nfsm_dissect(u_int32_t *, 3 * NFSX_UNSIGNED);
2307 fileno = fxdr_hyper(tl);
2308 len = fxdr_unsigned(int, *(tl + 2));
2309 if (len <= 0 || len > NFS_MAXNAMLEN) {
2314 tlen = nfsm_rndup(len);
2316 tlen += 4; /* To ensure null termination*/
/* Pad the previous record to the DIRBLKSIZ boundary if needed. */
2317 left = DIRBLKSIZ - blksiz;
2318 if ((tlen + DIRHDSIZ) > left) {
2319 dp->d_reclen += left;
2320 uiop->uio_iov->iov_base =
2321 (char *)uiop->uio_iov->iov_base + left;
2322 uiop->uio_iov->iov_len -= left;
2323 uiop->uio_offset += left;
2324 uiop->uio_resid -= left;
2327 if ((tlen + DIRHDSIZ) > uiop->uio_resid)
/* Build the dirent in place; remember the name for cache_enter(). */
2330 dp = (struct dirent *)uiop->uio_iov->iov_base;
2331 dp->d_fileno = (int)fileno;
2333 dp->d_reclen = tlen + DIRHDSIZ;
2334 dp->d_type = DT_UNKNOWN;
2335 blksiz += dp->d_reclen;
2336 if (blksiz == DIRBLKSIZ)
2338 uiop->uio_offset += DIRHDSIZ;
2339 uiop->uio_resid -= DIRHDSIZ;
2340 uiop->uio_iov->iov_base =
2341 (char *)uiop->uio_iov->iov_base + DIRHDSIZ;
2342 uiop->uio_iov->iov_len -= DIRHDSIZ;
2343 cnp->cn_nameptr = uiop->uio_iov->iov_base;
2344 cnp->cn_namelen = len;
2345 nfsm_mtouio(uiop, len);
2346 cp = uiop->uio_iov->iov_base;
2349 uiop->uio_iov->iov_base =
2350 (char *)uiop->uio_iov->iov_base + tlen;
2351 uiop->uio_iov->iov_len -= tlen;
2352 uiop->uio_offset += tlen;
2353 uiop->uio_resid -= tlen;
2355 nfsm_adv(nfsm_rndup(len));
2356 tl = nfsm_dissect(u_int32_t *, 3 * NFSX_UNSIGNED);
2358 cookie.nfsuquad[0] = *tl++;
2359 cookie.nfsuquad[1] = *tl++;
2364 * Since the attributes are before the file handle
2365 * (sigh), we must skip over the attributes and then
2366 * come back and get them.
2368 attrflag = fxdr_unsigned(int, *tl);
2372 nfsm_adv(NFSX_V3FATTR);
2373 tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
2374 doit = fxdr_unsigned(int, *tl);
2376 * Skip loading the attrs for "..". There's a
2377 * race between loading the attrs here and
2378 * lookups that look for the directory currently
2379 * being read (in the parent). We try to acquire
2380 * the exclusive lock on ".." here, owning the
2381 * lock on the directory being read. Lookup will
2382 * hold the lock on ".." and try to acquire the
2383 * lock on the directory being read.
2385 * There are other ways of fixing this, one would
2386 * be to do a trylock on the ".." vnode and skip
2387 * loading the attrs on ".." if it happens to be
2388 * locked by another process. But skipping the
2389 * attrload on ".." seems the easiest option.
2391 if (strcmp(dp->d_name, "..") == 0) {
2394 * We've already skipped over the attrs,
2395 * skip over the filehandle. And store d_type
2398 tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
2399 i = fxdr_unsigned(int, *tl);
2400 nfsm_adv(nfsm_rndup(i));
2401 dp->d_type = IFTODT(VTTOIF(VDIR));
2404 nfsm_getfh(fhp, fhsize, 1);
/* Entry is this directory itself ("."): no nget needed. */
2405 if (NFS_CMPFH(dnp, fhp, fhsize)) {
2410 error = nfs_nget(vp->v_mount, fhp,
2411 fhsize, &np, LK_EXCLUSIVE);
2418 if (doit && bigenough) {
/* Load the saved attrs into the new nfsnode and fix up d_type. */
2423 nfsm_loadattr(newvp, NULL);
2427 IFTODT(VTTOIF(np->n_vattr.va_type));
2429 /* Update n_ctime, so subsequent lookup doesn't purge entry */
2430 np->n_ctime = np->n_vattr.va_ctime.tv_sec;
2431 cache_enter(ndp->ni_dvp, ndp->ni_vp, cnp);
2434 /* Just skip over the file handle */
2435 tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
2436 i = fxdr_unsigned(int, *tl);
2438 tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
2439 fhsize = fxdr_unsigned(int, *tl);
2440 nfsm_adv(nfsm_rndup(fhsize));
2443 if (newvp != NULLVP) {
2450 tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
2451 more_dirs = fxdr_unsigned(int, *tl);
2454 * If at end of rpc data, get the eof boolean
2457 tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
2458 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2463 * Fill last record, iff any, out to a multiple of DIRBLKSIZ
2464 * by increasing d_reclen for the last record.
2467 left = DIRBLKSIZ - blksiz;
2468 dp->d_reclen += left;
2469 uiop->uio_iov->iov_base =
2470 (char *)uiop->uio_iov->iov_base + left;
2471 uiop->uio_iov->iov_len -= left;
2472 uiop->uio_offset += left;
2473 uiop->uio_resid -= left;
2477 * We are now either at the end of the directory or have filled the
2481 dnp->n_direofoffset = uiop->uio_offset;
2483 if (uiop->uio_resid > 0)
2484 nfs_printf("EEK! readdirplusrpc resid > 0\n");
/* Remember where to resume: store the last cookie at this offset. */
2485 nfs_dircookie_lock(dnp);
2486 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
2488 nfs_dircookie_unlock(dnp);
2491 if (newvp != NULLVP) {
2502 * Silly rename. To make the NFS filesystem that is stateless look a little
2503 * more like the "ufs" a remove of an active vnode is translated to a rename
2504 * to a funny looking filename that is removed by nfs_inactive on the
2505 * nfsnode. There is the potential for another process on a different client
2506 * to create the same funny name between the nfs_lookitup() fails and the
2507 * nfs_rename() completes, but...
/*
 * Set up a sillyrename for vp: allocate a struct sillyrename holding the
 * credential and a generated placeholder name (built from ticks and the
 * caller's pid), verify the name is unused via nfs_lookitup(), rename the
 * file to it, and hang the record off the nfsnode so nfs_inactive can
 * finish the removal later.  The record is freed on any failure path.
 */
2510 nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
2512 struct sillyrename *sp;
2516 unsigned int lticks;
2521 if (vp->v_type == VDIR)
2522 panic("nfs: sillyrename dir");
2524 MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename),
2525 M_NFSREQ, M_WAITOK);
2526 sp->s_cred = crhold(cnp->cn_cred);
2528 sp->s_removeit = nfs_removeit;
2532 * Fudge together a funny name.
2533 * Changing the format of the funny name to accomodate more
2534 * sillynames per directory.
2535 * The name is now changed to .nfs.<ticks>.<pid>.4, where ticks is
2536 * CPU ticks since boot.
2538 pid = cnp->cn_thread->td_proc->p_pid;
2539 lticks = (unsigned int)ticks;
2541 sp->s_namlen = sprintf(sp->s_name,
2542 ".nfs.%08x.%04x4.4", lticks,
/* Only proceed if the placeholder name does not already exist. */
2544 if (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2545 cnp->cn_thread, NULL))
2549 error = nfs_renameit(dvp, cnp, sp);
2552 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2553 cnp->cn_thread, &np);
2554 np->n_sillyrename = sp;
2559 free((caddr_t)sp, M_NFSREQ);
2564 * Look up a file name and optionally either update the file handle or
2565 * allocate an nfsnode, depending on the value of npp.
2566 * npp == NULL --> just do the lookup
2567 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
2569 * *npp != NULL --> update the file handle in the vnode
/*
 * Issue an NFSPROC_LOOKUP for `name` in `dvp` and, per the npp contract
 * above, either discard the result, replace an existing nfsnode's file
 * handle (switching between the inline and malloc'd handle storage as
 * the handle size crosses NFS_SMALLFH), or nget a fresh nfsnode.
 */
2572 nfs_lookitup(struct vnode *dvp, const char *name, int len, struct ucred *cred,
2573 struct thread *td, struct nfsnode **npp)
2575 struct vnode *newvp = NULL;
2576 struct nfsnode *np, *dnp = VTONFS(dvp);
2578 int error = 0, fhlen, attrflag;
2579 struct mbuf *mreq, *mrep, *md, *mb;
2581 int v3 = NFS_ISV3(dvp);
2583 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
2584 mreq = nfsm_reqhead(dvp, NFSPROC_LOOKUP,
2585 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
2587 bpos = mtod(mb, caddr_t);
2588 nfsm_fhtom(dvp, v3);
2589 nfsm_strtom(name, len, NFS_MAXNAMLEN);
2590 nfsm_request(dvp, NFSPROC_LOOKUP, td, cred);
2591 if (npp && !error) {
2592 nfsm_getfh(nfhp, fhlen, v3);
/* Existing nfsnode: swap in the new handle, resizing storage if needed. */
2595 if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
2596 free((caddr_t)np->n_fhp, M_NFSBIGFH);
2597 np->n_fhp = &np->n_fh;
2598 } else if (np->n_fhsize <= NFS_SMALLFH && fhlen>NFS_SMALLFH)
2599 np->n_fhp =(nfsfh_t *)malloc(fhlen, M_NFSBIGFH, M_WAITOK);
2600 bcopy((caddr_t)nfhp, (caddr_t)np->n_fhp, fhlen);
2601 np->n_fhsize = fhlen;
/* Name resolved to the directory itself. */
2603 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
2607 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np, LK_EXCLUSIVE);
2615 nfsm_postop_attr(newvp, attrflag);
2616 if (!attrflag && *npp == NULL) {
2625 nfsm_loadattr(newvp, NULL);
2629 if (npp && *npp == NULL) {
2644 * Nfs Version 3 commit rpc
/*
 * Issue an NFSPROC_COMMIT for `cnt` bytes at `offset`.  A no-op unless a
 * write verifier has been established (NFSSTA_HASWRITEVERF).  If the
 * verifier in the reply differs from the cached one, the server rebooted
 * since our writes: cache the new verifier and return
 * NFSERR_STALEWRITEVERF so the caller rewrites the data.
 */
2647 nfs_commit(struct vnode *vp, u_quad_t offset, int cnt, struct ucred *cred,
2651 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2653 int error = 0, wccflag = NFSV3_WCCRATTR;
2654 struct mbuf *mreq, *mrep, *md, *mb;
2656 mtx_lock(&nmp->nm_mtx);
2657 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) {
2658 mtx_unlock(&nmp->nm_mtx);
2661 mtx_unlock(&nmp->nm_mtx);
2662 nfsstats.rpccnt[NFSPROC_COMMIT]++;
2663 mreq = nfsm_reqhead(vp, NFSPROC_COMMIT, NFSX_FH(1));
2665 bpos = mtod(mb, caddr_t);
2667 tl = nfsm_build(u_int32_t *, 3 * NFSX_UNSIGNED);
2668 txdr_hyper(offset, tl);
2670 *tl = txdr_unsigned(cnt);
2671 nfsm_request(vp, NFSPROC_COMMIT, td, cred);
2672 nfsm_wcc_data(vp, wccflag);
2674 tl = nfsm_dissect(u_int32_t *, NFSX_V3WRITEVERF);
2675 if (bcmp((caddr_t)nmp->nm_verf, (caddr_t)tl,
2676 NFSX_V3WRITEVERF)) {
2677 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
2679 error = NFSERR_STALEWRITEVERF;
2689 * For async requests when nfsiod(s) are running, queue the request by
2690 * calling nfs_asyncio(), otherwise just all nfs_doio() to do the
/*
 * VOP_STRATEGY.  Hands an async buffer to an nfsiod via nfs_asyncio()
 * when possible; otherwise (sync, or no idle iod) performs the I/O
 * synchronously with nfs_doio().
 */
2694 nfs_strategy(struct vop_strategy_args *ap)
2696 struct buf *bp = ap->a_bp;
2699 KASSERT(!(bp->b_flags & B_DONE),
2700 ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
2701 BUF_ASSERT_HELD(bp);
2703 if (bp->b_iocmd == BIO_READ)
2709 * If the op is asynchronous and an i/o daemon is waiting
2710 * queue the request, wake it up and wait for completion
2711 * otherwise just do it ourselves.
2713 if ((bp->b_flags & B_ASYNC) == 0 ||
2714 nfs_asyncio(VFSTONFS(ap->a_vp->v_mount), bp, NOCRED, curthread))
2715 (void)nfs_doio(ap->a_vp, bp, cr, curthread);
2720 * fsync vnode op. Just call nfs_flush() with commit == 1.
/*
 * nfs_fsync: VOP_FSYNC for NFS.  Thin wrapper that delegates to
 * nfs_flush() with commit forced on, so dirty buffers are both
 * written and committed to stable storage on the server.
 */
2724 nfs_fsync(struct vop_fsync_args *ap)
2726 return (nfs_flush(ap->a_vp, ap->a_waitfor, ap->a_td, 1));
2730 * Flush all the blocks associated with a vnode.
2731 * Walk through the buffer pool and push any dirty pages
2732 * associated with the vnode.
/*
 * nfs_flush: core of fsync for NFS vnodes.
 *
 * Structure (NOTE(review): many interior lines are elided in this
 * view; the outline below is inferred from the visible code and the
 * surviving original comments — confirm against the full file):
 *
 *  1. First pass (NFSv3 + commit): gather buffers already written to
 *     the server but not yet committed (B_DELWRI | B_NEEDCOMMIT) into
 *     bvec[], then send one or more COMMIT RPCs covering them.
 *  2. Mark committed buffers done, or leave them dirty if the commit
 *     failed (stale write verifier forces a full rewrite via
 *     nfs_clearcommit()).
 *  3. Second pass: push any remaining dirty buffers with bwrite(),
 *     honoring MNT_WAIT and interruptible-mount (PCATCH) semantics.
 *  4. For MNT_WAIT, drain outstanding writes and direct-I/O async
 *     writes, then pick up any recorded write error (NWRITEERR).
 */
2735 nfs_flush(struct vnode *vp, int waitfor, struct thread *td,
2738 struct nfsnode *np = VTONFS(vp);
2742 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2743 int error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
2745 u_quad_t off, endoff, toff;
2746 struct ucred* wcred = NULL;
2747 struct buf **bvec = NULL;
/* Small on-stack array used when few buffers need committing. */
2749 #ifndef NFS_COMMITBVECSIZ
2750 #define NFS_COMMITBVECSIZ 20
2752 struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
2753 int bvecsize = 0, bveccount;
/* Interruptible mounts allow sleeps here to be broken by signals. */
2755 if (nmp->nm_flag & NFSMNT_INT)
2761 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
2762 * server, but has not been committed to stable storage on the server
2763 * yet. On the first pass, the byte range is worked out and the commit
2764 * rpc is done. On the second pass, nfs_writebp() is called to do the
/* Commit pass only applies to NFSv3 and when the caller wants it. */
2771 if (NFS_ISV3(vp) && commit) {
2772 if (bvec != NULL && bvec != bvec_on_stack)
2775 * Count up how many buffers waiting for a commit.
2779 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
2780 if (!BUF_ISLOCKED(bp) &&
2781 (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
2782 == (B_DELWRI | B_NEEDCOMMIT))
2786 * Allocate space to remember the list of bufs to commit. It is
2787 * important to use M_NOWAIT here to avoid a race with nfs_write.
2788 * If we can't get memory (for whatever reason), we will end up
2789 * committing the buffers one-by-one in the loop below.
2791 if (bveccount > NFS_COMMITBVECSIZ) {
2793 * Release the vnode interlock to avoid a lock
/* Heap-allocate when more buffers than the stack array holds. */
2797 bvec = (struct buf **)
2798 malloc(bveccount * sizeof(struct buf *),
/* Allocation failed: fall back to the small on-stack array. */
2802 bvec = bvec_on_stack;
2803 bvecsize = NFS_COMMITBVECSIZ;
2805 bvecsize = bveccount;
2807 bvec = bvec_on_stack;
2808 bvecsize = NFS_COMMITBVECSIZ;
/*
 * Gather pass: try-lock each candidate buffer and re-check its flags
 * (both can change while unlocked); skip any we cannot take.
 */
2810 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
2811 if (bvecpos >= bvecsize)
2813 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
2814 nbp = TAILQ_NEXT(bp, b_bobufs);
2817 if ((bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) !=
2818 (B_DELWRI | B_NEEDCOMMIT)) {
2820 nbp = TAILQ_NEXT(bp, b_bobufs);
2826 * Work out if all buffers are using the same cred
2827 * so we can deal with them all with one commit.
2829 * NOTE: we are not clearing B_DONE here, so we have
2830 * to do it later on in this routine if we intend to
2831 * initiate I/O on the bp.
2833 * Note: to avoid loopback deadlocks, we do not
2834 * assign b_runningbufspace.
2837 wcred = bp->b_wcred;
2838 else if (wcred != bp->b_wcred)
2840 vfs_busy_pages(bp, 1);
2844 * bp is protected by being locked, but nbp is not
2845 * and vfs_busy_pages() may sleep. We have to
2848 nbp = TAILQ_NEXT(bp, b_bobufs);
2851 * A list of these buffers is kept so that the
2852 * second loop knows which buffers have actually
2853 * been committed. This is necessary, since there
2854 * may be a race between the commit rpc and new
2855 * uncommitted writes on the file.
/* Record the buffer and extend the aggregate [off, endoff) range. */
2857 bvec[bvecpos++] = bp;
2858 toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2862 toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
2870 * Commit data on the server, as required.
2871 * If all bufs are using the same wcred, then use that with
2872 * one call for all of them, otherwise commit each one
/* Single COMMIT covering the whole range when one cred suffices ... */
2875 if (wcred != NOCRED)
2876 retv = nfs_commit(vp, off, (int)(endoff - off),
/* ... otherwise one COMMIT per buffer with that buffer's cred. */
2880 for (i = 0; i < bvecpos; i++) {
2883 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2885 size = (u_quad_t)(bp->b_dirtyend
2887 retv = nfs_commit(vp, off, (int)size,
/*
 * Stale verifier: the server rebooted, so every "committed" buffer on
 * this mount must be rewritten from scratch.
 */
2893 if (retv == NFSERR_STALEWRITEVERF)
2894 nfs_clearcommit(vp->v_mount);
2897 * Now, either mark the blocks I/O done or mark the
2898 * blocks dirty, depending on whether the commit
2901 for (i = 0; i < bvecpos; i++) {
2903 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
2906 * Error, leave B_DELWRI intact
2908 vfs_unbusy_pages(bp);
2912 * Success, remove B_DELWRI ( bundirty() ).
2914 * b_dirtyoff/b_dirtyend seem to be NFS
2915 * specific. We should probably move that
2916 * into bundirty(). XXX
/* Committed OK: complete the buffer as if its write finished. */
2919 bp->b_flags |= B_ASYNC;
2921 bp->b_flags &= ~B_DONE;
2922 bp->b_ioflags &= ~BIO_ERROR;
2923 bp->b_dirtyoff = bp->b_dirtyend = 0;
2930 * Start/do any write(s) that are required.
/* Second pass: write out remaining dirty buffers. */
2934 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
2935 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
2936 if (waitfor != MNT_WAIT || passone)
/* MNT_WAIT final pass: sleep for the buffer lock (interruptibly). */
2939 error = BUF_TIMELOCK(bp,
2940 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
2941 BO_MTX(bo), "nfsfsync", slpflag, slptimeo);
2946 if (error == ENOLCK) {
/* Interrupted on an intr mount: abandon the flush with an error. */
2950 if (nfs_sigintr(nmp, NULL, td)) {
/* After a signal, stop catching and poll with a timeout instead. */
2954 if (slpflag == PCATCH) {
2960 if ((bp->b_flags & B_DELWRI) == 0)
2961 panic("nfs_fsync: not dirty")
2962 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) {
2968 if (passone || !commit)
2969 bp->b_flags |= B_ASYNC;
2971 bp->b_flags |= B_ASYNC;
2973 if (nfs_sigintr(nmp, NULL, td)) {
/* MNT_WAIT: wait for all in-flight writes on the bufobj to finish. */
2984 if (waitfor == MNT_WAIT) {
2985 while (bo->bo_numoutput) {
2986 error = bufobj_wwait(bo, slpflag, slptimeo);
2989 error = nfs_sigintr(nmp, NULL, td);
2992 if (slpflag == PCATCH) {
2999 if (bo->bo_dirty.bv_cnt != 0 && commit) {
3004 * Wait for all the async IO requests to drain
/* Also drain O_DIRECT async writes tracked on the nfsnode. */
3007 mtx_lock(&np->n_mtx);
3008 while (np->n_directio_asyncwr > 0) {
3009 np->n_flag |= NFSYNCWAIT;
3010 error = nfs_msleep(td, (caddr_t)&np->n_directio_asyncwr,
3011 &np->n_mtx, slpflag | (PRIBIO + 1),
3014 if (nfs_sigintr(nmp, (struct nfsreq *)0, td)) {
3015 mtx_unlock(&np->n_mtx);
3021 mtx_unlock(&np->n_mtx);
/* Report (and clear) any asynchronous write error recorded earlier. */
3024 mtx_lock(&np->n_mtx);
3025 if (np->n_flag & NWRITEERR) {
3026 error = np->n_error;
3027 np->n_flag &= ~NWRITEERR;
/* Everything clean and committed: the node is no longer modified. */
3029 if (commit && bo->bo_dirty.bv_cnt == 0 &&
3030 bo->bo_numoutput == 0 && np->n_directio_asyncwr == 0)
3031 np->n_flag &= ~NMODIFIED;
3032 mtx_unlock(&np->n_mtx);
/* Free the commit vector if it was heap-allocated. */
3034 if (bvec != NULL && bvec != bvec_on_stack)
3040 * NFS advisory byte-level locks.
/*
 * nfs_advlock: VOP_ADVLOCK for NFS.  With NFSMNT_NOLOCKD ("nolockd"
 * mount option) locks are handled locally via lf_advlock(); otherwise
 * the request is forwarded to the NLM client path (nfs_advlock_p).
 * The vnode is share-locked around the operation.
 * NOTE(review): interior lines (EOF handling of `size`, unlock paths)
 * are elided in this view.
 */
3043 nfs_advlock(struct vop_advlock_args *ap)
3045 struct vnode *vp = ap->a_vp;
3049 error = vn_lock(vp, LK_SHARED);
3052 if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
/* Local locking: use the cached file size for whole-file ranges. */
3053 size = VTONFS(vp)->n_size;
3055 error = lf_advlock(ap, &(vp->v_lockf), size);
/* Remote locking via the NFS lock daemon / NLM protocol. */
3058 error = nfs_advlock_p(ap);
3067 * NFS advisory byte-level locks.
/*
 * nfs_advlockasync: asynchronous variant of nfs_advlock (VOP_ADVLOCKASYNC).
 * Same local-vs-remote split: NFSMNT_NOLOCKD mounts use the local
 * lf_advlockasync(); the remote branch is elided in this view.
 */
3070 nfs_advlockasync(struct vop_advlockasync_args *ap)
3072 struct vnode *vp = ap->a_vp;
3076 error = vn_lock(vp, LK_SHARED);
3079 if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
3080 size = VTONFS(vp)->n_size;
3082 error = lf_advlockasync(ap, &(vp->v_lockf), size);
3091 * Print out the contents of an nfsnode.
/*
 * nfs_print: VOP_PRINT — debugging aid that dumps identifying fields
 * of the nfsnode (fileid/fsid), with extra output for FIFOs.
 */
3094 nfs_print(struct vop_print_args *ap)
3096 struct vnode *vp = ap->a_vp;
3097 struct nfsnode *np = VTONFS(vp);
3099 nfs_printf("\tfileid %ld fsid 0x%x",
3100 np->n_vattr.va_fileid, np->n_vattr.va_fsid);
3101 if (vp->v_type == VFIFO)
3108 * This is the "real" nfs::bwrite(struct buf*).
3109 * We set B_CACHE if this is a VMIO buffer.
/*
 * nfs_writebp: write a buffer to the NFS server.  Undirties the
 * buffer, hands it to the strategy routine, and for synchronous
 * writes waits for completion (redirtying on failure — the redirty
 * path is elided in this view).
 */
3112 nfs_writebp(struct buf *bp, int force __unused, struct thread *td)
3115 int oldflags = bp->b_flags;
3121 BUF_ASSERT_HELD(bp);
/* Invalid buffers are not written (elided branch releases them). */
3123 if (bp->b_flags & B_INVAL) {
3128 bp->b_flags |= B_CACHE;
3131 * Undirty the bp. We will redirty it later if the I/O fails.
/* Reset completion/error state before starting the write. */
3136 bp->b_flags &= ~B_DONE;
3137 bp->b_ioflags &= ~BIO_ERROR;
3138 bp->b_iocmd = BIO_WRITE;
/* Account an in-progress write on the bufobj and charge the thread. */
3140 bufobj_wref(bp->b_bufobj);
3141 curthread->td_ru.ru_oublock++;
3145 * Note: to avoid loopback deadlocks, we do not
3146 * assign b_runningbufspace.
3148 vfs_busy_pages(bp, 1);
3151 bp->b_iooffset = dbtob(bp->b_blkno);
/* Synchronous write: wait for the I/O and propagate its status. */
3154 if( (oldflags & B_ASYNC) == 0) {
3155 int rtval = bufwait(bp);
3157 if (oldflags & B_DELWRI) {
3170 * nfs special file access vnode op.
3171 * Essentially just get vattr and then imitate iaccess() since the device is
3172 * local to the client.
/*
 * nfsspec_access: VOP_ACCESS for special files (devices, fifos,
 * sockets) on NFS.  Fetches attributes from the server and then runs
 * the standard local vaccess() permission check against them.
 */
3175 nfsspec_access(struct vop_access_args *ap)
3178 struct ucred *cred = ap->a_cred;
3179 struct vnode *vp = ap->a_vp;
3180 mode_t mode = ap->a_mode;
3185 * Disallow write attempts on filesystems mounted read-only;
3186 * unless the file is a socket, fifo, or a block or character
3187 * device resident on the filesystem.
3189 if ((mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
3190 switch (vp->v_type) {
/* Fetch current attributes, then do a local permission check. */
3200 error = VOP_GETATTR(vp, vap, cred, ap->a_td);
3203 error = vaccess(vp->v_type, vap->va_mode, vap->va_uid, vap->va_gid,
3210 * Read wrapper for fifos.
/*
 * nfsfifo_read: VOP_READ wrapper for FIFOs on NFS.  Records the
 * access time locally on the nfsnode (under n_mtx), then delegates
 * the actual read to the generic fifo vnode operations.
 */
3213 nfsfifo_read(struct vop_read_args *ap)
3215 struct nfsnode *np = VTONFS(ap->a_vp);
/* Set the access-time flag/timestamp; NACC set on the elided line. */
3221 mtx_lock(&np->n_mtx);
3223 getnanotime(&np->n_atim);
3224 mtx_unlock(&np->n_mtx);
3225 error = fifo_specops.vop_read(ap);
3230 * Write wrapper for fifos.
/*
 * nfsfifo_write: VOP_WRITE wrapper for FIFOs on NFS.  Records the
 * modification time locally on the nfsnode (under n_mtx), then
 * delegates the actual write to the generic fifo vnode operations.
 */
3233 nfsfifo_write(struct vop_write_args *ap)
3235 struct nfsnode *np = VTONFS(ap->a_vp);
/* Set the update-time flag/timestamp; NUPD set on the elided line. */
3240 mtx_lock(&np->n_mtx);
3242 getnanotime(&np->n_mtim);
3243 mtx_unlock(&np->n_mtx);
3244 return(fifo_specops.vop_write(ap));
3248 * Close wrapper for fifos.
3250 * Update the times on the nfsnode then do fifo close.
/*
 * nfsfifo_close: VOP_CLOSE wrapper for FIFOs on NFS.  If reads or
 * writes occurred (NACC/NUPD pending), push the locally-recorded
 * timestamps: on the last reference of a writable mount, via an
 * explicit VOP_SETATTR; otherwise (elided path) presumably marking
 * the attributes for later update.  Finally performs the generic
 * fifo close.
 */
3253 nfsfifo_close(struct vop_close_args *ap)
3255 struct vnode *vp = ap->a_vp;
3256 struct nfsnode *np = VTONFS(vp);
3260 mtx_lock(&np->n_mtx);
3261 if (np->n_flag & (NACC | NUPD)) {
3263 if (np->n_flag & NACC)
3265 if (np->n_flag & NUPD)
/*
 * Last reference on a read-write mount: send the pending access
 * and/or modification times to the server now.
 */
3268 if (vrefcnt(vp) == 1 &&
3269 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3271 if (np->n_flag & NACC)
3272 vattr.va_atime = np->n_atim;
3273 if (np->n_flag & NUPD)
3274 vattr.va_mtime = np->n_mtim;
3275 mtx_unlock(&np->n_mtx);
3276 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_td);
3280 mtx_unlock(&np->n_mtx);
3282 return (fifo_specops.vop_close(ap));
3286 * Just call nfs_writebp() with the force argument set to 1.
3288 * NOTE: B_DONE may or may not be set in a_bp on call.
/*
 * nfs_bwrite: bop_write entry point for buf_ops_nfs.  Trivial
 * trampoline into nfs_writebp() with force = 1 and the current
 * thread's context.
 */
3291 nfs_bwrite(struct buf *bp)
3294 return (nfs_writebp(bp, 1, curthread));
3297 struct buf_ops buf_ops_nfs = {
3298 .bop_name = "buf_ops_nfs",
3299 .bop_write = nfs_bwrite,
3300 .bop_strategy = bufstrategy,
3301 .bop_sync = bufsync,
3302 .bop_bdflush = bufbdflush,