 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 *	@(#)nfs_subs.c	8.8 (Berkeley) 5/22/95
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

 * These functions support the macros and help fiddle mbuf chains for
 * the nfs op functions.  They do things like create the rpc header and
 * copy data between mbuf chains and uio lists.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/sysent.h>
#include <sys/syscall.h>
#include <sys/sysproto.h>
#include <sys/taskqueue.h>

#include <vm/vm_object.h>
#include <vm/vm_extern.h>

#include <nfs/nfsproto.h>
#include <nfsclient/nfs.h>
#include <nfsclient/nfsnode.h>
#include <nfs/nfs_kdtrace.h>
#include <nfs/xdr_subs.h>
#include <nfsclient/nfsm_subs.h>
#include <nfsclient/nfsmount.h>

#include <netinet/in.h>
 * Note that stdarg.h and the ANSI style va_start macro are used for both
 * ANSI and traditional C compilers.

#include <machine/stdarg.h>
dtrace_nfsclient_attrcache_flush_probe_func_t
    dtrace_nfsclient_attrcache_flush_done_probe;
uint32_t nfsclient_attrcache_flush_done_id;

dtrace_nfsclient_attrcache_get_hit_probe_func_t
    dtrace_nfsclient_attrcache_get_hit_probe;
uint32_t nfsclient_attrcache_get_hit_id;

dtrace_nfsclient_attrcache_get_miss_probe_func_t
    dtrace_nfsclient_attrcache_get_miss_probe;
uint32_t nfsclient_attrcache_get_miss_id;

dtrace_nfsclient_attrcache_load_probe_func_t
    dtrace_nfsclient_attrcache_load_done_probe;
uint32_t nfsclient_attrcache_load_done_id;
#endif /* !KDTRACE_HOOKS */

 * Data items converted to xdr at startup, since they are constant
 * This is kinda hokey, but may save a little time doing byte swaps
u_int32_t nfs_xdrneg1;
u_int32_t nfs_true, nfs_false;
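/*
 * Added note: the txdr_/fxdr_ macros from <nfs/xdr_subs.h> are thin
 * wrappers around host/network byte-order conversion, so caching these
 * constants amounts to, roughly, nfs_true = htonl(1), nfs_false = htonl(0)
 * and nfs_xdrneg1 = htonl((u_int32_t)-1), computed once at initialization
 * instead of on every request.
 */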
/* And other global data */
static u_int32_t nfs_xid = 0;
static enum vtype nv2tov_type[8] = {
        VNON, VREG, VDIR, VBLK, VCHR, VLNK, VNON, VNON

int nfs_pbuf_freecnt = -1;	/* start out unlimited */

struct nfs_bufq nfs_bufq;
static struct mtx nfs_xid_mtx;
struct task nfs_nfsiodnew_task;

 * and the reverse mapping from generic to Version 2 procedure numbers
int nfsv2_procid[NFS_NPROCS] = {

LIST_HEAD(nfsnodehashhead, nfsnode);

        mtx_lock(&nfs_xid_mtx);
        /* Get a pretty random xid to start with */
         * Skip zero xid if it should ever happen.
        mtx_unlock(&nfs_xid_mtx);
 * copies a uio scatter/gather list to an mbuf chain.
 * NOTE: can only handle iovcnt == 1
nfsm_uiotombuf(struct uio *uiop, struct mbuf **mq, int siz, caddr_t *bpos)
        struct mbuf *mp, *mp2;
        int xfer, left, mlen;
        int uiosiz, clflg, rem;

        KASSERT(uiop->uio_iovcnt == 1, ("nfsm_uiotombuf: iovcnt != 1"));

        if (siz > MLEN)		/* or should it >= MCLBYTES ?? */

        rem = nfsm_rndup(siz)-siz;
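        /*
         * Added note: nfsm_rndup() rounds up to the next multiple of 4, so
         * for siz = 5 it yields 8 and rem = 3, i.e. the number of zero pad
         * bytes appended below to keep the XDR data 32-bit aligned.
         */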
        left = uiop->uio_iov->iov_len;
        uiocp = uiop->uio_iov->iov_base;
        mlen = M_TRAILINGSPACE(mp);
        mp = m_getcl(M_WAITOK, MT_DATA, 0);
        mp = m_get(M_WAITOK, MT_DATA);
        mlen = M_TRAILINGSPACE(mp);
        xfer = (left > mlen) ? mlen : left;
        if (uiop->uio_iov->iov_op != NULL)
                (*(uiop->uio_iov->iov_op))
                    (uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
        if (uiop->uio_segflg == UIO_SYSSPACE)
                bcopy(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
                copyin(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
        uiop->uio_offset += xfer;
        uiop->uio_resid -= xfer;
        uiop->uio_iov->iov_base =
            (char *)uiop->uio_iov->iov_base + uiosiz;
        uiop->uio_iov->iov_len -= uiosiz;
        if (rem > M_TRAILINGSPACE(mp)) {
                mp = m_get(M_WAITOK, MT_DATA);
        cp = mtod(mp, caddr_t)+mp->m_len;
        for (left = 0; left < rem; left++)
        *bpos = mtod(mp, caddr_t)+mp->m_len;

 * Copy a string into mbufs for the hard cases...
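/*
 * Illustrative sketch (added, not part of the original source):
 * nfsm_strtmbuf() emits an XDR opaque/string, i.e. a 4-byte big-endian
 * length word followed by the bytes themselves, zero-padded to the next
 * 4-byte boundary.  The hypothetical helper below builds the same layout
 * by hand for a short string, purely to document the wire format.
 */
#if 0
static void
example_xdr_string_layout(void)
{
        const char name[] = "hello";			/* 5 data bytes */
        u_int32_t words[1 + 2];				/* length word + 8 bytes */

        words[0] = txdr_unsigned(sizeof(name) - 1);	/* length = 5 */
        bzero(&words[1], 2 * sizeof(u_int32_t));	/* pre-zero => pad bytes are 0 */
        bcopy(name, &words[1], sizeof(name) - 1);	/* 5 bytes + 3 zero pads */
}
#endif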
nfsm_strtmbuf(struct mbuf **mb, char **bpos, const char *cp, long siz)
        struct mbuf *m1 = NULL, *m2;
        long left, xfer, len, tlen;

        left = M_TRAILINGSPACE(m2);
        tl = ((u_int32_t *)(*bpos));
        *tl++ = txdr_unsigned(siz);
        left -= NFSX_UNSIGNED;
        m2->m_len += NFSX_UNSIGNED;
        bcopy(cp, (caddr_t) tl, left);

        /* Loop around adding mbufs */
        m1 = m_getcl(M_WAITOK, MT_DATA, 0);
        m1->m_len = MCLBYTES;
        m1 = m_get(M_WAITOK, MT_DATA);
        tl = mtod(m1, u_int32_t *);
        *tl++ = txdr_unsigned(siz);
        m1->m_len -= NFSX_UNSIGNED;
        tlen = NFSX_UNSIGNED;
        if (siz < m1->m_len) {
                len = nfsm_rndup(siz);
                xfer = len = m1->m_len;
        bcopy(cp, (caddr_t) tl, xfer);
        m1->m_len = len+tlen;
        *bpos = mtod(m1, caddr_t)+m1->m_len;

 * Called once to initialize data structures...

nfs_init(struct vfsconf *vfsp)

        nfsmount_zone = uma_zcreate("NFSMOUNT", sizeof(struct nfsmount),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
        nfs_true = txdr_unsigned(TRUE);
        nfs_false = txdr_unsigned(FALSE);
        nfs_xdrneg1 = txdr_unsigned(-1);
        nfs_ticks = (hz * NFS_TICKINTVL + 500) / 1000;
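        /*
         * Added note (assumption, not in the original source): NFS_TICKINTVL
         * is the desired NFS timer interval in milliseconds, so with a
         * typical hz = 1000 and NFS_TICKINTVL = 10 this evaluates to
         * nfs_ticks = (1000 * 10 + 500) / 1000 = 10 scheduler ticks; the
         * + 500 term rounds to the nearest tick.
         */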
        /* Ensure async daemons disabled */
        for (i = 0; i < NFS_MAXASYNCDAEMON; i++) {
                nfs_iodwant[i] = NFSIOD_NOT_AVAILABLE;
                nfs_iodmount[i] = NULL;
        nfs_nhinit();			/* Init the nfsnode table */

         * Initialize reply list and start timer
        mtx_init(&nfs_iod_mtx, "NFS iod lock", NULL, MTX_DEF);
        mtx_init(&nfs_xid_mtx, "NFS xid lock", NULL, MTX_DEF);
        TASK_INIT(&nfs_nfsiodnew_task, 0, nfs_nfsiodnew_tq, NULL);

        nfs_pbuf_freecnt = nswbuf / 2 + 1;

nfs_uninit(struct vfsconf *vfsp)

         * Tell all nfsiod processes to exit. Clear nfs_iodmax, and wakeup
         * any sleeping nfsiods so they check nfs_iodmax and exit.
         * Drain nfsiodnew task before we wait for them to finish.
        mtx_lock(&nfs_iod_mtx);
        mtx_unlock(&nfs_iod_mtx);
        taskqueue_drain(taskqueue_thread, &nfs_nfsiodnew_task);
        mtx_lock(&nfs_iod_mtx);
        for (i = 0; i < nfs_numasync; i++)
                if (nfs_iodwant[i] == NFSIOD_AVAILABLE)
                        wakeup(&nfs_iodwant[i]);
        /* The last nfsiod to exit will wake us up when nfs_numasync hits 0 */
        msleep(&nfs_numasync, &nfs_iod_mtx, PWAIT, "ioddie", 0);
        mtx_unlock(&nfs_iod_mtx);

        uma_zdestroy(nfsmount_zone);
nfs_dircookie_lock(struct nfsnode *np)
        mtx_lock(&np->n_mtx);
        while (np->n_flag & NDIRCOOKIELK)
                (void) msleep(&np->n_flag, &np->n_mtx, PZERO, "nfsdirlk", 0);
        np->n_flag |= NDIRCOOKIELK;
        mtx_unlock(&np->n_mtx);

nfs_dircookie_unlock(struct nfsnode *np)
        mtx_lock(&np->n_mtx);
        np->n_flag &= ~NDIRCOOKIELK;
        mtx_unlock(&np->n_mtx);

nfs_upgrade_vnlock(struct vnode *vp)
        ASSERT_VOP_LOCKED(vp, "nfs_upgrade_vnlock");
        old_lock = VOP_ISLOCKED(vp);
        if (old_lock != LK_EXCLUSIVE) {
                KASSERT(old_lock == LK_SHARED,
                    ("nfs_upgrade_vnlock: wrong old_lock %d", old_lock));
                /* Upgrade to exclusive lock, this might block */
                vn_lock(vp, LK_UPGRADE | LK_RETRY);

nfs_downgrade_vnlock(struct vnode *vp, int old_lock)
        if (old_lock != LK_EXCLUSIVE) {
                KASSERT(old_lock == LK_SHARED, ("wrong old_lock %d", old_lock));
                /* Downgrade from exclusive lock. */
                vn_lock(vp, LK_DOWNGRADE | LK_RETRY);

nfs_printf(const char *fmt, ...)
 * Attribute cache routines.
 * nfs_loadattrcache() - loads or updates the cache contents from attributes
 *	that are on the mbuf list
 * nfs_getattrcache() - returns valid attributes if found in cache, returns

 * Load the attribute cache (that lives in the nfsnode entry) with
 * the values on the mbuf list and
 * copy the attributes to *vaper

nfs_loadattrcache(struct vnode **vpp, struct mbuf **mdp, caddr_t *dposp,
    struct vattr *vaper, int dontshrink)
        struct vnode *vp = *vpp;
        struct nfs_fattr *fp;
        struct nfsnode *np = NULL;
        struct timespec mtime, mtime_save;
        int v3 = NFS_ISV3(vp);

        t1 = (mtod(md, caddr_t) + md->m_len) - *dposp;
        cp2 = nfsm_disct(mdp, dposp, NFSX_FATTR(v3), t1, M_WAITOK);
        fp = (struct nfs_fattr *)cp2;
        vtyp = nfsv3tov_type(fp->fa_type);
        vmode = fxdr_unsigned(u_short, fp->fa_mode);
        rdev = makedev(fxdr_unsigned(int, fp->fa3_rdev.specdata1),
            fxdr_unsigned(int, fp->fa3_rdev.specdata2));
        fxdr_nfsv3time(&fp->fa3_mtime, &mtime);

        vtyp = nfsv2tov_type(fp->fa_type);
        vmode = fxdr_unsigned(u_short, fp->fa_mode);
         * The duplicate information returned in fa_type and fa_mode
         * is an ambiguity in the NFS version 2 protocol.
         * VREG should be taken literally as a regular file. If a
         * server intends to return some type information differently
         * in the upper bits of the mode field (e.g. for sockets, or
         * FIFOs), NFSv2 mandates fa_type to be VNON. Anyway, we
         * leave the examination of the mode bits even in the VREG
         * case to avoid breakage for bogus servers, but we make sure
         * that there are actually type bits set in the upper part of
         * fa_mode (and failing that, trust the va_type field).
         * NFSv3 cleared the issue, and requires fa_mode to not
         * contain any type information (while also introducing sockets
         * and FIFOs for fa_type).
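         * Illustrative example (added, not part of the original comment):
         * a v2 server that reports fa_type as a regular file but sets the
         * S_IFCHR bits in fa_mode falls into the check below and is retyped
         * from fa_mode via IFTOVT(), ending up as VCHR; a server that leaves
         * the S_IFMT bits zero keeps the literal VREG taken from fa_type.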
        if (vtyp == VNON || (vtyp == VREG && (vmode & S_IFMT) != 0))
                vtyp = IFTOVT(vmode);
        rdev = fxdr_unsigned(int32_t, fp->fa2_rdev);
        fxdr_nfsv2time(&fp->fa2_mtime, &mtime);

         * Really ugly NFSv2 kludge.
        if (vtyp == VCHR && rdev == 0xffffffff)

         * If v_type == VNON it is a new node, so fill in the v_type,
         * n_mtime fields. Check to see if it represents a special
         * device, and if so, check for a possible alias. Once the
         * correct vnode has been obtained, fill in the rest of the

        mtx_lock(&np->n_mtx);
        if (vp->v_type != vtyp) {
                if (vp->v_type == VFIFO)
                        vp->v_op = &nfs_fifoops;
        vap->va_mode = (vmode & 07777);
        mtime_save = vap->va_mtime;
        vap->va_mtime = mtime;
        vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
        vap->va_nlink = fxdr_unsigned(u_short, fp->fa_nlink);
        vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid);
        vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid);
        vap->va_size = fxdr_hyper(&fp->fa3_size);
        vap->va_blocksize = NFS_FABLKSIZE;
        vap->va_bytes = fxdr_hyper(&fp->fa3_used);
        vap->va_fileid = fxdr_unsigned(int32_t,
            fp->fa3_fileid.nfsuquad[1]);
        fxdr_nfsv3time(&fp->fa3_atime, &vap->va_atime);
        fxdr_nfsv3time(&fp->fa3_ctime, &vap->va_ctime);

        vap->va_nlink = fxdr_unsigned(u_short, fp->fa_nlink);
        vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid);
        vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid);
        vap->va_size = fxdr_unsigned(u_int32_t, fp->fa2_size);
        vap->va_blocksize = fxdr_unsigned(int32_t, fp->fa2_blocksize);
        vap->va_bytes = (u_quad_t)fxdr_unsigned(int32_t, fp->fa2_blocks)
        vap->va_fileid = fxdr_unsigned(int32_t, fp->fa2_fileid);
        fxdr_nfsv2time(&fp->fa2_atime, &vap->va_atime);
        vap->va_ctime.tv_sec = fxdr_unsigned(u_int32_t,
            fp->fa2_ctime.nfsv2_sec);
        vap->va_ctime.tv_nsec = 0;
        vap->va_gen = fxdr_unsigned(u_int32_t, fp->fa2_ctime.nfsv2_usec);

        np->n_attrstamp = time_second;

        if (vap->va_size != np->n_size) {
                if (vap->va_type == VREG) {
                        if (dontshrink && vap->va_size < np->n_size) {
                                 * We've been told not to shrink the file;
                                 * zero np->n_attrstamp to indicate that
                                 * the attributes are stale.
                                vap->va_size = np->n_size;
                                KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
                                vnode_pager_setsize(vp, np->n_size);
                        } else if (np->n_flag & NMODIFIED) {
                                 * We've modified the file: Use the larger
                                 * of our size, and the server's size.
                                if (vap->va_size < np->n_size) {
                                        vap->va_size = np->n_size;
                                        np->n_size = vap->va_size;
                                        np->n_flag |= NSIZECHANGED;
                                vnode_pager_setsize(vp, np->n_size);
                        } else if (vap->va_size < np->n_size) {
                                 * When shrinking the size, the call to
                                 * vnode_pager_setsize() cannot be done
                                 * with the mutex held, so delay it until
                                 * after the mtx_unlock call.
                                nsize = np->n_size = vap->va_size;
                                np->n_flag |= NSIZECHANGED;
                                np->n_size = vap->va_size;
                                np->n_flag |= NSIZECHANGED;
                                vnode_pager_setsize(vp, np->n_size);
                        np->n_size = vap->va_size;
         * The following checks are added to prevent a race between (say)
         * a READDIR+ and a WRITE.
         * READDIR+, WRITE requests sent out.
         * READDIR+ resp, WRITE resp received on client.
         * However, the WRITE resp was handled before the READDIR+ resp
         * causing the post op attrs from the write to be loaded first
         * and the attrs from the READDIR+ to be loaded later. If this
         * happens, we have stale attrs loaded into the attrcache.
         * We detect this by checking for the mtime moving back. We
         * invalidate the attrcache when this happens.
        if (timespeccmp(&mtime_save, &vap->va_mtime, >)) {
                /* Size changed or mtime went backwards */
                KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);

        bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(*vap));
        if (np->n_flag & NCHG) {
                if (np->n_flag & NACC)
                        vaper->va_atime = np->n_atim;
                if (np->n_flag & NUPD)
                        vaper->va_mtime = np->n_mtim;
        if (np->n_attrstamp != 0)
                KDTRACE_NFS_ATTRCACHE_LOAD_DONE(vp, &np->n_vattr, 0);
        mtx_unlock(&np->n_mtx);
        vnode_pager_setsize(vp, nsize);
        KDTRACE_NFS_ATTRCACHE_LOAD_DONE(vp, NULL, error);

#include <sys/sysctl.h>
SYSCTL_DECL(_vfs_oldnfs);
static int nfs_acdebug;
SYSCTL_INT(_vfs_oldnfs, OID_AUTO, acdebug, CTLFLAG_RW, &nfs_acdebug, 0,
    "Toggle acdebug (attribute cache debug) flag");

 * Check the time stamp
 * If the cache is valid, copy contents to *vap and return 0
 * otherwise return an error

nfs_getattrcache(struct vnode *vp, struct vattr *vaper)
        struct nfsmount *nmp;

        nmp = VFSTONFS(vp->v_mount);
        mtx_lock(&Giant);	/* nfs_printf() */
        mtx_lock(&np->n_mtx);
        /* XXX n_mtime doesn't seem to be updated on a miss-and-reload */
        timeo = (time_second - np->n_mtime.tv_sec) / 10;
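        /*
         * Added note: the heuristic above makes the cache timeout one tenth
         * of the file's age, so a file last modified 60 seconds ago starts
         * with timeo = 6 seconds, which is then clamped below to the mount's
         * acregmin/acregmax (or acdirmin/acdirmax) bounds.
         */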
        nfs_printf("nfs_getattrcache: initial timeo = %d\n", timeo);

        if (vap->va_type == VDIR) {
                if ((np->n_flag & NMODIFIED) || timeo < nmp->nm_acdirmin)
                        timeo = nmp->nm_acdirmin;
                else if (timeo > nmp->nm_acdirmax)
                        timeo = nmp->nm_acdirmax;
                if ((np->n_flag & NMODIFIED) || timeo < nmp->nm_acregmin)
                        timeo = nmp->nm_acregmin;
                else if (timeo > nmp->nm_acregmax)
                        timeo = nmp->nm_acregmax;

        nfs_printf("acregmin %d; acregmax %d; acdirmin %d; acdirmax %d\n",
            nmp->nm_acregmin, nmp->nm_acregmax,
            nmp->nm_acdirmin, nmp->nm_acdirmax);
        nfs_printf("nfs_getattrcache: age = %d; final timeo = %d\n",
            (time_second - np->n_attrstamp), timeo);

        if ((time_second - np->n_attrstamp) >= timeo) {
                nfsstats.attrcache_misses++;
                mtx_unlock(&np->n_mtx);
                mtx_unlock(&Giant);	/* nfs_printf() */
                KDTRACE_NFS_ATTRCACHE_GET_MISS(vp);

        nfsstats.attrcache_hits++;
        if (vap->va_size != np->n_size) {
                if (vap->va_type == VREG) {
                        if (np->n_flag & NMODIFIED) {
                                if (vap->va_size < np->n_size)
                                        vap->va_size = np->n_size;
                                        np->n_size = vap->va_size;
                                np->n_size = vap->va_size;
                        vnode_pager_setsize(vp, np->n_size);
                        np->n_size = vap->va_size;
        bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(struct vattr));
        if (np->n_flag & NCHG) {
                if (np->n_flag & NACC)
                        vaper->va_atime = np->n_atim;
                if (np->n_flag & NUPD)
                        vaper->va_mtime = np->n_mtim;
        mtx_unlock(&np->n_mtx);
        mtx_unlock(&Giant);	/* nfs_printf() */
        KDTRACE_NFS_ATTRCACHE_GET_HIT(vp, vap);

 * Purge all cached information about an NFS vnode including name
 * cache entries, the attribute cache, and the access cache. This is
 * called when an NFS request for a node fails with a stale

nfs_purgecache(struct vnode *vp)
        mtx_lock(&np->n_mtx);
        KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
        for (i = 0; i < NFS_ACCESSCACHESIZE; i++)
                np->n_accesscache[i].stamp = 0;
        KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp);
        mtx_unlock(&np->n_mtx);

static nfsuint64 nfs_nullcookie = { { 0, 0 } };

 * This function finds the directory cookie that corresponds to the
 * logical byte offset given.
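/*
 * Added worked example (assumes the stock NFS_DIRBLKSIZ = 4096 and
 * NFSNUMCOOKIES = 31 values): an offset of 300 * 4096 gives pos = 300, so
 * the loop below skips nine full nfsdmap entries (9 * 31 = 279) and returns
 * &dp->ndm_cookies[21] from the tenth one.
 */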
nfs_getcookie(struct nfsnode *np, off_t off, int add)
        struct nfsdmap *dp, *dp2;
        nfsuint64 *retval = NULL;

        pos = (uoff_t)off / NFS_DIRBLKSIZ;
        if (pos == 0 || off < 0) {
                KASSERT(!add, ("nfs getcookie add at <= 0"));
                return (&nfs_nullcookie);
        dp = LIST_FIRST(&np->n_cookies);
        dp = malloc(sizeof (struct nfsdmap),
            M_NFSDIROFF, M_WAITOK);
        dp->ndm_eocookie = 0;
        LIST_INSERT_HEAD(&np->n_cookies, dp, ndm_list);
        while (pos >= NFSNUMCOOKIES) {
                pos -= NFSNUMCOOKIES;
                if (LIST_NEXT(dp, ndm_list)) {
                        if (!add && dp->ndm_eocookie < NFSNUMCOOKIES &&
                            pos >= dp->ndm_eocookie)
                        dp = LIST_NEXT(dp, ndm_list);
                dp2 = malloc(sizeof (struct nfsdmap),
                    M_NFSDIROFF, M_WAITOK);
                dp2->ndm_eocookie = 0;
                LIST_INSERT_AFTER(dp, dp2, ndm_list);
        if (pos >= dp->ndm_eocookie) {
                dp->ndm_eocookie = pos + 1;
        retval = &dp->ndm_cookies[pos];

 * Invalidate cached directory information, except for the actual directory
 * blocks (which are invalidated separately).
 * Done mainly to avoid the use of stale offset cookies.

nfs_invaldir(struct vnode *vp)
        struct nfsnode *np = VTONFS(vp);

        KASSERT(vp->v_type == VDIR, ("nfs: invaldir not dir"));
        nfs_dircookie_lock(np);
        np->n_direofoffset = 0;
        np->n_cookieverf.nfsuquad[0] = 0;
        np->n_cookieverf.nfsuquad[1] = 0;
        if (LIST_FIRST(&np->n_cookies))
                LIST_FIRST(&np->n_cookies)->ndm_eocookie = 0;
        nfs_dircookie_unlock(np);

 * The write verifier has changed (probably due to a server reboot), so all
 * B_NEEDCOMMIT blocks will have to be written again. Since they are on the
 * dirty block list as B_DELWRI, all this takes is clearing the B_NEEDCOMMIT
 * and B_CLUSTEROK flags. Once done the new write verifier can be set for the
 * B_CLUSTEROK must be cleared along with B_NEEDCOMMIT because stage 1 data
 * writes are not clusterable.
nfs_clearcommit(struct mount *mp)
        struct vnode *vp, *nvp;
        struct buf *bp, *nbp;

        MNT_VNODE_FOREACH_ALL(vp, mp, nvp) {
                TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
                        if (!BUF_ISLOCKED(bp) &&
                            (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
                                == (B_DELWRI | B_NEEDCOMMIT))
                                bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

 * Helper functions for former macros. Some of these should be
 * moved to their callers.

nfsm_mtofh_xx(struct vnode *d, struct vnode **v, int v3, int *f,
    struct mbuf **md, caddr_t *dpos)
        struct nfsnode *ttnp;

        tl = nfsm_dissect_xx(NFSX_UNSIGNED, md, dpos);
        *f = fxdr_unsigned(int, *tl);
        t1 = nfsm_getfh_xx(&ttfhp, &ttfhsize, (v3), md, dpos);
        t1 = nfs_nget(d->v_mount, ttfhp, ttfhsize, &ttnp, LK_EXCLUSIVE);
        tl = nfsm_dissect_xx(NFSX_UNSIGNED, md, dpos);
        *f = fxdr_unsigned(int, *tl);
        else if (fxdr_unsigned(int, *tl))
                nfsm_adv_xx(NFSX_V3FATTR, md, dpos);
        t1 = nfs_loadattrcache(&ttvp, md, dpos, NULL, 0);

nfsm_getfh_xx(nfsfh_t **f, int *s, int v3, struct mbuf **md, caddr_t *dpos)
        tl = nfsm_dissect_xx(NFSX_UNSIGNED, md, dpos);
        *s = fxdr_unsigned(int, *tl);
        if (*s <= 0 || *s > NFSX_V3FHMAX)
        *f = nfsm_dissect_xx(nfsm_rndup(*s), md, dpos);

nfsm_loadattr_xx(struct vnode **v, struct vattr *va, struct mbuf **md,
        struct vnode *ttvp = *v;

        t1 = nfs_loadattrcache(&ttvp, md, dpos, va, 0);

nfsm_postop_attr_xx(struct vnode **v, int *f, struct vattr *va,
    struct mbuf **md, caddr_t *dpos)
        struct vnode *ttvp = *v;

        tl = nfsm_dissect_xx(NFSX_UNSIGNED, md, dpos);
        *f = fxdr_unsigned(int, *tl);
        t1 = nfs_loadattrcache(&ttvp, md, dpos, va, 1);

nfsm_wcc_data_xx(struct vnode **v, int *f, struct mbuf **md, caddr_t *dpos)
        int ttattrf, ttretf = 0;

        tl = nfsm_dissect_xx(NFSX_UNSIGNED, md, dpos);
        if (*tl == nfs_true) {
                tl = nfsm_dissect_xx(6 * NFSX_UNSIGNED, md, dpos);
                mtx_lock(&(VTONFS(*v))->n_mtx);
                ttretf = (VTONFS(*v)->n_mtime.tv_sec == fxdr_unsigned(u_int32_t, *(tl + 2)) &&
                    VTONFS(*v)->n_mtime.tv_nsec == fxdr_unsigned(u_int32_t, *(tl + 3)));
                mtx_unlock(&(VTONFS(*v))->n_mtx);
        t1 = nfsm_postop_attr_xx(v, &ttattrf, NULL, md, dpos);

nfsm_strtom_xx(const char *a, int s, int m, struct mbuf **mb, caddr_t *bpos)
                return ENAMETOOLONG;
        t1 = nfsm_rndup(s) + NFSX_UNSIGNED;
        if (t1 <= M_TRAILINGSPACE(*mb)) {
                tl = nfsm_build_xx(t1, mb, bpos);
                *tl++ = txdr_unsigned(s);
                *(tl + ((t1 >> 2) - 2)) = 0;
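                /*
                 * Added note: the store above zeroes the last 32-bit word of
                 * the reserved area so that, once the string bytes are copied
                 * in, any remaining XDR pad bytes up to the 4-byte boundary
                 * are already zero.
                 */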
                t1 = nfsm_strtmbuf(mb, bpos, a, s);

nfsm_fhtom_xx(struct vnode *v, int v3, struct mbuf **mb, caddr_t *bpos)
        t1 = nfsm_rndup(VTONFS(v)->n_fhsize) + NFSX_UNSIGNED;
        if (t1 < M_TRAILINGSPACE(*mb)) {
                tl = nfsm_build_xx(t1, mb, bpos);
                *tl++ = txdr_unsigned(VTONFS(v)->n_fhsize);
                *(tl + ((t1 >> 2) - 2)) = 0;
                bcopy(VTONFS(v)->n_fhp, tl, VTONFS(v)->n_fhsize);
                t1 = nfsm_strtmbuf(mb, bpos,
                    (const char *)VTONFS(v)->n_fhp,
                    VTONFS(v)->n_fhsize);
        cp = nfsm_build_xx(NFSX_V2FH, mb, bpos);
        bcopy(VTONFS(v)->n_fhp, cp, NFSX_V2FH);
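/*
 * Added note: nfsm_v3attrbuild_xx() below emits an NFSv3 sattr3.  Each
 * settable attribute is preceded by an XDR boolean discriminant: TRUE
 * followed by the value when the field in *va is not VNOVAL, or a single
 * FALSE word when the attribute is to be left unchanged.  The atime and
 * mtime fields use a three-way discriminant (TOCLIENT / TOSERVER /
 * DONTCHANGE) instead.
 */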
nfsm_v3attrbuild_xx(struct vattr *va, int full, struct mbuf **mb,
        if (va->va_mode != (mode_t)VNOVAL) {
                tl = nfsm_build_xx(2 * NFSX_UNSIGNED, mb, bpos);
                *tl = txdr_unsigned(va->va_mode);
                tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
        if (full && va->va_uid != (uid_t)VNOVAL) {
                tl = nfsm_build_xx(2 * NFSX_UNSIGNED, mb, bpos);
                *tl = txdr_unsigned(va->va_uid);
                tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
        if (full && va->va_gid != (gid_t)VNOVAL) {
                tl = nfsm_build_xx(2 * NFSX_UNSIGNED, mb, bpos);
                *tl = txdr_unsigned(va->va_gid);
                tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
        if (full && va->va_size != VNOVAL) {
                tl = nfsm_build_xx(3 * NFSX_UNSIGNED, mb, bpos);
                txdr_hyper(va->va_size, tl);
                tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
        if (va->va_atime.tv_sec != VNOVAL) {
                if ((va->va_vaflags & VA_UTIMES_NULL) == 0) {
                        tl = nfsm_build_xx(3 * NFSX_UNSIGNED, mb, bpos);
                        *tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
                        txdr_nfsv3time(&va->va_atime, tl);
                        tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
                        *tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
                tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
                *tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
        if (va->va_mtime.tv_sec != VNOVAL) {
                if ((va->va_vaflags & VA_UTIMES_NULL) == 0) {
                        tl = nfsm_build_xx(3 * NFSX_UNSIGNED, mb, bpos);
                        *tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
                        txdr_nfsv3time(&va->va_mtime, tl);
                        tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
                        *tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
                tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
                *tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);