/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from nfs_subs.c	8.8 (Berkeley) 5/22/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * These functions support the macros and help fiddle mbuf chains for
 * the nfs op functions. They do things like create the rpc header and
 * copy data between mbuf chains and uio lists.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/sysent.h>
#include <sys/syscall.h>
#include <sys/sysproto.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsnode.h>
#include <fs/nfsclient/nfsmount.h>
#include <fs/nfsclient/nfs.h>
#include <fs/nfsclient/nfs_lock.h>

#include <netinet/in.h>

/*
 * Note that stdarg.h and the ANSI style va_start macro are used for both
 * ANSI and traditional C compilers.
 */
#include <machine/stdarg.h>

extern struct mtx ncl_iod_mutex;
extern struct proc *ncl_iodwant[NFS_MAXRAHEAD];
extern struct nfsmount *ncl_iodmount[NFS_MAXRAHEAD];
extern int ncl_numasync;
extern unsigned int ncl_iodmax;
extern struct nfsstats newnfsstats;

int
ncl_uninit(struct vfsconf *vfsp)
{
	int i;

	/*
	 * Tell all nfsiod processes to exit. Clear ncl_iodmax, and wakeup
	 * any sleeping nfsiods so they check ncl_iodmax and exit.
	 */
	mtx_lock(&ncl_iod_mutex);
	ncl_iodmax = 0;
	for (i = 0; i < ncl_numasync; i++)
		if (ncl_iodwant[i])
			wakeup(&ncl_iodwant[i]);
	/* The last nfsiod to exit will wake us up when ncl_numasync hits 0 */
	while (ncl_numasync)
		msleep(&ncl_numasync, &ncl_iod_mutex, PWAIT, "ioddie", 0);
	mtx_unlock(&ncl_iod_mutex);
	ncl_nhuninit();
	return (0);
}
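
/*
 * Shutdown handshake sketch (illustrative only; the nfsiod loop lives in
 * nfs_clnfsiod.c and its exact shape is an assumption here).  Each nfsiod
 * is expected to recheck ncl_iodmax whenever it is woken:
 *
 *	while (iod < ncl_iodmax) {
 *		... sleep on &ncl_iodwant[iod] and service queued I/O ...
 *	}
 *	ncl_numasync--;
 *	if (ncl_numasync == 0)
 *		wakeup(&ncl_numasync);	(releases the msleep() above)
 *
 * Clearing ncl_iodmax before the wakeup() loop is therefore what makes
 * every daemon fall out of its loop and exit.
 */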

void
ncl_dircookie_lock(struct nfsnode *np)
{
	mtx_lock(&np->n_mtx);
	while (np->n_flag & NDIRCOOKIELK)
		(void) msleep(&np->n_flag, &np->n_mtx, PZERO, "nfsdirlk", 0);
	np->n_flag |= NDIRCOOKIELK;
	mtx_unlock(&np->n_mtx);
}

void
ncl_dircookie_unlock(struct nfsnode *np)
{
	mtx_lock(&np->n_mtx);
	np->n_flag &= ~NDIRCOOKIELK;
	wakeup(&np->n_flag);
	mtx_unlock(&np->n_mtx);
}

int
ncl_upgrade_vnlock(struct vnode *vp)
{
	int old_lock;

	ASSERT_VOP_LOCKED(vp, "ncl_upgrade_vnlock");
	old_lock = VOP_ISLOCKED(vp);
	if (old_lock != LK_EXCLUSIVE) {
		KASSERT(old_lock == LK_SHARED,
		    ("ncl_upgrade_vnlock: wrong old_lock %d", old_lock));
		/* Upgrade to exclusive lock, this might block */
		vn_lock(vp, LK_UPGRADE | LK_RETRY);
	}
	return (old_lock);
}

void
ncl_downgrade_vnlock(struct vnode *vp, int old_lock)
{
	if (old_lock != LK_EXCLUSIVE) {
		KASSERT(old_lock == LK_SHARED, ("wrong old_lock %d", old_lock));
		/* Downgrade from exclusive lock. */
		vn_lock(vp, LK_DOWNGRADE | LK_RETRY);
	}
}
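
/*
 * Typical pairing of the two helpers above (a usage sketch; the surrounding
 * error handling in real callers is omitted):
 *
 *	old_lock = ncl_upgrade_vnlock(vp);
 *	... modify the nfsnode while holding the exclusive lock ...
 *	ncl_downgrade_vnlock(vp, old_lock);
 *
 * Handing the saved return value back to ncl_downgrade_vnlock() restores
 * whatever lock state the caller originally held.
 */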

void
ncl_printf(const char *fmt, ...)
{
	va_list ap;

	mtx_lock(&Giant);
	va_start(ap, fmt);
	/* vprintf(9) consumes the va_list directly. */
	vprintf(fmt, ap);
	va_end(ap);
	mtx_unlock(&Giant);
}

#ifdef NFS_ACDEBUG
#include <sys/sysctl.h>
SYSCTL_DECL(_vfs_newnfs);
static int nfs_acdebug;
SYSCTL_INT(_vfs_newnfs, OID_AUTO, acdebug, CTLFLAG_RW, &nfs_acdebug, 0, "");
#endif

/*
 * Check the time stamp.
 * If the cache is valid, copy the contents to *vaper and return 0;
 * otherwise return an error.
 */
int
ncl_getattrcache(struct vnode *vp, struct vattr *vaper)
{
	struct nfsnode *np;
	struct vattr *vap;
	struct nfsmount *nmp;
	int timeo;

	np = VTONFS(vp);
	vap = &np->n_vattr.na_vattr;
	nmp = VFSTONFS(vp->v_mount);
#ifdef NFS_ACDEBUG
	mtx_lock(&Giant);	/* ncl_printf() */
#endif
	mtx_lock(&np->n_mtx);
	/* XXX n_mtime doesn't seem to be updated on a miss-and-reload */
	timeo = (time_second - np->n_mtime.tv_sec) / 10;

#ifdef NFS_ACDEBUG
	if (nfs_acdebug > 1)
		ncl_printf("nfs_getattrcache: initial timeo = %d\n", timeo);
#endif

	if (vap->va_type == VDIR) {
		if ((np->n_flag & NMODIFIED) || timeo < nmp->nm_acdirmin)
			timeo = nmp->nm_acdirmin;
		else if (timeo > nmp->nm_acdirmax)
			timeo = nmp->nm_acdirmax;
	} else {
		if ((np->n_flag & NMODIFIED) || timeo < nmp->nm_acregmin)
			timeo = nmp->nm_acregmin;
		else if (timeo > nmp->nm_acregmax)
			timeo = nmp->nm_acregmax;
	}

#ifdef NFS_ACDEBUG
	if (nfs_acdebug > 2)
		ncl_printf("acregmin %d; acregmax %d; acdirmin %d; acdirmax %d\n",
		    nmp->nm_acregmin, nmp->nm_acregmax,
		    nmp->nm_acdirmin, nmp->nm_acdirmax);

	if (nfs_acdebug)
		ncl_printf("nfs_getattrcache: age = %d; final timeo = %d\n",
		    (time_second - np->n_attrstamp), timeo);
#endif

	if ((time_second - np->n_attrstamp) >= timeo) {
		newnfsstats.attrcache_misses++;
		mtx_unlock(&np->n_mtx);
		return (ENOENT);
	}
	newnfsstats.attrcache_hits++;
	if (vap->va_size != np->n_size) {
		if (vap->va_type == VREG) {
			if (np->n_flag & NMODIFIED) {
				if (vap->va_size < np->n_size)
					vap->va_size = np->n_size;
				else
					np->n_size = vap->va_size;
			} else {
				np->n_size = vap->va_size;
			}
			vnode_pager_setsize(vp, np->n_size);
		} else {
			np->n_size = vap->va_size;
		}
	}
	bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(struct vattr));
	if (np->n_flag & NCHG) {
		if (np->n_flag & NACC)
			vaper->va_atime = np->n_atim;
		if (np->n_flag & NUPD)
			vaper->va_mtime = np->n_mtim;
	}
	mtx_unlock(&np->n_mtx);
#ifdef NFS_ACDEBUG
	mtx_unlock(&Giant);	/* ncl_printf() */
#endif
	return (0);
}
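
/*
 * Caller sketch (illustrative only; nfs_dogetattrrpc() below is a
 * hypothetical stand-in for the real over-the-wire getattr path, not a
 * function defined in this file):
 *
 *	if (ncl_getattrcache(vp, &vattr) == 0)
 *		return (0);				(cache still fresh)
 *	error = nfs_dogetattrrpc(vp, &vattr, cred, td);	(cache miss)
 *
 * A zero return means *vaper was filled from the cached attributes; ENOENT
 * means they have aged past the timeout computed above.
 */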

static nfsuint64 nfs_nullcookie = { { 0, 0 } };

/*
 * This function finds the directory cookie that corresponds to the
 * logical byte offset given.
 */
nfsuint64 *
ncl_getcookie(struct nfsnode *np, off_t off, int add)
{
	struct nfsdmap *dp, *dp2;
	int pos;
	nfsuint64 *retval = NULL;

	pos = (uoff_t)off / NFS_DIRBLKSIZ;
	if (pos == 0 || off < 0) {
#ifdef DIAGNOSTIC
		if (add)
			panic("nfs getcookie add at <= 0");
#endif
		return (&nfs_nullcookie);
	}
	pos--;
	dp = LIST_FIRST(&np->n_cookies);
	if (!dp) {
		if (add) {
			MALLOC(dp, struct nfsdmap *, sizeof (struct nfsdmap),
			    M_NFSDIROFF, M_WAITOK);
			dp->ndm_eocookie = 0;
			LIST_INSERT_HEAD(&np->n_cookies, dp, ndm_list);
		} else
			goto out;
	}
	while (pos >= NFSNUMCOOKIES) {
		pos -= NFSNUMCOOKIES;
		if (LIST_NEXT(dp, ndm_list)) {
			if (!add && dp->ndm_eocookie < NFSNUMCOOKIES &&
			    pos >= dp->ndm_eocookie)
				goto out;
			dp = LIST_NEXT(dp, ndm_list);
		} else if (add) {
			MALLOC(dp2, struct nfsdmap *, sizeof (struct nfsdmap),
			    M_NFSDIROFF, M_WAITOK);
			dp2->ndm_eocookie = 0;
			LIST_INSERT_AFTER(dp, dp2, ndm_list);
			dp = dp2;
		} else
			goto out;
	}
	if (pos >= dp->ndm_eocookie) {
		if (add) {
			dp->ndm_eocookie = pos + 1;
		} else
			goto out;
	}
	retval = &dp->ndm_cookies[pos];
out:
	return (retval);
}
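
/*
 * Usage sketch for the readdir path (illustrative only): the cookie that
 * starts a directory block is looked up by that block's logical offset,
 * with add == 0 for plain lookups and add != 0 when a new block is being
 * filled in:
 *
 *	cookiep = ncl_getcookie(np, uiop->uio_offset, 0);
 *	if (cookiep == NULL)
 *		return (EBADF);		(hypothetical error handling)
 *
 * Offsets inside the first NFS_DIRBLKSIZ block always map to the all-zero
 * cookie (nfs_nullcookie) above.
 */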

/*
 * Invalidate cached directory information, except for the actual directory
 * blocks (which are invalidated separately).
 * Done mainly to avoid the use of stale offset cookies.
 */
void
ncl_invaldir(struct vnode *vp)
{
	struct nfsnode *np = VTONFS(vp);

#ifdef DIAGNOSTIC
	if (vp->v_type != VDIR)
		panic("nfs: invaldir not dir");
#endif
	ncl_dircookie_lock(np);
	np->n_direofoffset = 0;
	np->n_cookieverf.nfsuquad[0] = 0;
	np->n_cookieverf.nfsuquad[1] = 0;
	if (LIST_FIRST(&np->n_cookies))
		LIST_FIRST(&np->n_cookies)->ndm_eocookie = 0;
	ncl_dircookie_unlock(np);
}

/*
 * The write verifier has changed (probably due to a server reboot), so all
 * B_NEEDCOMMIT blocks will have to be written again. Since they are on the
 * dirty block list as B_DELWRI, all this takes is clearing the B_NEEDCOMMIT
 * and B_CLUSTEROK flags.  Once done, the new write verifier can be set for
 * the mount point.
 *
 * B_CLUSTEROK must be cleared along with B_NEEDCOMMIT because stage 1 data
 * writes are not clusterable.
 */
void
ncl_clearcommit(struct mount *mp)
{
	struct vnode *vp, *nvp;
	struct buf *bp, *nbp;
	struct bufobj *bo;

	MNT_ILOCK(mp);
	MNT_VNODE_FOREACH(vp, mp, nvp) {
		bo = &vp->v_bufobj;
		VI_LOCK(vp);
		if (vp->v_iflag & VI_DOOMED) {
			VI_UNLOCK(vp);
			continue;
		}
		MNT_IUNLOCK(mp);
		BO_LOCK(bo);
		VI_UNLOCK(vp);
		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
			if (!BUF_ISLOCKED(bp) &&
			    (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
				== (B_DELWRI | B_NEEDCOMMIT))
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
		}
		BO_UNLOCK(bo);
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
}
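
/*
 * Call-site note (an assumption about the surrounding commit machinery, not
 * a reference to code in this file): when a COMMIT or stage 2 write reply
 * carries a verifier that differs from the one previously recorded for the
 * mount, the reply handler is expected to call ncl_clearcommit(mp) and then
 * store the new verifier, forcing the affected B_DELWRI buffers through a
 * full stage 1 write again.
 */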

/*
 * Called once to initialize data structures...
 */
int
ncl_init(struct vfsconf *vfsp)
{
	int i;

	/* Ensure async daemons disabled */
	for (i = 0; i < NFS_MAXRAHEAD; i++) {
		ncl_iodwant[i] = NULL;
		ncl_iodmount[i] = NULL;
	}
	ncl_nhinit();			/* Init the nfsnode table */

	return (0);
}