2 * Copyright (c) 2009 Rick Macklem, University of Guelph
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
32 * These functions implement the client side state handling for NFSv4.
33 * NFSv4 state handling:
34 * - A lockowner is used to determine lock contention, so it
35 * corresponds directly to a Posix pid. (1 to 1 mapping)
36 * - The correct granularity of an OpenOwner is not nearly so
37 * obvious. An OpenOwner does the following:
38 * - provides a serial sequencing of Open/Close/Lock-with-new-lockowner
39 * - is used to check for Open/Share contention (not applicable to
40 * this client, since all Opens are Deny_None)
41 * As such, I considered both extremes.
42 * 1 OpenOwner per ClientID - Simple to manage, but fully serializes
43 * all Open, Close and Lock (with a new lockowner) Ops.
44 * 1 OpenOwner for each Open - This one results in an OpenConfirm for
45 * every Open, for most servers.
46 * So, I chose to use the same mapping as I did for LockOwners.
47 * The main concern here is that you can end up with multiple Opens
48 * for the same File Handle, but on different OpenOwners (opens
49 * inherited from parents, grandparents...) and you do not know
50 * which of these the vnodeop close applies to. This is handled by
51 * delaying the Close Op(s) until all of the Opens have been closed.
52 * (It is not yet obvious if this is the correct granularity.)
53 * - How the code handles serialization:
54 * - For the ClientId, it uses an exclusive lock while getting its
55 * SetClientId and during recovery. Otherwise, it uses a shared
56 * lock via a reference count.
57 * - For the rest of the data structures, it uses an SMP mutex
58 * (once the nfs client is SMP safe) and doesn't sleep while
59 * manipulating the linked lists.
60 * - The serialization of Open/Close/Lock/LockU falls out in the
61 * "wash", since OpenOwners and LockOwners are both mapped from
62 * Posix pid. In other words, there is only one Posix pid using
63 * any given owner, so that owner is serialized. (If you change
64 * the granularity of the OpenOwner, then code must be added to
65 * serialize Ops on the OpenOwner.)
66 * - When to get rid of OpenOwners and LockOwners.
67 * - The function nfscl_cleanup_common() is executed after a process exits.
68 * It goes through the client list looking for all Open and Lock Owners.
69 * When one is found, it is marked "defunct" or in the case of
70 * an OpenOwner without any Opens, freed.
71 * The renew thread scans for defunct Owners and gets rid of them,
72 * if it can. The LockOwners will also be deleted when the
73 * associated Open is closed.
74 * - If the LockU or Close Op(s) fail during close in a way
75 * that could be recovered upon retry, they are relinked to the
76 * ClientId's defunct open list and retried by the renew thread
77 * until they succeed or an unmount/recovery occurs.
78 * (Since we are done with them, they do not need to be recovered.)
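 * A rough sketch of the structure hierarchy that results (illustrative
 * summary only, using the list names found in this file):
 *   nfsclclient (one per clientid)
 *     -> nfsc_owner list of nfsclowner (OpenOwner, one per Posix pid)
 *          -> nfsow_open list of nfsclopen (one per open file handle)
 *               -> nfso_lock list of nfscllockowner (LockOwner)
 *                    -> nfsl_lock list of nfscllock (byte ranges)
 *     -> nfsc_deleg list (and nfsc_deleghash) of nfscldeleg (delegations),
 *        each with its own nfsdl_owner and nfsdl_lock lists for local state.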
82 #include <fs/nfs/nfsport.h>
87 extern struct nfsstatsv1 nfsstatsv1;
88 extern struct nfsreqhead nfsd_reqq;
89 extern u_int32_t newnfs_false, newnfs_true;
90 extern int nfscl_debuglevel;
91 extern int nfscl_enablecallb;
92 extern int nfs_numnfscbd;
96 struct nfsclhead nfsclhead; /* Head of clientid list */
97 int nfscl_deleghighwater = NFSCLDELEGHIGHWATER;
98 int nfscl_layouthighwater = NFSCLLAYOUTHIGHWATER;
99 #endif /* !APPLEKEXT */
101 static int nfscl_delegcnt = 0;
102 static int nfscl_layoutcnt = 0;
103 static int nfscl_getopen(struct nfsclownerhead *, u_int8_t *, int, u_int8_t *,
104 u_int8_t *, u_int32_t, struct nfscllockowner **, struct nfsclopen **);
105 static void nfscl_clrelease(struct nfsclclient *);
106 static void nfscl_cleanclient(struct nfsclclient *);
107 static void nfscl_expireclient(struct nfsclclient *, struct nfsmount *,
108 struct ucred *, NFSPROC_T *);
109 static int nfscl_expireopen(struct nfsclclient *, struct nfsclopen *,
110 struct nfsmount *, struct ucred *, NFSPROC_T *);
111 static void nfscl_recover(struct nfsclclient *, struct ucred *, NFSPROC_T *);
112 static void nfscl_insertlock(struct nfscllockowner *, struct nfscllock *,
113 struct nfscllock *, int);
114 static int nfscl_updatelock(struct nfscllockowner *, struct nfscllock **,
115 struct nfscllock **, int);
116 static void nfscl_delegreturnall(struct nfsclclient *, NFSPROC_T *);
117 static u_int32_t nfscl_nextcbident(void);
118 static mount_t nfscl_getmnt(int, uint8_t *, u_int32_t, struct nfsclclient **);
119 static struct nfsclclient *nfscl_getclnt(u_int32_t);
120 static struct nfsclclient *nfscl_getclntsess(uint8_t *);
121 static struct nfscldeleg *nfscl_finddeleg(struct nfsclclient *, u_int8_t *,
123 static void nfscl_retoncloselayout(vnode_t, struct nfsclclient *, uint8_t *,
124 int, struct nfsclrecalllayout **);
125 static void nfscl_reldevinfo_locked(struct nfscldevinfo *);
126 static struct nfscllayout *nfscl_findlayout(struct nfsclclient *, u_int8_t *,
128 static struct nfscldevinfo *nfscl_finddevinfo(struct nfsclclient *, uint8_t *);
129 static int nfscl_checkconflict(struct nfscllockownerhead *, struct nfscllock *,
130 u_int8_t *, struct nfscllock **);
131 static void nfscl_freealllocks(struct nfscllockownerhead *, int);
132 static int nfscl_localconflict(struct nfsclclient *, u_int8_t *, int,
133 struct nfscllock *, u_int8_t *, struct nfscldeleg *, struct nfscllock **);
134 static void nfscl_newopen(struct nfsclclient *, struct nfscldeleg *,
135 struct nfsclowner **, struct nfsclowner **, struct nfsclopen **,
136 struct nfsclopen **, u_int8_t *, u_int8_t *, int, struct ucred *, int *);
137 static int nfscl_moveopen(vnode_t , struct nfsclclient *,
138 struct nfsmount *, struct nfsclopen *, struct nfsclowner *,
139 struct nfscldeleg *, struct ucred *, NFSPROC_T *);
140 static void nfscl_totalrecall(struct nfsclclient *);
141 static int nfscl_relock(vnode_t , struct nfsclclient *, struct nfsmount *,
142 struct nfscllockowner *, struct nfscllock *, struct ucred *, NFSPROC_T *);
143 static int nfscl_tryopen(struct nfsmount *, vnode_t , u_int8_t *, int,
144 u_int8_t *, int, u_int32_t, struct nfsclopen *, u_int8_t *, int,
145 struct nfscldeleg **, int, u_int32_t, struct ucred *, NFSPROC_T *);
146 static int nfscl_trylock(struct nfsmount *, vnode_t , u_int8_t *,
147 int, struct nfscllockowner *, int, int, u_int64_t, u_int64_t, short,
148 struct ucred *, NFSPROC_T *);
149 static int nfsrpc_reopen(struct nfsmount *, u_int8_t *, int, u_int32_t,
150 struct nfsclopen *, struct nfscldeleg **, struct ucred *, NFSPROC_T *);
151 static void nfscl_freedeleg(struct nfscldeleghead *, struct nfscldeleg *);
152 static int nfscl_errmap(struct nfsrv_descript *, u_int32_t);
153 static void nfscl_cleanup_common(struct nfsclclient *, u_int8_t *);
154 static int nfscl_recalldeleg(struct nfsclclient *, struct nfsmount *,
155 struct nfscldeleg *, vnode_t, struct ucred *, NFSPROC_T *, int);
156 static void nfscl_freeopenowner(struct nfsclowner *, int);
157 static void nfscl_cleandeleg(struct nfscldeleg *);
158 static int nfscl_trydelegreturn(struct nfscldeleg *, struct ucred *,
159 struct nfsmount *, NFSPROC_T *);
160 static void nfscl_emptylockowner(struct nfscllockowner *,
161 struct nfscllockownerfhhead *);
162 static void nfscl_mergeflayouts(struct nfsclflayouthead *,
163 struct nfsclflayouthead *);
164 static int nfscl_layoutrecall(int, struct nfscllayout *, uint32_t, uint64_t,
165 uint64_t, uint32_t, struct nfsclrecalllayout *);
166 static int nfscl_seq(uint32_t, uint32_t);
167 static void nfscl_layoutreturn(struct nfsmount *, struct nfscllayout *,
168 struct ucred *, NFSPROC_T *);
169 static void nfscl_dolayoutcommit(struct nfsmount *, struct nfscllayout *,
170 struct ucred *, NFSPROC_T *);
172 static short nfscberr_null[] = {
177 static short nfscberr_getattr[] = {
186 static short nfscberr_recall[] = {
196 static short *nfscl_cberrmap[] = {
204 #define NETFAMILY(clp) \
205 (((clp)->nfsc_flags & NFSCLFLAGS_AFINET6) ? AF_INET6 : AF_INET)
208 * Called for an open operation.
209 * If the nfhp argument is NULL, just get an openowner.
212 nfscl_open(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t amode, int usedeleg,
213 struct ucred *cred, NFSPROC_T *p, struct nfsclowner **owpp,
214 struct nfsclopen **opp, int *newonep, int *retp, int lockit)
216 struct nfsclclient *clp;
217 struct nfsclowner *owp, *nowp;
218 struct nfsclopen *op = NULL, *nop = NULL;
219 struct nfscldeleg *dp;
220 struct nfsclownerhead *ohp;
221 u_int8_t own[NFSV4CL_LOCKNAMELEN];
232 * Might need one or both of these, so MALLOC them now, to
233 * avoid a tsleep() in MALLOC later.
235 MALLOC(nowp, struct nfsclowner *, sizeof (struct nfsclowner),
236 M_NFSCLOWNER, M_WAITOK);
238 MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) +
239 fhlen - 1, M_NFSCLOPEN, M_WAITOK);
240 ret = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp);
242 FREE((caddr_t)nowp, M_NFSCLOWNER);
244 FREE((caddr_t)nop, M_NFSCLOPEN);
249 * Get the Open iff it already exists.
250 * If none found, add the new one or return error, depending upon
255 /* First check the delegation list */
256 if (nfhp != NULL && usedeleg) {
257 LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
258 if (dp->nfsdl_fhlen == fhlen &&
259 !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
260 if (!(amode & NFSV4OPEN_ACCESSWRITE) ||
261 (dp->nfsdl_flags & NFSCLDL_WRITE))
270 nfscl_filllockowner(p->td_proc, own, F_POSIX);
271 ohp = &dp->nfsdl_owner;
273 /* For NFSv4.1 and this option, use a single open_owner. */
274 if (NFSHASONEOPENOWN(VFSTONFS(vnode_mount(vp))))
275 nfscl_filllockowner(NULL, own, F_POSIX);
277 nfscl_filllockowner(p->td_proc, own, F_POSIX);
278 ohp = &clp->nfsc_owner;
280 /* Now, search for an openowner */
281 LIST_FOREACH(owp, ohp, nfsow_list) {
282 if (!NFSBCMP(owp->nfsow_owner, own, NFSV4CL_LOCKNAMELEN))
287 * Create a new open, as required.
289 nfscl_newopen(clp, dp, &owp, &nowp, &op, &nop, own, nfhp, fhlen,
293 * Now, check the mode on the open and return the appropriate value.
297 if (nfhp != NULL && dp != NULL && nop == NULL)
298 /* new local open on delegation */
299 *retp = NFSCLOPEN_SETCRED;
301 *retp = NFSCLOPEN_OK;
303 if (op != NULL && (amode & ~(op->nfso_mode))) {
304 op->nfso_mode |= amode;
305 if (retp != NULL && dp == NULL)
306 *retp = NFSCLOPEN_DOOPEN;
310 * Serialize modifications to the open owner for multiple threads
311 * within the same process using a read/write sleep lock.
312 * For NFSv4.1 and a single OpenOwner, allow concurrent open operations
313 * by acquiring a shared lock. The close operations still use an
314 * exclusive lock for this case.
317 if (NFSHASONEOPENOWN(VFSTONFS(vnode_mount(vp)))) {
319 * Get a shared lock on the OpenOwner, but first
320 * wait for any pending exclusive lock, so that the
321 * exclusive locker gets priority.
323 nfsv4_lock(&owp->nfsow_rwlock, 0, NULL,
324 NFSCLSTATEMUTEXPTR, NULL);
325 nfsv4_getref(&owp->nfsow_rwlock, NULL,
326 NFSCLSTATEMUTEXPTR, NULL);
328 nfscl_lockexcl(&owp->nfsow_rwlock, NFSCLSTATEMUTEXPTR);
332 FREE((caddr_t)nowp, M_NFSCLOWNER);
334 FREE((caddr_t)nop, M_NFSCLOPEN);
343 * Create a new open, as required.
346 nfscl_newopen(struct nfsclclient *clp, struct nfscldeleg *dp,
347 struct nfsclowner **owpp, struct nfsclowner **nowpp, struct nfsclopen **opp,
348 struct nfsclopen **nopp, u_int8_t *own, u_int8_t *fhp, int fhlen,
349 struct ucred *cred, int *newonep)
351 struct nfsclowner *owp = *owpp, *nowp;
352 struct nfsclopen *op, *nop;
362 if (owp == NULL && nowp != NULL) {
363 NFSBCOPY(own, nowp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
364 LIST_INIT(&nowp->nfsow_open);
365 nowp->nfsow_clp = clp;
366 nowp->nfsow_seqid = 0;
367 nowp->nfsow_defunct = 0;
368 nfscl_lockinit(&nowp->nfsow_rwlock);
370 nfsstatsv1.cllocalopenowners++;
371 LIST_INSERT_HEAD(&dp->nfsdl_owner, nowp, nfsow_list);
373 nfsstatsv1.clopenowners++;
374 LIST_INSERT_HEAD(&clp->nfsc_owner, nowp, nfsow_list);
382 /* If an fhp has been specified, create an Open as well. */
384 /* and look for the correct open, based upon FH */
385 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
386 if (op->nfso_fhlen == fhlen &&
387 !NFSBCMP(op->nfso_fh, fhp, fhlen))
390 if (op == NULL && nop != NULL) {
393 nop->nfso_opencnt = 0;
394 nop->nfso_posixlock = 1;
395 nop->nfso_fhlen = fhlen;
396 NFSBCOPY(fhp, nop->nfso_fh, fhlen);
397 LIST_INIT(&nop->nfso_lock);
398 nop->nfso_stateid.seqid = 0;
399 nop->nfso_stateid.other[0] = 0;
400 nop->nfso_stateid.other[1] = 0;
401 nop->nfso_stateid.other[2] = 0;
402 KASSERT(cred != NULL, ("%s: cred NULL\n", __func__));
403 newnfs_copyincred(cred, &nop->nfso_cred);
405 TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
406 TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
408 dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
409 nfsstatsv1.cllocalopens++;
411 nfsstatsv1.clopens++;
413 LIST_INSERT_HEAD(&owp->nfsow_open, nop, nfso_list);
425 * Called to find/add a delegation to a client.
428 nfscl_deleg(mount_t mp, struct nfsclclient *clp, u_int8_t *nfhp,
429 int fhlen, struct ucred *cred, NFSPROC_T *p, struct nfscldeleg **dpp)
431 struct nfscldeleg *dp = *dpp, *tdp;
434 * First, if we have received a Read delegation for a file on a
435 * read/write file system, just return it, because they aren't supported.
438 if (mp != NULL && dp != NULL && !NFSMNT_RDONLY(mp) &&
439 (dp->nfsdl_flags & NFSCLDL_READ)) {
440 (void) nfscl_trydelegreturn(dp, cred, VFSTONFS(mp), p);
441 FREE((caddr_t)dp, M_NFSCLDELEG);
446 /* Look for the correct deleg, based upon FH */
448 tdp = nfscl_finddeleg(clp, nfhp, fhlen);
452 return (NFSERR_BADSTATEID);
455 TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
456 LIST_INSERT_HEAD(NFSCLDELEGHASH(clp, nfhp, fhlen), dp,
458 dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
459 nfsstatsv1.cldelegates++;
463 * Delegation already exists. What should be done if a new one is received?
466 printf("Deleg already exists!\n");
467 FREE((caddr_t)dp, M_NFSCLDELEG);
478 * Find a delegation for this file handle. Return NULL upon failure.
480 static struct nfscldeleg *
481 nfscl_finddeleg(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
483 struct nfscldeleg *dp;
485 LIST_FOREACH(dp, NFSCLDELEGHASH(clp, fhp, fhlen), nfsdl_hash) {
486 if (dp->nfsdl_fhlen == fhlen &&
487 !NFSBCMP(dp->nfsdl_fh, fhp, fhlen))
494 * Get a stateid for an I/O operation. First, look for an open and iff
495 * found, return either a lockowner stateid or the open stateid.
496 * If no Open is found, just return error and the special stateid of all zeros.
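 * (Summary of the preference order implemented below: a delegation stateid
 * is used first, then a matching lockowner's stateid unless the I/O is for
 * a DS, then the open stateid; the all-zeros stateid is only the initial
 * fallback.)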
499 nfscl_getstateid(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t mode,
500 int fords, struct ucred *cred, NFSPROC_T *p, nfsv4stateid_t *stateidp,
503 struct nfsclclient *clp;
504 struct nfsclowner *owp;
505 struct nfsclopen *op = NULL, *top;
506 struct nfscllockowner *lp;
507 struct nfscldeleg *dp;
509 struct nfsmount *nmp;
510 u_int8_t own[NFSV4CL_LOCKNAMELEN];
515 * Initially, just set the special stateid of all zeros.
516 * (Don't do this for a DS, since the special stateid can't be used.)
520 stateidp->other[0] = 0;
521 stateidp->other[1] = 0;
522 stateidp->other[2] = 0;
524 if (vnode_vtype(vp) != VREG)
527 nmp = VFSTONFS(vnode_mount(vp));
529 clp = nfscl_findcl(nmp);
536 * Wait for recovery to complete.
538 while ((clp->nfsc_flags & NFSCLFLAGS_RECVRINPROG))
539 (void) nfsmsleep(&clp->nfsc_flags, NFSCLSTATEMUTEXPTR,
540 PZERO, "nfsrecvr", NULL);
543 * First, look for a delegation.
545 LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
546 if (dp->nfsdl_fhlen == fhlen &&
547 !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
548 if (!(mode & NFSV4OPEN_ACCESSWRITE) ||
549 (dp->nfsdl_flags & NFSCLDL_WRITE)) {
550 stateidp->seqid = dp->nfsdl_stateid.seqid;
551 stateidp->other[0] = dp->nfsdl_stateid.other[0];
552 stateidp->other[1] = dp->nfsdl_stateid.other[1];
553 stateidp->other[2] = dp->nfsdl_stateid.other[2];
554 if (!(np->n_flag & NDELEGRECALL)) {
555 TAILQ_REMOVE(&clp->nfsc_deleg, dp,
557 TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
559 dp->nfsdl_timestamp = NFSD_MONOSEC +
561 dp->nfsdl_rwlock.nfslock_usecnt++;
562 *lckpp = (void *)&dp->nfsdl_rwlock;
573 * If p != NULL, we want to search the parentage tree
574 * for a matching OpenOwner and use that.
576 if (NFSHASONEOPENOWN(VFSTONFS(vnode_mount(vp))))
577 nfscl_filllockowner(NULL, own, F_POSIX);
579 nfscl_filllockowner(p->td_proc, own, F_POSIX);
581 error = nfscl_getopen(&clp->nfsc_owner, nfhp, fhlen, own, own,
583 if (error == 0 && lp != NULL && fords == 0) {
584 /* Don't return a lock stateid for a DS. */
586 lp->nfsl_stateid.seqid;
588 lp->nfsl_stateid.other[0];
590 lp->nfsl_stateid.other[1];
592 lp->nfsl_stateid.other[2];
598 /* If not found, just look for any OpenOwner that will work. */
601 owp = LIST_FIRST(&clp->nfsc_owner);
602 while (!done && owp != NULL) {
603 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
604 if (op->nfso_fhlen == fhlen &&
605 !NFSBCMP(op->nfso_fh, nfhp, fhlen)) {
606 if (top == NULL && (op->nfso_mode &
607 NFSV4OPEN_ACCESSWRITE) != 0 &&
608 (mode & NFSV4OPEN_ACCESSREAD) != 0)
610 if ((mode & op->nfso_mode) == mode) {
617 owp = LIST_NEXT(owp, nfsow_list);
620 NFSCL_DEBUG(2, "openmode top=%p\n", top);
621 if (top == NULL || NFSHASOPENMODE(nmp)) {
628 * For read aheads or write behinds, use the open cred.
629 * A read ahead or write behind is indicated by p == NULL.
632 newnfs_copycred(&op->nfso_cred, cred);
636 * No lock stateid, so return the open stateid.
638 stateidp->seqid = op->nfso_stateid.seqid;
639 stateidp->other[0] = op->nfso_stateid.other[0];
640 stateidp->other[1] = op->nfso_stateid.other[1];
641 stateidp->other[2] = op->nfso_stateid.other[2];
647 * Search for a matching file, mode and, optionally, lockowner.
650 nfscl_getopen(struct nfsclownerhead *ohp, u_int8_t *nfhp, int fhlen,
651 u_int8_t *openown, u_int8_t *lockown, u_int32_t mode,
652 struct nfscllockowner **lpp, struct nfsclopen **opp)
654 struct nfsclowner *owp;
655 struct nfsclopen *op, *rop, *rop2;
656 struct nfscllockowner *lp;
662 * rop will be set to the open to be returned. There are three
663 * variants of this, all for an open of the correct file:
664 * 1 - A match of lockown.
665 * 2 - A match of the openown, when no lockown match exists.
666 * 3 - A match for any open, if no openown or lockown match exists.
667 * Looking for #2 over #3 probably isn't necessary, but since
668 * RFC3530 is vague w.r.t. the relationship between openowners and
669 * lockowners, I think this is the safer way to go.
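 * (For example, a lock request from a child process operating on a file
 * descriptor inherited from its parent may match neither lockown nor
 * openown, so variant 3 falls back to the parent's open.)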
674 /* Search the client list */
675 owp = LIST_FIRST(ohp);
676 while (owp != NULL && keep_looping != 0) {
677 /* and look for the correct open */
678 op = LIST_FIRST(&owp->nfsow_open);
679 while (op != NULL && keep_looping != 0) {
680 if (op->nfso_fhlen == fhlen &&
681 !NFSBCMP(op->nfso_fh, nfhp, fhlen)
682 && (op->nfso_mode & mode) == mode) {
684 /* Now look for a matching lockowner. */
685 LIST_FOREACH(lp, &op->nfso_lock,
687 if (!NFSBCMP(lp->nfsl_owner,
689 NFSV4CL_LOCKNAMELEN)) {
697 if (rop == NULL && !NFSBCMP(owp->nfsow_owner,
698 openown, NFSV4CL_LOCKNAMELEN)) {
706 op = LIST_NEXT(op, nfso_list);
708 owp = LIST_NEXT(owp, nfsow_list);
719 * Release use of an open owner. Called when open operations are done
720 * with the open owner.
723 nfscl_ownerrelease(struct nfsmount *nmp, struct nfsclowner *owp,
724 __unused int error, __unused int candelete, int unlocked)
731 if (NFSHASONEOPENOWN(nmp))
732 nfsv4_relref(&owp->nfsow_rwlock);
734 nfscl_lockunlock(&owp->nfsow_rwlock);
736 nfscl_clrelease(owp->nfsow_clp);
741 * Release use of an open structure under an open owner.
744 nfscl_openrelease(struct nfsmount *nmp, struct nfsclopen *op, int error,
747 struct nfsclclient *clp;
748 struct nfsclowner *owp;
754 if (NFSHASONEOPENOWN(nmp))
755 nfsv4_relref(&owp->nfsow_rwlock);
757 nfscl_lockunlock(&owp->nfsow_rwlock);
758 clp = owp->nfsow_clp;
759 if (error && candelete && op->nfso_opencnt == 0)
760 nfscl_freeopen(op, 0);
761 nfscl_clrelease(clp);
766 * Called to get a clientid structure. It will optionally lock the
767 * client data structures to do the SetClientId/SetClientId_confirm,
768 * but will release that lock and return the clientid with a reference count on it.
770 * If the "cred" argument is NULL, a new clientid should not be created.
771 * If the "p" argument is NULL, a SetClientID/SetClientIDConfirm cannot be done.
773 * The start_renewthread argument tells nfscl_getcl() to start a renew
774 * thread if this creates a new clp.
775 * It always returns clpp with a reference count on it, unless returning an error.
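 * (Typical use, as in nfscl_open() above:
 *      ret = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp);
 *  with the reference later dropped via nfscl_clrelease() once the state
 *  manipulation is done.)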
778 nfscl_getcl(struct mount *mp, struct ucred *cred, NFSPROC_T *p,
779 int start_renewthread, struct nfsclclient **clpp)
781 struct nfsclclient *clp;
782 struct nfsclclient *newclp = NULL;
783 struct nfsmount *nmp;
784 char uuid[HOSTUUIDLEN];
785 int igotlock = 0, error, trystalecnt, clidinusedelay, i;
790 getcredhostuuid(cred, uuid, sizeof uuid);
791 idlen = strlen(uuid);
793 idlen += sizeof (u_int64_t);
795 idlen += sizeof (u_int64_t) + 16; /* 16 random bytes */
796 MALLOC(newclp, struct nfsclclient *,
797 sizeof (struct nfsclclient) + idlen - 1, M_NFSCLCLIENT,
802 * If a forced dismount is already in progress, don't
803 * allocate a new clientid and get out now. For the case where
804 * clp != NULL, this is a harmless optimization.
806 if (NFSCL_FORCEDISM(mp)) {
809 free(newclp, M_NFSCLCLIENT);
814 if (newclp == NULL) {
819 clp->nfsc_idlen = idlen;
820 LIST_INIT(&clp->nfsc_owner);
821 TAILQ_INIT(&clp->nfsc_deleg);
822 TAILQ_INIT(&clp->nfsc_layout);
823 LIST_INIT(&clp->nfsc_devinfo);
824 for (i = 0; i < NFSCLDELEGHASHSIZE; i++)
825 LIST_INIT(&clp->nfsc_deleghash[i]);
826 for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++)
827 LIST_INIT(&clp->nfsc_layouthash[i]);
828 clp->nfsc_flags = NFSCLFLAGS_INITED;
829 clp->nfsc_clientidrev = 1;
830 clp->nfsc_cbident = nfscl_nextcbident();
831 nfscl_fillclid(nmp->nm_clval, uuid, clp->nfsc_id,
833 LIST_INSERT_HEAD(&nfsclhead, clp, nfsc_list);
837 if (start_renewthread != 0)
838 nfscl_start_renewthread(clp);
842 free(newclp, M_NFSCLCLIENT);
845 while ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0 && !igotlock &&
846 !NFSCL_FORCEDISM(mp))
847 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
848 NFSCLSTATEMUTEXPTR, mp);
851 * Call nfsv4_lock() with "iwantlock == 0" so that it will
852 * wait for a pending exclusive lock request. This gives the
853 * exclusive lock request priority over this shared lock request.
855 * An exclusive lock on nfsc_lock is used mainly for server crash recoveries.
858 nfsv4_lock(&clp->nfsc_lock, 0, NULL, NFSCLSTATEMUTEXPTR, mp);
859 nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
861 if (igotlock == 0 && NFSCL_FORCEDISM(mp)) {
863 * Both nfsv4_lock() and nfsv4_getref() know to check
864 * for NFSCL_FORCEDISM() and return without sleeping to
865 * wait for the exclusive lock to be released, since it
866 * might be held by nfscl_umount() and we need to get out
867 * now for that case and not wait until nfscl_umount()
876 * If it needs a clientid, do the setclientid now.
878 if ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0) {
880 panic("nfscl_clget");
881 if (p == NULL || cred == NULL) {
883 nfsv4_unlock(&clp->nfsc_lock, 0);
888 * If RFC3530 Sec. 14.2.33 is taken literally,
889 * NFSERR_CLIDINUSE will be returned persistently for the
890 * case where a new mount of the same file system is using
891 * a different principal. In practice, NFSERR_CLIDINUSE is
892 * only returned when there is outstanding unexpired state
893 * on the clientid. As such, try for twice the lease
894 * interval, if we know what that is. Otherwise, make a
896 * The case of returning NFSERR_STALECLIENTID is far less
897 * likely, but might occur if there is a significant delay
898 * between doing the SetClientID and SetClientIDConfirm Ops,
899 * such that the server throws away the clientid before
900 * receiving the SetClientIDConfirm.
902 if (clp->nfsc_renew > 0)
903 clidinusedelay = NFSCL_LEASE(clp->nfsc_renew) * 2;
905 clidinusedelay = 120;
908 error = nfsrpc_setclient(nmp, clp, 0, cred, p);
909 if (error == NFSERR_STALECLIENTID ||
910 error == NFSERR_STALEDONTRECOVER ||
911 error == NFSERR_BADSESSION ||
912 error == NFSERR_CLIDINUSE) {
913 (void) nfs_catnap(PZERO, error, "nfs_setcl");
915 } while (((error == NFSERR_STALECLIENTID ||
916 error == NFSERR_BADSESSION ||
917 error == NFSERR_STALEDONTRECOVER) && --trystalecnt > 0) ||
918 (error == NFSERR_CLIDINUSE && --clidinusedelay > 0));
921 nfsv4_unlock(&clp->nfsc_lock, 0);
925 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
929 nfsv4_unlock(&clp->nfsc_lock, 1);
938 * Get a reference to a clientid and return it, if valid.
940 APPLESTATIC struct nfsclclient *
941 nfscl_findcl(struct nfsmount *nmp)
943 struct nfsclclient *clp;
946 if (clp == NULL || !(clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID))
952 * Release the clientid structure. It may be locked or reference counted.
955 nfscl_clrelease(struct nfsclclient *clp)
958 if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
959 nfsv4_unlock(&clp->nfsc_lock, 0);
961 nfsv4_relref(&clp->nfsc_lock);
965 * External call for nfscl_clrelease.
968 nfscl_clientrelease(struct nfsclclient *clp)
972 if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
973 nfsv4_unlock(&clp->nfsc_lock, 0);
975 nfsv4_relref(&clp->nfsc_lock);
980 * Called when wanting to lock a byte region.
983 nfscl_getbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
984 short type, struct ucred *cred, NFSPROC_T *p, struct nfsclclient *rclp,
985 int recovery, void *id, int flags, u_int8_t *rownp, u_int8_t *ropenownp,
986 struct nfscllockowner **lpp, int *newonep, int *donelocallyp)
988 struct nfscllockowner *lp;
989 struct nfsclopen *op;
990 struct nfsclclient *clp;
991 struct nfscllockowner *nlp;
992 struct nfscllock *nlop, *otherlop;
993 struct nfscldeleg *dp = NULL, *ldp = NULL;
994 struct nfscllockownerhead *lhp = NULL;
996 u_int8_t own[NFSV4CL_LOCKNAMELEN], *ownp, openown[NFSV4CL_LOCKNAMELEN];
998 int error = 0, ret, donelocally = 0;
1001 /* For Lock Ops, the open mode doesn't matter, so use 0 to match any. */
1010 * Might need these, so MALLOC them now, to
1011 * avoid a tsleep() in MALLOC later.
1013 MALLOC(nlp, struct nfscllockowner *,
1014 sizeof (struct nfscllockowner), M_NFSCLLOCKOWNER, M_WAITOK);
1015 MALLOC(otherlop, struct nfscllock *,
1016 sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
1017 MALLOC(nlop, struct nfscllock *,
1018 sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
1019 nlop->nfslo_type = type;
1020 nlop->nfslo_first = off;
1021 if (len == NFS64BITSSET) {
1022 nlop->nfslo_end = NFS64BITSSET;
1024 nlop->nfslo_end = off + len;
1025 if (nlop->nfslo_end <= nlop->nfslo_first)
1026 error = NFSERR_INVAL;
1033 error = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp);
1036 FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
1037 FREE((caddr_t)otherlop, M_NFSCLLOCK);
1038 FREE((caddr_t)nlop, M_NFSCLLOCK);
1045 openownp = ropenownp;
1047 nfscl_filllockowner(id, own, flags);
1049 if (NFSHASONEOPENOWN(VFSTONFS(vnode_mount(vp))))
1050 nfscl_filllockowner(NULL, openown, F_POSIX);
1052 nfscl_filllockowner(p->td_proc, openown, F_POSIX);
1058 * First, search for a delegation. If one exists for this file,
1059 * the lock can be done locally against it, so long as there
1060 * isn't a local lock conflict.
1062 ldp = dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
1063 np->n_fhp->nfh_len);
1064 /* Just sanity check for correct type of delegation */
1065 if (dp != NULL && ((dp->nfsdl_flags &
1066 (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) != 0 ||
1068 (dp->nfsdl_flags & NFSCLDL_WRITE) == 0)))
1072 /* Now, find an open and maybe a lockowner. */
1073 ret = nfscl_getopen(&dp->nfsdl_owner, np->n_fhp->nfh_fh,
1074 np->n_fhp->nfh_len, openownp, ownp, mode, NULL, &op);
1076 ret = nfscl_getopen(&clp->nfsc_owner,
1077 np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
1078 ownp, mode, NULL, &op);
1080 lhp = &dp->nfsdl_lock;
1081 TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
1082 TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
1083 dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
1091 * Get the related Open and maybe lockowner.
1093 error = nfscl_getopen(&clp->nfsc_owner,
1094 np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
1095 ownp, mode, &lp, &op);
1097 lhp = &op->nfso_lock;
1099 if (!error && !recovery)
1100 error = nfscl_localconflict(clp, np->n_fhp->nfh_fh,
1101 np->n_fhp->nfh_len, nlop, ownp, ldp, NULL);
1104 nfscl_clrelease(clp);
1107 FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
1108 FREE((caddr_t)otherlop, M_NFSCLLOCK);
1109 FREE((caddr_t)nlop, M_NFSCLLOCK);
1114 * Ok, see if a lockowner exists and create one, as required.
1117 LIST_FOREACH(lp, lhp, nfsl_list) {
1118 if (!NFSBCMP(lp->nfsl_owner, ownp, NFSV4CL_LOCKNAMELEN))
1122 NFSBCOPY(ownp, nlp->nfsl_owner, NFSV4CL_LOCKNAMELEN);
1124 NFSBCOPY(ropenownp, nlp->nfsl_openowner,
1125 NFSV4CL_LOCKNAMELEN);
1127 NFSBCOPY(op->nfso_own->nfsow_owner, nlp->nfsl_openowner,
1128 NFSV4CL_LOCKNAMELEN);
1129 nlp->nfsl_seqid = 0;
1130 nlp->nfsl_lockflags = flags;
1131 nlp->nfsl_inprog = NULL;
1132 nfscl_lockinit(&nlp->nfsl_rwlock);
1133 LIST_INIT(&nlp->nfsl_lock);
1135 nlp->nfsl_open = NULL;
1136 nfsstatsv1.cllocallockowners++;
1138 nlp->nfsl_open = op;
1139 nfsstatsv1.cllockowners++;
1141 LIST_INSERT_HEAD(lhp, nlp, nfsl_list);
1148 * Now, update the byte ranges for locks.
1150 ret = nfscl_updatelock(lp, &nlop, &otherlop, donelocally);
1156 nfscl_clrelease(clp);
1159 * Serialize modifications to the lock owner for multiple threads
1160 * within the same process using a read/write lock.
1163 nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
1169 FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
1171 FREE((caddr_t)nlop, M_NFSCLLOCK);
1173 FREE((caddr_t)otherlop, M_NFSCLLOCK);
1180 * Called to unlock a byte range, for LockU.
1183 nfscl_relbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
1184 __unused struct ucred *cred, NFSPROC_T *p, int callcnt,
1185 struct nfsclclient *clp, void *id, int flags,
1186 struct nfscllockowner **lpp, int *dorpcp)
1188 struct nfscllockowner *lp;
1189 struct nfsclowner *owp;
1190 struct nfsclopen *op;
1191 struct nfscllock *nlop, *other_lop = NULL;
1192 struct nfscldeleg *dp;
1194 u_int8_t own[NFSV4CL_LOCKNAMELEN];
1202 * Might need these, so MALLOC them now, to
1203 * avoid a tsleep() in MALLOC later.
1205 MALLOC(nlop, struct nfscllock *,
1206 sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
1207 nlop->nfslo_type = F_UNLCK;
1208 nlop->nfslo_first = off;
1209 if (len == NFS64BITSSET) {
1210 nlop->nfslo_end = NFS64BITSSET;
1212 nlop->nfslo_end = off + len;
1213 if (nlop->nfslo_end <= nlop->nfslo_first) {
1214 FREE((caddr_t)nlop, M_NFSCLLOCK);
1215 return (NFSERR_INVAL);
1219 MALLOC(other_lop, struct nfscllock *,
1220 sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
1223 nfscl_filllockowner(id, own, flags);
1227 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
1228 np->n_fhp->nfh_len);
1231 * First, unlock any local regions on a delegation.
1234 /* Look for this lockowner. */
1235 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
1236 if (!NFSBCMP(lp->nfsl_owner, own,
1237 NFSV4CL_LOCKNAMELEN))
1241 /* Use other_lop, so nlop is still available */
1242 (void)nfscl_updatelock(lp, &other_lop, NULL, 1);
1246 * Now, find a matching open/lockowner that hasn't already been done,
1247 * as marked by nfsl_inprog.
1251 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
1252 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1253 if (op->nfso_fhlen == np->n_fhp->nfh_len &&
1254 !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
1255 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1256 if (lp->nfsl_inprog == NULL &&
1257 !NFSBCMP(lp->nfsl_owner, own,
1258 NFSV4CL_LOCKNAMELEN)) {
1272 ret = nfscl_updatelock(lp, &nlop, NULL, 0);
1276 * Serialize modifications to the lock owner for multiple
1277 * threads within the same process using a read/write lock.
1279 lp->nfsl_inprog = p;
1280 nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
1285 FREE((caddr_t)nlop, M_NFSCLLOCK);
1287 FREE((caddr_t)other_lop, M_NFSCLLOCK);
1292 * Release all lockowners marked in progress for this process and file.
1295 nfscl_releasealllocks(struct nfsclclient *clp, vnode_t vp, NFSPROC_T *p,
1296 void *id, int flags)
1298 struct nfsclowner *owp;
1299 struct nfsclopen *op;
1300 struct nfscllockowner *lp;
1302 u_int8_t own[NFSV4CL_LOCKNAMELEN];
1305 nfscl_filllockowner(id, own, flags);
1307 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
1308 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1309 if (op->nfso_fhlen == np->n_fhp->nfh_len &&
1310 !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
1311 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1312 if (lp->nfsl_inprog == p &&
1313 !NFSBCMP(lp->nfsl_owner, own,
1314 NFSV4CL_LOCKNAMELEN)) {
1315 lp->nfsl_inprog = NULL;
1316 nfscl_lockunlock(&lp->nfsl_rwlock);
1322 nfscl_clrelease(clp);
1327 * Called to find out if any bytes within the byte range specified are
1328 * write locked by the calling process. Used to determine if flushing
1329 * is required before a LockU.
1330 * If in doubt, return 1, so the flush will occur.
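 * (For example, fl->l_whence == SEEK_SET, fl->l_start == 0 and fl->l_len == 0
 * checks the entire file, since a zero l_len means "to end of file" in Posix
 * byte range lock semantics.)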
1333 nfscl_checkwritelocked(vnode_t vp, struct flock *fl,
1334 struct ucred *cred, NFSPROC_T *p, void *id, int flags)
1336 struct nfsclowner *owp;
1337 struct nfscllockowner *lp;
1338 struct nfsclopen *op;
1339 struct nfsclclient *clp;
1340 struct nfscllock *lop;
1341 struct nfscldeleg *dp;
1344 u_int8_t own[NFSV4CL_LOCKNAMELEN];
1348 switch (fl->l_whence) {
1352 * Caller is responsible for adding any necessary offset
1353 * when SEEK_CUR is used.
1358 off = np->n_size + fl->l_start;
1363 if (fl->l_len != 0) {
1364 end = off + fl->l_len;
1371 error = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp);
1374 nfscl_filllockowner(id, own, flags);
1378 * First check the delegation locks.
1380 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
1382 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
1383 if (!NFSBCMP(lp->nfsl_owner, own,
1384 NFSV4CL_LOCKNAMELEN))
1388 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
1389 if (lop->nfslo_first >= end)
1391 if (lop->nfslo_end <= off)
1393 if (lop->nfslo_type == F_WRLCK) {
1394 nfscl_clrelease(clp);
1403 * Now, check state against the server.
1405 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
1406 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1407 if (op->nfso_fhlen == np->n_fhp->nfh_len &&
1408 !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
1409 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1410 if (!NFSBCMP(lp->nfsl_owner, own,
1411 NFSV4CL_LOCKNAMELEN))
1415 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
1416 if (lop->nfslo_first >= end)
1418 if (lop->nfslo_end <= off)
1420 if (lop->nfslo_type == F_WRLCK) {
1421 nfscl_clrelease(clp);
1430 nfscl_clrelease(clp);
1436 * Release a byte range lock owner structure.
1439 nfscl_lockrelease(struct nfscllockowner *lp, int error, int candelete)
1441 struct nfsclclient *clp;
1446 clp = lp->nfsl_open->nfso_own->nfsow_clp;
1447 if (error != 0 && candelete &&
1448 (lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED) == 0)
1449 nfscl_freelockowner(lp, 0);
1451 nfscl_lockunlock(&lp->nfsl_rwlock);
1452 nfscl_clrelease(clp);
1457 * Free up an open structure and any associated byte range lock structures.
1460 nfscl_freeopen(struct nfsclopen *op, int local)
1463 LIST_REMOVE(op, nfso_list);
1464 nfscl_freealllocks(&op->nfso_lock, local);
1465 FREE((caddr_t)op, M_NFSCLOPEN);
1467 nfsstatsv1.cllocalopens--;
1469 nfsstatsv1.clopens--;
1473 * Free up all lock owners and associated locks.
1476 nfscl_freealllocks(struct nfscllockownerhead *lhp, int local)
1478 struct nfscllockowner *lp, *nlp;
1480 LIST_FOREACH_SAFE(lp, lhp, nfsl_list, nlp) {
1481 if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
1483 nfscl_freelockowner(lp, local);
1488 * Called for an Open when NFSERR_EXPIRED is received from the server.
1489 * If there are no byte range locks nor a Share Deny lost, try to do a
1490 * fresh Open. Otherwise, free the open.
1493 nfscl_expireopen(struct nfsclclient *clp, struct nfsclopen *op,
1494 struct nfsmount *nmp, struct ucred *cred, NFSPROC_T *p)
1496 struct nfscllockowner *lp;
1497 struct nfscldeleg *dp;
1498 int mustdelete = 0, error;
1501 * Look for any byte range lock(s).
1503 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1504 if (!LIST_EMPTY(&lp->nfsl_lock)) {
1511 * If no byte range lock(s) nor a Share deny, try to re-open.
1513 if (!mustdelete && (op->nfso_mode & NFSLCK_DENYBITS) == 0) {
1514 newnfs_copycred(&op->nfso_cred, cred);
1516 error = nfsrpc_reopen(nmp, op->nfso_fh,
1517 op->nfso_fhlen, op->nfso_mode, op, &dp, cred, p);
1521 FREE((caddr_t)dp, M_NFSCLDELEG);
1526 nfscl_deleg(nmp->nm_mountp, clp, op->nfso_fh,
1527 op->nfso_fhlen, cred, p, &dp);
1531 * If a byte range lock or Share deny or couldn't re-open, free it.
1534 nfscl_freeopen(op, 0);
1535 return (mustdelete);
1539 * Free up an open owner structure.
1542 nfscl_freeopenowner(struct nfsclowner *owp, int local)
1545 LIST_REMOVE(owp, nfsow_list);
1546 FREE((caddr_t)owp, M_NFSCLOWNER);
1548 nfsstatsv1.cllocalopenowners--;
1550 nfsstatsv1.clopenowners--;
1554 * Free up a byte range lock owner structure.
1557 nfscl_freelockowner(struct nfscllockowner *lp, int local)
1559 struct nfscllock *lop, *nlop;
1561 LIST_REMOVE(lp, nfsl_list);
1562 LIST_FOREACH_SAFE(lop, &lp->nfsl_lock, nfslo_list, nlop) {
1563 nfscl_freelock(lop, local);
1565 FREE((caddr_t)lp, M_NFSCLLOCKOWNER);
1567 nfsstatsv1.cllocallockowners--;
1569 nfsstatsv1.cllockowners--;
1573 * Free up a byte range lock structure.
1576 nfscl_freelock(struct nfscllock *lop, int local)
1579 LIST_REMOVE(lop, nfslo_list);
1580 FREE((caddr_t)lop, M_NFSCLLOCK);
1582 nfsstatsv1.cllocallocks--;
1584 nfsstatsv1.cllocks--;
1588 * Clean out the state related to a delegation.
1591 nfscl_cleandeleg(struct nfscldeleg *dp)
1593 struct nfsclowner *owp, *nowp;
1594 struct nfsclopen *op;
1596 LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
1597 op = LIST_FIRST(&owp->nfsow_open);
1599 if (LIST_NEXT(op, nfso_list) != NULL)
1600 panic("nfscleandel");
1601 nfscl_freeopen(op, 1);
1603 nfscl_freeopenowner(owp, 1);
1605 nfscl_freealllocks(&dp->nfsdl_lock, 1);
1609 * Free a delegation.
1612 nfscl_freedeleg(struct nfscldeleghead *hdp, struct nfscldeleg *dp)
1615 TAILQ_REMOVE(hdp, dp, nfsdl_list);
1616 LIST_REMOVE(dp, nfsdl_hash);
1617 FREE((caddr_t)dp, M_NFSCLDELEG);
1618 nfsstatsv1.cldelegates--;
1623 * Free up all state related to this client structure.
1626 nfscl_cleanclient(struct nfsclclient *clp)
1628 struct nfsclowner *owp, *nowp;
1629 struct nfsclopen *op, *nop;
1631 /* Now, all the OpenOwners, etc. */
1632 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
1633 LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
1634 nfscl_freeopen(op, 0);
1636 nfscl_freeopenowner(owp, 0);
1641 * Called when an NFSERR_EXPIRED is received from the server.
1644 nfscl_expireclient(struct nfsclclient *clp, struct nfsmount *nmp,
1645 struct ucred *cred, NFSPROC_T *p)
1647 struct nfsclowner *owp, *nowp, *towp;
1648 struct nfsclopen *op, *nop, *top;
1649 struct nfscldeleg *dp, *ndp;
1650 int ret, printed = 0;
1653 * First, merge locally issued Opens into the list for the server.
1655 dp = TAILQ_FIRST(&clp->nfsc_deleg);
1656 while (dp != NULL) {
1657 ndp = TAILQ_NEXT(dp, nfsdl_list);
1658 owp = LIST_FIRST(&dp->nfsdl_owner);
1659 while (owp != NULL) {
1660 nowp = LIST_NEXT(owp, nfsow_list);
1661 op = LIST_FIRST(&owp->nfsow_open);
1663 if (LIST_NEXT(op, nfso_list) != NULL)
1665 LIST_FOREACH(towp, &clp->nfsc_owner, nfsow_list) {
1666 if (!NFSBCMP(towp->nfsow_owner, owp->nfsow_owner,
1667 NFSV4CL_LOCKNAMELEN))
1671 /* Merge opens in */
1672 LIST_FOREACH(top, &towp->nfsow_open, nfso_list) {
1673 if (top->nfso_fhlen == op->nfso_fhlen &&
1674 !NFSBCMP(top->nfso_fh, op->nfso_fh,
1676 top->nfso_mode |= op->nfso_mode;
1677 top->nfso_opencnt += op->nfso_opencnt;
1682 /* Just add the open to the owner list */
1683 LIST_REMOVE(op, nfso_list);
1684 op->nfso_own = towp;
1685 LIST_INSERT_HEAD(&towp->nfsow_open, op, nfso_list);
1686 nfsstatsv1.cllocalopens--;
1687 nfsstatsv1.clopens++;
1690 /* Just add the openowner to the client list */
1691 LIST_REMOVE(owp, nfsow_list);
1692 owp->nfsow_clp = clp;
1693 LIST_INSERT_HEAD(&clp->nfsc_owner, owp, nfsow_list);
1694 nfsstatsv1.cllocalopenowners--;
1695 nfsstatsv1.clopenowners++;
1696 nfsstatsv1.cllocalopens--;
1697 nfsstatsv1.clopens++;
1702 if (!printed && !LIST_EMPTY(&dp->nfsdl_lock)) {
1704 printf("nfsv4 expired locks lost\n");
1706 nfscl_cleandeleg(dp);
1707 nfscl_freedeleg(&clp->nfsc_deleg, dp);
1710 if (!TAILQ_EMPTY(&clp->nfsc_deleg))
1714 * Now, try and reopen against the server.
1716 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
1717 owp->nfsow_seqid = 0;
1718 LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
1719 ret = nfscl_expireopen(clp, op, nmp, cred, p);
1720 if (ret && !printed) {
1722 printf("nfsv4 expired locks lost\n");
1725 if (LIST_EMPTY(&owp->nfsow_open))
1726 nfscl_freeopenowner(owp, 0);
1731 * This function must be called after the process represented by "own" has
1732 * exited. Must be called with CLSTATE lock held.
1735 nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own)
1737 struct nfsclowner *owp, *nowp;
1738 struct nfscllockowner *lp, *nlp;
1739 struct nfscldeleg *dp;
1741 /* First, get rid of local locks on delegations. */
1742 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
1743 LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
1744 if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
1745 if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
1747 nfscl_freelockowner(lp, 1);
1751 owp = LIST_FIRST(&clp->nfsc_owner);
1752 while (owp != NULL) {
1753 nowp = LIST_NEXT(owp, nfsow_list);
1754 if (!NFSBCMP(owp->nfsow_owner, own,
1755 NFSV4CL_LOCKNAMELEN)) {
1757 * If there are children that haven't closed the
1758 * file descriptors yet, the opens will still be
1759 * here. For that case, let the renew thread clear
1760 * out the OpenOwner later.
1762 if (LIST_EMPTY(&owp->nfsow_open))
1763 nfscl_freeopenowner(owp, 0);
1765 owp->nfsow_defunct = 1;
1772 * Find open/lock owners for processes that have exited.
1775 nfscl_cleanupkext(struct nfsclclient *clp, struct nfscllockownerfhhead *lhp)
1777 struct nfsclowner *owp, *nowp;
1778 struct nfsclopen *op;
1779 struct nfscllockowner *lp, *nlp;
1780 struct nfscldeleg *dp;
1784 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
1785 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1786 LIST_FOREACH_SAFE(lp, &op->nfso_lock, nfsl_list, nlp) {
1787 if (LIST_EMPTY(&lp->nfsl_lock))
1788 nfscl_emptylockowner(lp, lhp);
1791 if (nfscl_procdoesntexist(owp->nfsow_owner))
1792 nfscl_cleanup_common(clp, owp->nfsow_owner);
1796 * For the single open_owner case, these lock owners need to be
1797 * checked to see if they still exist separately.
1798 * This is because nfscl_procdoesntexist() never returns true for
1799 * the single open_owner so that the above doesn't ever call
1800 * nfscl_cleanup_common().
1802 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
1803 LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
1804 if (nfscl_procdoesntexist(lp->nfsl_owner))
1805 nfscl_cleanup_common(clp, lp->nfsl_owner);
1809 NFSPROCLISTUNLOCK();
1813 * Take the empty lock owner and move it to the local lhp list if the
1814 * associated process no longer exists.
1817 nfscl_emptylockowner(struct nfscllockowner *lp,
1818 struct nfscllockownerfhhead *lhp)
1820 struct nfscllockownerfh *lfhp, *mylfhp;
1821 struct nfscllockowner *nlp;
1824 /* If not a Posix lock owner, just return. */
1825 if ((lp->nfsl_lockflags & F_POSIX) == 0)
1831 * First, search to see if this lock owner is already in the list.
1832 * If it is, then the associated process no longer exists.
1834 SLIST_FOREACH(lfhp, lhp, nfslfh_list) {
1835 if (lfhp->nfslfh_len == lp->nfsl_open->nfso_fhlen &&
1836 !NFSBCMP(lfhp->nfslfh_fh, lp->nfsl_open->nfso_fh,
1839 LIST_FOREACH(nlp, &lfhp->nfslfh_lock, nfsl_list)
1840 if (!NFSBCMP(nlp->nfsl_owner, lp->nfsl_owner,
1841 NFSV4CL_LOCKNAMELEN))
1844 /* If not found, check if process still exists. */
1845 if (fnd_it == 0 && nfscl_procdoesntexist(lp->nfsl_owner) == 0)
1848 /* Move the lock owner over to the local list. */
1849 if (mylfhp == NULL) {
1850 mylfhp = malloc(sizeof(struct nfscllockownerfh), M_TEMP,
1854 mylfhp->nfslfh_len = lp->nfsl_open->nfso_fhlen;
1855 NFSBCOPY(lp->nfsl_open->nfso_fh, mylfhp->nfslfh_fh,
1856 mylfhp->nfslfh_len);
1857 LIST_INIT(&mylfhp->nfslfh_lock);
1858 SLIST_INSERT_HEAD(lhp, mylfhp, nfslfh_list);
1860 LIST_REMOVE(lp, nfsl_list);
1861 LIST_INSERT_HEAD(&mylfhp->nfslfh_lock, lp, nfsl_list);
1864 static int fake_global; /* Used to force visibility of MNTK_UNMOUNTF */
1866 * Called from nfs umount to free up the clientid.
1869 nfscl_umount(struct nfsmount *nmp, NFSPROC_T *p)
1871 struct nfsclclient *clp;
1876 * For the case that matters, this is the thread that set
1877 * MNTK_UNMOUNTF, so it will see it set. The code that follows is
1878 * done to ensure that any thread executing nfscl_getcl() after
1879 * this time, will see MNTK_UNMOUNTF set. nfscl_getcl() uses the
1880 * mutex for NFSLOCKCLSTATE(), so it is "m" for the following
1881 * explanation, courtesy of Alan Cox.
1882 * What follows is a snippet from Alan Cox's email at:
1883 * http://docs.FreeBSD.org/cgi/
1884 * mid.cgi?BANLkTikR3d65zPHo9==08ZfJ2vmqZucEvw
1886 * 1. Set MNTK_UNMOUNTF
1887 * 2. Acquire a standard FreeBSD mutex "m".
1888 * 3. Update some data structures.
1889 * 4. Release mutex "m".
1891 * Then, other threads that acquire "m" after step 4 has occurred will
1892 * see MNTK_UNMOUNTF as set. But, other threads that beat thread X to
1893 * step 2 may or may not see MNTK_UNMOUNTF as set.
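 * (That is, the mutex acquire/release pair provides the ordering that makes
 * the earlier MNTK_UNMOUNTF store visible to any thread that acquires "m"
 * afterwards.)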
1896 if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
1904 if ((clp->nfsc_flags & NFSCLFLAGS_INITED) == 0)
1905 panic("nfscl umount");
1908 * First, handshake with the nfscl renew thread, to terminate it.
1911 clp->nfsc_flags |= NFSCLFLAGS_UMOUNT;
1912 while (clp->nfsc_flags & NFSCLFLAGS_HASTHREAD)
1913 (void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT,
1917 * Now, get the exclusive lock on the client state, so
1918 * that no uses of the state are still in progress.
1921 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
1922 NFSCLSTATEMUTEXPTR, NULL);
1923 } while (!igotlock);
1927 * Free up all the state. It will expire on the server, but
1928 * maybe we should do a SetClientId/SetClientIdConfirm so
1929 * the server throws it away?
1931 LIST_REMOVE(clp, nfsc_list);
1932 nfscl_delegreturnall(clp, p);
1933 cred = newnfs_getcred();
1934 if (NFSHASNFSV4N(nmp)) {
1935 (void)nfsrpc_destroysession(nmp, clp, cred, p);
1936 (void)nfsrpc_destroyclient(nmp, clp, cred, p);
1938 (void)nfsrpc_setclient(nmp, clp, 0, cred, p);
1939 nfscl_cleanclient(clp);
1942 free(clp, M_NFSCLCLIENT);
1948 * This function is called when a server replies with NFSERR_STALECLIENTID,
1949 * NFSERR_STALESTATEID or NFSERR_BADSESSION. It traverses the clientid lists,
1950 * doing Opens and Locks with reclaim. If these fail, it deletes the
1951 * corresponding state.
1954 nfscl_recover(struct nfsclclient *clp, struct ucred *cred, NFSPROC_T *p)
1956 struct nfsclowner *owp, *nowp;
1957 struct nfsclopen *op, *nop;
1958 struct nfscllockowner *lp, *nlp;
1959 struct nfscllock *lop, *nlop;
1960 struct nfscldeleg *dp, *ndp, *tdp;
1961 struct nfsmount *nmp;
1962 struct ucred *tcred;
1963 struct nfsclopenhead extra_open;
1964 struct nfscldeleghead extra_deleg;
1967 u_int32_t delegtype = NFSV4OPEN_DELEGATEWRITE, mode;
1968 int i, igotlock = 0, error, trycnt, firstlock;
1969 struct nfscllayout *lyp, *nlyp;
1972 * First, lock the client structure, so everyone else will
1973 * block when trying to use state.
1976 clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG;
1978 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
1979 NFSCLSTATEMUTEXPTR, NULL);
1980 } while (!igotlock);
1983 nmp = clp->nfsc_nmp;
1985 panic("nfscl recover");
1988 * For now, just get rid of all layouts. There may be a need
1989 * to do LayoutCommit Ops with reclaim == true later.
1991 TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp)
1992 nfscl_freelayout(lyp);
1993 TAILQ_INIT(&clp->nfsc_layout);
1994 for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++)
1995 LIST_INIT(&clp->nfsc_layouthash[i]);
1999 error = nfsrpc_setclient(nmp, clp, 1, cred, p);
2000 } while ((error == NFSERR_STALECLIENTID ||
2001 error == NFSERR_BADSESSION ||
2002 error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
2005 clp->nfsc_flags &= ~(NFSCLFLAGS_RECOVER |
2006 NFSCLFLAGS_RECVRINPROG);
2007 wakeup(&clp->nfsc_flags);
2008 nfsv4_unlock(&clp->nfsc_lock, 0);
2012 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
2013 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
2016 * Mark requests already queued on the server, so that they don't
2017 * initiate another recovery cycle. Any requests already in the
2018 * queue that handle state information will have the old stale
2019 * clientid/stateid and will get a NFSERR_STALESTATEID,
2020 * NFSERR_STALECLIENTID or NFSERR_BADSESSION reply from the server.
2021 * This will be translated to NFSERR_STALEDONTRECOVER when
2022 * R_DONTRECOVER is set.
2025 TAILQ_FOREACH(rep, &nfsd_reqq, r_chain) {
2026 if (rep->r_nmp == nmp)
2027 rep->r_flags |= R_DONTRECOVER;
2032 * Now, mark all delegations "need reclaim".
2034 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list)
2035 dp->nfsdl_flags |= NFSCLDL_NEEDRECLAIM;
2037 TAILQ_INIT(&extra_deleg);
2038 LIST_INIT(&extra_open);
2040 * Now traverse the state lists, doing Open and Lock Reclaims.
2042 tcred = newnfs_getcred();
2043 owp = LIST_FIRST(&clp->nfsc_owner);
2044 while (owp != NULL) {
2045 nowp = LIST_NEXT(owp, nfsow_list);
2046 owp->nfsow_seqid = 0;
2047 op = LIST_FIRST(&owp->nfsow_open);
2048 while (op != NULL) {
2049 nop = LIST_NEXT(op, nfso_list);
2050 if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) {
2051 /* Search for a delegation to reclaim with the open */
2052 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
2053 if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
2055 if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
2056 mode = NFSV4OPEN_ACCESSWRITE;
2057 delegtype = NFSV4OPEN_DELEGATEWRITE;
2059 mode = NFSV4OPEN_ACCESSREAD;
2060 delegtype = NFSV4OPEN_DELEGATEREAD;
2062 if ((op->nfso_mode & mode) == mode &&
2063 op->nfso_fhlen == dp->nfsdl_fhlen &&
2064 !NFSBCMP(op->nfso_fh, dp->nfsdl_fh, op->nfso_fhlen))
2069 delegtype = NFSV4OPEN_DELEGATENONE;
2070 newnfs_copycred(&op->nfso_cred, tcred);
2071 error = nfscl_tryopen(nmp, NULL, op->nfso_fh,
2072 op->nfso_fhlen, op->nfso_fh, op->nfso_fhlen,
2073 op->nfso_mode, op, NULL, 0, &ndp, 1, delegtype,
2076 /* Handle any replied delegation */
2077 if (ndp != NULL && ((ndp->nfsdl_flags & NFSCLDL_WRITE)
2078 || NFSMNT_RDONLY(nmp->nm_mountp))) {
2079 if ((ndp->nfsdl_flags & NFSCLDL_WRITE))
2080 mode = NFSV4OPEN_ACCESSWRITE;
2082 mode = NFSV4OPEN_ACCESSREAD;
2083 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
2084 if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
2086 if ((op->nfso_mode & mode) == mode &&
2087 op->nfso_fhlen == dp->nfsdl_fhlen &&
2088 !NFSBCMP(op->nfso_fh, dp->nfsdl_fh,
2090 dp->nfsdl_stateid = ndp->nfsdl_stateid;
2091 dp->nfsdl_sizelimit = ndp->nfsdl_sizelimit;
2092 dp->nfsdl_ace = ndp->nfsdl_ace;
2093 dp->nfsdl_change = ndp->nfsdl_change;
2094 dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
2095 if ((ndp->nfsdl_flags & NFSCLDL_RECALL))
2096 dp->nfsdl_flags |= NFSCLDL_RECALL;
2097 FREE((caddr_t)ndp, M_NFSCLDELEG);
2104 TAILQ_INSERT_HEAD(&extra_deleg, ndp, nfsdl_list);
2106 /* and reclaim all byte range locks */
2107 lp = LIST_FIRST(&op->nfso_lock);
2108 while (lp != NULL) {
2109 nlp = LIST_NEXT(lp, nfsl_list);
2112 lop = LIST_FIRST(&lp->nfsl_lock);
2113 while (lop != NULL) {
2114 nlop = LIST_NEXT(lop, nfslo_list);
2115 if (lop->nfslo_end == NFS64BITSSET)
2118 len = lop->nfslo_end - lop->nfslo_first;
2119 error = nfscl_trylock(nmp, NULL,
2120 op->nfso_fh, op->nfso_fhlen, lp,
2121 firstlock, 1, lop->nfslo_first, len,
2122 lop->nfslo_type, tcred, p);
2124 nfscl_freelock(lop, 0);
2129 /* If no locks, but a lockowner, just delete it. */
2130 if (LIST_EMPTY(&lp->nfsl_lock))
2131 nfscl_freelockowner(lp, 0);
2136 if (error != 0 && error != NFSERR_BADSESSION)
2137 nfscl_freeopen(op, 0);
2144 * Now, try and get any delegations not yet reclaimed by cobbling
2145 * together an appropriate open.
2148 dp = TAILQ_FIRST(&clp->nfsc_deleg);
2149 while (dp != NULL) {
2150 ndp = TAILQ_NEXT(dp, nfsdl_list);
2151 if ((dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) {
2153 MALLOC(nowp, struct nfsclowner *,
2154 sizeof (struct nfsclowner), M_NFSCLOWNER, M_WAITOK);
2156 * Name must be as long as the largest possible
2157 * NFSV4CL_LOCKNAMELEN. 12 for now.
2159 NFSBCOPY("RECLAIMDELEG", nowp->nfsow_owner,
2160 NFSV4CL_LOCKNAMELEN);
2161 LIST_INIT(&nowp->nfsow_open);
2162 nowp->nfsow_clp = clp;
2163 nowp->nfsow_seqid = 0;
2164 nowp->nfsow_defunct = 0;
2165 nfscl_lockinit(&nowp->nfsow_rwlock);
2168 if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) {
2169 MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) +
2170 dp->nfsdl_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
2171 nop->nfso_own = nowp;
2172 if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
2173 nop->nfso_mode = NFSV4OPEN_ACCESSWRITE;
2174 delegtype = NFSV4OPEN_DELEGATEWRITE;
2176 nop->nfso_mode = NFSV4OPEN_ACCESSREAD;
2177 delegtype = NFSV4OPEN_DELEGATEREAD;
2179 nop->nfso_opencnt = 0;
2180 nop->nfso_posixlock = 1;
2181 nop->nfso_fhlen = dp->nfsdl_fhlen;
2182 NFSBCOPY(dp->nfsdl_fh, nop->nfso_fh, dp->nfsdl_fhlen);
2183 LIST_INIT(&nop->nfso_lock);
2184 nop->nfso_stateid.seqid = 0;
2185 nop->nfso_stateid.other[0] = 0;
2186 nop->nfso_stateid.other[1] = 0;
2187 nop->nfso_stateid.other[2] = 0;
2188 newnfs_copycred(&dp->nfsdl_cred, tcred);
2189 newnfs_copyincred(tcred, &nop->nfso_cred);
2191 error = nfscl_tryopen(nmp, NULL, nop->nfso_fh,
2192 nop->nfso_fhlen, nop->nfso_fh, nop->nfso_fhlen,
2193 nop->nfso_mode, nop, NULL, 0, &tdp, 1,
2194 delegtype, tcred, p);
2196 if ((tdp->nfsdl_flags & NFSCLDL_WRITE))
2197 mode = NFSV4OPEN_ACCESSWRITE;
2199 mode = NFSV4OPEN_ACCESSREAD;
2200 if ((nop->nfso_mode & mode) == mode &&
2201 nop->nfso_fhlen == tdp->nfsdl_fhlen &&
2202 !NFSBCMP(nop->nfso_fh, tdp->nfsdl_fh,
2204 dp->nfsdl_stateid = tdp->nfsdl_stateid;
2205 dp->nfsdl_sizelimit = tdp->nfsdl_sizelimit;
2206 dp->nfsdl_ace = tdp->nfsdl_ace;
2207 dp->nfsdl_change = tdp->nfsdl_change;
2208 dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
2209 if ((tdp->nfsdl_flags & NFSCLDL_RECALL))
2210 dp->nfsdl_flags |= NFSCLDL_RECALL;
2211 FREE((caddr_t)tdp, M_NFSCLDELEG);
2213 TAILQ_INSERT_HEAD(&extra_deleg, tdp, nfsdl_list);
2219 FREE((caddr_t)nop, M_NFSCLOPEN);
2221 * Couldn't reclaim it, so throw the state away.
2224 nfscl_cleandeleg(dp);
2225 nfscl_freedeleg(&clp->nfsc_deleg, dp);
2227 LIST_INSERT_HEAD(&extra_open, nop, nfso_list);
2234 * Now, get rid of extra Opens and Delegations.
2236 LIST_FOREACH_SAFE(op, &extra_open, nfso_list, nop) {
2238 newnfs_copycred(&op->nfso_cred, tcred);
2239 error = nfscl_tryclose(op, tcred, nmp, p);
2240 if (error == NFSERR_GRACE)
2241 (void) nfs_catnap(PZERO, error, "nfsexcls");
2242 } while (error == NFSERR_GRACE);
2243 LIST_REMOVE(op, nfso_list);
2244 FREE((caddr_t)op, M_NFSCLOPEN);
2247 FREE((caddr_t)nowp, M_NFSCLOWNER);
2249 TAILQ_FOREACH_SAFE(dp, &extra_deleg, nfsdl_list, ndp) {
2251 newnfs_copycred(&dp->nfsdl_cred, tcred);
2252 error = nfscl_trydelegreturn(dp, tcred, nmp, p);
2253 if (error == NFSERR_GRACE)
2254 (void) nfs_catnap(PZERO, error, "nfsexdlg");
2255 } while (error == NFSERR_GRACE);
2256 TAILQ_REMOVE(&extra_deleg, dp, nfsdl_list);
2257 FREE((caddr_t)dp, M_NFSCLDELEG);
2260 /* For NFSv4.1 or later, do a RECLAIM_COMPLETE. */
2261 if (NFSHASNFSV4N(nmp))
2262 (void)nfsrpc_reclaimcomplete(nmp, cred, p);
2265 clp->nfsc_flags &= ~NFSCLFLAGS_RECVRINPROG;
2266 wakeup(&clp->nfsc_flags);
2267 nfsv4_unlock(&clp->nfsc_lock, 0);
2273 * This function is called when a server replies with NFSERR_EXPIRED.
2274 * It deletes all state for the client and does a fresh SetClientId/confirm.
2275 * XXX Someday it should post a signal to the process(es) that hold the
2276 * state, so they know that lock state has been lost.
2279 nfscl_hasexpired(struct nfsclclient *clp, u_int32_t clidrev, NFSPROC_T *p)
2281 struct nfsmount *nmp;
2283 int igotlock = 0, error, trycnt;
2286 * If the clientid has gone away or a new SetClientid has already
2287 * been done, just return ok.
2289 if (clp == NULL || clidrev != clp->nfsc_clientidrev)
2293 * First, lock the client structure, so everyone else will
2294 * block when trying to use state. Also, use NFSCLFLAGS_EXPIREIT so
2295 * that only one thread does the work.
2298 clp->nfsc_flags |= NFSCLFLAGS_EXPIREIT;
2300 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
2301 NFSCLSTATEMUTEXPTR, NULL);
2302 } while (!igotlock && (clp->nfsc_flags & NFSCLFLAGS_EXPIREIT));
2303 if ((clp->nfsc_flags & NFSCLFLAGS_EXPIREIT) == 0) {
2305 nfsv4_unlock(&clp->nfsc_lock, 0);
2309 clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG;
2312 nmp = clp->nfsc_nmp;
2314 panic("nfscl expired");
2315 cred = newnfs_getcred();
2318 error = nfsrpc_setclient(nmp, clp, 0, cred, p);
2319 } while ((error == NFSERR_STALECLIENTID ||
2320 error == NFSERR_BADSESSION ||
2321 error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
2324 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
2327 * Expire the state for the client.
2329 nfscl_expireclient(clp, nmp, cred, p);
2331 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
2332 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
2334 clp->nfsc_flags &= ~(NFSCLFLAGS_EXPIREIT | NFSCLFLAGS_RECVRINPROG);
2335 wakeup(&clp->nfsc_flags);
2336 nfsv4_unlock(&clp->nfsc_lock, 0);
2343 * This function inserts a lock in the list after insert_lop.
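 * If insert_lop is the lock owner itself (cast to a lock pointer), the new
 * lock is placed at the head of the owner's list; otherwise it is linked in
 * immediately after insert_lop. The "local" argument selects which of the
 * lock statistics counters is bumped.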
2346 nfscl_insertlock(struct nfscllockowner *lp, struct nfscllock *new_lop,
2347 struct nfscllock *insert_lop, int local)
2350 if ((struct nfscllockowner *)insert_lop == lp)
2351 LIST_INSERT_HEAD(&lp->nfsl_lock, new_lop, nfslo_list);
2353 LIST_INSERT_AFTER(insert_lop, new_lop, nfslo_list);
2355 nfsstatsv1.cllocallocks++;
2357 nfsstatsv1.cllocks++;
2361 * This function updates the locking for a lock owner and given file. It
2362 * maintains a list of lock ranges ordered on increasing file offset that
2363 * are NFSCLLOCK_READ or NFSCLLOCK_WRITE and non-overlapping (aka POSIX style).
2364 * It always adds new_lop to the list and sometimes uses the one pointed to by *other_lopp.
2366 * Returns 1 if the locks were modified, 0 otherwise.
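 * For example (illustrative): with an existing write lock covering bytes
 * [0, 100) and a new write lock for [50, 150), the two are merged into a
 * single entry covering [0, 150); an unlock of [40, 60) against that entry
 * would instead split it into [0, 40) and [60, 150).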
2369 nfscl_updatelock(struct nfscllockowner *lp, struct nfscllock **new_lopp,
2370 struct nfscllock **other_lopp, int local)
2372 struct nfscllock *new_lop = *new_lopp;
2373 struct nfscllock *lop, *tlop, *ilop;
2374 struct nfscllock *other_lop;
2375 int unlock = 0, modified = 0;
2379 * Work down the list until the lock is merged.
2381 if (new_lop->nfslo_type == F_UNLCK)
2383 ilop = (struct nfscllock *)lp;
2384 lop = LIST_FIRST(&lp->nfsl_lock);
2385 while (lop != NULL) {
2387 * Only check locks for this file that aren't before the start of the new lock's range.
2390 if (lop->nfslo_end >= new_lop->nfslo_first) {
2391 if (new_lop->nfslo_end < lop->nfslo_first) {
2393 * If the new lock ends before the start of the
2394 * current lock's range, no merge, just insert
2399 if (new_lop->nfslo_type == lop->nfslo_type ||
2400 (new_lop->nfslo_first <= lop->nfslo_first &&
2401 new_lop->nfslo_end >= lop->nfslo_end)) {
2403 * This lock can be absorbed by the new lock/unlock.
2404 * This happens when it covers the entire range
2405 * of the old lock or is contiguous
2406 * with the old lock and is of the same type or an unlock.
2409 if (new_lop->nfslo_type != lop->nfslo_type ||
2410 new_lop->nfslo_first != lop->nfslo_first ||
2411 new_lop->nfslo_end != lop->nfslo_end)
2413 if (lop->nfslo_first < new_lop->nfslo_first)
2414 new_lop->nfslo_first = lop->nfslo_first;
2415 if (lop->nfslo_end > new_lop->nfslo_end)
2416 new_lop->nfslo_end = lop->nfslo_end;
2418 lop = LIST_NEXT(lop, nfslo_list);
2419 nfscl_freelock(tlop, local);
2424 * All these cases are for contiguous locks that are not the
2425 * same type, so they can't be merged.
2427 if (new_lop->nfslo_first <= lop->nfslo_first) {
2429 * This case is where the new lock overlaps with the
2430 * first part of the old lock. Move the start of the
2431 * old lock to just past the end of the new lock. The
2432 * new lock will be inserted in front of the old, since
2433 * ilop hasn't been updated. (We are done now.)
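 * For example, if the old lock covers [10, 20) and the new lock covers
 * [5, 15), the old lock shrinks to [15, 20) and the new lock is inserted
 * ahead of it.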
2435 if (lop->nfslo_first != new_lop->nfslo_end) {
2436 lop->nfslo_first = new_lop->nfslo_end;
2441 if (new_lop->nfslo_end >= lop->nfslo_end) {
2443 * This case is where the new lock overlaps with the
2444 * end of the old lock's range. Move the old lock's
2445 * end to just before the new lock's first and insert
2446 * the new lock after the old lock.
2447 * Might not be done yet, since the new lock could
2448 * overlap further locks with higher ranges.
2450 if (lop->nfslo_end != new_lop->nfslo_first) {
2451 lop->nfslo_end = new_lop->nfslo_first;
2455 lop = LIST_NEXT(lop, nfslo_list);
2459 * The final case is where the new lock's range is in the
2460 * middle of the current lock's and splits the current lock
2461 * up. Use *other_lopp to handle the second part of the
2462 * split old lock range. (We are done now.)
2463 * For unlock, we use new_lop as other_lop and tmp, since
2464 * other_lop and new_lop are the same for this case.
2465 * We noted the unlock case above, so we don't need
2466 * new_lop->nfslo_type any longer.
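 * For example, an old lock covering [0, 100) split by a new lock of a
 * different type covering [40, 60) becomes [0, 40), with the second
 * fragment [60, 100) carried by other_lop and the new lock inserted
 * between them.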
2468 tmp = new_lop->nfslo_first;
2470 other_lop = new_lop;
2473 other_lop = *other_lopp;
2476 other_lop->nfslo_first = new_lop->nfslo_end;
2477 other_lop->nfslo_end = lop->nfslo_end;
2478 other_lop->nfslo_type = lop->nfslo_type;
2479 lop->nfslo_end = tmp;
2480 nfscl_insertlock(lp, other_lop, lop, local);
2486 lop = LIST_NEXT(lop, nfslo_list);
2492 * Insert the new lock in the list at the appropriate place.
2495 nfscl_insertlock(lp, new_lop, ilop, local);
2503 * This function must be run as a kernel thread.
2504 * It does Renew Ops and recovery, when required.
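 * Each pass of the loop performs recovery when NFSCLFLAGS_RECOVER is set,
 * renews the lease (and any pNFS DS sessions) before it expires, recalls
 * and returns delegations and layouts as required, and once per second
 * cleans up open/lock owners whose processes have exited.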
2507 nfscl_renewthread(struct nfsclclient *clp, NFSPROC_T *p)
2509 struct nfsclowner *owp, *nowp;
2510 struct nfsclopen *op;
2511 struct nfscllockowner *lp, *nlp;
2512 struct nfscldeleghead dh;
2513 struct nfscldeleg *dp, *ndp;
2516 int error, cbpathdown, islept, igotlock, ret, clearok;
2517 uint32_t recover_done_time = 0;
2519 static time_t prevsec = 0;
2520 struct nfscllockownerfh *lfhp, *nlfhp;
2521 struct nfscllockownerfhhead lfh;
2522 struct nfscllayout *lyp, *nlyp;
2523 struct nfscldevinfo *dip, *ndip;
2524 struct nfscllayouthead rlh;
2525 struct nfsclrecalllayout *recallp;
2526 struct nfsclds *dsp;
2528 cred = newnfs_getcred();
2530 clp->nfsc_flags |= NFSCLFLAGS_HASTHREAD;
2533 newnfs_setroot(cred);
2535 if (clp->nfsc_flags & NFSCLFLAGS_RECOVER) {
2537 * Only allow one recovery within 1/2 of the lease
2538 * duration (nfsc_renew).
2540 if (recover_done_time < NFSD_MONOSEC) {
2541 recover_done_time = NFSD_MONOSEC +
2543 NFSCL_DEBUG(1, "Doing recovery..\n");
2544 nfscl_recover(clp, cred, p);
2546 NFSCL_DEBUG(1, "Clear Recovery dt=%u ms=%jd\n",
2547 recover_done_time, (intmax_t)NFSD_MONOSEC);
2549 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
2553 if (clp->nfsc_expire <= NFSD_MONOSEC &&
2554 (clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID)) {
2555 clp->nfsc_expire = NFSD_MONOSEC + clp->nfsc_renew;
2556 clidrev = clp->nfsc_clientidrev;
2557 error = nfsrpc_renew(clp, NULL, cred, p);
2558 if (error == NFSERR_CBPATHDOWN)
2560 else if (error == NFSERR_STALECLIENTID ||
2561 error == NFSERR_BADSESSION) {
2563 clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
2565 } else if (error == NFSERR_EXPIRED)
2566 (void) nfscl_hasexpired(clp, clidrev, p);
2570 if (NFSHASNFSV4N(clp->nfsc_nmp)) {
2571 /* Do renews for any DS sessions. */
2572 NFSLOCKMNT(clp->nfsc_nmp);
2573 /* Skip first entry, since the MDS is handled above. */
2574 dsp = TAILQ_FIRST(&clp->nfsc_nmp->nm_sess);
2576 dsp = TAILQ_NEXT(dsp, nfsclds_list);
2577 while (dsp != NULL) {
2578 if (dsp->nfsclds_expire <= NFSD_MONOSEC &&
2579 dsp->nfsclds_sess.nfsess_defunct == 0) {
2580 dsp->nfsclds_expire = NFSD_MONOSEC +
2582 NFSUNLOCKMNT(clp->nfsc_nmp);
2583 (void)nfsrpc_renew(clp, dsp, cred, p);
2586 dsp = TAILQ_NEXT(dsp, nfsclds_list);
2588 NFSUNLOCKMNT(clp->nfsc_nmp);
2594 /* It's a Total Recall! */
2595 nfscl_totalrecall(clp);
2598 * Now, handle defunct owners.
2600 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
2601 if (LIST_EMPTY(&owp->nfsow_open)) {
2602 if (owp->nfsow_defunct != 0)
2603 nfscl_freeopenowner(owp, 0);
2608 * Do the recall on any delegations. To avoid trouble, always
2609 * come back up here after having slept.
2613 dp = TAILQ_FIRST(&clp->nfsc_deleg);
2614 while (dp != NULL) {
2615 ndp = TAILQ_NEXT(dp, nfsdl_list);
2616 if ((dp->nfsdl_flags & NFSCLDL_RECALL)) {
2618 * Wait for outstanding I/O ops to be done.
2620 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
2622 nfsv4_unlock(&clp->nfsc_lock, 0);
2625 dp->nfsdl_rwlock.nfslock_lock |=
2627 (void) nfsmsleep(&dp->nfsdl_rwlock,
2628 NFSCLSTATEMUTEXPTR, PZERO, "nfscld",
2633 igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
2634 &islept, NFSCLSTATEMUTEXPTR, NULL);
2639 newnfs_copycred(&dp->nfsdl_cred, cred);
2640 ret = nfscl_recalldeleg(clp, clp->nfsc_nmp, dp,
2643 nfscl_cleandeleg(dp);
2644 TAILQ_REMOVE(&clp->nfsc_deleg, dp,
2646 LIST_REMOVE(dp, nfsdl_hash);
2647 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
2649 nfsstatsv1.cldelegates--;
2657 * Clear out old delegations, if we are above the high water
2658 * mark. Only clear out ones with no state related to them.
2659 * The tailq list is in LRU order.
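 * Delegations that still have associated opens or byte range locks, or
 * that are being recalled, reclaimed or returned, are skipped.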
2661 dp = TAILQ_LAST(&clp->nfsc_deleg, nfscldeleghead);
2662 while (nfscl_delegcnt > nfscl_deleghighwater && dp != NULL) {
2663 ndp = TAILQ_PREV(dp, nfscldeleghead, nfsdl_list);
2664 if (dp->nfsdl_rwlock.nfslock_usecnt == 0 &&
2665 dp->nfsdl_rwlock.nfslock_lock == 0 &&
2666 dp->nfsdl_timestamp < NFSD_MONOSEC &&
2667 (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_ZAPPED |
2668 NFSCLDL_NEEDRECLAIM | NFSCLDL_DELEGRET)) == 0) {
2670 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
2671 op = LIST_FIRST(&owp->nfsow_open);
2678 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
2679 if (!LIST_EMPTY(&lp->nfsl_lock)) {
2686 TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
2687 LIST_REMOVE(dp, nfsdl_hash);
2688 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
2690 nfsstatsv1.cldelegates--;
2696 nfsv4_unlock(&clp->nfsc_lock, 0);
2699 * Do the recall on any layouts. To avoid trouble, always
2700 * come back up here after having slept.
2704 TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp) {
2705 if ((lyp->nfsly_flags & NFSLY_RECALL) != 0) {
2707 * Wait for outstanding I/O ops to be done.
2709 if (lyp->nfsly_lock.nfslock_usecnt > 0 ||
2710 (lyp->nfsly_lock.nfslock_lock &
2711 NFSV4LOCK_LOCK) != 0) {
2712 lyp->nfsly_lock.nfslock_lock |=
2714 (void)nfsmsleep(&lyp->nfsly_lock,
2715 NFSCLSTATEMUTEXPTR, PZERO, "nfslyp",
2719 /* Move the layout to the recall list. */
2720 TAILQ_REMOVE(&clp->nfsc_layout, lyp,
2722 LIST_REMOVE(lyp, nfsly_hash);
2723 TAILQ_INSERT_HEAD(&rlh, lyp, nfsly_list);
2725 /* Handle any layout commits. */
2726 if (!NFSHASNOLAYOUTCOMMIT(clp->nfsc_nmp) &&
2727 (lyp->nfsly_flags & NFSLY_WRITTEN) != 0) {
2728 lyp->nfsly_flags &= ~NFSLY_WRITTEN;
2730 NFSCL_DEBUG(3, "do layoutcommit\n");
2731 nfscl_dolayoutcommit(clp->nfsc_nmp, lyp,
2739 /* Now, look for stale layouts. */
2740 lyp = TAILQ_LAST(&clp->nfsc_layout, nfscllayouthead);
2741 while (lyp != NULL) {
2742 nlyp = TAILQ_PREV(lyp, nfscllayouthead, nfsly_list);
2743 if (lyp->nfsly_timestamp < NFSD_MONOSEC &&
2744 (lyp->nfsly_flags & NFSLY_RECALL) == 0 &&
2745 lyp->nfsly_lock.nfslock_usecnt == 0 &&
2746 lyp->nfsly_lock.nfslock_lock == 0) {
2747 NFSCL_DEBUG(4, "ret stale lay=%d\n",
2749 recallp = malloc(sizeof(*recallp),
2750 M_NFSLAYRECALL, M_NOWAIT);
2751 if (recallp == NULL)
2753 (void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE,
2754 lyp, NFSLAYOUTIOMODE_ANY, 0, UINT64_MAX,
2755 lyp->nfsly_stateid.seqid, recallp);
2761 * Free up any unreferenced device info structures.
2763 LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip) {
2764 if (dip->nfsdi_layoutrefs == 0 &&
2765 dip->nfsdi_refcnt == 0) {
2766 NFSCL_DEBUG(4, "freeing devinfo\n");
2767 LIST_REMOVE(dip, nfsdi_list);
2768 nfscl_freedevinfo(dip);
2773 /* Do layout return(s), as required. */
2774 TAILQ_FOREACH_SAFE(lyp, &rlh, nfsly_list, nlyp) {
2775 TAILQ_REMOVE(&rlh, lyp, nfsly_list);
2776 NFSCL_DEBUG(4, "ret layout\n");
2777 nfscl_layoutreturn(clp->nfsc_nmp, lyp, cred, p);
2778 nfscl_freelayout(lyp);
2782 * Delegreturn any delegations cleaned out or recalled.
2784 TAILQ_FOREACH_SAFE(dp, &dh, nfsdl_list, ndp) {
2785 newnfs_copycred(&dp->nfsdl_cred, cred);
2786 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
2787 TAILQ_REMOVE(&dh, dp, nfsdl_list);
2788 FREE((caddr_t)dp, M_NFSCLDELEG);
2793 * Call nfscl_cleanupkext() once per second to check for
2794 * open/lock owners where the process has exited.
2796 mytime = NFSD_MONOSEC;
2797 if (prevsec != mytime) {
2799 nfscl_cleanupkext(clp, &lfh);
2803 * Do a ReleaseLockOwner for all lock owners where the
2804 * associated process no longer exists, as found by
2805 * nfscl_cleanupkext().
2807 newnfs_setroot(cred);
2808 SLIST_FOREACH_SAFE(lfhp, &lfh, nfslfh_list, nlfhp) {
2809 LIST_FOREACH_SAFE(lp, &lfhp->nfslfh_lock, nfsl_list,
2811 (void)nfsrpc_rellockown(clp->nfsc_nmp, lp,
2812 lfhp->nfslfh_fh, lfhp->nfslfh_len, cred,
2814 nfscl_freelockowner(lp, 0);
2821 if ((clp->nfsc_flags & NFSCLFLAGS_RECOVER) == 0)
2822 (void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT, "nfscl",
2824 if (clp->nfsc_flags & NFSCLFLAGS_UMOUNT) {
2825 clp->nfsc_flags &= ~NFSCLFLAGS_HASTHREAD;
2828 wakeup((caddr_t)clp);
2836 * Initiate state recovery. Called when NFSERR_STALECLIENTID,
2837 * NFSERR_STALESTATEID or NFSERR_BADSESSION is received.
2840 nfscl_initiate_recovery(struct nfsclclient *clp)
2846 clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
2848 wakeup((caddr_t)clp);
2852 * Dump out the state stuff for debugging.
2855 nfscl_dumpstate(struct nfsmount *nmp, int openowner, int opens,
2856 int lockowner, int locks)
2858 struct nfsclclient *clp;
2859 struct nfsclowner *owp;
2860 struct nfsclopen *op;
2861 struct nfscllockowner *lp;
2862 struct nfscllock *lop;
2863 struct nfscldeleg *dp;
2867 printf("nfscl dumpstate NULL clp\n");
2871 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
2872 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
2873 if (openowner && !LIST_EMPTY(&owp->nfsow_open))
2874 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
2875 owp->nfsow_owner[0], owp->nfsow_owner[1],
2876 owp->nfsow_owner[2], owp->nfsow_owner[3],
2878 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
2880 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
2881 op->nfso_stateid.other[0], op->nfso_stateid.other[1],
2882 op->nfso_stateid.other[2], op->nfso_opencnt,
2884 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
2886 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
2887 lp->nfsl_owner[0], lp->nfsl_owner[1],
2888 lp->nfsl_owner[2], lp->nfsl_owner[3],
2890 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
2891 lp->nfsl_stateid.other[2]);
2892 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
2895 printf("lck typ=%d fst=%ju end=%ju\n",
2896 lop->nfslo_type, (intmax_t)lop->nfslo_first,
2897 (intmax_t)lop->nfslo_end);
2899 printf("lck typ=%d fst=%qd end=%qd\n",
2900 lop->nfslo_type, lop->nfslo_first,
2908 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
2909 if (openowner && !LIST_EMPTY(&owp->nfsow_open))
2910 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
2911 owp->nfsow_owner[0], owp->nfsow_owner[1],
2912 owp->nfsow_owner[2], owp->nfsow_owner[3],
2914 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
2916 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
2917 op->nfso_stateid.other[0], op->nfso_stateid.other[1],
2918 op->nfso_stateid.other[2], op->nfso_opencnt,
2920 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
2922 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
2923 lp->nfsl_owner[0], lp->nfsl_owner[1],
2924 lp->nfsl_owner[2], lp->nfsl_owner[3],
2926 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
2927 lp->nfsl_stateid.other[2]);
2928 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
2931 printf("lck typ=%d fst=%ju end=%ju\n",
2932 lop->nfslo_type, (intmax_t)lop->nfslo_first,
2933 (intmax_t)lop->nfslo_end);
2935 printf("lck typ=%d fst=%qd end=%qd\n",
2936 lop->nfslo_type, lop->nfslo_first,
2947 * Check for duplicate open owners and opens.
2948 * (Only used as a diagnostic aid.)
2951 nfscl_dupopen(vnode_t vp, int dupopens)
2953 struct nfsclclient *clp;
2954 struct nfsclowner *owp, *owp2;
2955 struct nfsclopen *op, *op2;
2958 clp = VFSTONFS(vnode_mount(vp))->nm_clp;
2960 printf("nfscl dupopen NULL clp\n");
2963 nfhp = VTONFS(vp)->n_fhp;
2967 * First, search for duplicate owners.
2968 * These should never happen!
2970 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
2971 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
2973 !NFSBCMP(owp->nfsow_owner, owp2->nfsow_owner,
2974 NFSV4CL_LOCKNAMELEN)) {
2976 printf("DUP OWNER\n");
2977 nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0, 0);
2984 * Now, search for duplicate stateids.
2985 * These shouldn't happen, either.
2987 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
2988 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
2989 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
2990 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
2992 (op->nfso_stateid.other[0] != 0 ||
2993 op->nfso_stateid.other[1] != 0 ||
2994 op->nfso_stateid.other[2] != 0) &&
2995 op->nfso_stateid.other[0] == op2->nfso_stateid.other[0] &&
2996 op->nfso_stateid.other[1] == op2->nfso_stateid.other[1] &&
2997 op->nfso_stateid.other[2] == op2->nfso_stateid.other[2]) {
2999 printf("DUP STATEID\n");
3000 nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0,
3010 * Now search for duplicate opens.
3011 * Duplicate opens for the same owner
3012 * should never occur. Other duplicates are
3013 * possible and are checked for if "dupopens" is set non-zero.
3016 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
3017 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
3018 if (nfhp->nfh_len == op2->nfso_fhlen &&
3019 !NFSBCMP(nfhp->nfh_fh, op2->nfso_fh, nfhp->nfh_len)) {
3020 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3021 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3022 if (op != op2 && nfhp->nfh_len == op->nfso_fhlen &&
3023 !NFSBCMP(nfhp->nfh_fh, op->nfso_fh, nfhp->nfh_len) &&
3024 (!NFSBCMP(op->nfso_own->nfsow_owner,
3025 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN) ||
3027 if (!NFSBCMP(op->nfso_own->nfsow_owner,
3028 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
3030 printf("BADDUP OPEN\n");
3033 printf("DUP OPEN\n");
3035 nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1,
3048 * During close, find an open that needs to be dereferenced and
3049 * dereference it. If there are no more opens for this file,
3050 * log a message to that effect.
3051 * Opens aren't actually Close'd until VOP_INACTIVE() is performed
3052 * on the file's vnode.
3053 * This is the safe way, since it is difficult to identify
3054 * which open the close is for and I/O can be performed after the
3055 * close(2) system call when a file is mmap'd.
3056 * If it returns 0 for success, there will be a referenced
3057 * clp returned via clpp.
3060 nfscl_getclose(vnode_t vp, struct nfsclclient **clpp)
3062 struct nfsclclient *clp;
3063 struct nfsclowner *owp;
3064 struct nfsclopen *op;
3065 struct nfscldeleg *dp;
3069 error = nfscl_getcl(vnode_mount(vp), NULL, NULL, 1, &clp);
3074 nfhp = VTONFS(vp)->n_fhp;
3078 * First, look for one under a delegation that was locally issued
3079 * and just decrement the opencnt for it. Since all my Opens against
3080 * the server are DENY_NONE, I don't see a problem with hanging
3081 * onto them. (It is much easier to use one of the extant Opens
3082 * that I already have on the server when a Delegation is recalled
3083 * than to do fresh Opens.) Someday, I might need to rethink this, but.
3085 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
3087 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
3088 op = LIST_FIRST(&owp->nfsow_open);
3091 * Since a delegation is for a file, there
3092 * should never be more than one open for the same file.
3095 if (LIST_NEXT(op, nfso_list) != NULL)
3096 panic("nfscdeleg opens");
3097 if (notdecr && op->nfso_opencnt > 0) {
3106 /* Now process the opens against the server. */
3107 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3108 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3109 if (op->nfso_fhlen == nfhp->nfh_len &&
3110 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
3112 /* Found an open, decrement cnt if possible */
3113 if (notdecr && op->nfso_opencnt > 0) {
3118 * There are more opens, so just return.
3120 if (op->nfso_opencnt > 0) {
3129 printf("nfscl: never fnd open\n");
3134 nfscl_doclose(vnode_t vp, struct nfsclclient **clpp, NFSPROC_T *p)
3136 struct nfsclclient *clp;
3137 struct nfsclowner *owp, *nowp;
3138 struct nfsclopen *op;
3139 struct nfscldeleg *dp;
3141 struct nfsclrecalllayout *recallp;
3144 error = nfscl_getcl(vnode_mount(vp), NULL, NULL, 1, &clp);
3149 nfhp = VTONFS(vp)->n_fhp;
3150 recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_WAITOK);
3153 * First, get rid of the local Open structures, which should no longer be in use.
3156 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
3158 LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
3159 op = LIST_FIRST(&owp->nfsow_open);
3161 KASSERT((op->nfso_opencnt == 0),
3162 ("nfscl: bad open cnt on deleg"));
3163 nfscl_freeopen(op, 1);
3165 nfscl_freeopenowner(owp, 1);
3169 /* Return any layouts marked return on close. */
3170 nfscl_retoncloselayout(vp, clp, nfhp->nfh_fh, nfhp->nfh_len, &recallp);
3172 /* Now process the opens against the server. */
3174 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3175 op = LIST_FIRST(&owp->nfsow_open);
3176 while (op != NULL) {
3177 if (op->nfso_fhlen == nfhp->nfh_len &&
3178 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
3180 /* Found an open, close it. */
3181 KASSERT((op->nfso_opencnt == 0),
3182 ("nfscl: bad open cnt on server"));
3184 nfsrpc_doclose(VFSTONFS(vnode_mount(vp)), op,
3189 op = LIST_NEXT(op, nfso_list);
3194 * recallp has been set NULL by nfscl_retoncloselayout() if it was
3195 * used by the function, but calling free() with a NULL pointer is ok.
3197 free(recallp, M_NFSLAYRECALL);
3202 * Return all delegations on this client.
3203 * (Must be called with client sleep lock.)
3206 nfscl_delegreturnall(struct nfsclclient *clp, NFSPROC_T *p)
3208 struct nfscldeleg *dp, *ndp;
3211 cred = newnfs_getcred();
3212 TAILQ_FOREACH_SAFE(dp, &clp->nfsc_deleg, nfsdl_list, ndp) {
3213 nfscl_cleandeleg(dp);
3214 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
3215 nfscl_freedeleg(&clp->nfsc_deleg, dp);
3221 * Do a callback RPC.
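 * The callback compound is parsed and each operation is dispatched:
 * CB_GETATTR, CB_RECALL, CB_LAYOUTRECALL and CB_SEQUENCE are handled;
 * any other operation gets NFSERR_NOTSUPP.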
3224 nfscl_docb(struct nfsrv_descript *nd, NFSPROC_T *p)
3226 int clist, gotseq_ok, i, j, k, op, rcalls;
3228 struct nfsclclient *clp;
3229 struct nfscldeleg *dp = NULL;
3230 int numops, taglen = -1, error = 0, trunc;
3231 u_int32_t minorvers = 0, retops = 0, *retopsp = NULL, *repp, cbident;
3232 u_char tag[NFSV4_SMALLSTR + 1], *tagstr;
3238 nfsattrbit_t attrbits, rattrbits;
3239 nfsv4stateid_t stateid;
3240 uint32_t seqid, slotid = 0, highslot, cachethis;
3241 uint8_t sessionid[NFSX_V4SESSIONID];
3243 struct nfscllayout *lyp;
3244 uint64_t filesid[2], len, off;
3245 int changed, gotone, laytype, recalltype;
3247 struct nfsclrecalllayout *recallp = NULL;
3248 struct nfsclsession *tsep;
3252 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
3253 taglen = fxdr_unsigned(int, *tl);
3258 if (taglen <= NFSV4_SMALLSTR)
3261 tagstr = malloc(taglen + 1, M_TEMP, M_WAITOK);
3262 error = nfsrv_mtostr(nd, tagstr, taglen);
3264 if (taglen > NFSV4_SMALLSTR)
3265 free(tagstr, M_TEMP);
3269 (void) nfsm_strtom(nd, tag, taglen);
3270 if (taglen > NFSV4_SMALLSTR) {
3271 free(tagstr, M_TEMP);
3273 NFSM_BUILD(retopsp, u_int32_t *, NFSX_UNSIGNED);
3274 NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
3275 minorvers = fxdr_unsigned(u_int32_t, *tl++);
3276 if (minorvers != NFSV4_MINORVERSION && minorvers != NFSV41_MINORVERSION)
3277 nd->nd_repstat = NFSERR_MINORVERMISMATCH;
3278 cbident = fxdr_unsigned(u_int32_t, *tl++);
3282 numops = fxdr_unsigned(int, *tl);
3284 * Loop around doing the sub ops.
3286 for (i = 0; i < numops; i++) {
3287 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
3288 NFSM_BUILD(repp, u_int32_t *, 2 * NFSX_UNSIGNED);
3290 op = fxdr_unsigned(int, *tl);
3291 if (op < NFSV4OP_CBGETATTR ||
3292 (op > NFSV4OP_CBRECALL && minorvers == NFSV4_MINORVERSION) ||
3293 (op > NFSV4OP_CBNOTIFYDEVID &&
3294 minorvers == NFSV41_MINORVERSION)) {
3295 nd->nd_repstat = NFSERR_OPILLEGAL;
3296 *repp = nfscl_errmap(nd, minorvers);
3300 nd->nd_procnum = op;
3301 if (op < NFSV41_CBNOPS)
3302 nfsstatsv1.cbrpccnt[nd->nd_procnum]++;
3304 case NFSV4OP_CBGETATTR:
3305 NFSCL_DEBUG(4, "cbgetattr\n");
3308 error = nfsm_getfh(nd, &nfhp);
3310 error = nfsrv_getattrbits(nd, &attrbits,
3312 if (error == 0 && i == 0 &&
3313 minorvers != NFSV4_MINORVERSION)
3314 error = NFSERR_OPNOTINSESS;
3316 mp = nfscl_getmnt(minorvers, sessionid, cbident,
3319 error = NFSERR_SERVERFAULT;
3322 error = nfscl_ngetreopen(mp, nfhp->nfh_fh,
3323 nfhp->nfh_len, p, &np);
3328 NFSZERO_ATTRBIT(&rattrbits);
3330 dp = nfscl_finddeleg(clp, nfhp->nfh_fh,
3333 if (NFSISSET_ATTRBIT(&attrbits,
3336 va.va_size = np->n_size;
3340 NFSSETBIT_ATTRBIT(&rattrbits,
3343 if (NFSISSET_ATTRBIT(&attrbits,
3344 NFSATTRBIT_CHANGE)) {
3348 (np->n_flag & NDELEGMOD))
3350 NFSSETBIT_ATTRBIT(&rattrbits,
3354 error = NFSERR_SERVERFAULT;
3362 FREE((caddr_t)nfhp, M_NFSFH);
3364 (void) nfsv4_fillattr(nd, NULL, NULL, NULL, &va,
3365 NULL, 0, &rattrbits, NULL, p, 0, 0, 0, 0,
3368 case NFSV4OP_CBRECALL:
3369 NFSCL_DEBUG(4, "cbrecall\n");
3370 NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID +
3372 stateid.seqid = *tl++;
3373 NFSBCOPY((caddr_t)tl, (caddr_t)stateid.other,
3375 tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED);
3376 trunc = fxdr_unsigned(int, *tl);
3377 error = nfsm_getfh(nd, &nfhp);
3378 if (error == 0 && i == 0 &&
3379 minorvers != NFSV4_MINORVERSION)
3380 error = NFSERR_OPNOTINSESS;
3383 if (minorvers == NFSV4_MINORVERSION)
3384 clp = nfscl_getclnt(cbident);
3386 clp = nfscl_getclntsess(sessionid);
3388 dp = nfscl_finddeleg(clp, nfhp->nfh_fh,
3390 if (dp != NULL && (dp->nfsdl_flags &
3391 NFSCLDL_DELEGRET) == 0) {
3394 wakeup((caddr_t)clp);
3397 error = NFSERR_SERVERFAULT;
3402 FREE((caddr_t)nfhp, M_NFSFH);
3404 case NFSV4OP_CBLAYOUTRECALL:
3405 NFSCL_DEBUG(4, "cblayrec\n");
3407 NFSM_DISSECT(tl, uint32_t *, 4 * NFSX_UNSIGNED);
3408 laytype = fxdr_unsigned(int, *tl++);
3409 iomode = fxdr_unsigned(uint32_t, *tl++);
3410 if (newnfs_true == *tl++)
3414 recalltype = fxdr_unsigned(int, *tl);
3415 recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL,
3417 if (laytype != NFSLAYOUT_NFSV4_1_FILES)
3418 error = NFSERR_NOMATCHLAYOUT;
3419 else if (recalltype == NFSLAYOUTRETURN_FILE) {
3420 error = nfsm_getfh(nd, &nfhp);
3421 NFSCL_DEBUG(4, "retfile getfh=%d\n", error);
3424 NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_HYPER +
3426 off = fxdr_hyper(tl); tl += 2;
3427 len = fxdr_hyper(tl); tl += 2;
3428 stateid.seqid = fxdr_unsigned(uint32_t, *tl++);
3429 NFSBCOPY(tl, stateid.other, NFSX_STATEIDOTHER);
3430 if (minorvers == NFSV4_MINORVERSION)
3431 error = NFSERR_NOTSUPP;
3433 error = NFSERR_OPNOTINSESS;
3436 clp = nfscl_getclntsess(sessionid);
3437 NFSCL_DEBUG(4, "cbly clp=%p\n", clp);
3439 lyp = nfscl_findlayout(clp,
3442 NFSCL_DEBUG(4, "cblyp=%p\n",
3446 NFSLY_FILES) != 0 &&
3447 !NFSBCMP(stateid.other,
3448 lyp->nfsly_stateid.other,
3449 NFSX_STATEIDOTHER)) {
3463 NFSERR_NOMATCHLAYOUT;
3465 error = NFSERR_NOMATCHLAYOUT;
3468 free(nfhp, M_NFSFH);
3469 } else if (recalltype == NFSLAYOUTRETURN_FSID) {
3470 NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_HYPER);
3471 filesid[0] = fxdr_hyper(tl); tl += 2;
3472 filesid[1] = fxdr_hyper(tl); tl += 2;
3475 clp = nfscl_getclntsess(sessionid);
3477 TAILQ_FOREACH(lyp, &clp->nfsc_layout,
3479 if (lyp->nfsly_filesid[0] ==
3481 lyp->nfsly_filesid[1] ==
3488 lyp->nfsly_stateid.seqid,
3497 error = NFSERR_NOMATCHLAYOUT;
3499 error = NFSERR_NOMATCHLAYOUT;
3501 } else if (recalltype == NFSLAYOUTRETURN_ALL) {
3504 clp = nfscl_getclntsess(sessionid);
3506 TAILQ_FOREACH(lyp, &clp->nfsc_layout,
3508 error = nfscl_layoutrecall(
3509 recalltype, lyp, iomode, 0,
3511 lyp->nfsly_stateid.seqid,
3519 error = NFSERR_NOMATCHLAYOUT;
3521 error = NFSERR_NOMATCHLAYOUT;
3524 error = NFSERR_NOMATCHLAYOUT;
3525 if (recallp != NULL) {
3526 free(recallp, M_NFSLAYRECALL);
3530 case NFSV4OP_CBSEQUENCE:
3531 NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID +
3533 bcopy(tl, sessionid, NFSX_V4SESSIONID);
3534 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
3535 seqid = fxdr_unsigned(uint32_t, *tl++);
3536 slotid = fxdr_unsigned(uint32_t, *tl++);
3537 highslot = fxdr_unsigned(uint32_t, *tl++);
3539 /* Throw away the referring call stuff. */
3540 clist = fxdr_unsigned(int, *tl);
3541 for (j = 0; j < clist; j++) {
3542 NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID +
3544 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
3545 rcalls = fxdr_unsigned(int, *tl);
3546 for (k = 0; k < rcalls; k++) {
3547 NFSM_DISSECT(tl, uint32_t *,
3553 clp = nfscl_getclntsess(sessionid);
3555 error = NFSERR_SERVERFAULT;
3557 error = NFSERR_SEQUENCEPOS;
3559 tsep = nfsmnt_mdssession(clp->nfsc_nmp);
3560 error = nfsv4_seqsession(seqid, slotid,
3561 highslot, tsep->nfsess_cbslots, &rep,
3562 tsep->nfsess_backslots);
3565 if (error == 0 || error == NFSERR_REPLYFROMCACHE) {
3569 * Handle a reply for a retried
3570 * callback. The reply will be
3571 * re-inserted in the session cache
3572 * by the nfsv4_seqsess_cacherep() call below.
3575 KASSERT(error == NFSERR_REPLYFROMCACHE,
3576 ("cbsequence: non-NULL rep"));
3577 NFSCL_DEBUG(4, "Got cbretry\n");
3578 m_freem(nd->nd_mreq);
3583 NFSM_BUILD(tl, uint32_t *,
3584 NFSX_V4SESSIONID + 4 * NFSX_UNSIGNED);
3585 bcopy(sessionid, tl, NFSX_V4SESSIONID);
3586 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
3587 *tl++ = txdr_unsigned(seqid);
3588 *tl++ = txdr_unsigned(slotid);
3589 *tl++ = txdr_unsigned(NFSV4_CBSLOTS - 1);
3590 *tl = txdr_unsigned(NFSV4_CBSLOTS - 1);
3594 if (i == 0 && minorvers == NFSV41_MINORVERSION)
3595 error = NFSERR_OPNOTINSESS;
3597 NFSCL_DEBUG(1, "unsupp callback %d\n", op);
3598 error = NFSERR_NOTSUPP;
3603 if (error == EBADRPC || error == NFSERR_BADXDR) {
3604 nd->nd_repstat = NFSERR_BADXDR;
3606 nd->nd_repstat = error;
3611 if (nd->nd_repstat) {
3612 *repp = nfscl_errmap(nd, minorvers);
3615 *repp = 0; /* NFS4_OK */
3618 if (recallp != NULL)
3619 free(recallp, M_NFSLAYRECALL);
3621 if (error == EBADRPC || error == NFSERR_BADXDR)
3622 nd->nd_repstat = NFSERR_BADXDR;
3624 printf("nfsv4 comperr1=%d\n", error);
3627 NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
3631 *retopsp = txdr_unsigned(retops);
3633 *nd->nd_errp = nfscl_errmap(nd, minorvers);
3635 if (gotseq_ok != 0) {
3636 rep = m_copym(nd->nd_mreq, 0, M_COPYALL, M_WAITOK);
3638 clp = nfscl_getclntsess(sessionid);
3640 tsep = nfsmnt_mdssession(clp->nfsc_nmp);
3641 nfsv4_seqsess_cacherep(slotid, tsep->nfsess_cbslots,
3652 * Generate the next cbident value. Basically just increment a static value
3653 * and then check that it isn't already in the list, if it has wrapped around.
3656 nfscl_nextcbident(void)
3658 struct nfsclclient *clp;
3660 static u_int32_t nextcbident = 0;
3661 static int haswrapped = 0;
3664 if (nextcbident == 0)
3668 * Search the clientid list for one already using this cbident.
3673 LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
3674 if (clp->nfsc_cbident == nextcbident) {
3684 return (nextcbident);
3688 * Get the mount point related to a given cbident or session and busy it.
3691 nfscl_getmnt(int minorvers, uint8_t *sessionid, u_int32_t cbident,
3692 struct nfsclclient **clpp)
3694 struct nfsclclient *clp;
3697 struct nfsclsession *tsep;
3701 LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
3702 tsep = nfsmnt_mdssession(clp->nfsc_nmp);
3703 if (minorvers == NFSV4_MINORVERSION) {
3704 if (clp->nfsc_cbident == cbident)
3706 } else if (!NFSBCMP(tsep->nfsess_sessionid, sessionid,
3714 mp = clp->nfsc_nmp->nm_mountp;
3717 error = vfs_busy(mp, 0);
3726 * Get the clientid pointer related to a given cbident.
3728 static struct nfsclclient *
3729 nfscl_getclnt(u_int32_t cbident)
3731 struct nfsclclient *clp;
3733 LIST_FOREACH(clp, &nfsclhead, nfsc_list)
3734 if (clp->nfsc_cbident == cbident)
3740 * Get the clientid pointer related to a given sessionid.
3742 static struct nfsclclient *
3743 nfscl_getclntsess(uint8_t *sessionid)
3745 struct nfsclclient *clp;
3746 struct nfsclsession *tsep;
3748 LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
3749 tsep = nfsmnt_mdssession(clp->nfsc_nmp);
3750 if (!NFSBCMP(tsep->nfsess_sessionid, sessionid,
3758 * Search for a lock conflict locally on the client. A conflict occurs if
3759 * - not same owner and overlapping byte range and at least one of them is
3760 * a write lock or this is an unlock.
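 * For example, a request for a write lock on [0, 10) conflicts with an
 * existing read lock on [5, 15) held by a different lock owner, but not
 * with one held by the same owner.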
3763 nfscl_localconflict(struct nfsclclient *clp, u_int8_t *fhp, int fhlen,
3764 struct nfscllock *nlop, u_int8_t *own, struct nfscldeleg *dp,
3765 struct nfscllock **lopp)
3767 struct nfsclowner *owp;
3768 struct nfsclopen *op;
3772 ret = nfscl_checkconflict(&dp->nfsdl_lock, nlop, own, lopp);
3776 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3777 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3778 if (op->nfso_fhlen == fhlen &&
3779 !NFSBCMP(op->nfso_fh, fhp, fhlen)) {
3780 ret = nfscl_checkconflict(&op->nfso_lock, nlop,
3791 nfscl_checkconflict(struct nfscllockownerhead *lhp, struct nfscllock *nlop,
3792 u_int8_t *own, struct nfscllock **lopp)
3794 struct nfscllockowner *lp;
3795 struct nfscllock *lop;
3797 LIST_FOREACH(lp, lhp, nfsl_list) {
3798 if (NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
3799 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
3800 if (lop->nfslo_first >= nlop->nfslo_end)
3802 if (lop->nfslo_end <= nlop->nfslo_first)
3804 if (lop->nfslo_type == F_WRLCK ||
3805 nlop->nfslo_type == F_WRLCK ||
3806 nlop->nfslo_type == F_UNLCK) {
3809 return (NFSERR_DENIED);
3818 * Check for a local conflicting lock.
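 * If a conflict is found, the flock structure is filled in, much like
 * fcntl(F_GETLK), and -1 is returned to indicate that no RPC is needed.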
3821 nfscl_lockt(vnode_t vp, struct nfsclclient *clp, u_int64_t off,
3822 u_int64_t len, struct flock *fl, NFSPROC_T *p, void *id, int flags)
3824 struct nfscllock *lop, nlck;
3825 struct nfscldeleg *dp;
3827 u_int8_t own[NFSV4CL_LOCKNAMELEN];
3830 nlck.nfslo_type = fl->l_type;
3831 nlck.nfslo_first = off;
3832 if (len == NFS64BITSSET) {
3833 nlck.nfslo_end = NFS64BITSSET;
3835 nlck.nfslo_end = off + len;
3836 if (nlck.nfslo_end <= nlck.nfslo_first)
3837 return (NFSERR_INVAL);
3840 nfscl_filllockowner(id, own, flags);
3842 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
3843 error = nfscl_localconflict(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len,
3844 &nlck, own, dp, &lop);
3846 fl->l_whence = SEEK_SET;
3847 fl->l_start = lop->nfslo_first;
3848 if (lop->nfslo_end == NFS64BITSSET)
3851 fl->l_len = lop->nfslo_end - lop->nfslo_first;
3852 fl->l_pid = (pid_t)0;
3853 fl->l_type = lop->nfslo_type;
3854 error = -1; /* no RPC required */
3855 } else if (dp != NULL && ((dp->nfsdl_flags & NFSCLDL_WRITE) ||
3856 fl->l_type == F_RDLCK)) {
3858 * The delegation ensures that there isn't a conflicting
3859 * lock on the server, so return -1 to indicate that an RPC isn't required.
3862 fl->l_type = F_UNLCK;
3870 * Handle Recall of a delegation.
3871 * The clp must be exclusive locked when this is called.
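 * In outline: get a vnode for the file, flush dirty data for a write
 * delegation, move any locally issued opens and byte range locks over to
 * state held on the server, and leave the delegation ready to be returned
 * by the caller.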
3874 nfscl_recalldeleg(struct nfsclclient *clp, struct nfsmount *nmp,
3875 struct nfscldeleg *dp, vnode_t vp, struct ucred *cred, NFSPROC_T *p,
3876 int called_from_renewthread)
3878 struct nfsclowner *owp, *lowp, *nowp;
3879 struct nfsclopen *op, *lop;
3880 struct nfscllockowner *lp;
3881 struct nfscllock *lckp;
3883 int error = 0, ret, gotvp = 0;
3887 * First, get a vnode for the file. This is needed to do RPCs.
3889 ret = nfscl_ngetreopen(nmp->nm_mountp, dp->nfsdl_fh,
3890 dp->nfsdl_fhlen, p, &np);
3893 * File isn't open, so nothing to move over to the server.
3903 dp->nfsdl_flags &= ~NFSCLDL_MODTIMESET;
3906 * Ok, if it's a write delegation, flush data to the server, so
3907 * that close/open consistency is retained.
3911 if ((dp->nfsdl_flags & NFSCLDL_WRITE) && (np->n_flag & NMODIFIED)) {
3912 np->n_flag |= NDELEGRECALL;
3914 ret = ncl_flush(vp, MNT_WAIT, p, 1, called_from_renewthread);
3916 np->n_flag &= ~NDELEGRECALL;
3918 NFSINVALATTRCACHE(np);
3920 if (ret == EIO && called_from_renewthread != 0) {
3922 * If the flush failed with EIO for the renew thread,
3923 * return now, so that the dirty buffer will be flushed later.
3932 * Now, for each openowner with opens issued locally, move them
3933 * over to state against the server.
3935 LIST_FOREACH(lowp, &dp->nfsdl_owner, nfsow_list) {
3936 lop = LIST_FIRST(&lowp->nfsow_open);
3938 if (LIST_NEXT(lop, nfso_list) != NULL)
3939 panic("nfsdlg mult opens");
3941 * Look for the same openowner against the server.
3943 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3944 if (!NFSBCMP(lowp->nfsow_owner,
3945 owp->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
3946 newnfs_copycred(&dp->nfsdl_cred, cred);
3947 ret = nfscl_moveopen(vp, clp, nmp, lop,
3949 if (ret == NFSERR_STALECLIENTID ||
3950 ret == NFSERR_STALEDONTRECOVER ||
3951 ret == NFSERR_BADSESSION) {
3957 nfscl_freeopen(lop, 1);
3966 * If no openowner is found, create one and get an open for it.
3970 MALLOC(nowp, struct nfsclowner *,
3971 sizeof (struct nfsclowner), M_NFSCLOWNER,
3973 nfscl_newopen(clp, NULL, &owp, &nowp, &op,
3974 NULL, lowp->nfsow_owner, dp->nfsdl_fh,
3975 dp->nfsdl_fhlen, NULL, NULL);
3976 newnfs_copycred(&dp->nfsdl_cred, cred);
3977 ret = nfscl_moveopen(vp, clp, nmp, lop,
3980 nfscl_freeopenowner(owp, 0);
3981 if (ret == NFSERR_STALECLIENTID ||
3982 ret == NFSERR_STALEDONTRECOVER ||
3983 ret == NFSERR_BADSESSION) {
3989 nfscl_freeopen(lop, 1);
3999 * Now, get byte range locks for any locks done locally.
4001 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
4002 LIST_FOREACH(lckp, &lp->nfsl_lock, nfslo_list) {
4003 newnfs_copycred(&dp->nfsdl_cred, cred);
4004 ret = nfscl_relock(vp, clp, nmp, lp, lckp, cred, p);
4005 if (ret == NFSERR_STALESTATEID ||
4006 ret == NFSERR_STALEDONTRECOVER ||
4007 ret == NFSERR_STALECLIENTID ||
4008 ret == NFSERR_BADSESSION) {
4023 * Move a locally issued open over to an owner on the state list.
4024 * SIDE EFFECT: If it needs to sleep (do an rpc), it unlocks clstate and
4025 * returns with it unlocked.
4028 nfscl_moveopen(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
4029 struct nfsclopen *lop, struct nfsclowner *owp, struct nfscldeleg *dp,
4030 struct ucred *cred, NFSPROC_T *p)
4032 struct nfsclopen *op, *nop;
4033 struct nfscldeleg *ndp;
4035 int error = 0, newone;
4038 * First, look for an appropriate open. If found, just increment the open count.
4041 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
4042 if ((op->nfso_mode & lop->nfso_mode) == lop->nfso_mode &&
4043 op->nfso_fhlen == lop->nfso_fhlen &&
4044 !NFSBCMP(op->nfso_fh, lop->nfso_fh, op->nfso_fhlen)) {
4045 op->nfso_opencnt += lop->nfso_opencnt;
4046 nfscl_freeopen(lop, 1);
4051 /* No appropriate open, so we have to do one against the server. */
4053 MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) +
4054 lop->nfso_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
4056 nfscl_newopen(clp, NULL, &owp, NULL, &op, &nop, owp->nfsow_owner,
4057 lop->nfso_fh, lop->nfso_fhlen, cred, &newone);
4059 error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data, np->n_v4->n4_fhlen,
4060 lop->nfso_fh, lop->nfso_fhlen, lop->nfso_mode, op,
4061 NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, &ndp, 0, 0, cred, p);
4064 nfscl_freeopen(op, 0);
4066 op->nfso_mode |= lop->nfso_mode;
4067 op->nfso_opencnt += lop->nfso_opencnt;
4068 nfscl_freeopen(lop, 1);
4071 FREE((caddr_t)nop, M_NFSCLOPEN);
4074 * What should I do with the returned delegation, since the
4075 * delegation is being recalled? For now, just printf and
4078 printf("Moveopen returned deleg\n");
4079 FREE((caddr_t)ndp, M_NFSCLDELEG);
4085 * Recall all delegations on this client.
4088 nfscl_totalrecall(struct nfsclclient *clp)
4090 struct nfscldeleg *dp;
4092 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
4093 if ((dp->nfsdl_flags & NFSCLDL_DELEGRET) == 0)
4094 dp->nfsdl_flags |= NFSCLDL_RECALL;
4099 * Relock byte ranges. Called for delegation recall and state expiry.
4102 nfscl_relock(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
4103 struct nfscllockowner *lp, struct nfscllock *lop, struct ucred *cred,
4106 struct nfscllockowner *nlp;
4109 u_int32_t clidrev = 0;
4110 int error, newone, donelocally;
4112 off = lop->nfslo_first;
4113 len = lop->nfslo_end - lop->nfslo_first;
4114 error = nfscl_getbytelock(vp, off, len, lop->nfslo_type, cred, p,
4115 clp, 1, NULL, lp->nfsl_lockflags, lp->nfsl_owner,
4116 lp->nfsl_openowner, &nlp, &newone, &donelocally);
4117 if (error || donelocally)
4119 if (nmp->nm_clp != NULL)
4120 clidrev = nmp->nm_clp->nfsc_clientidrev;
4123 nfhp = VTONFS(vp)->n_fhp;
4124 error = nfscl_trylock(nmp, vp, nfhp->nfh_fh,
4125 nfhp->nfh_len, nlp, newone, 0, off,
4126 len, lop->nfslo_type, cred, p);
4128 nfscl_freelockowner(nlp, 0);
4133 * Called to re-open a file. Basically get a vnode for the file handle
4134 * and then call nfsrpc_openrpc() to do the rest.
4137 nfsrpc_reopen(struct nfsmount *nmp, u_int8_t *fhp, int fhlen,
4138 u_int32_t mode, struct nfsclopen *op, struct nfscldeleg **dpp,
4139 struct ucred *cred, NFSPROC_T *p)
4145 error = nfscl_ngetreopen(nmp->nm_mountp, fhp, fhlen, p, &np);
4149 if (np->n_v4 != NULL) {
4150 error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data,
4151 np->n_v4->n4_fhlen, fhp, fhlen, mode, op,
4152 NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, dpp, 0, 0,
4162 * Try an open against the server. Just call nfsrpc_openrpc(), retrying while
4163 * NFSERR_DELAY. Also, try system credentials, if the passed in credentials fail.
4167 nfscl_tryopen(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen,
4168 u_int8_t *newfhp, int newfhlen, u_int32_t mode, struct nfsclopen *op,
4169 u_int8_t *name, int namelen, struct nfscldeleg **ndpp,
4170 int reclaim, u_int32_t delegtype, struct ucred *cred, NFSPROC_T *p)
4175 error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, newfhlen,
4176 mode, op, name, namelen, ndpp, reclaim, delegtype, cred, p,
4178 if (error == NFSERR_DELAY)
4179 (void) nfs_catnap(PZERO, error, "nfstryop");
4180 } while (error == NFSERR_DELAY);
4181 if (error == EAUTH || error == EACCES) {
4182 /* Try again using system credentials */
4183 newnfs_setroot(cred);
4185 error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp,
4186 newfhlen, mode, op, name, namelen, ndpp, reclaim,
4187 delegtype, cred, p, 1, 0);
4188 if (error == NFSERR_DELAY)
4189 (void) nfs_catnap(PZERO, error, "nfstryop");
4190 } while (error == NFSERR_DELAY);
4196 * Try a byte range lock. Just loop on nfsrpc_lock() while it returns
4197 * NFSERR_DELAY. Also, retry with system credentials, if the provided ones fail.
4201 nfscl_trylock(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp,
4202 int fhlen, struct nfscllockowner *nlp, int newone, int reclaim,
4203 u_int64_t off, u_int64_t len, short type, struct ucred *cred, NFSPROC_T *p)
4205 struct nfsrv_descript nfsd, *nd = &nfsd;
4209 error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, newone,
4210 reclaim, off, len, type, cred, p, 0);
4211 if (!error && nd->nd_repstat == NFSERR_DELAY)
4212 (void) nfs_catnap(PZERO, (int)nd->nd_repstat,
4214 } while (!error && nd->nd_repstat == NFSERR_DELAY);
4216 error = nd->nd_repstat;
4217 if (error == EAUTH || error == EACCES) {
4218 /* Try again using root credentials */
4219 newnfs_setroot(cred);
4221 error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp,
4222 newone, reclaim, off, len, type, cred, p, 1);
4223 if (!error && nd->nd_repstat == NFSERR_DELAY)
4224 (void) nfs_catnap(PZERO, (int)nd->nd_repstat,
4226 } while (!error && nd->nd_repstat == NFSERR_DELAY);
4228 error = nd->nd_repstat;
4234 * Try a delegreturn against the server. Just call nfsrpc_delegreturn(),
4235 * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in credentials fail.
4239 nfscl_trydelegreturn(struct nfscldeleg *dp, struct ucred *cred,
4240 struct nfsmount *nmp, NFSPROC_T *p)
4245 error = nfsrpc_delegreturn(dp, cred, nmp, p, 0);
4246 if (error == NFSERR_DELAY)
4247 (void) nfs_catnap(PZERO, error, "nfstrydp");
4248 } while (error == NFSERR_DELAY);
4249 if (error == EAUTH || error == EACCES) {
4250 /* Try again using system credentials */
4251 newnfs_setroot(cred);
4253 error = nfsrpc_delegreturn(dp, cred, nmp, p, 1);
4254 if (error == NFSERR_DELAY)
4255 (void) nfs_catnap(PZERO, error, "nfstrydp");
4256 } while (error == NFSERR_DELAY);
4262 * Try a close against the server. Just call nfsrpc_closerpc(),
4263 * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in credentials fail.
4267 nfscl_tryclose(struct nfsclopen *op, struct ucred *cred,
4268 struct nfsmount *nmp, NFSPROC_T *p)
4270 struct nfsrv_descript nfsd, *nd = &nfsd;
4274 error = nfsrpc_closerpc(nd, nmp, op, cred, p, 0);
4275 if (error == NFSERR_DELAY)
4276 (void) nfs_catnap(PZERO, error, "nfstrycl");
4277 } while (error == NFSERR_DELAY);
4278 if (error == EAUTH || error == EACCES) {
4279 /* Try again using system credentials */
4280 newnfs_setroot(cred);
4282 error = nfsrpc_closerpc(nd, nmp, op, cred, p, 1);
4283 if (error == NFSERR_DELAY)
4284 (void) nfs_catnap(PZERO, error, "nfstrycl");
4285 } while (error == NFSERR_DELAY);
4291 * Decide if a delegation on a file permits close without flushing writes
4292 * to the server. This might be a big performance win in some environments.
4293 * (Not useful until the client does caching on local stable storage.)
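 * A flush can be skipped when a write delegation is held that is not being
 * recalled or returned and, when strict RFC 3530 conformance is enabled,
 * whose size limit covers the file size.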
4296 nfscl_mustflush(vnode_t vp)
4298 struct nfsclclient *clp;
4299 struct nfscldeleg *dp;
4301 struct nfsmount *nmp;
4304 nmp = VFSTONFS(vnode_mount(vp));
4305 if (!NFSHASNFSV4(nmp))
4308 clp = nfscl_findcl(nmp);
4313 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4314 if (dp != NULL && (dp->nfsdl_flags &
4315 (NFSCLDL_WRITE | NFSCLDL_RECALL | NFSCLDL_DELEGRET)) ==
4317 (dp->nfsdl_sizelimit >= np->n_size ||
4318 !NFSHASSTRICT3530(nmp))) {
4327 * See if a (write) delegation exists for this file.
4330 nfscl_nodeleg(vnode_t vp, int writedeleg)
4332 struct nfsclclient *clp;
4333 struct nfscldeleg *dp;
4335 struct nfsmount *nmp;
4338 nmp = VFSTONFS(vnode_mount(vp));
4339 if (!NFSHASNFSV4(nmp))
4342 clp = nfscl_findcl(nmp);
4347 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4349 (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == 0 &&
4350 (writedeleg == 0 || (dp->nfsdl_flags & NFSCLDL_WRITE) ==
4360 * Look for an associated delegation that should be DelegReturned.
4363 nfscl_removedeleg(vnode_t vp, NFSPROC_T *p, nfsv4stateid_t *stp)
4365 struct nfsclclient *clp;
4366 struct nfscldeleg *dp;
4367 struct nfsclowner *owp;
4368 struct nfscllockowner *lp;
4369 struct nfsmount *nmp;
4372 int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;
4374 nmp = VFSTONFS(vnode_mount(vp));
4378 * Loop around waiting for:
4379 * - outstanding I/O operations on delegations to complete
4380 * - for a delegation on vp that has state, lock the client and do a recall
4382 * - return delegation with no state
4385 clp = nfscl_findcl(nmp);
4390 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
4391 np->n_fhp->nfh_len);
4394 * Wait for outstanding I/O ops to be done.
4396 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
4398 nfsv4_unlock(&clp->nfsc_lock, 0);
4401 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
4402 (void) nfsmsleep(&dp->nfsdl_rwlock,
4403 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
4407 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
4408 if (!LIST_EMPTY(&owp->nfsow_open)) {
4414 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
4415 if (!LIST_EMPTY(&lp->nfsl_lock)) {
4421 if (needsrecall && !triedrecall) {
4422 dp->nfsdl_flags |= NFSCLDL_DELEGRET;
4425 igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
4426 &islept, NFSCLSTATEMUTEXPTR, NULL);
4433 cred = newnfs_getcred();
4434 newnfs_copycred(&dp->nfsdl_cred, cred);
4435 (void) nfscl_recalldeleg(clp, nmp, dp, vp, cred, p, 0);
4439 nfsv4_unlock(&clp->nfsc_lock, 0);
4443 *stp = dp->nfsdl_stateid;
4445 nfscl_cleandeleg(dp);
4446 nfscl_freedeleg(&clp->nfsc_deleg, dp);
4449 nfsv4_unlock(&clp->nfsc_lock, 0);
4456 * Look for associated delegation(s) that should be DelegReturned.
4459 nfscl_renamedeleg(vnode_t fvp, nfsv4stateid_t *fstp, int *gotfdp, vnode_t tvp,
4460 nfsv4stateid_t *tstp, int *gottdp, NFSPROC_T *p)
4462 struct nfsclclient *clp;
4463 struct nfscldeleg *dp;
4464 struct nfsclowner *owp;
4465 struct nfscllockowner *lp;
4466 struct nfsmount *nmp;
4469 int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;
4471 nmp = VFSTONFS(vnode_mount(fvp));
4476 * Loop around waiting for:
4477 * - outstanding I/O operations on delegations to complete
4478 * - for a delegation on fvp that has state, lock the client and do a recall
4480 * - return delegation(s) with no state.
4483 clp = nfscl_findcl(nmp);
4489 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
4490 np->n_fhp->nfh_len);
4491 if (dp != NULL && *gotfdp == 0) {
4493 * Wait for outstanding I/O ops to be done.
4495 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
4497 nfsv4_unlock(&clp->nfsc_lock, 0);
4500 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
4501 (void) nfsmsleep(&dp->nfsdl_rwlock,
4502 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
4506 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
4507 if (!LIST_EMPTY(&owp->nfsow_open)) {
4513 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
4514 if (!LIST_EMPTY(&lp->nfsl_lock)) {
4520 if (needsrecall && !triedrecall) {
4521 dp->nfsdl_flags |= NFSCLDL_DELEGRET;
4524 igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
4525 &islept, NFSCLSTATEMUTEXPTR, NULL);
4532 cred = newnfs_getcred();
4533 newnfs_copycred(&dp->nfsdl_cred, cred);
4534 (void) nfscl_recalldeleg(clp, nmp, dp, fvp, cred, p, 0);
4538 nfsv4_unlock(&clp->nfsc_lock, 0);
4542 *fstp = dp->nfsdl_stateid;
4545 nfscl_cleandeleg(dp);
4546 nfscl_freedeleg(&clp->nfsc_deleg, dp);
4549 nfsv4_unlock(&clp->nfsc_lock, 0);
4554 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
4555 np->n_fhp->nfh_len);
4556 if (dp != NULL && *gottdp == 0) {
4558 * Wait for outstanding I/O ops to be done.
4560 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
4561 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
4562 (void) nfsmsleep(&dp->nfsdl_rwlock,
4563 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
4566 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
4567 if (!LIST_EMPTY(&owp->nfsow_open)) {
4572 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
4573 if (!LIST_EMPTY(&lp->nfsl_lock)) {
4578 *tstp = dp->nfsdl_stateid;
4581 nfscl_cleandeleg(dp);
4582 nfscl_freedeleg(&clp->nfsc_deleg, dp);
4591 * Get a reference on the clientid associated with the mount point.
4592 * Return 1 if success, 0 otherwise.
4595 nfscl_getref(struct nfsmount *nmp)
4597 struct nfsclclient *clp;
4600 clp = nfscl_findcl(nmp);
4605 nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, NULL);
4611 * Release a reference on a clientid acquired with the above call.
4614 nfscl_relref(struct nfsmount *nmp)
4616 struct nfsclclient *clp;
4619 clp = nfscl_findcl(nmp);
4624 nfsv4_relref(&clp->nfsc_lock);
4629 * Save the size attribute in the delegation, since the nfsnode is going away.
4633 nfscl_reclaimnode(vnode_t vp)
4635 struct nfsclclient *clp;
4636 struct nfscldeleg *dp;
4637 struct nfsnode *np = VTONFS(vp);
4638 struct nfsmount *nmp;
4640 nmp = VFSTONFS(vnode_mount(vp));
4641 if (!NFSHASNFSV4(nmp))
4644 clp = nfscl_findcl(nmp);
4649 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4650 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
4651 dp->nfsdl_size = np->n_size;
4656 * Get the saved size attribute in the delegation, since it is a
4657 * newly allocated nfsnode.
4660 nfscl_newnode(vnode_t vp)
4662 struct nfsclclient *clp;
4663 struct nfscldeleg *dp;
4664 struct nfsnode *np = VTONFS(vp);
4665 struct nfsmount *nmp;
4667 nmp = VFSTONFS(vnode_mount(vp));
4668 if (!NFSHASNFSV4(nmp))
4671 clp = nfscl_findcl(nmp);
4676 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4677 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
4678 np->n_size = dp->nfsdl_size;
4683 * If there is a valid write delegation for this file, set the modtime
4684 * to the local clock time.
4687 nfscl_delegmodtime(vnode_t vp)
4689 struct nfsclclient *clp;
4690 struct nfscldeleg *dp;
4691 struct nfsnode *np = VTONFS(vp);
4692 struct nfsmount *nmp;
4694 nmp = VFSTONFS(vnode_mount(vp));
4695 if (!NFSHASNFSV4(nmp))
4698 clp = nfscl_findcl(nmp);
4703 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4704 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) {
4705 nanotime(&dp->nfsdl_modtime);
4706 dp->nfsdl_flags |= NFSCLDL_MODTIMESET;
4712 * If there is a valid write delegation for this file with a modtime set,
4713 * put that modtime in mtime.
4716 nfscl_deleggetmodtime(vnode_t vp, struct timespec *mtime)
4718 struct nfsclclient *clp;
4719 struct nfscldeleg *dp;
4720 struct nfsnode *np = VTONFS(vp);
4721 struct nfsmount *nmp;
4723 nmp = VFSTONFS(vnode_mount(vp));
4724 if (!NFSHASNFSV4(nmp))
4727 clp = nfscl_findcl(nmp);
4732 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4734 (dp->nfsdl_flags & (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) ==
4735 (NFSCLDL_WRITE | NFSCLDL_MODTIMESET))
4736 *mtime = dp->nfsdl_modtime;
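/*
 * Map a callback compound error into the value to send on the wire,
 * consulting the per-operation callback error map where one applies.
 */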
4741 nfscl_errmap(struct nfsrv_descript *nd, u_int32_t minorvers)
4743 short *defaulterrp, *errp;
4745 if (!nd->nd_repstat)
4747 if (nd->nd_procnum == NFSPROC_NOOP)
4748 return (txdr_unsigned(nd->nd_repstat & 0xffff));
4749 if (nd->nd_repstat == EBADRPC)
4750 return (txdr_unsigned(NFSERR_BADXDR));
4751 if (nd->nd_repstat == NFSERR_MINORVERMISMATCH ||
4752 nd->nd_repstat == NFSERR_OPILLEGAL)
4753 return (txdr_unsigned(nd->nd_repstat));
4754 if (nd->nd_repstat >= NFSERR_BADIOMODE && nd->nd_repstat < 20000 &&
4755 minorvers > NFSV4_MINORVERSION) {
4756 /* NFSv4.n error. */
4757 return (txdr_unsigned(nd->nd_repstat));
4759 if (nd->nd_procnum < NFSV4OP_CBNOPS)
4760 errp = defaulterrp = nfscl_cberrmap[nd->nd_procnum];
4762 return (txdr_unsigned(nd->nd_repstat));
4764 if (*errp == (short)nd->nd_repstat)
4765 return (txdr_unsigned(nd->nd_repstat));
4766 return (txdr_unsigned(*defaulterrp));
4770 * Called to find/add a layout to a client.
4771 * This function returns the layout with a refcnt (shared lock) upon
4772 * success (returns 0) or with no lock/refcnt on the layout when an
4773 * error is returned.
4774 * If a layout is passed in via lypp, it is locked (exclusively locked).
4777 nfscl_layout(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen,
4778 nfsv4stateid_t *stateidp, int retonclose,
4779 struct nfsclflayouthead *fhlp, struct nfscllayout **lypp,
4780 struct ucred *cred, NFSPROC_T *p)
4782 struct nfsclclient *clp;
4783 struct nfscllayout *lyp, *tlyp;
4784 struct nfsclflayout *flp;
4785 struct nfsnode *np = VTONFS(vp);
4787 int layout_passed_in;
4789 mp = nmp->nm_mountp;
4790 layout_passed_in = 1;
4794 layout_passed_in = 0;
4795 tlyp = malloc(sizeof(*tlyp) + fhlen - 1, M_NFSLAYOUT,
4802 if (layout_passed_in != 0)
4803 nfsv4_unlock(&lyp->nfsly_lock, 0);
4806 free(tlyp, M_NFSLAYOUT);
4811 * Although no lyp was passed in, another thread might have
4812 * allocated one. If one is found, just increment its ref
4813 * count and return it.
4815 lyp = nfscl_findlayout(clp, fhp, fhlen);
4819 lyp->nfsly_stateid.seqid = stateidp->seqid;
4820 lyp->nfsly_stateid.other[0] = stateidp->other[0];
4821 lyp->nfsly_stateid.other[1] = stateidp->other[1];
4822 lyp->nfsly_stateid.other[2] = stateidp->other[2];
4823 lyp->nfsly_lastbyte = 0;
4824 LIST_INIT(&lyp->nfsly_flayread);
4825 LIST_INIT(&lyp->nfsly_flayrw);
4826 LIST_INIT(&lyp->nfsly_recall);
4827 lyp->nfsly_filesid[0] = np->n_vattr.na_filesid[0];
4828 lyp->nfsly_filesid[1] = np->n_vattr.na_filesid[1];
4829 lyp->nfsly_clp = clp;
4830 lyp->nfsly_flags = (retonclose != 0) ?
4831 (NFSLY_FILES | NFSLY_RETONCLOSE) : NFSLY_FILES;
4832 lyp->nfsly_fhlen = fhlen;
4833 NFSBCOPY(fhp, lyp->nfsly_fh, fhlen);
4834 TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
4835 LIST_INSERT_HEAD(NFSCLLAYOUTHASH(clp, fhp, fhlen), lyp,
4837 lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
4840 if (retonclose != 0)
4841 lyp->nfsly_flags |= NFSLY_RETONCLOSE;
4842 TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list);
4843 TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
4844 lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
4846 nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
4847 if (NFSCL_FORCEDISM(mp)) {
4850 free(tlyp, M_NFSLAYOUT);
4855 lyp->nfsly_stateid.seqid = stateidp->seqid;
4857 /* Merge the new list of File Layouts into the list. */
4858 flp = LIST_FIRST(fhlp);
4860 if (flp->nfsfl_iomode == NFSLAYOUTIOMODE_READ)
4861 nfscl_mergeflayouts(&lyp->nfsly_flayread, fhlp);
4863 nfscl_mergeflayouts(&lyp->nfsly_flayrw, fhlp);
4865 if (layout_passed_in != 0)
4866 nfsv4_unlock(&lyp->nfsly_lock, 1);
4869 free(tlyp, M_NFSLAYOUT);

/*
 * Search for a layout by MDS file handle.
 * If one is found, it is returned with a refcnt (shared lock) iff
 * retflpp returned non-NULL and locked (exclusive locked) iff retflpp is
 * NULL.
 */
struct nfscllayout *
nfscl_getlayout(struct nfsclclient *clp, uint8_t *fhp, int fhlen,
    uint64_t off, struct nfsclflayout **retflpp, int *recalledp)
{
	struct nfscllayout *lyp;
	mount_t mp;
	int error, igotlock;

	mp = clp->nfsc_nmp->nm_mountp;
	*recalledp = 0;
	*retflpp = NULL;
	NFSLOCKCLSTATE();
	lyp = nfscl_findlayout(clp, fhp, fhlen);
	if (lyp != NULL) {
		if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) {
			TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list);
			TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
			lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
			error = nfscl_findlayoutforio(lyp, off,
			    NFSV4OPEN_ACCESSREAD, retflpp);
			if (error == 0)
				nfsv4_getref(&lyp->nfsly_lock, NULL,
				    NFSCLSTATEMUTEXPTR, mp);
			else {
				do {
					igotlock = nfsv4_lock(&lyp->nfsly_lock,
					    1, NULL, NFSCLSTATEMUTEXPTR, mp);
				} while (igotlock == 0 && !NFSCL_FORCEDISM(mp));
				*retflpp = NULL;
			}
			if (NFSCL_FORCEDISM(mp)) {
				lyp = NULL;
				*recalledp = 1;
			}
		} else {
			lyp = NULL;
			*recalledp = 1;
		}
	}
	NFSUNLOCKCLSTATE();
	return (lyp);
}
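
/*
 * Illustrative usage (not from the original source): an I/O path that
 * already holds the MDS file handle calls nfscl_getlayout() with the
 * file offset; if a file layout segment covers that offset, the layout
 * comes back with a shared reference and *retflpp set, otherwise it
 * comes back exclusively locked with *retflpp NULL so the caller can
 * acquire a new segment (e.g. via LAYOUTGET) before unlocking.
 */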

/*
 * Search for a layout by MDS file handle. If one is found, mark it to be
 * recalled, if it is already marked "return on close".
 */
static void
nfscl_retoncloselayout(vnode_t vp, struct nfsclclient *clp, uint8_t *fhp,
    int fhlen, struct nfsclrecalllayout **recallpp)
{
	struct nfscllayout *lyp;
	uint32_t iomode;

	if (vp->v_type != VREG || !NFSHASPNFS(VFSTONFS(vnode_mount(vp))) ||
	    nfscl_enablecallb == 0 || nfs_numnfscbd == 0 ||
	    (VTONFS(vp)->n_flag & NNOLAYOUT) != 0)
		return;
	lyp = nfscl_findlayout(clp, fhp, fhlen);
	if (lyp != NULL && (lyp->nfsly_flags & (NFSLY_RETONCLOSE |
	    NFSLY_RECALL)) == NFSLY_RETONCLOSE) {
		iomode = 0;
		if (!LIST_EMPTY(&lyp->nfsly_flayread))
			iomode |= NFSLAYOUTIOMODE_READ;
		if (!LIST_EMPTY(&lyp->nfsly_flayrw))
			iomode |= NFSLAYOUTIOMODE_RW;
		(void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, lyp, iomode,
		    0, UINT64_MAX, lyp->nfsly_stateid.seqid, *recallpp);
		NFSCL_DEBUG(4, "retoncls recall iomode=%d\n", iomode);
		*recallpp = NULL;
	}
}

/*
 * Dereference a layout.
 */
void
nfscl_rellayout(struct nfscllayout *lyp, int exclocked)
{

	NFSLOCKCLSTATE();
	if (exclocked != 0)
		nfsv4_unlock(&lyp->nfsly_lock, 0);
	else
		nfsv4_relref(&lyp->nfsly_lock);
	NFSUNLOCKCLSTATE();
}

/*
 * Search for a devinfo by deviceid. If one is found, return it after
 * acquiring a reference count on it.
 */
struct nfscldevinfo *
nfscl_getdevinfo(struct nfsclclient *clp, uint8_t *deviceid,
    struct nfscldevinfo *dip)
{

	NFSLOCKCLSTATE();
	if (dip == NULL)
		dip = nfscl_finddevinfo(clp, deviceid);
	if (dip != NULL)
		dip->nfsdi_refcnt++;
	NFSUNLOCKCLSTATE();
	return (dip);
}

/*
 * Dereference a devinfo structure.
 */
static void
nfscl_reldevinfo_locked(struct nfscldevinfo *dip)
{

	dip->nfsdi_refcnt--;
	if (dip->nfsdi_refcnt == 0)
		wakeup(&dip->nfsdi_refcnt);
}

/*
 * Dereference a devinfo structure.
 */
void
nfscl_reldevinfo(struct nfscldevinfo *dip)
{

	NFSLOCKCLSTATE();
	nfscl_reldevinfo_locked(dip);
	NFSUNLOCKCLSTATE();
}

/*
 * Find a layout for this file handle. Return NULL upon failure.
 */
static struct nfscllayout *
nfscl_findlayout(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
{
	struct nfscllayout *lyp;

	LIST_FOREACH(lyp, NFSCLLAYOUTHASH(clp, fhp, fhlen), nfsly_hash)
		if (lyp->nfsly_fhlen == fhlen &&
		    !NFSBCMP(lyp->nfsly_fh, fhp, fhlen))
			break;
	return (lyp);
}

/*
 * Find a devinfo for this deviceid. Return NULL upon failure.
 */
static struct nfscldevinfo *
nfscl_finddevinfo(struct nfsclclient *clp, uint8_t *deviceid)
{
	struct nfscldevinfo *dip;

	LIST_FOREACH(dip, &clp->nfsc_devinfo, nfsdi_list)
		if (NFSBCMP(dip->nfsdi_deviceid, deviceid, NFSX_V4DEVICEID)
		    == 0)
			break;
	return (dip);
}

/*
 * Merge the new file layout list into the main one, maintaining it in
 * increasing offset order.
 */
static void
nfscl_mergeflayouts(struct nfsclflayouthead *fhlp,
    struct nfsclflayouthead *newfhlp)
{
	struct nfsclflayout *flp, *nflp, *prevflp, *tflp;

	flp = LIST_FIRST(fhlp);
	prevflp = NULL;
	LIST_FOREACH_SAFE(nflp, newfhlp, nfsfl_list, tflp) {
		while (flp != NULL && flp->nfsfl_off < nflp->nfsfl_off) {
			prevflp = flp;
			flp = LIST_NEXT(flp, nfsfl_list);
		}
		if (prevflp == NULL)
			LIST_INSERT_HEAD(fhlp, nflp, nfsfl_list);
		else
			LIST_INSERT_AFTER(prevflp, nflp, nfsfl_list);
		prevflp = nflp;
	}
}
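
/*
 * Illustrative example (not from the original source), assuming both
 * lists are already in increasing offset order: merging new segments at
 * offsets 1M and 3M into a main list holding segments at offsets 0 and
 * 2M yields the order 0, 1M, 2M, 3M, each new segment landing after the
 * last existing segment with a smaller offset.
 */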

/*
 * Add this nfscldevinfo to the client, if it doesn't already exist.
 * This function consumes the structure pointed at by dip, if not NULL.
 */
int
nfscl_adddevinfo(struct nfsmount *nmp, struct nfscldevinfo *dip,
    struct nfsclflayout *flp)
{
	struct nfsclclient *clp;
	struct nfscldevinfo *tdip;

	NFSLOCKCLSTATE();
	clp = nmp->nm_clp;
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		if (dip != NULL)
			free(dip, M_NFSDEVINFO);
		return (ENODEV);
	}
	tdip = nfscl_finddevinfo(clp, flp->nfsfl_dev);
	if (tdip != NULL) {
		tdip->nfsdi_layoutrefs++;
		flp->nfsfl_devp = tdip;
		nfscl_reldevinfo_locked(tdip);
		NFSUNLOCKCLSTATE();
		if (dip != NULL)
			free(dip, M_NFSDEVINFO);
		return (0);
	}
	if (dip != NULL) {
		LIST_INSERT_HEAD(&clp->nfsc_devinfo, dip, nfsdi_list);
		dip->nfsdi_layoutrefs = 1;
		flp->nfsfl_devp = dip;
	}
	NFSUNLOCKCLSTATE();
	if (dip == NULL)
		return (ENODEV);
	return (0);
}
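
/*
 * Illustrative outcomes (not from the original source): if a devinfo
 * matching flp->nfsfl_dev is already present, its nfsdi_layoutrefs is
 * bumped and any dip passed in is freed; if none is present and
 * dip != NULL, dip is linked in and keeps the layout reference; if none
 * is present and dip == NULL, an error is returned so the caller can
 * fetch the device info from the server and retry.
 */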

/*
 * Free up a layout structure and associated file layout structure(s).
 */
void
nfscl_freelayout(struct nfscllayout *layp)
{
	struct nfsclflayout *flp, *nflp;
	struct nfsclrecalllayout *rp, *nrp;

	LIST_FOREACH_SAFE(flp, &layp->nfsly_flayread, nfsfl_list, nflp) {
		LIST_REMOVE(flp, nfsfl_list);
		nfscl_freeflayout(flp);
	}
	LIST_FOREACH_SAFE(flp, &layp->nfsly_flayrw, nfsfl_list, nflp) {
		LIST_REMOVE(flp, nfsfl_list);
		nfscl_freeflayout(flp);
	}
	LIST_FOREACH_SAFE(rp, &layp->nfsly_recall, nfsrecly_list, nrp) {
		LIST_REMOVE(rp, nfsrecly_list);
		free(rp, M_NFSLAYRECALL);
	}
	free(layp, M_NFSLAYOUT);
}

/*
 * Free up a file layout structure.
 */
void
nfscl_freeflayout(struct nfsclflayout *flp)
{
	int i;

	for (i = 0; i < flp->nfsfl_fhcnt; i++)
		free(flp->nfsfl_fh[i], M_NFSFH);
	if (flp->nfsfl_devp != NULL)
		flp->nfsfl_devp->nfsdi_layoutrefs--;
	free(flp, M_NFSFLAYOUT);
}

/*
 * Free up a file layout devinfo structure.
 */
void
nfscl_freedevinfo(struct nfscldevinfo *dip)
{

	free(dip, M_NFSDEVINFO);
}

/*
 * Mark any layouts that match as recalled.
 */
static int
nfscl_layoutrecall(int recalltype, struct nfscllayout *lyp, uint32_t iomode,
    uint64_t off, uint64_t len, uint32_t stateseqid,
    struct nfsclrecalllayout *recallp)
{
	struct nfsclrecalllayout *rp, *orp;

	recallp->nfsrecly_recalltype = recalltype;
	recallp->nfsrecly_iomode = iomode;
	recallp->nfsrecly_stateseqid = stateseqid;
	recallp->nfsrecly_off = off;
	recallp->nfsrecly_len = len;
	/*
	 * Order the list as file returns first, followed by fsid and any
	 * returns, both in increasing stateseqid order.
	 * Note that the seqids wrap around, so 1 is after 0xffffffff.
	 * (I'm not sure this is correct because I find RFC5661 confusing
	 *  on this, but hopefully it will work ok.)
	 */
	orp = NULL;
	LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) {
		orp = rp;
		if ((recalltype == NFSLAYOUTRETURN_FILE &&
		     (rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE ||
		      nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) ||
		    (recalltype != NFSLAYOUTRETURN_FILE &&
		     rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE &&
		     nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) {
			LIST_INSERT_BEFORE(rp, recallp, nfsrecly_list);
			break;
		}
	}
	if (rp == NULL) {
		if (orp == NULL)
			LIST_INSERT_HEAD(&lyp->nfsly_recall, recallp,
			    nfsrecly_list);
		else
			LIST_INSERT_AFTER(orp, recallp, nfsrecly_list);
	}
	lyp->nfsly_flags |= NFSLY_RECALL;
	return (0);
}
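
/*
 * Illustrative example (not from the original source): with recalls
 * (FILE, seqid 5) and (FSID, seqid 7) already queued, inserting a FILE
 * recall with seqid 6 places it after the FILE/seqid 5 entry and before
 * the FSID entry, keeping file returns first and each group in
 * increasing (wrap-aware) stateseqid order.
 */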

/*
 * Compare the two seqids for ordering. The trick is that the seqids can
 * wrap around from 0xffffffff->0, so check for the cases where one
 * has wrapped around.
 * Return 1 if seqid1 comes before seqid2, 0 otherwise.
 */
static int
nfscl_seq(uint32_t seqid1, uint32_t seqid2)
{

	if (seqid2 > seqid1 && (seqid2 - seqid1) >= 0x7fffffff)
		/* seqid2 has wrapped around. */
		return (0);
	if (seqid1 > seqid2 && (seqid1 - seqid2) >= 0x7fffffff)
		/* seqid1 has wrapped around. */
		return (1);
	if (seqid1 <= seqid2)
		return (1);
	return (0);
}
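
/*
 * Worked examples of the wrap-aware comparison (illustrative, not from
 * the original source):
 *   nfscl_seq(5, 9) == 1           5 comes before 9
 *   nfscl_seq(0xfffffffe, 2) == 1  0xfffffffe precedes 2 across the wrap
 *   nfscl_seq(2, 0xfffffffe) == 0  2 does not precede 0xfffffffe
 */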

/*
 * Do a layout return for each of the recalls.
 */
static void
nfscl_layoutreturn(struct nfsmount *nmp, struct nfscllayout *lyp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclrecalllayout *rp;
	nfsv4stateid_t stateid;

	NFSBCOPY(lyp->nfsly_stateid.other, stateid.other, NFSX_STATEIDOTHER);
	stateid.seqid = lyp->nfsly_stateid.seqid;
	LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) {
		(void)nfsrpc_layoutreturn(nmp, lyp->nfsly_fh,
		    lyp->nfsly_fhlen, 0, NFSLAYOUT_NFSV4_1_FILES,
		    rp->nfsrecly_iomode, rp->nfsrecly_recalltype,
		    rp->nfsrecly_off, rp->nfsrecly_len,
		    &stateid, 0, NULL, cred, p, NULL);
	}
}

/*
 * Do the layout commit for a file layout.
 */
static void
nfscl_dolayoutcommit(struct nfsmount *nmp, struct nfscllayout *lyp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclflayout *flp;
	uint64_t len;
	int error;

	LIST_FOREACH(flp, &lyp->nfsly_flayrw, nfsfl_list) {
		if (flp->nfsfl_off <= lyp->nfsly_lastbyte) {
			len = flp->nfsfl_end - flp->nfsfl_off;
			error = nfsrpc_layoutcommit(nmp, lyp->nfsly_fh,
			    lyp->nfsly_fhlen, 0, flp->nfsfl_off, len,
			    lyp->nfsly_lastbyte, &lyp->nfsly_stateid,
			    NFSLAYOUT_NFSV4_1_FILES, 0, NULL, cred, p, NULL);
			NFSCL_DEBUG(4, "layoutcommit err=%d\n", error);
			if (error == NFSERR_NOTSUPP) {
				/* If not supported, don't bother doing it. */
				NFSLOCKMNT(nmp);
				nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT;
				NFSUNLOCKMNT(nmp);
				break;
			}
		}
	}
}
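
/*
 * Illustrative example (not from the original source): for a single rw
 * segment with nfsfl_off == 0 and nfsfl_end == 1M, and nfsly_lastbyte ==
 * 65535, the segment qualifies (0 <= 65535) and a LAYOUTCOMMIT is issued
 * for offset 0 and length nfsfl_end - nfsfl_off (1M), with 65535 passed
 * as the last byte written.
 */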

/*
 * Commit all layouts for a file (vnode).
 */
int
nfscl_layoutcommit(vnode_t vp, NFSPROC_T *p)
{
	struct nfsclclient *clp;
	struct nfscllayout *lyp;
	struct nfsnode *np = VTONFS(vp);
	mount_t mp;
	struct nfsmount *nmp;

	mp = vnode_mount(vp);
	nmp = VFSTONFS(mp);
	if (NFSHASNOLAYOUTCOMMIT(nmp))
		return (0);
	NFSLOCKCLSTATE();
	clp = nmp->nm_clp;
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (EPERM);
	}
	lyp = nfscl_findlayout(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (lyp == NULL) {
		NFSUNLOCKCLSTATE();
		return (EPERM);
	}
	nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
	if (NFSCL_FORCEDISM(mp)) {
		NFSUNLOCKCLSTATE();
		return (EPERM);
	}
tryagain:
	if ((lyp->nfsly_flags & NFSLY_WRITTEN) != 0) {
		lyp->nfsly_flags &= ~NFSLY_WRITTEN;
		NFSUNLOCKCLSTATE();
		NFSCL_DEBUG(4, "do layoutcommit2\n");
		nfscl_dolayoutcommit(clp->nfsc_nmp, lyp, NFSPROCCRED(p), p);
		NFSLOCKCLSTATE();
		goto tryagain;
	}
	nfsv4_relref(&lyp->nfsly_lock);
	NFSUNLOCKCLSTATE();
	return (0);
}