2 * Copyright (c) 2009 Rick Macklem, University of Guelph
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
32 * These functions implement the client side state handling for NFSv4.
33 * NFSv4 state handling:
34 * - A lockowner is used to determine lock contention, so it
35 * corresponds directly to a Posix pid. (1 to 1 mapping)
36 * - The correct granularity of an OpenOwner is not nearly so
37 * obvious. An OpenOwner does the following:
38 * - provides a serial sequencing of Open/Close/Lock-with-new-lockowner
39 * - is used to check for Open/Share contention (not applicable to
40 * this client, since all Opens are Deny_None)
41 * As such, I considered both extremes.
42 * 1 OpenOwner per ClientID - Simple to manage, but fully serializes
43 * all Open, Close and Lock (with a new lockowner) Ops.
44 * 1 OpenOwner for each Open - This one results in an OpenConfirm for
45 * every Open, for most servers.
46 * So, I chose to use the same mapping as I did for LockOwners.
47 * The main concern here is that you can end up with multiple Opens
48 * for the same File Handle, but on different OpenOwners (opens
49 * inherited from parents, grandparents...) and you do not know
50 * which of these the vnodeop close applies to. This is handled by
51 * delaying the Close Op(s) until all of the Opens have been closed.
52 * (It is not yet obvious if this is the correct granularity.)
53 * - How the code handles serialization:
54 * - For the ClientId, it uses an exclusive lock while getting its
55 * SetClientId and during recovery. Otherwise, it uses a shared
56 * lock via a reference count.
57 * - For the rest of the data structures, it uses an SMP mutex
58 * (once the nfs client is SMP safe) and doesn't sleep while
59 * manipulating the linked lists.
60 * - The serialization of Open/Close/Lock/LockU falls out in the
61 * "wash", since OpenOwners and LockOwners are both mapped from
62 * Posix pid. In other words, there is only one Posix pid using
63 * any given owner, so that owner is serialized. (If you change
64 * the granularity of the OpenOwner, then code must be added to
65 * serialize Ops on the OpenOwner.)
66 * - When to get rid of OpenOwners and LockOwners.
67 * - The function nfscl_cleanup_common() is executed after a process exits.
68 * It goes through the client list looking for all Open and Lock Owners.
69 * When one is found, it is marked "defunct" or in the case of
70 * an OpenOwner without any Opens, freed.
71 * The renew thread scans for defunct Owners and gets rid of them,
72 * if it can. The LockOwners will also be deleted when the
73 * associated Open is closed.
74 * - If the LockU or Close Op(s) fail during close in a way
75 * that could be recovered upon retry, they are relinked to the
76 * ClientId's defunct open list and retried by the renew thread
77 * until they succeed or an unmount/recovery occurs.
78 * (Since we are done with them, they do not need to be recovered.)
82 #include <fs/nfs/nfsport.h>
87 extern struct nfsstats newnfsstats;
88 extern struct nfsreqhead nfsd_reqq;
92 struct nfsclhead nfsclhead; /* Head of clientid list */
93 int nfscl_deleghighwater = NFSCLDELEGHIGHWATER;
94 #endif /* !APPLEKEXT */
96 static int nfscl_delegcnt = 0;
97 static int nfscl_getopen(struct nfsclownerhead *, u_int8_t *, int, u_int8_t *,
98 u_int8_t *, u_int32_t, struct nfscllockowner **, struct nfsclopen **);
99 static void nfscl_clrelease(struct nfsclclient *);
100 static void nfscl_cleanclient(struct nfsclclient *);
101 static void nfscl_expireclient(struct nfsclclient *, struct nfsmount *,
102 struct ucred *, NFSPROC_T *);
103 static int nfscl_expireopen(struct nfsclclient *, struct nfsclopen *,
104 struct nfsmount *, struct ucred *, NFSPROC_T *);
105 static void nfscl_recover(struct nfsclclient *, struct ucred *, NFSPROC_T *);
106 static void nfscl_insertlock(struct nfscllockowner *, struct nfscllock *,
107 struct nfscllock *, int);
108 static int nfscl_updatelock(struct nfscllockowner *, struct nfscllock **,
109 struct nfscllock **, int);
110 static void nfscl_delegreturnall(struct nfsclclient *, NFSPROC_T *);
111 static u_int32_t nfscl_nextcbident(void);
112 static mount_t nfscl_getmnt(u_int32_t);
113 static struct nfscldeleg *nfscl_finddeleg(struct nfsclclient *, u_int8_t *,
115 static int nfscl_checkconflict(struct nfscllockownerhead *, struct nfscllock *,
116 u_int8_t *, struct nfscllock **);
117 static void nfscl_freealllocks(struct nfscllockownerhead *, int);
118 static int nfscl_localconflict(struct nfsclclient *, u_int8_t *, int,
119 struct nfscllock *, u_int8_t *, struct nfscldeleg *, struct nfscllock **);
120 static void nfscl_newopen(struct nfsclclient *, struct nfscldeleg *,
121 struct nfsclowner **, struct nfsclowner **, struct nfsclopen **,
122 struct nfsclopen **, u_int8_t *, u_int8_t *, int, int *);
123 static int nfscl_moveopen(vnode_t , struct nfsclclient *,
124 struct nfsmount *, struct nfsclopen *, struct nfsclowner *,
125 struct nfscldeleg *, struct ucred *, NFSPROC_T *);
126 static void nfscl_totalrecall(struct nfsclclient *);
127 static int nfscl_relock(vnode_t , struct nfsclclient *, struct nfsmount *,
128 struct nfscllockowner *, struct nfscllock *, struct ucred *, NFSPROC_T *);
129 static int nfscl_tryopen(struct nfsmount *, vnode_t , u_int8_t *, int,
130 u_int8_t *, int, u_int32_t, struct nfsclopen *, u_int8_t *, int,
131 struct nfscldeleg **, int, u_int32_t, struct ucred *, NFSPROC_T *);
132 static int nfscl_trylock(struct nfsmount *, vnode_t , u_int8_t *,
133 int, struct nfscllockowner *, int, int, u_int64_t, u_int64_t, short,
134 struct ucred *, NFSPROC_T *);
135 static int nfsrpc_reopen(struct nfsmount *, u_int8_t *, int, u_int32_t,
136 struct nfsclopen *, struct nfscldeleg **, struct ucred *, NFSPROC_T *);
137 static void nfscl_freedeleg(struct nfscldeleghead *, struct nfscldeleg *);
138 static int nfscl_errmap(struct nfsrv_descript *);
139 static void nfscl_cleanup_common(struct nfsclclient *, u_int8_t *);
140 static int nfscl_recalldeleg(struct nfsclclient *, struct nfsmount *,
141 struct nfscldeleg *, vnode_t, struct ucred *, NFSPROC_T *, int);
142 static void nfscl_freeopenowner(struct nfsclowner *, int);
143 static void nfscl_cleandeleg(struct nfscldeleg *);
144 static int nfscl_trydelegreturn(struct nfscldeleg *, struct ucred *,
145 struct nfsmount *, NFSPROC_T *);
147 static short nfscberr_null[] = {
152 static short nfscberr_getattr[] = {
161 static short nfscberr_recall[] = {
171 static short *nfscl_cberrmap[] = {
179 #define NETFAMILY(clp) \
180 (((clp)->nfsc_flags & NFSCLFLAGS_AFINET6) ? AF_INET6 : AF_INET)
183 * Called for an open operation.
184 * If the nfhp argument is NULL, just get an openowner.
187 nfscl_open(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t amode, int usedeleg,
188 struct ucred *cred, NFSPROC_T *p, struct nfsclowner **owpp,
189 struct nfsclopen **opp, int *newonep, int *retp, int lockit)
191 struct nfsclclient *clp;
192 struct nfsclowner *owp, *nowp;
193 struct nfsclopen *op = NULL, *nop = NULL;
194 struct nfscldeleg *dp;
195 struct nfsclownerhead *ohp;
196 u_int8_t own[NFSV4CL_LOCKNAMELEN];
207 * Might need one or both of these, so MALLOC them now, to
208 * avoid a tsleep() in MALLOC later.
210 MALLOC(nowp, struct nfsclowner *, sizeof (struct nfsclowner),
211 M_NFSCLOWNER, M_WAITOK);
213 MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) +
214 fhlen - 1, M_NFSCLOPEN, M_WAITOK);
215 ret = nfscl_getcl(vp, cred, p, &clp);
217 FREE((caddr_t)nowp, M_NFSCLOWNER);
219 FREE((caddr_t)nop, M_NFSCLOPEN);
224 * Get the Open iff it already exists.
225 * If none found, add the new one or return error, depending upon
228 nfscl_filllockowner(p->td_proc, own, F_POSIX);
231 /* First check the delegation list */
232 if (nfhp != NULL && usedeleg) {
233 LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
234 if (dp->nfsdl_fhlen == fhlen &&
235 !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
236 if (!(amode & NFSV4OPEN_ACCESSWRITE) ||
237 (dp->nfsdl_flags & NFSCLDL_WRITE))
246 ohp = &dp->nfsdl_owner;
248 ohp = &clp->nfsc_owner;
249 /* Now, search for an openowner */
250 LIST_FOREACH(owp, ohp, nfsow_list) {
251 if (!NFSBCMP(owp->nfsow_owner, own, NFSV4CL_LOCKNAMELEN))
256 * Create a new open, as required.
258 nfscl_newopen(clp, dp, &owp, &nowp, &op, &nop, own, nfhp, fhlen,
262 * Serialize modifications to the open owner for multiple threads
263 * within the same process using a read/write sleep lock.
266 nfscl_lockexcl(&owp->nfsow_rwlock, NFSCLSTATEMUTEXPTR);
269 FREE((caddr_t)nowp, M_NFSCLOWNER);
271 FREE((caddr_t)nop, M_NFSCLOPEN);
277 if (nfhp != NULL && dp != NULL && nop == NULL)
278 /* new local open on delegation */
279 *retp = NFSCLOPEN_SETCRED;
281 *retp = NFSCLOPEN_OK;
285 * Now, check the mode on the open and return the appropriate
288 if (op != NULL && (amode & ~(op->nfso_mode))) {
289 op->nfso_mode |= amode;
290 if (retp != NULL && dp == NULL)
291 *retp = NFSCLOPEN_DOOPEN;
297 * Create a new open, as required.
300 nfscl_newopen(struct nfsclclient *clp, struct nfscldeleg *dp,
301 struct nfsclowner **owpp, struct nfsclowner **nowpp, struct nfsclopen **opp,
302 struct nfsclopen **nopp, u_int8_t *own, u_int8_t *fhp, int fhlen,
305 struct nfsclowner *owp = *owpp, *nowp;
306 struct nfsclopen *op, *nop;
316 if (owp == NULL && nowp != NULL) {
317 NFSBCOPY(own, nowp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
318 LIST_INIT(&nowp->nfsow_open);
319 nowp->nfsow_clp = clp;
320 nowp->nfsow_seqid = 0;
321 nowp->nfsow_defunct = 0;
322 nfscl_lockinit(&nowp->nfsow_rwlock);
324 newnfsstats.cllocalopenowners++;
325 LIST_INSERT_HEAD(&dp->nfsdl_owner, nowp, nfsow_list);
327 newnfsstats.clopenowners++;
328 LIST_INSERT_HEAD(&clp->nfsc_owner, nowp, nfsow_list);
336 /* If an fhp has been specified, create an Open as well. */
338 /* and look for the correct open, based upon FH */
339 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
340 if (op->nfso_fhlen == fhlen &&
341 !NFSBCMP(op->nfso_fh, fhp, fhlen))
344 if (op == NULL && nop != NULL) {
347 nop->nfso_opencnt = 0;
348 nop->nfso_posixlock = 1;
349 nop->nfso_fhlen = fhlen;
350 NFSBCOPY(fhp, nop->nfso_fh, fhlen);
351 LIST_INIT(&nop->nfso_lock);
352 nop->nfso_stateid.seqid = 0;
353 nop->nfso_stateid.other[0] = 0;
354 nop->nfso_stateid.other[1] = 0;
355 nop->nfso_stateid.other[2] = 0;
357 TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
358 TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
360 dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
361 newnfsstats.cllocalopens++;
363 newnfsstats.clopens++;
365 LIST_INSERT_HEAD(&owp->nfsow_open, nop, nfso_list);
377 * Called to find/add a delegation to a client.
380 nfscl_deleg(mount_t mp, struct nfsclclient *clp, u_int8_t *nfhp,
381 int fhlen, struct ucred *cred, NFSPROC_T *p, struct nfscldeleg **dpp)
383 struct nfscldeleg *dp = *dpp, *tdp;
386 * First, if we have received a Read delegation for a file on a
387 * read/write file system, just return it, because they aren't
390 if (mp != NULL && dp != NULL && !NFSMNT_RDONLY(mp) &&
391 (dp->nfsdl_flags & NFSCLDL_READ)) {
392 (void) nfscl_trydelegreturn(dp, cred, VFSTONFS(mp), p);
393 FREE((caddr_t)dp, M_NFSCLDELEG);
398 /* Look for the correct deleg, based upon FH */
400 tdp = nfscl_finddeleg(clp, nfhp, fhlen);
404 return (NFSERR_BADSTATEID);
407 TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
408 LIST_INSERT_HEAD(NFSCLDELEGHASH(clp, nfhp, fhlen), dp,
410 dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
411 newnfsstats.cldelegates++;
415 * Delegation already exists, what do we do if a new one??
418 printf("Deleg already exists!\n");
419 FREE((caddr_t)dp, M_NFSCLDELEG);
430 * Find a delegation for this file handle. Return NULL upon failure.
432 static struct nfscldeleg *
433 nfscl_finddeleg(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
435 struct nfscldeleg *dp;
437 LIST_FOREACH(dp, NFSCLDELEGHASH(clp, fhp, fhlen), nfsdl_hash) {
438 if (dp->nfsdl_fhlen == fhlen &&
439 !NFSBCMP(dp->nfsdl_fh, fhp, fhlen))
446 * Get a stateid for an I/O operation. First, look for an open and iff
447 * found, return either a lockowner stateid or the open stateid.
448 * If no Open is found, just return error and the special stateid of all zeros.
451 nfscl_getstateid(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t mode,
452 struct ucred *cred, NFSPROC_T *p, nfsv4stateid_t *stateidp,
455 struct nfsclclient *clp;
456 struct nfsclowner *owp;
457 struct nfsclopen *op = NULL;
458 struct nfscllockowner *lp;
459 struct nfscldeleg *dp;
461 u_int8_t own[NFSV4CL_LOCKNAMELEN];
466 * Initially, just set the special stateid of all zeros.
469 stateidp->other[0] = 0;
470 stateidp->other[1] = 0;
471 stateidp->other[2] = 0;
472 if (vnode_vtype(vp) != VREG)
476 clp = nfscl_findcl(VFSTONFS(vnode_mount(vp)));
483 * Wait for recovery to complete.
485 while ((clp->nfsc_flags & NFSCLFLAGS_RECVRINPROG))
486 (void) nfsmsleep(&clp->nfsc_flags, NFSCLSTATEMUTEXPTR,
487 PZERO, "nfsrecvr", NULL);
490 * First, look for a delegation.
492 LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
493 if (dp->nfsdl_fhlen == fhlen &&
494 !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
495 if (!(mode & NFSV4OPEN_ACCESSWRITE) ||
496 (dp->nfsdl_flags & NFSCLDL_WRITE)) {
497 stateidp->seqid = dp->nfsdl_stateid.seqid;
498 stateidp->other[0] = dp->nfsdl_stateid.other[0];
499 stateidp->other[1] = dp->nfsdl_stateid.other[1];
500 stateidp->other[2] = dp->nfsdl_stateid.other[2];
501 if (!(np->n_flag & NDELEGRECALL)) {
502 TAILQ_REMOVE(&clp->nfsc_deleg, dp,
504 TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
506 dp->nfsdl_timestamp = NFSD_MONOSEC +
508 dp->nfsdl_rwlock.nfslock_usecnt++;
509 *lckpp = (void *)&dp->nfsdl_rwlock;
520 * If p != NULL, we want to search the parentage tree
521 * for a matching OpenOwner and use that.
523 nfscl_filllockowner(p->td_proc, own, F_POSIX);
525 error = nfscl_getopen(&clp->nfsc_owner, nfhp, fhlen, own, own,
527 if (error == 0 && lp != NULL) {
529 lp->nfsl_stateid.seqid;
531 lp->nfsl_stateid.other[0];
533 lp->nfsl_stateid.other[1];
535 lp->nfsl_stateid.other[2];
541 /* If not found, just look for any OpenOwner that will work. */
543 owp = LIST_FIRST(&clp->nfsc_owner);
544 while (!done && owp != NULL) {
545 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
546 if (op->nfso_fhlen == fhlen &&
547 !NFSBCMP(op->nfso_fh, nfhp, fhlen) &&
548 (mode & op->nfso_mode) == mode) {
554 owp = LIST_NEXT(owp, nfsow_list);
560 /* for read aheads or write behinds, use the open cred */
561 newnfs_copycred(&op->nfso_cred, cred);
565 * No lock stateid, so return the open stateid.
567 stateidp->seqid = op->nfso_stateid.seqid;
568 stateidp->other[0] = op->nfso_stateid.other[0];
569 stateidp->other[1] = op->nfso_stateid.other[1];
570 stateidp->other[2] = op->nfso_stateid.other[2];
576 * Search for a matching file, mode and, optionally, lockowner.
579 nfscl_getopen(struct nfsclownerhead *ohp, u_int8_t *nfhp, int fhlen,
580 u_int8_t *openown, u_int8_t *lockown, u_int32_t mode,
581 struct nfscllockowner **lpp, struct nfsclopen **opp)
583 struct nfsclowner *owp;
584 struct nfsclopen *op, *rop, *rop2;
585 struct nfscllockowner *lp;
591 * rop will be set to the open to be returned. There are three
592 * variants of this, all for an open of the correct file:
593 * 1 - A match of lockown.
594 * 2 - A match of the openown, when no lockown match exists.
595 * 3 - A match for any open, if no openown or lockown match exists.
596 * Looking for #2 over #3 probably isn't necessary, but since
597 * RFC3530 is vague w.r.t. the relationship between openowners and
598 * lockowners, I think this is the safer way to go.
603 /* Search the client list */
604 owp = LIST_FIRST(ohp);
605 while (owp != NULL && keep_looping != 0) {
606 /* and look for the correct open */
607 op = LIST_FIRST(&owp->nfsow_open);
608 while (op != NULL && keep_looping != 0) {
609 if (op->nfso_fhlen == fhlen &&
610 !NFSBCMP(op->nfso_fh, nfhp, fhlen)
611 && (op->nfso_mode & mode) == mode) {
613 /* Now look for a matching lockowner. */
614 LIST_FOREACH(lp, &op->nfso_lock,
616 if (!NFSBCMP(lp->nfsl_owner,
618 NFSV4CL_LOCKNAMELEN)) {
626 if (rop == NULL && !NFSBCMP(owp->nfsow_owner,
627 openown, NFSV4CL_LOCKNAMELEN)) {
635 op = LIST_NEXT(op, nfso_list);
637 owp = LIST_NEXT(owp, nfsow_list);
648 * Release use of an open owner. Called when open operations are done
649 * with the open owner.
652 nfscl_ownerrelease(struct nfsclowner *owp, __unused int error,
653 __unused int candelete, int unlocked)
660 nfscl_lockunlock(&owp->nfsow_rwlock);
661 nfscl_clrelease(owp->nfsow_clp);
666 * Release use of an open structure under an open owner.
669 nfscl_openrelease(struct nfsclopen *op, int error, int candelete)
671 struct nfsclclient *clp;
672 struct nfsclowner *owp;
678 nfscl_lockunlock(&owp->nfsow_rwlock);
679 clp = owp->nfsow_clp;
680 if (error && candelete && op->nfso_opencnt == 0)
681 nfscl_freeopen(op, 0);
682 nfscl_clrelease(clp);
687 * Called to get a clientid structure. It will optionally lock the
688 * client data structures to do the SetClientId/SetClientId_confirm,
689 * but will release that lock and return the clientid with a reference
691 * If the "cred" argument is NULL, a new clientid should not be created.
692 * If the "p" argument is NULL, a SetClientID/SetClientIDConfirm cannot
694 * It always returns clpp with a reference count on it, unless returning an error.
697 nfscl_getcl(vnode_t vp, struct ucred *cred, NFSPROC_T *p,
698 struct nfsclclient **clpp)
700 struct nfsclclient *clp;
701 struct nfsclclient *newclp = NULL;
703 struct nfsmount *nmp;
704 char uuid[HOSTUUIDLEN];
705 int igotlock = 0, error, trystalecnt, clidinusedelay, i;
708 mp = vnode_mount(vp);
711 getcredhostuuid(cred, uuid, sizeof uuid);
712 idlen = strlen(uuid);
714 idlen += sizeof (u_int64_t);
716 idlen += sizeof (u_int64_t) + 16; /* 16 random bytes */
717 MALLOC(newclp, struct nfsclclient *,
718 sizeof (struct nfsclclient) + idlen - 1, M_NFSCLCLIENT,
723 * If a forced dismount is already in progress, don't
724 * allocate a new clientid and get out now. For the case where
725 * clp != NULL, this is a harmless optimization.
727 if ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
730 free(newclp, M_NFSCLCLIENT);
735 if (newclp == NULL) {
740 NFSBZERO((caddr_t)clp, sizeof(struct nfsclclient) + idlen - 1);
741 clp->nfsc_idlen = idlen;
742 LIST_INIT(&clp->nfsc_owner);
743 TAILQ_INIT(&clp->nfsc_deleg);
744 for (i = 0; i < NFSCLDELEGHASHSIZE; i++)
745 LIST_INIT(&clp->nfsc_deleghash[i]);
746 clp->nfsc_flags = NFSCLFLAGS_INITED;
747 clp->nfsc_clientidrev = 1;
748 clp->nfsc_cbident = nfscl_nextcbident();
749 nfscl_fillclid(nmp->nm_clval, uuid, clp->nfsc_id,
751 LIST_INSERT_HEAD(&nfsclhead, clp, nfsc_list);
755 nfscl_start_renewthread(clp);
759 FREE((caddr_t)newclp, M_NFSCLCLIENT);
762 while ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0 && !igotlock &&
763 (mp->mnt_kern_flag & MNTK_UNMOUNTF) == 0)
764 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
765 NFSCLSTATEMUTEXPTR, mp);
767 nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
768 if (igotlock == 0 && (mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
770 * Both nfsv4_lock() and nfsv4_getref() know to check
771 * for MNTK_UNMOUNTF and return without sleeping to
772 * wait for the exclusive lock to be released, since it
773 * might be held by nfscl_umount() and we need to get out
774 * now for that case and not wait until nfscl_umount()
783 * If it needs a clientid, do the setclientid now.
785 if ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0) {
787 panic("nfscl_clget");
788 if (p == NULL || cred == NULL) {
790 nfsv4_unlock(&clp->nfsc_lock, 0);
795 * If RFC3530 Sec. 14.2.33 is taken literally,
796 * NFSERR_CLIDINUSE will be returned persistently for the
797 * case where a new mount of the same file system is using
798 * a different principal. In practice, NFSERR_CLIDINUSE is
799 * only returned when there is outstanding unexpired state
800 * on the clientid. As such, try for twice the lease
801 * interval, if we know what that is. Otherwise, make a
803 * The case of returning NFSERR_STALECLIENTID is far less
804 * likely, but might occur if there is a significant delay
805 * between doing the SetClientID and SetClientIDConfirm Ops,
806 * such that the server throws away the clientid before
807 * receiving the SetClientIDConfirm.
809 if (clp->nfsc_renew > 0)
810 clidinusedelay = NFSCL_LEASE(clp->nfsc_renew) * 2;
812 clidinusedelay = 120;
815 error = nfsrpc_setclient(VFSTONFS(vnode_mount(vp)),
817 if (error == NFSERR_STALECLIENTID ||
818 error == NFSERR_STALEDONTRECOVER ||
819 error == NFSERR_CLIDINUSE) {
820 (void) nfs_catnap(PZERO, error, "nfs_setcl");
822 } while (((error == NFSERR_STALECLIENTID ||
823 error == NFSERR_STALEDONTRECOVER) && --trystalecnt > 0) ||
824 (error == NFSERR_CLIDINUSE && --clidinusedelay > 0));
827 nfsv4_unlock(&clp->nfsc_lock, 0);
831 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
835 nfsv4_unlock(&clp->nfsc_lock, 1);
844 * Get a reference to a clientid and return it, if valid.
846 APPLESTATIC struct nfsclclient *
847 nfscl_findcl(struct nfsmount *nmp)
849 struct nfsclclient *clp;
852 if (clp == NULL || !(clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID))
858 * Release the clientid structure. It may be locked or reference counted.
861 nfscl_clrelease(struct nfsclclient *clp)
864 if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
865 nfsv4_unlock(&clp->nfsc_lock, 0);
867 nfsv4_relref(&clp->nfsc_lock);
871 * External call for nfscl_clrelease.
874 nfscl_clientrelease(struct nfsclclient *clp)
878 if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
879 nfsv4_unlock(&clp->nfsc_lock, 0);
881 nfsv4_relref(&clp->nfsc_lock);
886 * Called when wanting to lock a byte region.
889 nfscl_getbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
890 short type, struct ucred *cred, NFSPROC_T *p, struct nfsclclient *rclp,
891 int recovery, void *id, int flags, u_int8_t *rownp, u_int8_t *ropenownp,
892 struct nfscllockowner **lpp, int *newonep, int *donelocallyp)
894 struct nfscllockowner *lp;
895 struct nfsclopen *op;
896 struct nfsclclient *clp;
897 struct nfscllockowner *nlp;
898 struct nfscllock *nlop, *otherlop;
899 struct nfscldeleg *dp = NULL, *ldp = NULL;
900 struct nfscllockownerhead *lhp = NULL;
902 u_int8_t own[NFSV4CL_LOCKNAMELEN], *ownp, openown[NFSV4CL_LOCKNAMELEN];
904 int error = 0, ret, donelocally = 0;
907 /* For Lock Ops, the open mode doesn't matter, so use 0 to match any. */
916 * Might need these, so MALLOC them now, to
917 * avoid a tsleep() in MALLOC later.
919 MALLOC(nlp, struct nfscllockowner *,
920 sizeof (struct nfscllockowner), M_NFSCLLOCKOWNER, M_WAITOK);
921 MALLOC(otherlop, struct nfscllock *,
922 sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
923 MALLOC(nlop, struct nfscllock *,
924 sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
925 nlop->nfslo_type = type;
926 nlop->nfslo_first = off;
927 if (len == NFS64BITSSET) {
928 nlop->nfslo_end = NFS64BITSSET;
930 nlop->nfslo_end = off + len;
931 if (nlop->nfslo_end <= nlop->nfslo_first)
932 error = NFSERR_INVAL;
939 error = nfscl_getcl(vp, cred, p, &clp);
942 FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
943 FREE((caddr_t)otherlop, M_NFSCLLOCK);
944 FREE((caddr_t)nlop, M_NFSCLLOCK);
951 openownp = ropenownp;
953 nfscl_filllockowner(id, own, flags);
955 nfscl_filllockowner(p->td_proc, openown, F_POSIX);
961 * First, search for a delegation. If one exists for this file,
962 * the lock can be done locally against it, so long as there
963 * isn't a local lock conflict.
965 ldp = dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
967 /* Just sanity check for correct type of delegation */
968 if (dp != NULL && ((dp->nfsdl_flags &
969 (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) != 0 ||
971 (dp->nfsdl_flags & NFSCLDL_WRITE) == 0)))
975 /* Now, find an open and maybe a lockowner. */
976 ret = nfscl_getopen(&dp->nfsdl_owner, np->n_fhp->nfh_fh,
977 np->n_fhp->nfh_len, openownp, ownp, mode, NULL, &op);
979 ret = nfscl_getopen(&clp->nfsc_owner,
980 np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
981 ownp, mode, NULL, &op);
983 lhp = &dp->nfsdl_lock;
984 TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
985 TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
986 dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
994 * Get the related Open and maybe lockowner.
996 error = nfscl_getopen(&clp->nfsc_owner,
997 np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
998 ownp, mode, &lp, &op);
1000 lhp = &op->nfso_lock;
1002 if (!error && !recovery)
1003 error = nfscl_localconflict(clp, np->n_fhp->nfh_fh,
1004 np->n_fhp->nfh_len, nlop, ownp, ldp, NULL);
1007 nfscl_clrelease(clp);
1010 FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
1011 FREE((caddr_t)otherlop, M_NFSCLLOCK);
1012 FREE((caddr_t)nlop, M_NFSCLLOCK);
1017 * Ok, see if a lockowner exists and create one, as required.
1020 LIST_FOREACH(lp, lhp, nfsl_list) {
1021 if (!NFSBCMP(lp->nfsl_owner, ownp, NFSV4CL_LOCKNAMELEN))
1025 NFSBCOPY(ownp, nlp->nfsl_owner, NFSV4CL_LOCKNAMELEN);
1027 NFSBCOPY(ropenownp, nlp->nfsl_openowner,
1028 NFSV4CL_LOCKNAMELEN);
1030 NFSBCOPY(op->nfso_own->nfsow_owner, nlp->nfsl_openowner,
1031 NFSV4CL_LOCKNAMELEN);
1032 nlp->nfsl_seqid = 0;
1033 nlp->nfsl_defunct = 0;
1034 nlp->nfsl_inprog = NULL;
1035 nfscl_lockinit(&nlp->nfsl_rwlock);
1036 LIST_INIT(&nlp->nfsl_lock);
1038 nlp->nfsl_open = NULL;
1039 newnfsstats.cllocallockowners++;
1041 nlp->nfsl_open = op;
1042 newnfsstats.cllockowners++;
1044 LIST_INSERT_HEAD(lhp, nlp, nfsl_list);
1051 * Now, update the byte ranges for locks.
1053 ret = nfscl_updatelock(lp, &nlop, &otherlop, donelocally);
1059 nfscl_clrelease(clp);
1062 * Serial modifications on the lock owner for multiple threads
1063 * for the same process using a read/write lock.
1066 nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
1072 FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
1074 FREE((caddr_t)nlop, M_NFSCLLOCK);
1076 FREE((caddr_t)otherlop, M_NFSCLLOCK);
1083 * Called to unlock a byte range, for LockU.
1086 nfscl_relbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
1087 __unused struct ucred *cred, NFSPROC_T *p, int callcnt,
1088 struct nfsclclient *clp, void *id, int flags,
1089 struct nfscllockowner **lpp, int *dorpcp)
1091 struct nfscllockowner *lp;
1092 struct nfsclowner *owp;
1093 struct nfsclopen *op;
1094 struct nfscllock *nlop, *other_lop = NULL;
1095 struct nfscldeleg *dp;
1097 u_int8_t own[NFSV4CL_LOCKNAMELEN];
1105 * Might need these, so MALLOC them now, to
1106 * avoid a tsleep() in MALLOC later.
1108 MALLOC(nlop, struct nfscllock *,
1109 sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
1110 nlop->nfslo_type = F_UNLCK;
1111 nlop->nfslo_first = off;
1112 if (len == NFS64BITSSET) {
1113 nlop->nfslo_end = NFS64BITSSET;
1115 nlop->nfslo_end = off + len;
1116 if (nlop->nfslo_end <= nlop->nfslo_first) {
1117 FREE((caddr_t)nlop, M_NFSCLLOCK);
1118 return (NFSERR_INVAL);
1122 MALLOC(other_lop, struct nfscllock *,
1123 sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
1126 nfscl_filllockowner(id, own, flags);
1130 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
1131 np->n_fhp->nfh_len);
1134 * First, unlock any local regions on a delegation.
1137 /* Look for this lockowner. */
1138 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
1139 if (!NFSBCMP(lp->nfsl_owner, own,
1140 NFSV4CL_LOCKNAMELEN))
1144 /* Use other_lop, so nlop is still available */
1145 (void)nfscl_updatelock(lp, &other_lop, NULL, 1);
1149 * Now, find a matching open/lockowner that hasn't already been done,
1150 * as marked by nfsl_inprog.
1154 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
1155 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1156 if (op->nfso_fhlen == np->n_fhp->nfh_len &&
1157 !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
1158 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1159 if (lp->nfsl_inprog == NULL &&
1160 !NFSBCMP(lp->nfsl_owner, own,
1161 NFSV4CL_LOCKNAMELEN)) {
1175 ret = nfscl_updatelock(lp, &nlop, NULL, 0);
1179 * Serial modifications on the lock owner for multiple
1180 * threads for the same process using a read/write lock.
1182 lp->nfsl_inprog = p;
1183 nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
1188 FREE((caddr_t)nlop, M_NFSCLLOCK);
1190 FREE((caddr_t)other_lop, M_NFSCLLOCK);
1195 * Release all lockowners marked in progress for this process and file.
1198 nfscl_releasealllocks(struct nfsclclient *clp, vnode_t vp, NFSPROC_T *p,
1199 void *id, int flags)
1201 struct nfsclowner *owp;
1202 struct nfsclopen *op;
1203 struct nfscllockowner *lp;
1205 u_int8_t own[NFSV4CL_LOCKNAMELEN];
1208 nfscl_filllockowner(id, own, flags);
1210 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
1211 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1212 if (op->nfso_fhlen == np->n_fhp->nfh_len &&
1213 !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
1214 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1215 if (lp->nfsl_inprog == p &&
1216 !NFSBCMP(lp->nfsl_owner, own,
1217 NFSV4CL_LOCKNAMELEN)) {
1218 lp->nfsl_inprog = NULL;
1219 nfscl_lockunlock(&lp->nfsl_rwlock);
1225 nfscl_clrelease(clp);
1230 * Called to find out if any bytes within the byte range specified are
1231 * write locked by the calling process. Used to determine if flushing
1232 * is required before a LockU.
1233 * If in doubt, return 1, so the flush will occur.
1236 nfscl_checkwritelocked(vnode_t vp, struct flock *fl,
1237 struct ucred *cred, NFSPROC_T *p, void *id, int flags)
1239 struct nfsclowner *owp;
1240 struct nfscllockowner *lp;
1241 struct nfsclopen *op;
1242 struct nfsclclient *clp;
1243 struct nfscllock *lop;
1244 struct nfscldeleg *dp;
1247 u_int8_t own[NFSV4CL_LOCKNAMELEN];
1251 switch (fl->l_whence) {
1255 * Caller is responsible for adding any necessary offset
1256 * when SEEK_CUR is used.
1261 off = np->n_size + fl->l_start;
1266 if (fl->l_len != 0) {
1267 end = off + fl->l_len;
1274 error = nfscl_getcl(vp, cred, p, &clp);
1277 nfscl_filllockowner(id, own, flags);
1281 * First check the delegation locks.
1283 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
1285 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
1286 if (!NFSBCMP(lp->nfsl_owner, own,
1287 NFSV4CL_LOCKNAMELEN))
1291 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
1292 if (lop->nfslo_first >= end)
1294 if (lop->nfslo_end <= off)
1296 if (lop->nfslo_type == F_WRLCK) {
1297 nfscl_clrelease(clp);
1306 * Now, check state against the server.
1308 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
1309 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1310 if (op->nfso_fhlen == np->n_fhp->nfh_len &&
1311 !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
1312 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1313 if (!NFSBCMP(lp->nfsl_owner, own,
1314 NFSV4CL_LOCKNAMELEN))
1318 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
1319 if (lop->nfslo_first >= end)
1321 if (lop->nfslo_end <= off)
1323 if (lop->nfslo_type == F_WRLCK) {
1324 nfscl_clrelease(clp);
1333 nfscl_clrelease(clp);
1339 * Release a byte range lock owner structure.
1342 nfscl_lockrelease(struct nfscllockowner *lp, int error, int candelete)
1344 struct nfsclclient *clp;
1349 clp = lp->nfsl_open->nfso_own->nfsow_clp;
/*
 * On error, when the caller permits deletion and no other thread is
 * waiting on the lockowner's rwlock, free the lockowner outright.
 * NOTE(review): listing is line-sampled; the else path (plain unlock)
 * is implied by the adjacent nfscl_lockunlock() call.
 */
1350 if (error != 0 && candelete &&
1351 (lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED) == 0)
1352 nfscl_freelockowner(lp, 0);
1354 nfscl_lockunlock(&lp->nfsl_rwlock);
1355 nfscl_clrelease(clp);
1360 * Free up an open structure and any associated byte range lock structures.
1363 nfscl_freeopen(struct nfsclopen *op, int local)
1366 LIST_REMOVE(op, nfso_list);
1367 nfscl_freealllocks(&op->nfso_lock, local);
1368 FREE((caddr_t)op, M_NFSCLOPEN);
/* Decrement the local or server open count ("local" branch not visible here). */
1370 newnfsstats.cllocalopens--;
1372 newnfsstats.clopens--;
1376 * Free up all lock owners and associated locks.
1379 nfscl_freealllocks(struct nfscllockownerhead *lhp, int local)
1381 struct nfscllockowner *lp, *nlp;
1383 LIST_FOREACH_SAFE(lp, lhp, nfsl_list, nlp) {
/* Sanity check: nobody should be waiting on the lockowner's rwlock here. */
1384 if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
1386 nfscl_freelockowner(lp, local);
1391 * Called for an Open when NFSERR_EXPIRED is received from the server.
1392 * If there are no byte range locks nor a Share Deny lost, try to do a
1393 * fresh Open. Otherwise, free the open.
1396 nfscl_expireopen(struct nfsclclient *clp, struct nfsclopen *op,
1397 struct nfsmount *nmp, struct ucred *cred, NFSPROC_T *p)
1399 struct nfscllockowner *lp;
1400 struct nfscldeleg *dp;
1401 int mustdelete = 0, error;
1404 * Look for any byte range lock(s).
/* Any lock owner still holding locks forces deletion of the open. */
1406 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1407 if (!LIST_EMPTY(&lp->nfsl_lock)) {
1414 * If no byte range lock(s) nor a Share deny, try to re-open.
1416 if (!mustdelete && (op->nfso_mode & NFSLCK_DENYBITS) == 0) {
1417 newnfs_copycred(&op->nfso_cred, cred);
1419 error = nfsrpc_reopen(nmp, op->nfso_fh,
1420 op->nfso_fhlen, op->nfso_mode, op, &dp, cred, p);
/* One path frees a delegation returned by the re-open... */
1424 FREE((caddr_t)dp, M_NFSCLDELEG);
/* ...the other registers it with the client (branch lines not visible). */
1429 nfscl_deleg(nmp->nm_mountp, clp, op->nfso_fh,
1430 op->nfso_fhlen, cred, p, &dp);
1434 * If a byte range lock or Share deny or couldn't re-open, free it.
1437 nfscl_freeopen(op, 0);
1438 return (mustdelete);
1442 * Free up an open owner structure.
1445 nfscl_freeopenowner(struct nfsclowner *owp, int local)
1448 LIST_REMOVE(owp, nfsow_list);
1449 FREE((caddr_t)owp, M_NFSCLOWNER);
/* Decrement the local or server openowner count ("local" branch not visible). */
1451 newnfsstats.cllocalopenowners--;
1453 newnfsstats.clopenowners--;
1457 * Free up a byte range lock owner structure.
1460 nfscl_freelockowner(struct nfscllockowner *lp, int local)
1462 struct nfscllock *lop, *nlop;
1464 LIST_REMOVE(lp, nfsl_list);
/* Free every lock held by this owner before freeing the owner itself. */
1465 LIST_FOREACH_SAFE(lop, &lp->nfsl_lock, nfslo_list, nlop) {
1466 nfscl_freelock(lop, local);
1468 FREE((caddr_t)lp, M_NFSCLLOCKOWNER);
/* Decrement the local or server lockowner count ("local" branch not visible). */
1470 newnfsstats.cllocallockowners--;
1472 newnfsstats.cllockowners--;
1476 * Free up a byte range lock structure.
1479 nfscl_freelock(struct nfscllock *lop, int local)
1482 LIST_REMOVE(lop, nfslo_list);
1483 FREE((caddr_t)lop, M_NFSCLLOCK);
/* Decrement the local or server lock count ("local" branch not visible). */
1485 newnfsstats.cllocallocks--;
1487 newnfsstats.cllocks--;
1491 * Clean out the state related to a delegation.
1494 nfscl_cleandeleg(struct nfscldeleg *dp)
1496 struct nfsclowner *owp, *nowp;
1497 struct nfsclopen *op;
1499 LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
1500 op = LIST_FIRST(&owp->nfsow_open);
/* Invariant: a delegation's openowner holds at most one open. */
1502 if (LIST_NEXT(op, nfso_list) != NULL)
1503 panic("nfscleandel");
1504 nfscl_freeopen(op, 1)
1506 nfscl_freeopenowner(owp, 1);
/* Finally discard any locally issued locks held on the delegation. */
1508 nfscl_freealllocks(&dp->nfsdl_lock, 1);
1512 * Free a delegation.
1515 nfscl_freedeleg(struct nfscldeleghead *hdp, struct nfscldeleg *dp)
/* Unlink from both the delegation tailq and the file-handle hash chain. */
1518 TAILQ_REMOVE(hdp, dp, nfsdl_list);
1519 LIST_REMOVE(dp, nfsdl_hash);
1520 FREE((caddr_t)dp, M_NFSCLDELEG);
1521 newnfsstats.cldelegates--;
1526 * Free up all state related to this client structure.
1529 nfscl_cleanclient(struct nfsclclient *clp)
1531 struct nfsclowner *owp, *nowp;
1532 struct nfsclopen *op, *nop;
1534 /* Now, all the OpenOwners, etc. */
/* Free every open under every openowner, then the openowner itself. */
1535 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
1536 LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
1537 nfscl_freeopen(op, 0);
1539 nfscl_freeopenowner(owp, 0);
1544 * Called when an NFSERR_EXPIRED is received from the server.
/*
 * NOTE(review): listing is line-sampled; the loops below are missing their
 * closing braces and some branch lines in this view.
 */
1547 nfscl_expireclient(struct nfsclclient *clp, struct nfsmount *nmp,
1548 struct ucred *cred, NFSPROC_T *p)
1550 struct nfsclowner *owp, *nowp, *towp;
1551 struct nfsclopen *op, *nop, *top;
1552 struct nfscldeleg *dp, *ndp;
1553 int ret, printed = 0;
1556 * First, merge locally issued Opens into the list for the server.
1558 dp = TAILQ_FIRST(&clp->nfsc_deleg);
1559 while (dp != NULL) {
1560 ndp = TAILQ_NEXT(dp, nfsdl_list);
1561 owp = LIST_FIRST(&dp->nfsdl_owner);
1562 while (owp != NULL) {
1563 nowp = LIST_NEXT(owp, nfsow_list);
1564 op = LIST_FIRST(&owp->nfsow_open);
/* Invariant: each delegation openowner holds at most one open. */
1566 if (LIST_NEXT(op, nfso_list) != NULL)
/* Look for a matching openowner already on the server-side list. */
1568 LIST_FOREACH(towp, &clp->nfsc_owner, nfsow_list) {
1569 if (!NFSBCMP(towp->nfsow_owner, owp->nfsow_owner,
1570 NFSV4CL_LOCKNAMELEN))
1574 /* Merge opens in */
1575 LIST_FOREACH(top, &towp->nfsow_open, nfso_list) {
1576 if (top->nfso_fhlen == op->nfso_fhlen &&
1577 !NFSBCMP(top->nfso_fh, op->nfso_fh,
/* Same file: fold mode bits and the open count into the existing open. */
1579 top->nfso_mode |= op->nfso_mode;
1580 top->nfso_opencnt += op->nfso_opencnt;
1585 /* Just add the open to the owner list */
1586 LIST_REMOVE(op, nfso_list);
1587 op->nfso_own = towp;
1588 LIST_INSERT_HEAD(&towp->nfsow_open, op, nfso_list);
/* The open moved from local (delegation) state to server state. */
1589 newnfsstats.cllocalopens--;
1590 newnfsstats.clopens++;
1593 /* Just add the openowner to the client list */
1594 LIST_REMOVE(owp, nfsow_list);
1595 owp->nfsow_clp = clp;
1596 LIST_INSERT_HEAD(&clp->nfsc_owner, owp, nfsow_list);
1597 newnfsstats.cllocalopenowners--;
1598 newnfsstats.clopenowners++;
1599 newnfsstats.cllocalopens--;
1600 newnfsstats.clopens++;
/* Warn (once) when locally issued locks are being thrown away. */
1605 if (!printed && !LIST_EMPTY(&dp->nfsdl_lock)) {
1607 printf("nfsv4 expired locks lost\n");
1609 nfscl_cleandeleg(dp);
1610 nfscl_freedeleg(&clp->nfsc_deleg, dp);
1613 if (!TAILQ_EMPTY(&clp->nfsc_deleg))
1617 * Now, try and reopen against the server.
1619 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
/* Fresh clientid: openowner seqids restart at 0. */
1620 owp->nfsow_seqid = 0;
1621 LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
1622 ret = nfscl_expireopen(clp, op, nmp, cred, p);
1623 if (ret && !printed) {
1625 printf("nfsv4 expired locks lost\n");
1628 if (LIST_EMPTY(&owp->nfsow_open))
1629 nfscl_freeopenowner(owp, 0);
1634 * This function must be called after the process represented by "own" has
1635 * exited. Must be called with CLSTATE lock held.
1638 nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own)
1640 struct nfsclowner *owp, *nowp;
1641 struct nfsclopen *op;
1642 struct nfscllockowner *lp, *nlp;
1643 struct nfscldeleg *dp;
1645 /* First, get rid of local locks on delegations. */
1646 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
1647 LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
1648 if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
/* If another thread is waiting on the lockowner, don't free it here. */
1649 if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
1651 nfscl_freelockowner(lp, 1);
/* Next, walk the openowners looking for the exited process' owner name. */
1655 owp = LIST_FIRST(&clp->nfsc_owner);
1656 while (owp != NULL) {
1657 nowp = LIST_NEXT(owp, nfsow_list);
1658 if (!NFSBCMP(owp->nfsow_owner, own,
1659 NFSV4CL_LOCKNAMELEN)) {
1661 * If there are children that haven't closed the
1662 * file descriptors yet, the opens will still be
1663 * here. For that case, let the renew thread clear
1664 * out the OpenOwner later.
1666 if (LIST_EMPTY(&owp->nfsow_open))
1667 nfscl_freeopenowner(owp, 0);
/* Still has opens: mark defunct so the renew thread reaps it later. */
1669 owp->nfsow_defunct = 1;
1671 /* look for lockowners on other opens */
1672 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1673 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1674 if (!NFSBCMP(lp->nfsl_owner, own,
1675 NFSV4CL_LOCKNAMELEN))
1676 lp->nfsl_defunct = 1;
1685 * Find open/lock owners for processes that have exited.
1688 nfscl_cleanupkext(struct nfsclclient *clp)
1690 struct nfsclowner *owp, *nowp;
/* For each openowner whose process no longer exists, reap its state. */
1694 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
1695 if (nfscl_procdoesntexist(owp->nfsow_owner))
1696 nfscl_cleanup_common(clp, owp->nfsow_owner);
/* Matching NFSPROCLISTLOCK() presumably precedes the loop (not visible). */
1699 NFSPROCLISTUNLOCK();
/*
 * NOTE(review): presumably written while holding the client-state mutex so
 * that a prior MNTK_UNMOUNTF store becomes visible to other threads; see
 * the memory-visibility explanation in nfscl_umount().  Confirm the store.
 */
1702 static int fake_global; /* Used to force visibility of MNTK_UNMOUNTF */
1704 * Called from nfs umount to free up the clientid.
1707 nfscl_umount(struct nfsmount *nmp, NFSPROC_T *p)
1709 struct nfsclclient *clp;
1714 * For the case that matters, this is the thread that set
1715 * MNTK_UNMOUNTF, so it will see it set. The code that follows is
1716 * done to ensure that any thread executing nfscl_getcl() after
1717 * this time, will see MNTK_UNMOUNTF set. nfscl_getcl() uses the
1718 * mutex for NFSLOCKCLSTATE(), so it is "m" for the following
1719 * explanation, courtesy of Alan Cox.
1720 * What follows is a snippet from Alan Cox's email at:
1721 * http://docs.FreeBSD.org/cgi/
1722 * mid.cgi?BANLkTikR3d65zPHo9==08ZfJ2vmqZucEvw
1724 * 1. Set MNTK_UNMOUNTF
1725 * 2. Acquire a standard FreeBSD mutex "m".
1726 * 3. Update some data structures.
1727 * 4. Release mutex "m".
1729 * Then, other threads that acquire "m" after step 4 has occurred will
1730 * see MNTK_UNMOUNTF as set. But, other threads that beat thread X to
1731 * step 2 may or may not see MNTK_UNMOUNTF as set.
1734 if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
/* A client must have been initialized before it can be torn down. */
1742 if ((clp->nfsc_flags & NFSCLFLAGS_INITED) == 0)
1743 panic("nfscl umount");
1746 * First, handshake with the nfscl renew thread, to terminate
/* Wait for the renew thread to notice NFSCLFLAGS_UMOUNT and exit. */
1749 clp->nfsc_flags |= NFSCLFLAGS_UMOUNT;
1750 while (clp->nfsc_flags & NFSCLFLAGS_HASTHREAD)
1751 (void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT,
1755 * Now, get the exclusive lock on the client state, so
1756 * that no uses of the state are still in progress.
1759 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
1760 NFSCLSTATEMUTEXPTR, NULL);
1761 } while (!igotlock);
1765 * Free up all the state. It will expire on the server, but
1766 * maybe we should do a SetClientId/SetClientIdConfirm so
1767 * the server throws it away?
1769 LIST_REMOVE(clp, nfsc_list);
1770 nfscl_delegreturnall(clp, p);
1771 cred = newnfs_getcred();
1772 (void) nfsrpc_setclient(nmp, clp, cred, p);
1773 nfscl_cleanclient(clp);
1776 FREE((caddr_t)clp, M_NFSCLCLIENT);
1782 * This function is called when a server replies with NFSERR_STALECLIENTID
1783 * or NFSERR_STALESTATEID. It traverses the clientid lists, doing Opens
1784 * and Locks with reclaim. If these fail, it deletes the corresponding state.
/*
 * NOTE(review): listing is line-sampled; loop braces, "continue"/"break"
 * lines and some branch arms are missing from this view.
 */
1787 nfscl_recover(struct nfsclclient *clp, struct ucred *cred, NFSPROC_T *p)
1789 struct nfsclowner *owp, *nowp;
1790 struct nfsclopen *op, *nop;
1791 struct nfscllockowner *lp, *nlp;
1792 struct nfscllock *lop, *nlop;
1793 struct nfscldeleg *dp, *ndp, *tdp;
1794 struct nfsmount *nmp;
1795 struct ucred *tcred;
1796 struct nfsclopenhead extra_open;
1797 struct nfscldeleghead extra_deleg;
1800 u_int32_t delegtype = NFSV4OPEN_DELEGATEWRITE, mode;
1801 int igotlock = 0, error, trycnt, firstlock, s;
1804 * First, lock the client structure, so everyone else will
1805 * block when trying to use state.
1808 clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG;
1810 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
1811 NFSCLSTATEMUTEXPTR, NULL);
1812 } while (!igotlock);
1815 nmp = clp->nfsc_nmp;
1817 panic("nfscl recover");
/* Get a fresh clientid, retrying while the server says stale. */
1820 error = nfsrpc_setclient(nmp, clp, cred, p);
1821 } while ((error == NFSERR_STALECLIENTID ||
1822 error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
/* SetClientID failed: toss all state and abandon the recovery. */
1824 nfscl_cleanclient(clp);
1826 clp->nfsc_flags &= ~(NFSCLFLAGS_HASCLIENTID |
1827 NFSCLFLAGS_RECOVER | NFSCLFLAGS_RECVRINPROG);
1828 wakeup(&clp->nfsc_flags);
1829 nfsv4_unlock(&clp->nfsc_lock, 0);
1833 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
1834 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
1837 * Mark requests already queued on the server, so that they don't
1838 * initiate another recovery cycle. Any requests already in the
1839 * queue that handle state information will have the old stale
1840 * clientid/stateid and will get a NFSERR_STALESTATEID or
1841 * NFSERR_STALECLIENTID reply from the server. This will be
1842 * translated to NFSERR_STALEDONTRECOVER when R_DONTRECOVER is set.
1846 TAILQ_FOREACH(rep, &nfsd_reqq, r_chain) {
1847 if (rep->r_nmp == nmp)
1848 rep->r_flags |= R_DONTRECOVER;
1854 * Now, mark all delegations "need reclaim".
1856 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list)
1857 dp->nfsdl_flags |= NFSCLDL_NEEDRECLAIM;
1859 TAILQ_INIT(&extra_deleg);
1860 LIST_INIT(&extra_open);
1862 * Now traverse the state lists, doing Open and Lock Reclaims.
1864 tcred = newnfs_getcred();
1865 owp = LIST_FIRST(&clp->nfsc_owner);
1866 while (owp != NULL) {
1867 nowp = LIST_NEXT(owp, nfsow_list);
/* Fresh clientid: openowner seqids restart at 0. */
1868 owp->nfsow_seqid = 0;
1869 op = LIST_FIRST(&owp->nfsow_open);
1870 while (op != NULL) {
1871 nop = LIST_NEXT(op, nfso_list);
1872 if (error != NFSERR_NOGRACE) {
1873 /* Search for a delegation to reclaim with the open */
1874 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
1875 if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
1877 if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
1878 mode = NFSV4OPEN_ACCESSWRITE;
1879 delegtype = NFSV4OPEN_DELEGATEWRITE;
1881 mode = NFSV4OPEN_ACCESSREAD;
1882 delegtype = NFSV4OPEN_DELEGATEREAD;
1884 if ((op->nfso_mode & mode) == mode &&
1885 op->nfso_fhlen == dp->nfsdl_fhlen &&
1886 !NFSBCMP(op->nfso_fh, dp->nfsdl_fh, op->nfso_fhlen))
1891 delegtype = NFSV4OPEN_DELEGATENONE;
1892 newnfs_copycred(&op->nfso_cred, tcred);
/* Attempt the open reclaim against the new clientid. */
1893 error = nfscl_tryopen(nmp, NULL, op->nfso_fh,
1894 op->nfso_fhlen, op->nfso_fh, op->nfso_fhlen,
1895 op->nfso_mode, op, NULL, 0, &ndp, 1, delegtype,
1898 /* Handle any replied delegation */
1899 if (ndp != NULL && ((ndp->nfsdl_flags & NFSCLDL_WRITE)
1900 || NFSMNT_RDONLY(nmp->nm_mountp))) {
1901 if ((ndp->nfsdl_flags & NFSCLDL_WRITE))
1902 mode = NFSV4OPEN_ACCESSWRITE;
1904 mode = NFSV4OPEN_ACCESSREAD;
1905 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
1906 if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
1908 if ((op->nfso_mode & mode) == mode &&
1909 op->nfso_fhlen == dp->nfsdl_fhlen &&
1910 !NFSBCMP(op->nfso_fh, dp->nfsdl_fh,
/* Matching delegation found: refresh it from the reply, free ndp. */
1912 dp->nfsdl_stateid = ndp->nfsdl_stateid;
1913 dp->nfsdl_sizelimit = ndp->nfsdl_sizelimit;
1914 dp->nfsdl_ace = ndp->nfsdl_ace;
1915 dp->nfsdl_change = ndp->nfsdl_change;
1916 dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
1917 if ((ndp->nfsdl_flags & NFSCLDL_RECALL))
1918 dp->nfsdl_flags |= NFSCLDL_RECALL;
1919 FREE((caddr_t)ndp, M_NFSCLDELEG);
/* Unmatched delegation from the reply: save it to return later. */
1926 TAILQ_INSERT_HEAD(&extra_deleg, ndp, nfsdl_list);
1928 /* and reclaim all byte range locks */
1929 lp = LIST_FIRST(&op->nfso_lock);
1930 while (lp != NULL) {
1931 nlp = LIST_NEXT(lp, nfsl_list);
1934 lop = LIST_FIRST(&lp->nfsl_lock);
1935 while (lop != NULL) {
1936 nlop = LIST_NEXT(lop, nfslo_list);
/* An end of NFS64BITSSET means an unbounded (to-EOF) range. */
1937 if (lop->nfslo_end == NFS64BITSSET)
1940 len = lop->nfslo_end - lop->nfslo_first;
1941 if (error != NFSERR_NOGRACE)
1942 error = nfscl_trylock(nmp, NULL,
1943 op->nfso_fh, op->nfso_fhlen, lp,
1944 firstlock, 1, lop->nfslo_first, len,
1945 lop->nfslo_type, tcred, p);
1947 nfscl_freelock(lop, 0);
1952 /* If no locks, but a lockowner, just delete it. */
1953 if (LIST_EMPTY(&lp->nfsl_lock))
1954 nfscl_freelockowner(lp, 0);
1958 nfscl_freeopen(op, 0);
1967 * Now, try and get any delegations not yet reclaimed by cobbling
1968 * to-gether an appropriate open.
1971 dp = TAILQ_FIRST(&clp->nfsc_deleg);
1972 while (dp != NULL) {
1973 ndp = TAILQ_NEXT(dp, nfsdl_list);
1974 if ((dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) {
/* Build a throw-away openowner used only for this reclaim. */
1976 MALLOC(nowp, struct nfsclowner *,
1977 sizeof (struct nfsclowner), M_NFSCLOWNER, M_WAITOK);
1979 * Name must be as long an largest possible
1980 * NFSV4CL_LOCKNAMELEN. 12 for now.
1982 NFSBCOPY("RECLAIMDELEG", nowp->nfsow_owner,
1983 NFSV4CL_LOCKNAMELEN);
1984 LIST_INIT(&nowp->nfsow_open);
1985 nowp->nfsow_clp = clp;
1986 nowp->nfsow_seqid = 0;
1987 nowp->nfsow_defunct = 0;
1988 nfscl_lockinit(&nowp->nfsow_rwlock);
1991 if (error != NFSERR_NOGRACE) {
/* Open struct has a variable-length fh tail, hence the "- 1". */
1992 MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) +
1993 dp->nfsdl_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
1994 nop->nfso_own = nowp;
1995 if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
1996 nop->nfso_mode = NFSV4OPEN_ACCESSWRITE;
1997 delegtype = NFSV4OPEN_DELEGATEWRITE;
1999 nop->nfso_mode = NFSV4OPEN_ACCESSREAD;
2000 delegtype = NFSV4OPEN_DELEGATEREAD;
2002 nop->nfso_opencnt = 0;
2003 nop->nfso_posixlock = 1;
2004 nop->nfso_fhlen = dp->nfsdl_fhlen;
2005 NFSBCOPY(dp->nfsdl_fh, nop->nfso_fh, dp->nfsdl_fhlen);
2006 LIST_INIT(&nop->nfso_lock);
/* Start with an all-zero stateid for the fresh open. */
2007 nop->nfso_stateid.seqid = 0;
2008 nop->nfso_stateid.other[0] = 0;
2009 nop->nfso_stateid.other[1] = 0;
2010 nop->nfso_stateid.other[2] = 0;
2011 newnfs_copycred(&dp->nfsdl_cred, tcred);
2012 newnfs_copyincred(tcred, &nop->nfso_cred);
2014 error = nfscl_tryopen(nmp, NULL, nop->nfso_fh,
2015 nop->nfso_fhlen, nop->nfso_fh, nop->nfso_fhlen,
2016 nop->nfso_mode, nop, NULL, 0, &tdp, 1,
2017 delegtype, tcred, p);
2019 if ((tdp->nfsdl_flags & NFSCLDL_WRITE))
2020 mode = NFSV4OPEN_ACCESSWRITE;
2022 mode = NFSV4OPEN_ACCESSREAD;
2023 if ((nop->nfso_mode & mode) == mode &&
2024 nop->nfso_fhlen == tdp->nfsdl_fhlen &&
2025 !NFSBCMP(nop->nfso_fh, tdp->nfsdl_fh,
/* Reclaimed: refresh the old delegation from the reply, free tdp. */
2027 dp->nfsdl_stateid = tdp->nfsdl_stateid;
2028 dp->nfsdl_sizelimit = tdp->nfsdl_sizelimit;
2029 dp->nfsdl_ace = tdp->nfsdl_ace;
2030 dp->nfsdl_change = tdp->nfsdl_change;
2031 dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
2032 if ((tdp->nfsdl_flags & NFSCLDL_RECALL))
2033 dp->nfsdl_flags |= NFSCLDL_RECALL;
2034 FREE((caddr_t)tdp, M_NFSCLDELEG);
2036 TAILQ_INSERT_HEAD(&extra_deleg, tdp, nfsdl_list);
2042 FREE((caddr_t)nop, M_NFSCLOPEN);
2044 * Couldn't reclaim it, so throw the state
2047 nfscl_cleandeleg(dp);
2048 nfscl_freedeleg(&clp->nfsc_deleg, dp);
2050 LIST_INSERT_HEAD(&extra_open, nop, nfso_list);
2057 * Now, get rid of extra Opens and Delegations.
2059 LIST_FOREACH_SAFE(op, &extra_open, nfso_list, nop) {
2061 newnfs_copycred(&op->nfso_cred, tcred);
/* Retry the Close while the server is still in its grace period. */
2062 error = nfscl_tryclose(op, tcred, nmp, p);
2063 if (error == NFSERR_GRACE)
2064 (void) nfs_catnap(PZERO, error, "nfsexcls");
2065 } while (error == NFSERR_GRACE);
2066 LIST_REMOVE(op, nfso_list);
2067 FREE((caddr_t)op, M_NFSCLOPEN);
2070 FREE((caddr_t)nowp, M_NFSCLOWNER);
/* Likewise retry DelegReturn for leftover delegations during grace. */
2072 TAILQ_FOREACH_SAFE(dp, &extra_deleg, nfsdl_list, ndp) {
2074 newnfs_copycred(&dp->nfsdl_cred, tcred);
2075 error = nfscl_trydelegreturn(dp, tcred, nmp, p);
2076 if (error == NFSERR_GRACE)
2077 (void) nfs_catnap(PZERO, error, "nfsexdlg");
2078 } while (error == NFSERR_GRACE);
2079 TAILQ_REMOVE(&extra_deleg, dp, nfsdl_list);
2080 FREE((caddr_t)dp, M_NFSCLDELEG);
/* Recovery complete: clear the in-progress flag and wake waiters. */
2084 clp->nfsc_flags &= ~NFSCLFLAGS_RECVRINPROG;
2085 wakeup(&clp->nfsc_flags);
2086 nfsv4_unlock(&clp->nfsc_lock, 0);
2092 * This function is called when a server replies with NFSERR_EXPIRED.
2093 * It deletes all state for the client and does a fresh SetClientId/confirm.
2094 * XXX Someday it should post a signal to the process(es) that hold the
2095 * state, so they know that lock state has been lost.
2098 nfscl_hasexpired(struct nfsclclient *clp, u_int32_t clidrev, NFSPROC_T *p)
2100 struct nfsmount *nmp;
2102 int igotlock = 0, error, trycnt;
2105 * If the clientid has gone away or a new SetClientid has already
2106 * been done, just return ok.
2108 if (clp == NULL || clidrev != clp->nfsc_clientidrev)
2112 * First, lock the client structure, so everyone else will
2113 * block when trying to use state. Also, use NFSCLFLAGS_EXPIREIT so
2114 * that only one thread does the work.
2117 clp->nfsc_flags |= NFSCLFLAGS_EXPIREIT;
2119 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
2120 NFSCLSTATEMUTEXPTR, NULL);
2121 } while (!igotlock && (clp->nfsc_flags & NFSCLFLAGS_EXPIREIT));
/* Flag cleared while we waited: another thread did the expiry work. */
2122 if ((clp->nfsc_flags & NFSCLFLAGS_EXPIREIT) == 0) {
2124 nfsv4_unlock(&clp->nfsc_lock, 0);
2128 clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG;
2131 nmp = clp->nfsc_nmp;
2133 panic("nfscl expired");
2134 cred = newnfs_getcred();
/* Get a fresh clientid, retrying while the server says stale. */
2137 error = nfsrpc_setclient(nmp, clp, cred, p);
2138 } while ((error == NFSERR_STALECLIENTID ||
2139 error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
2142 * Clear out any state.
2144 nfscl_cleanclient(clp);
2146 clp->nfsc_flags &= ~(NFSCLFLAGS_HASCLIENTID |
2147 NFSCLFLAGS_RECOVER);
2150 * Expire the state for the client.
2152 nfscl_expireclient(clp, nmp, cred, p);
2154 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
2155 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
/* Done: drop the single-worker and in-progress flags and wake waiters. */
2157 clp->nfsc_flags &= ~(NFSCLFLAGS_EXPIREIT | NFSCLFLAGS_RECVRINPROG);
2158 wakeup(&clp->nfsc_flags);
2159 nfsv4_unlock(&clp->nfsc_lock, 0);
2166 * This function inserts a lock in the list after insert_lop.
2169 nfscl_insertlock(struct nfscllockowner *lp, struct nfscllock *new_lop,
2170 struct nfscllock *insert_lop, int local)
/* Passing the (cast) lockowner itself as insert_lop means "insert at head". */
2173 if ((struct nfscllockowner *)insert_lop == lp)
2174 LIST_INSERT_HEAD(&lp->nfsl_lock, new_lop, nfslo_list);
2176 LIST_INSERT_AFTER(insert_lop, new_lop, nfslo_list);
/* Account the new lock as local or server state ("local" branch not visible). */
2178 newnfsstats.cllocallocks++;
2180 newnfsstats.cllocks++;
2184 * This function updates the locking for a lock owner and given file. It
2185 * maintains a list of lock ranges ordered on increasing file offset that
2186 * are NFSCLLOCK_READ or NFSCLLOCK_WRITE and non-overlapping (aka POSIX style).
2187 * It always adds new_lop to the list and sometimes uses the one pointed
2189 * Returns 1 if the locks were modified, 0 otherwise.
/*
 * NOTE(review): line-sampled listing; the "unlock" handling and some branch
 * lines are missing between the numbered lines below.
 */
2192 nfscl_updatelock(struct nfscllockowner *lp, struct nfscllock **new_lopp,
2193 struct nfscllock **other_lopp, int local)
2195 struct nfscllock *new_lop = *new_lopp;
2196 struct nfscllock *lop, *tlop, *ilop;
2197 struct nfscllock *other_lop;
2198 int unlock = 0, modified = 0;
2202 * Work down the list until the lock is merged.
2204 if (new_lop->nfslo_type == F_UNLCK)
/* ilop starts as the list-head sentinel (cast of lp); see nfscl_insertlock. */
2206 ilop = (struct nfscllock *)lp;
2207 lop = LIST_FIRST(&lp->nfsl_lock);
2208 while (lop != NULL) {
2210 * Only check locks for this file that aren't before the start of
2213 if (lop->nfslo_end >= new_lop->nfslo_first) {
2214 if (new_lop->nfslo_end < lop->nfslo_first) {
2216 * If the new lock ends before the start of the
2217 * current lock's range, no merge, just insert
2222 if (new_lop->nfslo_type == lop->nfslo_type ||
2223 (new_lop->nfslo_first <= lop->nfslo_first &&
2224 new_lop->nfslo_end >= lop->nfslo_end)) {
2226 * This lock can be absorbed by the new lock/unlock.
2227 * This happens when it covers the entire range
2228 * of the old lock or is contiguous
2229 * with the old lock and is of the same type or an
2232 if (new_lop->nfslo_type != lop->nfslo_type ||
2233 new_lop->nfslo_first != lop->nfslo_first ||
2234 new_lop->nfslo_end != lop->nfslo_end)
/* Grow the new lock to cover the absorbed one, then free the old. */
2236 if (lop->nfslo_first < new_lop->nfslo_first)
2237 new_lop->nfslo_first = lop->nfslo_first;
2238 if (lop->nfslo_end > new_lop->nfslo_end)
2239 new_lop->nfslo_end = lop->nfslo_end;
2241 lop = LIST_NEXT(lop, nfslo_list);
2242 nfscl_freelock(tlop, local);
2247 * All these cases are for contiguous locks that are not the
2248 * same type, so they can't be merged.
2250 if (new_lop->nfslo_first <= lop->nfslo_first) {
2252 * This case is where the new lock overlaps with the
2253 * first part of the old lock. Move the start of the
2254 * old lock to just past the end of the new lock. The
2255 * new lock will be inserted in front of the old, since
2256 * ilop hasn't been updated. (We are done now.)
2258 if (lop->nfslo_first != new_lop->nfslo_end) {
2259 lop->nfslo_first = new_lop->nfslo_end;
2264 if (new_lop->nfslo_end >= lop->nfslo_end) {
2266 * This case is where the new lock overlaps with the
2267 * end of the old lock's range. Move the old lock's
2268 * end to just before the new lock's first and insert
2269 * the new lock after the old lock.
2270 * Might not be done yet, since the new lock could
2271 * overlap further locks with higher ranges.
2273 if (lop->nfslo_end != new_lop->nfslo_first) {
2274 lop->nfslo_end = new_lop->nfslo_first;
2278 lop = LIST_NEXT(lop, nfslo_list);
2282 * The final case is where the new lock's range is in the
2283 * middle of the current lock's and splits the current lock
2284 * up. Use *other_lopp to handle the second part of the
2285 * split old lock range. (We are done now.)
2286 * For unlock, we use new_lop as other_lop and tmp, since
2287 * other_lop and new_lop are the same for this case.
2288 * We noted the unlock case above, so we don't need
2289 * new_lop->nfslo_type any longer.
2291 tmp = new_lop->nfslo_first;
2293 other_lop = new_lop;
2296 other_lop = *other_lopp;
/* other_lop becomes the tail half of the split; lop keeps the head half. */
2299 other_lop->nfslo_first = new_lop->nfslo_end;
2300 other_lop->nfslo_end = lop->nfslo_end;
2301 other_lop->nfslo_type = lop->nfslo_type;
2302 lop->nfslo_end = tmp;
2303 nfscl_insertlock(lp, other_lop, lop, local);
2309 lop = LIST_NEXT(lop, nfslo_list);
2315 * Insert the new lock in the list at the appropriate place.
2318 nfscl_insertlock(lp, new_lop, ilop, local);
2326 * This function must be run as a kernel thread.
2327 * It does Renew Ops and recovery, when required.
/*
 * NOTE(review): line-sampled listing; the thread's outer loop construct,
 * braces and several branch lines are not visible between the numbered
 * lines below.
 */
2330 nfscl_renewthread(struct nfsclclient *clp, NFSPROC_T *p)
2332 struct nfsclowner *owp, *nowp;
2333 struct nfsclopen *op;
2334 struct nfscllockowner *lp, *nlp, *olp;
2335 struct nfscldeleghead dh;
2336 struct nfscllockownerhead lh;
2337 struct nfscldeleg *dp, *ndp;
2340 int error, cbpathdown, islept, igotlock, ret, clearok;
2341 uint32_t recover_done_time = 0;
2342 struct timespec mytime;
2343 static time_t prevsec = 0;
2345 cred = newnfs_getcred();
2347 clp->nfsc_flags |= NFSCLFLAGS_HASTHREAD;
2350 newnfs_setroot(cred);
2352 if (clp->nfsc_flags & NFSCLFLAGS_RECOVER) {
2354 * Only allow one recover within 1/2 of the lease
2355 * duration (nfsc_renew).
2357 if (recover_done_time < NFSD_MONOSEC) {
2358 recover_done_time = NFSD_MONOSEC +
2360 nfscl_recover(clp, cred, p);
2363 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
/* Lease is due: issue a Renew RPC and handle its error replies. */
2367 if (clp->nfsc_expire <= NFSD_MONOSEC &&
2368 (clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID)) {
2369 clp->nfsc_expire = NFSD_MONOSEC + clp->nfsc_renew;
2370 clidrev = clp->nfsc_clientidrev;
2371 error = nfsrpc_renew(clp, cred, p);
2372 if (error == NFSERR_CBPATHDOWN)
2374 else if (error == NFSERR_STALECLIENTID) {
2376 clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
2378 } else if (error == NFSERR_EXPIRED)
2379 (void) nfscl_hasexpired(clp, clidrev, p);
2386 /* It's a Total Recall! */
2387 nfscl_totalrecall(clp);
2390 * Now, handle defunct owners.
2392 owp = LIST_FIRST(&clp->nfsc_owner);
2393 while (owp != NULL) {
2394 nowp = LIST_NEXT(owp, nfsow_list);
2395 if (LIST_EMPTY(&owp->nfsow_open)) {
2396 if (owp->nfsow_defunct)
2397 nfscl_freeopenowner(owp, 0);
2399 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
2400 lp = LIST_FIRST(&op->nfso_lock);
2401 while (lp != NULL) {
2402 nlp = LIST_NEXT(lp, nfsl_list);
2403 if (lp->nfsl_defunct &&
2404 LIST_EMPTY(&lp->nfsl_lock)) {
/* Collect at most one lockowner per owner name on local list "lh". */
2405 LIST_FOREACH(olp, &lh, nfsl_list) {
2406 if (!NFSBCMP(olp->nfsl_owner,
2407 lp->nfsl_owner,NFSV4CL_LOCKNAMELEN))
2411 LIST_REMOVE(lp, nfsl_list);
2412 LIST_INSERT_HEAD(&lh, lp, nfsl_list);
2414 nfscl_freelockowner(lp, 0);
2424 /* and release defunct lock owners */
2425 LIST_FOREACH_SAFE(lp, &lh, nfsl_list, nlp) {
2426 nfscl_freelockowner(lp, 0);
2430 * Do the recall on any delegations. To avoid trouble, always
2431 * come back up here after having slept.
2435 dp = TAILQ_FIRST(&clp->nfsc_deleg);
2436 while (dp != NULL) {
2437 ndp = TAILQ_NEXT(dp, nfsdl_list);
2438 if ((dp->nfsdl_flags & NFSCLDL_RECALL)) {
2440 * Wait for outstanding I/O ops to be done.
2442 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
2444 nfsv4_unlock(&clp->nfsc_lock, 0);
2447 dp->nfsdl_rwlock.nfslock_lock |=
2449 (void) nfsmsleep(&dp->nfsdl_rwlock,
2450 NFSCLSTATEMUTEXPTR, PZERO, "nfscld",
2455 igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
2456 &islept, NFSCLSTATEMUTEXPTR, NULL);
2461 newnfs_copycred(&dp->nfsdl_cred, cred);
2462 ret = nfscl_recalldeleg(clp, clp->nfsc_nmp, dp,
/* Recall handled: move the delegation to "dh" for DelegReturn below. */
2465 nfscl_cleandeleg(dp);
2466 TAILQ_REMOVE(&clp->nfsc_deleg, dp,
2468 LIST_REMOVE(dp, nfsdl_hash);
2469 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
2471 newnfsstats.cldelegates--;
2479 * Clear out old delegations, if we are above the high water
2480 * mark. Only clear out ones with no state related to them.
2481 * The tailq list is in LRU order.
2483 dp = TAILQ_LAST(&clp->nfsc_deleg, nfscldeleghead);
2484 while (nfscl_delegcnt > nfscl_deleghighwater && dp != NULL) {
2485 ndp = TAILQ_PREV(dp, nfscldeleghead, nfsdl_list);
2486 if (dp->nfsdl_rwlock.nfslock_usecnt == 0 &&
2487 dp->nfsdl_rwlock.nfslock_lock == 0 &&
2488 dp->nfsdl_timestamp < NFSD_MONOSEC &&
2489 (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_ZAPPED |
2490 NFSCLDL_NEEDRECLAIM | NFSCLDL_DELEGRET)) == 0) {
/* Only discard a delegation with no opens or locks referencing it. */
2492 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
2493 op = LIST_FIRST(&owp->nfsow_open);
2500 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
2501 if (!LIST_EMPTY(&lp->nfsl_lock)) {
2508 TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
2509 LIST_REMOVE(dp, nfsdl_hash);
2510 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
2512 newnfsstats.cldelegates--;
2518 nfsv4_unlock(&clp->nfsc_lock, 0);
2522 * Delegreturn any delegations cleaned out or recalled.
2524 TAILQ_FOREACH_SAFE(dp, &dh, nfsdl_list, ndp) {
2525 newnfs_copycred(&dp->nfsdl_cred, cred);
2526 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
2527 TAILQ_REMOVE(&dh, dp, nfsdl_list);
2528 FREE((caddr_t)dp, M_NFSCLDELEG);
2532 * Call nfscl_cleanupkext() once per second to check for
2533 * open/lock owners where the process has exited.
2535 NFSGETNANOTIME(&mytime);
2536 if (prevsec != mytime.tv_sec) {
2537 prevsec = mytime.tv_sec;
2538 nfscl_cleanupkext(clp);
/* Sleep unless recovery is pending (timeout argument not visible here). */
2542 if ((clp->nfsc_flags & NFSCLFLAGS_RECOVER) == 0)
2543 (void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT, "nfscl",
/* Umount requested: drop the HASTHREAD flag and wake nfscl_umount(). */
2545 if (clp->nfsc_flags & NFSCLFLAGS_UMOUNT) {
2546 clp->nfsc_flags &= ~NFSCLFLAGS_HASTHREAD;
2549 wakeup((caddr_t)clp);
2557 * Initiate state recovery. Called when NFSERR_STALECLIENTID or
2558 * NFSERR_STALESTATEID is received.
2561 nfscl_initiate_recovery(struct nfsclclient *clp)
/* Set the recover flag and wake the renew thread, which does the work. */
2567 clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
2569 wakeup((caddr_t)clp);
2573 * Dump out the state stuff for debugging.
/*
 * The openowner/opens/lockowner/locks flags select which categories are
 * printed.  NOTE(review): line-sampled; some "if (...)" guard lines for the
 * printf calls are missing from this view.
 */
2576 nfscl_dumpstate(struct nfsmount *nmp, int openowner, int opens,
2577 int lockowner, int locks)
2579 struct nfsclclient *clp;
2580 struct nfsclowner *owp;
2581 struct nfsclopen *op;
2582 struct nfscllockowner *lp;
2583 struct nfscllock *lop;
2584 struct nfscldeleg *dp;
2588 printf("nfscl dumpstate NULL clp\n");
/* First pass: state hanging off delegations. */
2592 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
2593 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
2594 if (openowner && !LIST_EMPTY(&owp->nfsow_open))
2595 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
2596 owp->nfsow_owner[0], owp->nfsow_owner[1],
2597 owp->nfsow_owner[2], owp->nfsow_owner[3],
2599 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
2601 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
2602 op->nfso_stateid.other[0], op->nfso_stateid.other[1],
2603 op->nfso_stateid.other[2], op->nfso_opencnt,
2605 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
2607 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
2608 lp->nfsl_owner[0], lp->nfsl_owner[1],
2609 lp->nfsl_owner[2], lp->nfsl_owner[3],
2611 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
2612 lp->nfsl_stateid.other[2]);
2613 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
2616 printf("lck typ=%d fst=%ju end=%ju\n",
2617 lop->nfslo_type, (intmax_t)lop->nfslo_first,
2618 (intmax_t)lop->nfslo_end);
2620 printf("lck typ=%d fst=%qd end=%qd\n",
2621 lop->nfslo_type, lop->nfslo_first,
/* Second pass: the same dump for the non-delegation (server) state. */
2629 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
2630 if (openowner && !LIST_EMPTY(&owp->nfsow_open))
2631 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
2632 owp->nfsow_owner[0], owp->nfsow_owner[1],
2633 owp->nfsow_owner[2], owp->nfsow_owner[3],
2635 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
2637 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
2638 op->nfso_stateid.other[0], op->nfso_stateid.other[1],
2639 op->nfso_stateid.other[2], op->nfso_opencnt,
2641 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
2643 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
2644 lp->nfsl_owner[0], lp->nfsl_owner[1],
2645 lp->nfsl_owner[2], lp->nfsl_owner[3],
2647 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
2648 lp->nfsl_stateid.other[2]);
2649 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
2652 printf("lck typ=%d fst=%ju end=%ju\n",
2653 lop->nfslo_type, (intmax_t)lop->nfslo_first,
2654 (intmax_t)lop->nfslo_end);
2656 printf("lck typ=%d fst=%qd end=%qd\n",
2657 lop->nfslo_type, lop->nfslo_first,
/*
 * NOTE(review): this chunk is an elided excerpt of the original file —
 * interior lines (return type, conditions, braces) are missing — so the
 * code below is left byte-identical and only comments are added.
 */
2668 * Check for duplicate open owners and opens.
2669 * (Only used as a diagnostic aid.)
2672 nfscl_dupopen(vnode_t vp, int dupopens)
2674 struct nfsclclient *clp;
2675 struct nfsclowner *owp, *owp2;
2676 struct nfsclopen *op, *op2;
2679 clp = VFSTONFS(vnode_mount(vp))->nm_clp;
2681 printf("nfscl dupopen NULL clp\n");
2684 nfhp = VTONFS(vp)->n_fhp;
/* Phase 1: O(n^2) scan of all openowners for duplicated owner names. */
2688 * First, search for duplicate owners.
2689 * These should never happen!
2691 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
2692 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
2694 !NFSBCMP(owp->nfsow_owner, owp2->nfsow_owner,
2695 NFSV4CL_LOCKNAMELEN)) {
2697 printf("DUP OWNER\n");
2698 nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0, 0);
/*
 * Phase 2: scan all opens for duplicated non-zero stateids
 * (the all-zero stateid is excluded below).
 */
2705 * Now, search for duplicate stateids.
2706 * These shouldn't happen, either.
2708 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
2709 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
2710 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
2711 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
2713 (op->nfso_stateid.other[0] != 0 ||
2714 op->nfso_stateid.other[1] != 0 ||
2715 op->nfso_stateid.other[2] != 0) &&
2716 op->nfso_stateid.other[0] == op2->nfso_stateid.other[0] &&
2717 op->nfso_stateid.other[1] == op2->nfso_stateid.other[1] &&
2718 op->nfso_stateid.other[2] == op2->nfso_stateid.other[2]) {
2720 printf("DUP STATEID\n");
2721 nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0,
/*
 * Phase 3: duplicate opens for this file handle. A same-owner
 * duplicate ("BADDUP OPEN") is always a bug; cross-owner duplicates
 * are only reported when "dupopens" is set (per the comment below).
 */
2731 * Now search for duplicate opens.
2732 * Duplicate opens for the same owner
2733 * should never occur. Other duplicates are
2734 * possible and are checked for if "dupopens"
2737 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
2738 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
2739 if (nfhp->nfh_len == op2->nfso_fhlen &&
2740 !NFSBCMP(nfhp->nfh_fh, op2->nfso_fh, nfhp->nfh_len)) {
2741 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
2742 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
2743 if (op != op2 && nfhp->nfh_len == op->nfso_fhlen &&
2744 !NFSBCMP(nfhp->nfh_fh, op->nfso_fh, nfhp->nfh_len) &&
2745 (!NFSBCMP(op->nfso_own->nfsow_owner,
2746 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN) ||
2748 if (!NFSBCMP(op->nfso_own->nfsow_owner,
2749 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
2751 printf("BADDUP OPEN\n");
2754 printf("DUP OPEN\n");
2756 nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1,
/*
 * NOTE(review): elided excerpt — interior lines missing; code left
 * byte-identical, comments only.
 */
2769 * During close, find an open that needs to be dereferenced and
2770 * dereference it. If there are no more opens for this file,
2771 * log a message to that effect.
2772 * Opens aren't actually Close'd until VOP_INACTIVE() is performed
2773 * on the file's vnode.
2774 * This is the safe way, since it is difficult to identify
2775 * which open the close is for and I/O can be performed after the
2776 * close(2) system call when a file is mmap'd.
2777 * If it returns 0 for success, there will be a referenced
2778 * clp returned via clpp.
2781 nfscl_getclose(vnode_t vp, struct nfsclclient **clpp)
2783 struct nfsclclient *clp;
2784 struct nfsclowner *owp;
2785 struct nfsclopen *op;
2786 struct nfscldeleg *dp;
/* Obtain a referenced clientid for this mount; returned via *clpp. */
2790 error = nfscl_getcl(vp, NULL, NULL, &clp);
2795 nfhp = VTONFS(vp)->n_fhp;
2799 * First, look for one under a delegation that was locally issued
2800 * and just decrement the opencnt for it. Since all my Opens against
2801 * the server are DENY_NONE, I don't see a problem with hanging
2802 * onto them. (It is much easier to use one of the extant Opens
2803 * that I already have on the server when a Delegation is recalled
2804 * than to do fresh Opens.) Someday, I might need to rethink this, but.
2806 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
2808 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
2809 op = LIST_FIRST(&owp->nfsow_open);
2812 * Since a delegation is for a file, there
2813 * should never be more than one open for
2816 if (LIST_NEXT(op, nfso_list) != NULL)
2817 panic("nfscdeleg opens");
2818 if (notdecr && op->nfso_opencnt > 0) {
2827 /* Now process the opens against the server. */
2828 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
2829 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
2830 if (op->nfso_fhlen == nfhp->nfh_len &&
2831 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
2833 /* Found an open, decrement cnt if possible */
2834 if (notdecr && op->nfso_opencnt > 0) {
2839 * There are more opens, so just return.
2841 if (op->nfso_opencnt > 0) {
/* Diagnostic only: no open for this file handle was found. */
2850 printf("nfscl: never fnd open\n");
/*
 * Close all opens for the file handle of vp: first free the local
 * (delegation-issued) open structures, then do Close RPCs against the
 * server for opens with opencnt of 0.
 * NOTE(review): elided excerpt — interior lines missing; code left
 * byte-identical, comments only.
 */
2855 nfscl_doclose(vnode_t vp, struct nfsclclient **clpp, NFSPROC_T *p)
2857 struct nfsclclient *clp;
2858 struct nfsclowner *owp, *nowp;
2859 struct nfsclopen *op;
2860 struct nfscldeleg *dp;
2864 error = nfscl_getcl(vp, NULL, NULL, &clp);
2869 nfhp = VTONFS(vp)->n_fhp;
2872 * First get rid of the local Open structures, which should be no
/* Local opens hang off the delegation; free them and their owners. */
2875 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
2877 LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
2878 op = LIST_FIRST(&owp->nfsow_open);
2880 KASSERT((op->nfso_opencnt == 0),
2881 ("nfscl: bad open cnt on deleg"));
2882 nfscl_freeopen(op, 1);
2884 nfscl_freeopenowner(owp, 1);
2888 /* Now process the opens against the server. */
2890 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
2891 op = LIST_FIRST(&owp->nfsow_open);
/* Manual list walk (not LIST_FOREACH) — presumably because the close
 * path may free "op"; next pointer is fetched below before/after use.
 * TODO(review): confirm against the elided lines. */
2892 while (op != NULL) {
2893 if (op->nfso_fhlen == nfhp->nfh_len &&
2894 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
2896 /* Found an open, close it. */
2897 KASSERT((op->nfso_opencnt == 0),
2898 ("nfscl: bad open cnt on server"));
2900 nfsrpc_doclose(VFSTONFS(vnode_mount(vp)), op,
2905 op = LIST_NEXT(op, nfso_list);
/*
 * NOTE(review): elided excerpt — code left byte-identical, comments only.
 */
2913 * Return all delegations on this client.
2914 * (Must be called with client sleep lock.)
2917 nfscl_delegreturnall(struct nfsclclient *clp, NFSPROC_T *p)
2919 struct nfscldeleg *dp, *ndp;
/* Uses its own kernel credential; released in the elided epilogue. */
2922 cred = newnfs_getcred();
/* SAFE variant required: each delegation is freed inside the loop. */
2923 TAILQ_FOREACH_SAFE(dp, &clp->nfsc_deleg, nfsdl_list, ndp) {
2924 nfscl_cleandeleg(dp);
2925 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
2926 nfscl_freedeleg(&clp->nfsc_deleg, dp);
/*
 * NOTE(review): elided excerpt — many interior lines (error paths,
 * braces, XDR glue) are missing; code left byte-identical, comments only.
 * Parses and services an NFSv4 callback compound (CB_GETATTR/CB_RECALL)
 * from the server and builds the reply in nd.
 */
2935 nfscl_docb(struct nfsrv_descript *nd, NFSPROC_T *p)
2939 struct nfsclclient *clp;
2940 struct nfscldeleg *dp = NULL;
2941 int numops, taglen = -1, error = 0, trunc, ret = 0;
2942 u_int32_t minorvers, retops = 0, *retopsp = NULL, *repp, cbident;
2943 u_char tag[NFSV4_SMALLSTR + 1], *tagstr;
2949 nfsattrbit_t attrbits, rattrbits;
2950 nfsv4stateid_t stateid;
/* Decode the compound tag; heap-allocate only when it exceeds the
 * on-stack NFSV4_SMALLSTR buffer. */
2953 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
2954 taglen = fxdr_unsigned(int, *tl);
2959 if (taglen <= NFSV4_SMALLSTR)
2962 tagstr = malloc(taglen + 1, M_TEMP, M_WAITOK);
2963 error = nfsrv_mtostr(nd, tagstr, taglen);
2965 if (taglen > NFSV4_SMALLSTR)
2966 free(tagstr, M_TEMP);
/* Echo the tag back into the reply, per the protocol. */
2970 (void) nfsm_strtom(nd, tag, taglen);
2971 if (taglen > NFSV4_SMALLSTR) {
2972 free(tagstr, M_TEMP);
2974 NFSM_BUILD(retopsp, u_int32_t *, NFSX_UNSIGNED);
2975 NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2976 minorvers = fxdr_unsigned(u_int32_t, *tl++);
2977 if (minorvers != NFSV4_MINORVERSION)
2978 nd->nd_repstat = NFSERR_MINORVERMISMATCH;
/* cbident identifies which mount/clientid this callback is for. */
2979 cbident = fxdr_unsigned(u_int32_t, *tl++);
2983 numops = fxdr_unsigned(int, *tl);
2985 * Loop around doing the sub ops.
2987 for (i = 0; i < numops; i++) {
2988 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
2989 NFSM_BUILD(repp, u_int32_t *, 2 * NFSX_UNSIGNED);
2991 op = fxdr_unsigned(int, *tl);
2992 if (op < NFSV4OP_CBGETATTR || op > NFSV4OP_CBRECALL) {
2993 nd->nd_repstat = NFSERR_OPILLEGAL;
2994 *repp = nfscl_errmap(nd);
2998 nd->nd_procnum = op;
2999 newnfsstats.cbrpccnt[nd->nd_procnum]++;
/* CB_GETATTR: report size/change for a delegated file from local state. */
3001 case NFSV4OP_CBGETATTR:
3003 error = nfsm_getfh(nd, &nfhp);
3005 error = nfsrv_getattrbits(nd, &attrbits,
3008 mp = nfscl_getmnt(cbident);
3010 error = NFSERR_SERVERFAULT;
3015 clp = nfscl_findcl(VFSTONFS(mp));
3017 dp = nfscl_finddeleg(clp, nfhp->nfh_fh,
3021 error = NFSERR_SERVERFAULT;
3024 ret = nfscl_ngetreopen(mp, nfhp->nfh_fh,
3025 nfhp->nfh_len, p, &np);
3030 FREE((caddr_t)nfhp, M_NFSFH);
3032 NFSZERO_ATTRBIT(&rattrbits);
3033 if (NFSISSET_ATTRBIT(&attrbits,
/* Prefer the nfsnode's size when we got one; otherwise the
 * size cached in the delegation. */
3036 va.va_size = np->n_size;
3038 va.va_size = dp->nfsdl_size;
3039 NFSSETBIT_ATTRBIT(&rattrbits,
3042 if (NFSISSET_ATTRBIT(&attrbits,
3043 NFSATTRBIT_CHANGE)) {
3044 va.va_filerev = dp->nfsdl_change;
3045 if (ret || (np->n_flag & NDELEGMOD))
3047 NFSSETBIT_ATTRBIT(&rattrbits,
3050 (void) nfsv4_fillattr(nd, NULL, NULL, NULL, &va,
3051 NULL, 0, &rattrbits, NULL, NULL, 0, 0, 0, 0,
/* CB_RECALL: mark the matching delegation for recall and wake the
 * renew thread (the wakeup on clp below). */
3057 case NFSV4OP_CBRECALL:
3059 NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID +
3061 stateid.seqid = *tl++;
3062 NFSBCOPY((caddr_t)tl, (caddr_t)stateid.other,
3064 tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED);
3065 trunc = fxdr_unsigned(int, *tl);
3066 error = nfsm_getfh(nd, &nfhp);
3068 mp = nfscl_getmnt(cbident);
3070 error = NFSERR_SERVERFAULT;
3074 clp = nfscl_findcl(VFSTONFS(mp));
3076 dp = nfscl_finddeleg(clp, nfhp->nfh_fh,
3078 if (dp != NULL && (dp->nfsdl_flags &
3079 NFSCLDL_DELEGRET) == 0) {
3082 wakeup((caddr_t)clp);
3085 error = NFSERR_SERVERFAULT;
3090 FREE((caddr_t)nfhp, M_NFSFH);
3094 if (error == EBADRPC || error == NFSERR_BADXDR) {
3095 nd->nd_repstat = NFSERR_BADXDR;
3097 nd->nd_repstat = error;
3102 if (nd->nd_repstat) {
3103 *repp = nfscl_errmap(nd);
3106 *repp = 0; /* NFS4_OK */
3110 if (error == EBADRPC || error == NFSERR_BADXDR)
3111 nd->nd_repstat = NFSERR_BADXDR;
3113 printf("nfsv4 comperr1=%d\n", error);
3116 NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
/* Back-patch the count of ops actually processed into the reply. */
3120 *retopsp = txdr_unsigned(retops);
3122 *nd->nd_errp = nfscl_errmap(nd);
/*
 * NOTE(review): elided excerpt — code left byte-identical, comments only.
 */
3126 * Generate the next cbident value. Basically just increment a static value
3127 * and then check that it isn't already in the list, if it has wrapped around.
3130 nfscl_nextcbident(void)
3132 struct nfsclclient *clp;
/* Static counter persists across calls; 0 is never handed out. */
3134 static u_int32_t nextcbident = 0;
3135 static int haswrapped = 0;
3138 if (nextcbident == 0)
3142 * Search the clientid list for one already using this cbident.
3147 LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
3148 if (clp->nfsc_cbident == nextcbident) {
3158 return (nextcbident);
/*
 * NOTE(review): elided excerpt — code left byte-identical, comments only.
 */
3162 * Get the mount point related to a given cbident.
3165 nfscl_getmnt(u_int32_t cbident)
3167 struct nfsclclient *clp;
3168 struct nfsmount *nmp;
/* Linear scan of the global clientid list for a matching cbident;
 * the elided lines presumably return NULL when no match is found. */
3171 LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
3172 if (clp->nfsc_cbident == cbident)
3179 nmp = clp->nfsc_nmp;
3181 return (nmp->nm_mountp);
/*
 * NOTE(review): elided excerpt — code left byte-identical, comments only.
 */
3185 * Search for a lock conflict locally on the client. A conflict occurs if
3186 * - not same owner and overlapping byte range and at least one of them is
3187 * a write lock or this is an unlock.
3190 nfscl_localconflict(struct nfsclclient *clp, u_int8_t *fhp, int fhlen,
3191 struct nfscllock *nlop, u_int8_t *own, struct nfscldeleg *dp,
3192 struct nfscllock **lopp)
3194 struct nfsclowner *owp;
3195 struct nfsclopen *op;
/* First check locks held under the delegation (dp), if any. */
3199 ret = nfscl_checkconflict(&dp->nfsdl_lock, nlop, own, lopp);
/* Then check every open for the same file handle. */
3203 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3204 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3205 if (op->nfso_fhlen == fhlen &&
3206 !NFSBCMP(op->nfso_fh, fhp, fhlen)) {
3207 ret = nfscl_checkconflict(&op->nfso_lock, nlop,
/*
 * Check one lockowner list for a byte-range conflict with nlop.
 * A conflict needs a *different* owner (NFSBCMP != 0), overlapping
 * ranges, and a write lock on either side or an unlock request.
 * Returns NFSERR_DENIED and the conflicting lock via *lopp.
 * NOTE(review): elided excerpt — code left byte-identical, comments only.
 */
3218 nfscl_checkconflict(struct nfscllockownerhead *lhp, struct nfscllock *nlop,
3219 u_int8_t *own, struct nfscllock **lopp)
3221 struct nfscllockowner *lp;
3222 struct nfscllock *lop;
3224 LIST_FOREACH(lp, lhp, nfsl_list) {
3225 if (NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
3226 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
/* Ranges are [first, end); these two tests reject non-overlap. */
3227 if (lop->nfslo_first >= nlop->nfslo_end)
3229 if (lop->nfslo_end <= nlop->nfslo_first)
3231 if (lop->nfslo_type == F_WRLCK ||
3232 nlop->nfslo_type == F_WRLCK ||
3233 nlop->nfslo_type == F_UNLCK) {
3236 return (NFSERR_DENIED);
/*
 * NOTE(review): elided excerpt — code left byte-identical, comments only.
 */
3245 * Check for a local conflicting lock.
3248 nfscl_lockt(vnode_t vp, struct nfsclclient *clp, u_int64_t off,
3249 u_int64_t len, struct flock *fl, NFSPROC_T *p, void *id, int flags)
3251 struct nfscllock *lop, nlck;
3252 struct nfscldeleg *dp;
3254 u_int8_t own[NFSV4CL_LOCKNAMELEN];
/* Build a candidate lock from the flock request. */
3257 nlck.nfslo_type = fl->l_type;
3258 nlck.nfslo_first = off;
3259 if (len == NFS64BITSSET) {
/* NFS64BITSSET length means "to EOF / whole range". */
3260 nlck.nfslo_end = NFS64BITSSET;
3262 nlck.nfslo_end = off + len;
/* off + len wrapped or len == 0: reject the range. */
3263 if (nlck.nfslo_end <= nlck.nfslo_first)
3264 return (NFSERR_INVAL);
3267 nfscl_filllockowner(id, own, flags);
3269 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
3270 error = nfscl_localconflict(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len,
3271 &nlck, own, dp, &lop);
/* Local conflict found: fill in *fl from the conflicting lock and
 * return -1 so the caller skips the LockT RPC. */
3273 fl->l_whence = SEEK_SET;
3274 fl->l_start = lop->nfslo_first;
3275 if (lop->nfslo_end == NFS64BITSSET)
3278 fl->l_len = lop->nfslo_end - lop->nfslo_first;
3279 fl->l_pid = (pid_t)0;
3280 fl->l_type = lop->nfslo_type;
3281 error = -1; /* no RPC required */
3282 } else if (dp != NULL && ((dp->nfsdl_flags & NFSCLDL_WRITE) ||
3283 fl->l_type == F_RDLCK)) {
3285 * The delegation ensures that there isn't a conflicting
3286 * lock on the server, so return -1 to indicate an RPC
3289 fl->l_type = F_UNLCK;
/*
 * NOTE(review): elided excerpt — many interior lines missing; code left
 * byte-identical, comments only.
 */
3297 * Handle Recall of a delegation.
3298 * The clp must be exclusive locked when this is called.
3301 nfscl_recalldeleg(struct nfsclclient *clp, struct nfsmount *nmp,
3302 struct nfscldeleg *dp, vnode_t vp, struct ucred *cred, NFSPROC_T *p,
3303 int called_from_renewthread)
3305 struct nfsclowner *owp, *lowp, *nowp;
3306 struct nfsclopen *op, *lop;
3307 struct nfscllockowner *lp;
3308 struct nfscllock *lckp;
3310 int error = 0, ret, gotvp = 0;
3314 * First, get a vnode for the file. This is needed to do RPCs.
3316 ret = nfscl_ngetreopen(nmp->nm_mountp, dp->nfsdl_fh,
3317 dp->nfsdl_fhlen, p, &np);
3320 * File isn't open, so nothing to move over to the
3330 dp->nfsdl_flags &= ~NFSCLDL_MODTIMESET;
3333 * Ok, if it's a write delegation, flush data to the server, so
3334 * that close/open consistency is retained.
3338 if ((dp->nfsdl_flags & NFSCLDL_WRITE) && (np->n_flag & NMODIFIED)) {
/* NDELEGRECALL marks the node so the flush path knows a recall is
 * in progress; cleared again below. */
3339 np->n_flag |= NDELEGRECALL;
3341 ret = ncl_flush(vp, MNT_WAIT, cred, p, 1,
3342 called_from_renewthread);
3344 np->n_flag &= ~NDELEGRECALL;
3346 NFSINVALATTRCACHE(np);
3348 if (ret == EIO && called_from_renewthread != 0) {
3350 * If the flush failed with EIO for the renew thread,
3351 * return now, so that the dirty buffer will be flushed
3360 * Now, for each openowner with opens issued locally, move them
3361 * over to state against the server.
3363 LIST_FOREACH(lowp, &dp->nfsdl_owner, nfsow_list) {
3364 lop = LIST_FIRST(&lowp->nfsow_open);
/* A delegation covers one file, so each local owner should hold at
 * most one open. */
3366 if (LIST_NEXT(lop, nfso_list) != NULL)
3367 panic("nfsdlg mult opens");
3369 * Look for the same openowner against the server.
3371 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3372 if (!NFSBCMP(lowp->nfsow_owner,
3373 owp->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
3374 newnfs_copycred(&dp->nfsdl_cred, cred);
3375 ret = nfscl_moveopen(vp, clp, nmp, lop,
/* Stale clientid errors mean recovery is needed; bail out and
 * let the caller's recovery path handle it. */
3377 if (ret == NFSERR_STALECLIENTID ||
3378 ret == NFSERR_STALEDONTRECOVER) {
3384 nfscl_freeopen(lop, 1);
3393 * If no openowner found, create one and get an open
3397 MALLOC(nowp, struct nfsclowner *,
3398 sizeof (struct nfsclowner), M_NFSCLOWNER,
3400 nfscl_newopen(clp, NULL, &owp, &nowp, &op,
3401 NULL, lowp->nfsow_owner, dp->nfsdl_fh,
3402 dp->nfsdl_fhlen, NULL);
3403 newnfs_copycred(&dp->nfsdl_cred, cred);
3404 ret = nfscl_moveopen(vp, clp, nmp, lop,
3407 nfscl_freeopenowner(owp, 0);
3408 if (ret == NFSERR_STALECLIENTID ||
3409 ret == NFSERR_STALEDONTRECOVER) {
3415 nfscl_freeopen(lop, 1);
3425 * Now, get byte range locks for any locks done locally.
3427 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
3428 LIST_FOREACH(lckp, &lp->nfsl_lock, nfslo_list) {
3429 newnfs_copycred(&dp->nfsdl_cred, cred);
3430 ret = nfscl_relock(vp, clp, nmp, lp, lckp, cred, p);
3431 if (ret == NFSERR_STALESTATEID ||
3432 ret == NFSERR_STALEDONTRECOVER ||
3433 ret == NFSERR_STALECLIENTID) {
/*
 * NOTE(review): elided excerpt — code left byte-identical, comments only.
 */
3448 * Move a locally issued open over to an owner on the state list.
3449 * SIDE EFFECT: If it needs to sleep (do an rpc), it unlocks clstate and
3450 * returns with it unlocked.
3453 nfscl_moveopen(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
3454 struct nfsclopen *lop, struct nfsclowner *owp, struct nfscldeleg *dp,
3455 struct ucred *cred, NFSPROC_T *p)
3457 struct nfsclopen *op, *nop;
3458 struct nfscldeleg *ndp;
3460 int error = 0, newone;
3463 * First, look for an appropriate open, If found, just increment the
/* Reuse an existing server open whose mode covers lop's mode for the
 * same file handle; fold lop's opencnt into it and free lop. */
3466 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3467 if ((op->nfso_mode & lop->nfso_mode) == lop->nfso_mode &&
3468 op->nfso_fhlen == lop->nfso_fhlen &&
3469 !NFSBCMP(op->nfso_fh, lop->nfso_fh, op->nfso_fhlen)) {
3470 op->nfso_opencnt += lop->nfso_opencnt;
3471 nfscl_freeopen(lop, 1);
3476 /* No appropriate open, so we have to do one against the server. */
/* Variable-length file handle is stored inline, hence the "fhlen - 1"
 * sizing of the allocation. */
3478 MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) +
3479 lop->nfso_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
3481 nfscl_newopen(clp, NULL, &owp, NULL, &op, &nop, owp->nfsow_owner,
3482 lop->nfso_fh, lop->nfso_fhlen, &newone);
3484 error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data, np->n_v4->n4_fhlen,
3485 lop->nfso_fh, lop->nfso_fhlen, lop->nfso_mode, op,
3486 NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, &ndp, 0, 0, cred, p);
3489 nfscl_freeopen(op, 0);
3492 newnfs_copyincred(cred, &op->nfso_cred);
3493 op->nfso_mode |= lop->nfso_mode;
3494 op->nfso_opencnt += lop->nfso_opencnt;
3495 nfscl_freeopen(lop, 1);
3498 FREE((caddr_t)nop, M_NFSCLOPEN);
3501 * What should I do with the returned delegation, since the
3502 * delegation is being recalled? For now, just printf and
3505 printf("Moveopen returned deleg\n");
3506 FREE((caddr_t)ndp, M_NFSCLDELEG);
/*
 * NOTE(review): elided excerpt — code left byte-identical, comments only.
 */
3512 * Recall all delegations on this client.
3515 nfscl_totalrecall(struct nfsclclient *clp)
3517 struct nfscldeleg *dp;
/* Flag every delegation not already being returned for recall. */
3519 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
3520 if ((dp->nfsdl_flags & NFSCLDL_DELEGRET) == 0)
3521 dp->nfsdl_flags |= NFSCLDL_RECALL;
/*
 * NOTE(review): elided excerpt — code left byte-identical, comments only.
 */
3526 * Relock byte ranges. Called for delegation recall and state expiry.
3529 nfscl_relock(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
3530 struct nfscllockowner *lp, struct nfscllock *lop, struct ucred *cred,
3533 struct nfscllockowner *nlp;
3536 u_int32_t clidrev = 0;
3537 int error, newone, donelocally;
3539 off = lop->nfslo_first;
3540 len = lop->nfslo_end - lop->nfslo_first;
/* Re-acquire the byte range in client state first; if it was satisfied
 * locally (donelocally) no RPC is needed. */
3541 error = nfscl_getbytelock(vp, off, len, lop->nfslo_type, cred, p,
3542 clp, 1, NULL, 0, lp->nfsl_owner, lp->nfsl_openowner, &nlp, &newone,
3544 if (error || donelocally)
3546 if (nmp->nm_clp != NULL)
3547 clidrev = nmp->nm_clp->nfsc_clientidrev;
3550 nfhp = VTONFS(vp)->n_fhp;
3551 error = nfscl_trylock(nmp, vp, nfhp->nfh_fh,
3552 nfhp->nfh_len, nlp, newone, 0, off,
3553 len, lop->nfslo_type, cred, p);
3555 nfscl_freelockowner(nlp, 0);
/*
 * NOTE(review): elided excerpt — code left byte-identical, comments only.
 */
3560 * Called to re-open a file. Basically get a vnode for the file handle
3561 * and then call nfsrpc_openrpc() to do the rest.
3564 nfsrpc_reopen(struct nfsmount *nmp, u_int8_t *fhp, int fhlen,
3565 u_int32_t mode, struct nfsclopen *op, struct nfscldeleg **dpp,
3566 struct ucred *cred, NFSPROC_T *p)
3572 error = nfscl_ngetreopen(nmp->nm_mountp, fhp, fhlen, p, &np);
/* n_v4 carries the directory fh + name needed to redo the Open;
 * without it no reopen is possible (elided else-path). */
3576 if (np->n_v4 != NULL) {
3577 error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data,
3578 np->n_v4->n4_fhlen, fhp, fhlen, mode, op,
3579 NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, dpp, 0, 0,
/*
 * NOTE(review): elided excerpt — code left byte-identical, comments only.
 */
3589 * Try an open against the server. Just call nfsrpc_openrpc(), retrying while
3590 * NFSERR_DELAY. Also, try system credentials, if the passed in credentials
3594 nfscl_tryopen(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen,
3595 u_int8_t *newfhp, int newfhlen, u_int32_t mode, struct nfsclopen *op,
3596 u_int8_t *name, int namelen, struct nfscldeleg **ndpp,
3597 int reclaim, u_int32_t delegtype, struct ucred *cred, NFSPROC_T *p)
/* Retry loop: nap and repeat while the server returns NFSERR_DELAY. */
3602 error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, newfhlen,
3603 mode, op, name, namelen, ndpp, reclaim, delegtype, cred, p,
3605 if (error == NFSERR_DELAY)
3606 (void) nfs_catnap(PZERO, error, "nfstryop");
3607 } while (error == NFSERR_DELAY);
/* On auth failure, fall back to system (root) credentials and retry
 * the same DELAY loop once more. */
3608 if (error == EAUTH || error == EACCES) {
3609 /* Try again using system credentials */
3610 newnfs_setroot(cred);
3612 error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp,
3613 newfhlen, mode, op, name, namelen, ndpp, reclaim,
3614 delegtype, cred, p, 1, 0);
3615 if (error == NFSERR_DELAY)
3616 (void) nfs_catnap(PZERO, error, "nfstryop");
3617 } while (error == NFSERR_DELAY);
/*
 * NOTE(review): elided excerpt — code left byte-identical, comments only.
 */
3623 * Try a byte range lock. Just loop on nfsrpc_lock() while it returns
3624 * NFSERR_DELAY. Also, retry with system credentials, if the provided
3628 nfscl_trylock(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp,
3629 int fhlen, struct nfscllockowner *nlp, int newone, int reclaim,
3630 u_int64_t off, u_int64_t len, short type, struct ucred *cred, NFSPROC_T *p)
3632 struct nfsrv_descript nfsd, *nd = &nfsd;
/* Unlike the open/close helpers, DELAY is reported via nd_repstat
 * here, not the function return value. */
3636 error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, newone,
3637 reclaim, off, len, type, cred, p, 0);
3638 if (!error && nd->nd_repstat == NFSERR_DELAY)
3639 (void) nfs_catnap(PZERO, (int)nd->nd_repstat,
3641 } while (!error && nd->nd_repstat == NFSERR_DELAY);
3643 error = nd->nd_repstat;
3644 if (error == EAUTH || error == EACCES) {
3645 /* Try again using root credentials */
3646 newnfs_setroot(cred);
3648 error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp,
3649 newone, reclaim, off, len, type, cred, p, 1);
3650 if (!error && nd->nd_repstat == NFSERR_DELAY)
3651 (void) nfs_catnap(PZERO, (int)nd->nd_repstat,
3653 } while (!error && nd->nd_repstat == NFSERR_DELAY);
3655 error = nd->nd_repstat;
/*
 * NOTE(review): elided excerpt — code left byte-identical, comments only.
 */
3661 * Try a delegreturn against the server. Just call nfsrpc_delegreturn(),
3662 * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in
3666 nfscl_trydelegreturn(struct nfscldeleg *dp, struct ucred *cred,
3667 struct nfsmount *nmp, NFSPROC_T *p)
/* Same retry pattern as nfscl_tryopen/nfscl_tryclose: loop on DELAY,
 * then retry once with system credentials on EAUTH/EACCES. */
3672 error = nfsrpc_delegreturn(dp, cred, nmp, p, 0);
3673 if (error == NFSERR_DELAY)
3674 (void) nfs_catnap(PZERO, error, "nfstrydp");
3675 } while (error == NFSERR_DELAY);
3676 if (error == EAUTH || error == EACCES) {
3677 /* Try again using system credentials */
3678 newnfs_setroot(cred);
3680 error = nfsrpc_delegreturn(dp, cred, nmp, p, 1);
3681 if (error == NFSERR_DELAY)
3682 (void) nfs_catnap(PZERO, error, "nfstrydp");
3683 } while (error == NFSERR_DELAY);
/*
 * NOTE(review): elided excerpt — code left byte-identical, comments only.
 */
3689 * Try a close against the server. Just call nfsrpc_closerpc(),
3690 * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in
3694 nfscl_tryclose(struct nfsclopen *op, struct ucred *cred,
3695 struct nfsmount *nmp, NFSPROC_T *p)
3697 struct nfsrv_descript nfsd, *nd = &nfsd;
/* Same retry pattern as nfscl_tryopen: loop on NFSERR_DELAY, then one
 * more DELAY loop with system credentials on EAUTH/EACCES. */
3701 error = nfsrpc_closerpc(nd, nmp, op, cred, p, 0);
3702 if (error == NFSERR_DELAY)
3703 (void) nfs_catnap(PZERO, error, "nfstrycl");
3704 } while (error == NFSERR_DELAY);
3705 if (error == EAUTH || error == EACCES) {
3706 /* Try again using system credentials */
3707 newnfs_setroot(cred);
3709 error = nfsrpc_closerpc(nd, nmp, op, cred, p, 1);
3710 if (error == NFSERR_DELAY)
3711 (void) nfs_catnap(PZERO, error, "nfstrycl");
3712 } while (error == NFSERR_DELAY);
/*
 * NOTE(review): elided excerpt — code left byte-identical, comments only.
 */
3718 * Decide if a delegation on a file permits close without flushing writes
3719 * to the server. This might be a big performance win in some environments.
3720 * (Not useful until the client does caching on local stable storage.)
3723 nfscl_mustflush(vnode_t vp)
3725 struct nfsclclient *clp;
3726 struct nfscldeleg *dp;
3728 struct nfsmount *nmp;
3731 nmp = VFSTONFS(vnode_mount(vp));
/* Non-NFSv4 mounts (and no-clientid cases below) must always flush. */
3732 if (!NFSHASNFSV4(nmp))
3735 clp = nfscl_findcl(nmp);
/* Flush may be skipped only for a healthy write delegation that is
 * neither being recalled nor returned, and whose size limit (or the
 * strict-RFC3530 knob) permits it. */
3740 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
3741 if (dp != NULL && (dp->nfsdl_flags &
3742 (NFSCLDL_WRITE | NFSCLDL_RECALL | NFSCLDL_DELEGRET)) ==
3744 (dp->nfsdl_sizelimit >= np->n_size ||
3745 !NFSHASSTRICT3530(nmp))) {
/*
 * NOTE(review): elided excerpt — code left byte-identical, comments only.
 */
3754 * See if a (write) delegation exists for this file.
3757 nfscl_nodeleg(vnode_t vp, int writedeleg)
3759 struct nfsclclient *clp;
3760 struct nfscldeleg *dp;
3762 struct nfsmount *nmp;
3765 nmp = VFSTONFS(vnode_mount(vp));
3766 if (!NFSHASNFSV4(nmp))
3769 clp = nfscl_findcl(nmp);
/* A usable delegation must not be mid-recall/return; when writedeleg
 * is set it must also be a write delegation. */
3774 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
3776 (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == 0 &&
3777 (writedeleg == 0 || (dp->nfsdl_flags & NFSCLDL_WRITE) ==
/*
 * NOTE(review): elided excerpt — interior lines missing; code left
 * byte-identical, comments only.
 */
3787 * Look for an associated delegation that should be DelegReturned.
3790 nfscl_removedeleg(vnode_t vp, NFSPROC_T *p, nfsv4stateid_t *stp)
3792 struct nfsclclient *clp;
3793 struct nfscldeleg *dp;
3794 struct nfsclowner *owp;
3795 struct nfscllockowner *lp;
3796 struct nfsmount *nmp;
3799 int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;
3801 nmp = VFSTONFS(vnode_mount(vp));
3805 * Loop around waiting for:
3806 * - outstanding I/O operations on delegations to complete
3807 * - for a delegation on vp that has state, lock the client and
3809 * - return delegation with no state
3812 clp = nfscl_findcl(nmp);
3817 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
3818 np->n_fhp->nfh_len);
3821 * Wait for outstanding I/O ops to be done.
3823 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
3825 nfsv4_unlock(&clp->nfsc_lock, 0);
/* Sleep until the rwlock holder wakes waiters, then re-loop. */
3828 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
3829 (void) nfsmsleep(&dp->nfsdl_rwlock,
3830 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
/* Delegation "has state" if any owner has opens or any lockowner
 * has locks; that state must be recalled before DelegReturn. */
3834 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
3835 if (!LIST_EMPTY(&owp->nfsow_open)) {
3841 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
3842 if (!LIST_EMPTY(&lp->nfsl_lock)) {
3848 if (needsrecall && !triedrecall) {
3849 dp->nfsdl_flags |= NFSCLDL_DELEGRET;
/* nfscl_recalldeleg() requires the exclusive client lock. */
3852 igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
3853 &islept, NFSCLSTATEMUTEXPTR, NULL);
3860 cred = newnfs_getcred();
3861 newnfs_copycred(&dp->nfsdl_cred, cred);
3862 (void) nfscl_recalldeleg(clp, nmp, dp, vp, cred, p, 0);
3866 nfsv4_unlock(&clp->nfsc_lock, 0);
/* Hand the stateid back to the caller and discard the delegation. */
3870 *stp = dp->nfsdl_stateid;
3872 nfscl_cleandeleg(dp);
3873 nfscl_freedeleg(&clp->nfsc_deleg, dp);
3876 nfsv4_unlock(&clp->nfsc_lock, 0);
/*
 * NOTE(review): elided excerpt — interior lines missing; code left
 * byte-identical, comments only. Two-file variant of nfscl_removedeleg()
 * used by rename: handles the "from" vnode (fvp) first, then the "to"
 * vnode (tvp, second half below).
 */
3883 * Look for associated delegation(s) that should be DelegReturned.
3886 nfscl_renamedeleg(vnode_t fvp, nfsv4stateid_t *fstp, int *gotfdp, vnode_t tvp,
3887 nfsv4stateid_t *tstp, int *gottdp, NFSPROC_T *p)
3889 struct nfsclclient *clp;
3890 struct nfscldeleg *dp;
3891 struct nfsclowner *owp;
3892 struct nfscllockowner *lp;
3893 struct nfsmount *nmp;
3896 int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;
3898 nmp = VFSTONFS(vnode_mount(fvp));
3903 * Loop around waiting for:
3904 * - outstanding I/O operations on delegations to complete
3905 * - for a delegation on fvp that has state, lock the client and
3907 * - return delegation(s) with no state.
3910 clp = nfscl_findcl(nmp);
/* --- fvp: same recall-then-return dance as nfscl_removedeleg(). --- */
3916 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
3917 np->n_fhp->nfh_len);
3918 if (dp != NULL && *gotfdp == 0) {
3920 * Wait for outstanding I/O ops to be done.
3922 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
3924 nfsv4_unlock(&clp->nfsc_lock, 0);
3927 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
3928 (void) nfsmsleep(&dp->nfsdl_rwlock,
3929 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
3933 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
3934 if (!LIST_EMPTY(&owp->nfsow_open)) {
3940 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
3941 if (!LIST_EMPTY(&lp->nfsl_lock)) {
3947 if (needsrecall && !triedrecall) {
3948 dp->nfsdl_flags |= NFSCLDL_DELEGRET;
3951 igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
3952 &islept, NFSCLSTATEMUTEXPTR, NULL);
3959 cred = newnfs_getcred();
3960 newnfs_copycred(&dp->nfsdl_cred, cred);
3961 (void) nfscl_recalldeleg(clp, nmp, dp, fvp, cred, p, 0);
3965 nfsv4_unlock(&clp->nfsc_lock, 0);
3969 *fstp = dp->nfsdl_stateid;
3972 nfscl_cleandeleg(dp);
3973 nfscl_freedeleg(&clp->nfsc_deleg, dp);
3976 nfsv4_unlock(&clp->nfsc_lock, 0);
/* --- tvp: only a state-free delegation is returned; no recall is
 * attempted for the rename target (per visible code). --- */
3981 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
3982 np->n_fhp->nfh_len);
3983 if (dp != NULL && *gottdp == 0) {
3985 * Wait for outstanding I/O ops to be done.
3987 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
3988 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
3989 (void) nfsmsleep(&dp->nfsdl_rwlock,
3990 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
3993 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
3994 if (!LIST_EMPTY(&owp->nfsow_open)) {
3999 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
4000 if (!LIST_EMPTY(&lp->nfsl_lock)) {
4005 *tstp = dp->nfsdl_stateid;
4008 nfscl_cleandeleg(dp);
4009 nfscl_freedeleg(&clp->nfsc_deleg, dp);
/*
 * NOTE(review): elided excerpt — code left byte-identical, comments only.
 */
4018 * Get a reference on the clientid associated with the mount point.
4019 * Return 1 if success, 0 otherwise.
4022 nfscl_getref(struct nfsmount *nmp)
4024 struct nfsclclient *clp;
4027 clp = nfscl_findcl(nmp);
/* Reference is taken on the clientid's lock; paired with
 * nfscl_relref() below. */
4032 nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, NULL);
/*
 * NOTE(review): elided excerpt — code left byte-identical, comments only.
 */
4038 * Release a reference on a clientid acquired with the above call.
4041 nfscl_relref(struct nfsmount *nmp)
4043 struct nfsclclient *clp;
4046 clp = nfscl_findcl(nmp);
4051 nfsv4_relref(&clp->nfsc_lock);
/*
 * NOTE(review): elided excerpt — code left byte-identical, comments only.
 */
4056 * Save the size attribute in the delegation, since the nfsnode
4060 nfscl_reclaimnode(vnode_t vp)
4062 struct nfsclclient *clp;
4063 struct nfscldeleg *dp;
4064 struct nfsnode *np = VTONFS(vp);
4065 struct nfsmount *nmp;
4067 nmp = VFSTONFS(vnode_mount(vp));
4068 if (!NFSHASNFSV4(nmp))
4071 clp = nfscl_findcl(nmp);
/* Stash n_size in the write delegation so nfscl_newnode() can restore
 * it when the nfsnode is re-created. */
4076 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4077 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
4078 dp->nfsdl_size = np->n_size;
/*
 * NOTE(review): elided excerpt — code left byte-identical, comments only.
 */
4083 * Get the saved size attribute in the delegation, since it is a
4084 * newly allocated nfsnode.
4087 nfscl_newnode(vnode_t vp)
4089 struct nfsclclient *clp;
4090 struct nfscldeleg *dp;
4091 struct nfsnode *np = VTONFS(vp);
4092 struct nfsmount *nmp;
4094 nmp = VFSTONFS(vnode_mount(vp));
4095 if (!NFSHASNFSV4(nmp))
4098 clp = nfscl_findcl(nmp);
/* Inverse of nfscl_reclaimnode(): restore the size cached in the
 * write delegation into the fresh nfsnode. */
4103 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4104 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
4105 np->n_size = dp->nfsdl_size;
/*
 * NOTE(review): elided excerpt — code left byte-identical, comments only.
 */
4110 * If there is a valid write delegation for this file, set the modtime
4111 * to the local clock time.
4114 nfscl_delegmodtime(vnode_t vp)
4116 struct nfsclclient *clp;
4117 struct nfscldeleg *dp;
4118 struct nfsnode *np = VTONFS(vp);
4119 struct nfsmount *nmp;
4121 nmp = VFSTONFS(vnode_mount(vp));
4122 if (!NFSHASNFSV4(nmp))
4125 clp = nfscl_findcl(nmp);
/* MODTIMESET marks nfsdl_modtime as valid for
 * nfscl_deleggetmodtime() below. */
4130 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4131 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) {
4132 NFSGETNANOTIME(&dp->nfsdl_modtime);
4133 dp->nfsdl_flags |= NFSCLDL_MODTIMESET;
/*
 * NOTE(review): elided excerpt — code left byte-identical, comments only.
 */
4139 * If there is a valid write delegation for this file with a modtime set,
4140 * put that modtime in mtime.
4143 nfscl_deleggetmodtime(vnode_t vp, struct timespec *mtime)
4145 struct nfsclclient *clp;
4146 struct nfscldeleg *dp;
4147 struct nfsnode *np = VTONFS(vp);
4148 struct nfsmount *nmp;
4150 nmp = VFSTONFS(vnode_mount(vp));
4151 if (!NFSHASNFSV4(nmp))
4154 clp = nfscl_findcl(nmp);
/* Both WRITE and MODTIMESET must be set — i.e. nfscl_delegmodtime()
 * stamped a locally cached modtime for this write delegation. */
4159 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4161 (dp->nfsdl_flags & (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) ==
4162 (NFSCLDL_WRITE | NFSCLDL_MODTIMESET))
4163 *mtime = dp->nfsdl_modtime;
4168 nfscl_errmap(struct nfsrv_descript *nd)
4170 short *defaulterrp, *errp;
4172 if (!nd->nd_repstat)
4174 if (nd->nd_procnum == NFSPROC_NOOP)
4175 return (txdr_unsigned(nd->nd_repstat & 0xffff));
4176 if (nd->nd_repstat == EBADRPC)
4177 return (txdr_unsigned(NFSERR_BADXDR));
4178 if (nd->nd_repstat == NFSERR_MINORVERMISMATCH ||
4179 nd->nd_repstat == NFSERR_OPILLEGAL)
4180 return (txdr_unsigned(nd->nd_repstat));
4181 errp = defaulterrp = nfscl_cberrmap[nd->nd_procnum];
4183 if (*errp == (short)nd->nd_repstat)
4184 return (txdr_unsigned(nd->nd_repstat));
4185 return (txdr_unsigned(*defaulterrp));