/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2009 Rick Macklem, University of Guelph
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>

/*
 * These functions implement the client side state handling for NFSv4.
 * NFSv4 state handling:
 * - A lockowner is used to determine lock contention, so it
 *   corresponds directly to a Posix pid. (1 to 1 mapping)
 * - The correct granularity of an OpenOwner is not nearly so
 *   obvious. An OpenOwner does the following:
 *   - provides a serial sequencing of Open/Close/Lock-with-new-lockowner
 *   - is used to check for Open/Share contention (not applicable to
 *     this client, since all Opens are Deny_None)
 *   As such, I considered both extremes:
 *   1 OpenOwner per ClientID - Simple to manage, but fully serializes
 *     all Open, Close and Lock (with a new lockowner) Ops.
 *   1 OpenOwner for each Open - This one results in an OpenConfirm for
 *     every Open, for most servers.
 *   So, I chose to use the same mapping as I did for LockOwners.
 *   The main concern here is that you can end up with multiple Opens
 *   for the same File Handle, but on different OpenOwners (opens
 *   inherited from parents, grandparents...) and you do not know
 *   which of these the vnodeop close applies to. This is handled by
 *   delaying the Close Op(s) until all of the Opens have been closed.
 *   (It is not yet obvious if this is the correct granularity.)
 * - How the code handles serialization:
 *   - For the ClientId, it uses an exclusive lock while getting its
 *     SetClientId and during recovery. Otherwise, it uses a shared
 *     lock via a reference count.
 *   - For the rest of the data structures, it uses an SMP mutex
 *     (once the nfs client is SMP safe) and doesn't sleep while
 *     manipulating the linked lists.
 *   - The serialization of Open/Close/Lock/LockU falls out in the
 *     "wash", since OpenOwners and LockOwners are both mapped from
 *     Posix pid. In other words, there is only one Posix pid using
 *     any given owner, so that owner is serialized. (If you change
 *     the granularity of the OpenOwner, then code must be added to
 *     serialize Ops on the OpenOwner.)
 * - When to get rid of OpenOwners and LockOwners.
 *   - The function nfscl_cleanup_common() is executed after a process exits.
 *     It goes through the client list looking for all Open and Lock Owners.
 *     When one is found, it is marked "defunct" or, in the case of
 *     an OpenOwner without any Opens, freed.
 *     The renew thread scans for defunct Owners and gets rid of them,
 *     if it can. The LockOwners will also be deleted when the
 *     associated Open is closed.
 *   - If the LockU or Close Op(s) fail during close in a way
 *     that could be recovered upon retry, they are relinked to the
 *     ClientId's defunct open list and retried by the renew thread
 *     until they succeed or an unmount/recovery occurs.
 *     (Since we are done with them, they do not need to be recovered.)
 */
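/*
 * To illustrate the 1-to-1 pid-to-owner mapping described above, here
 * is a minimal sketch of how an owner name could be derived from a
 * Posix pid.  The function name and byte layout are assumptions for
 * illustration only; this is not the actual nfscl_filllockowner()
 * implementation.
 */
#if 0
static void
example_fillowner(pid_t pid, u_int8_t *own)
{
	int i;

	/* Encode the pid in the low-order bytes of the owner name. */
	for (i = 0; i < (int)sizeof(pid_t); i++)
		own[i] = (pid >> (i * 8)) & 0xff;
	/* Zero-fill the remainder of the NFSV4CL_LOCKNAMELEN bytes. */
	for (; i < NFSV4CL_LOCKNAMELEN; i++)
		own[i] = 0;
}
#endif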
#include <fs/nfs/nfsport.h>

extern struct nfsstatsv1 nfsstatsv1;
extern struct nfsreqhead nfsd_reqq;
extern u_int32_t newnfs_false, newnfs_true;
extern int nfscl_debuglevel;
extern int nfscl_enablecallb;
extern int nfs_numnfscbd;

struct nfsclhead nfsclhead;	/* Head of clientid list */

int nfscl_deleghighwater = NFSCLDELEGHIGHWATER;
int nfscl_layouthighwater = NFSCLLAYOUTHIGHWATER;

static int nfscl_delegcnt = 0;
static int nfscl_layoutcnt = 0;
static int nfscl_getopen(struct nfsclownerhead *, struct nfsclopenhash *,
    u_int8_t *, int, u_int8_t *, u_int8_t *, u_int32_t,
    struct nfscllockowner **, struct nfsclopen **);
static bool nfscl_checkown(struct nfsclowner *, struct nfsclopen *, uint8_t *,
    uint8_t *, struct nfscllockowner **, struct nfsclopen **,
    struct nfsclopen **);
static void nfscl_clrelease(struct nfsclclient *);
static void nfscl_unlinkopen(struct nfsclopen *);
static void nfscl_cleanclient(struct nfsclclient *);
static void nfscl_expireclient(struct nfsclclient *, struct nfsmount *,
    struct ucred *, NFSPROC_T *);
static int nfscl_expireopen(struct nfsclclient *, struct nfsclopen *,
    struct nfsmount *, struct ucred *, NFSPROC_T *);
static void nfscl_recover(struct nfsclclient *, bool *, struct ucred *,
    NFSPROC_T *);
static void nfscl_insertlock(struct nfscllockowner *, struct nfscllock *,
    struct nfscllock *, int);
static int nfscl_updatelock(struct nfscllockowner *, struct nfscllock **,
    struct nfscllock **, int);
static void nfscl_delegreturnall(struct nfsclclient *, NFSPROC_T *,
    struct nfscldeleghead *);
static u_int32_t nfscl_nextcbident(void);
static mount_t nfscl_getmnt(int, uint8_t *, u_int32_t, struct nfsclclient **);
static struct nfsclclient *nfscl_getclnt(u_int32_t);
static struct nfsclclient *nfscl_getclntsess(uint8_t *);
static struct nfscldeleg *nfscl_finddeleg(struct nfsclclient *, u_int8_t *,
    int);
static void nfscl_retoncloselayout(vnode_t, struct nfsclclient *, uint8_t *,
    int, struct nfsclrecalllayout **, struct nfscllayout **);
static void nfscl_reldevinfo_locked(struct nfscldevinfo *);
static struct nfscllayout *nfscl_findlayout(struct nfsclclient *, u_int8_t *,
    int);
static struct nfscldevinfo *nfscl_finddevinfo(struct nfsclclient *, uint8_t *);
static int nfscl_checkconflict(struct nfscllockownerhead *, struct nfscllock *,
    u_int8_t *, struct nfscllock **);
static void nfscl_freealllocks(struct nfscllockownerhead *, int);
static int nfscl_localconflict(struct nfsclclient *, u_int8_t *, int,
    struct nfscllock *, u_int8_t *, struct nfscldeleg *, struct nfscllock **);
static void nfscl_newopen(struct nfsclclient *, struct nfscldeleg *,
    struct nfsclowner **, struct nfsclowner **, struct nfsclopen **,
    struct nfsclopen **, u_int8_t *, u_int8_t *, int, struct ucred *, int *);
static int nfscl_moveopen(vnode_t , struct nfsclclient *,
    struct nfsmount *, struct nfsclopen *, struct nfsclowner *,
    struct nfscldeleg *, struct ucred *, NFSPROC_T *);
static void nfscl_totalrecall(struct nfsclclient *);
static int nfscl_relock(vnode_t , struct nfsclclient *, struct nfsmount *,
    struct nfscllockowner *, struct nfscllock *, struct ucred *, NFSPROC_T *);
static int nfscl_tryopen(struct nfsmount *, vnode_t , u_int8_t *, int,
    u_int8_t *, int, u_int32_t, struct nfsclopen *, u_int8_t *, int,
    struct nfscldeleg **, int, u_int32_t, struct ucred *, NFSPROC_T *);
static int nfscl_trylock(struct nfsmount *, vnode_t , u_int8_t *,
    int, struct nfscllockowner *, int, int, u_int64_t, u_int64_t, short,
    struct ucred *, NFSPROC_T *);
static int nfsrpc_reopen(struct nfsmount *, u_int8_t *, int, u_int32_t,
    struct nfsclopen *, struct nfscldeleg **, struct ucred *, NFSPROC_T *);
static void nfscl_freedeleg(struct nfscldeleghead *, struct nfscldeleg *,
    bool);
static int nfscl_errmap(struct nfsrv_descript *, u_int32_t);
static void nfscl_cleanup_common(struct nfsclclient *, u_int8_t *);
static int nfscl_recalldeleg(struct nfsclclient *, struct nfsmount *,
    struct nfscldeleg *, vnode_t, struct ucred *, NFSPROC_T *, int,
static void nfscl_freeopenowner(struct nfsclowner *, int);
static void nfscl_cleandeleg(struct nfscldeleg *);
static void nfscl_emptylockowner(struct nfscllockowner *,
    struct nfscllockownerfhhead *);
static void nfscl_mergeflayouts(struct nfsclflayouthead *,
    struct nfsclflayouthead *);
static int nfscl_layoutrecall(int, struct nfscllayout *, uint32_t, uint64_t,
    uint64_t, uint32_t, uint32_t, uint32_t, char *, struct nfsclrecalllayout *);
static int nfscl_seq(uint32_t, uint32_t);
static void nfscl_layoutreturn(struct nfsmount *, struct nfscllayout *,
    struct ucred *, NFSPROC_T *);
static void nfscl_dolayoutcommit(struct nfsmount *, struct nfscllayout *,
    struct ucred *, NFSPROC_T *);

static short nfscberr_null[] = {
static short nfscberr_getattr[] = {
static short nfscberr_recall[] = {
static short *nfscl_cberrmap[] = {

#define	NETFAMILY(clp) \
		(((clp)->nfsc_flags & NFSCLFLAGS_AFINET6) ? AF_INET6 : AF_INET)
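/*
 * A hedged usage sketch for NETFAMILY(): the macro yields the address
 * family over which the clientid was established, so a caller can pick
 * the matching sockaddr handling.  The surrounding code below is an
 * assumption, for illustration only.
 */
#if 0
	if (NETFAMILY(clp) == AF_INET6) {
		/* ... handle a struct sockaddr_in6 address ... */
	} else {
		/* ... handle a struct sockaddr_in address ... */
	}
#endif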
/*
 * Called for an open operation.
 * If the nfhp argument is NULL, just get an openowner.
 */
nfscl_open(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t amode, int usedeleg,
    struct ucred *cred, NFSPROC_T *p, struct nfsclowner **owpp,
    struct nfsclopen **opp, int *newonep, int *retp, int lockit, bool firstref)
	struct nfsclclient *clp;
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op = NULL, *nop = NULL;
	struct nfscldeleg *dp;
	struct nfsclownerhead *ohp;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];

	/*
	 * Might need one or both of these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	nowp = malloc(sizeof (struct nfsclowner),
	    M_NFSCLOWNER, M_WAITOK);
	nop = malloc(sizeof (struct nfsclopen) +
	    fhlen - 1, M_NFSCLOPEN, M_WAITOK);
	nop->nfso_hash.le_prev = NULL;
	ret = nfscl_getcl(vp->v_mount, cred, p, false, firstref, &clp);
	free(nowp, M_NFSCLOWNER);
	free(nop, M_NFSCLOPEN);

	/*
	 * Get the Open iff it already exists.
	 * If none found, add the new one or return error, depending upon
	 */
	/* First check the delegation list */
	if (nfhp != NULL && usedeleg) {
		LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
			if (dp->nfsdl_fhlen == fhlen &&
			    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
				if (!(amode & NFSV4OPEN_ACCESSWRITE) ||
				    (dp->nfsdl_flags & NFSCLDL_WRITE))

	/* For NFSv4.1/4.2 and this option, use a single open_owner. */
	if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount)))
		nfscl_filllockowner(NULL, own, F_POSIX);
		nfscl_filllockowner(p->td_proc, own, F_POSIX);
		ohp = &dp->nfsdl_owner;
		ohp = &clp->nfsc_owner;
	/* Now, search for an openowner */
	LIST_FOREACH(owp, ohp, nfsow_list) {
		if (!NFSBCMP(owp->nfsow_owner, own, NFSV4CL_LOCKNAMELEN))

	/*
	 * Create a new open, as required.
	 */
	nfscl_newopen(clp, dp, &owp, &nowp, &op, &nop, own, nfhp, fhlen,

	/*
	 * Now, check the mode on the open and return the appropriate
	 */
	if (nfhp != NULL && dp != NULL && nop == NULL)
		/* new local open on delegation */
		*retp = NFSCLOPEN_SETCRED;
		*retp = NFSCLOPEN_OK;
	if (op != NULL && (amode & ~(op->nfso_mode))) {
		op->nfso_mode |= amode;
		if (retp != NULL && dp == NULL)
			*retp = NFSCLOPEN_DOOPEN;

	/*
	 * Serialize modifications to the open owner for multiple threads
	 * within the same process using a read/write sleep lock.
	 * For NFSv4.1 and a single OpenOwner, allow concurrent open operations
	 * by acquiring a shared lock.  The close operations still use an
	 * exclusive lock for this case.
	 */
	if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount))) {
		/*
		 * Get a shared lock on the OpenOwner, but first
		 * wait for any pending exclusive lock, so that the
		 * exclusive locker gets priority.
		 */
		nfsv4_lock(&owp->nfsow_rwlock, 0, NULL,
		    NFSCLSTATEMUTEXPTR, NULL);
		nfsv4_getref(&owp->nfsow_rwlock, NULL,
		    NFSCLSTATEMUTEXPTR, NULL);
	nfscl_lockexcl(&owp->nfsow_rwlock, NFSCLSTATEMUTEXPTR);
	free(nowp, M_NFSCLOWNER);
	free(nop, M_NFSCLOPEN);
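/*
 * A hedged sketch of how a caller might use nfscl_open() to get an
 * openowner and open for a file handle.  The variable names and the
 * error handling here are assumptions for illustration, not code from
 * this file.
 */
#if 0
	struct nfsclowner *owp;
	struct nfsclopen *op;
	int newone, ret, error;

	error = nfscl_open(vp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len,
	    NFSV4OPEN_ACCESSREAD, 1, cred, p, &owp, &op, &newone, &ret,
	    1, true);
	if (error == 0 && ret == NFSCLOPEN_DOOPEN) {
		/* An Open RPC against the server is now required. */
	}
#endif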
/*
 * Create a new open, as required.
 */
static void
nfscl_newopen(struct nfsclclient *clp, struct nfscldeleg *dp,
    struct nfsclowner **owpp, struct nfsclowner **nowpp, struct nfsclopen **opp,
    struct nfsclopen **nopp, u_int8_t *own, u_int8_t *fhp, int fhlen,
    struct ucred *cred, int *newonep)
	struct nfsclowner *owp = *owpp, *nowp;
	struct nfsclopen *op, *nop;

	if (owp == NULL && nowp != NULL) {
		NFSBCOPY(own, nowp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
		LIST_INIT(&nowp->nfsow_open);
		nowp->nfsow_clp = clp;
		nowp->nfsow_seqid = 0;
		nowp->nfsow_defunct = 0;
		nfscl_lockinit(&nowp->nfsow_rwlock);
			nfsstatsv1.cllocalopenowners++;
			LIST_INSERT_HEAD(&dp->nfsdl_owner, nowp, nfsow_list);
			nfsstatsv1.clopenowners++;
			LIST_INSERT_HEAD(&clp->nfsc_owner, nowp, nfsow_list);

	/* If an fhp has been specified, create an Open as well. */
		/* and look for the correct open, based upon FH */
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, fhp, fhlen))
		if (op == NULL && nop != NULL) {
			nop->nfso_opencnt = 0;
			nop->nfso_posixlock = 1;
			nop->nfso_fhlen = fhlen;
			NFSBCOPY(fhp, nop->nfso_fh, fhlen);
			LIST_INIT(&nop->nfso_lock);
			nop->nfso_stateid.seqid = 0;
			nop->nfso_stateid.other[0] = 0;
			nop->nfso_stateid.other[1] = 0;
			nop->nfso_stateid.other[2] = 0;
			KASSERT(cred != NULL, ("%s: cred NULL\n", __func__));
			newnfs_copyincred(cred, &nop->nfso_cred);
				TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
				TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
				dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
				nfsstatsv1.cllocalopens++;
				LIST_INSERT_HEAD(NFSCLOPENHASH(clp, fhp, fhlen),
				nfsstatsv1.clopens++;
			LIST_INSERT_HEAD(&owp->nfsow_open, nop, nfso_list);
/*
 * Called to find/add a delegation to a client.
 */
nfscl_deleg(mount_t mp, struct nfsclclient *clp, u_int8_t *nfhp,
    int fhlen, struct ucred *cred, NFSPROC_T *p, struct nfscldeleg **dpp)
	struct nfscldeleg *dp = *dpp, *tdp;
	struct nfsmount *nmp;

	KASSERT(mp != NULL, ("nfscl_deleg: mp NULL"));
	/*
	 * First, if we have received a Read delegation for a file on a
	 * read/write file system, just return it, because they aren't
	 */
	if (dp != NULL && !NFSMNT_RDONLY(mp) &&
	    (dp->nfsdl_flags & NFSCLDL_READ)) {
		nfscl_trydelegreturn(dp, cred, nmp, p);
		free(dp, M_NFSCLDELEG);

	/*
	 * Since a delegation might be added to the mount,
	 * set NFSMNTP_DELEGISSUED now.  If a delegation already
	 * exists, setting this flag again is harmless.
	 */
	nmp->nm_privflag |= NFSMNTP_DELEGISSUED;

	/* Look for the correct deleg, based upon FH */
	tdp = nfscl_finddeleg(clp, nfhp, fhlen);
		return (NFSERR_BADSTATEID);
		TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
		LIST_INSERT_HEAD(NFSCLDELEGHASH(clp, nfhp, fhlen), dp,
		dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
		nfsstatsv1.cldelegates++;
		/*
		 * Delegation already exists; what do we do if a new
		 * one has been issued?
		 */
		printf("Deleg already exists!\n");
		free(dp, M_NFSCLDELEG);

/*
 * Find a delegation for this file handle. Return NULL upon failure.
 */
static struct nfscldeleg *
nfscl_finddeleg(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
	struct nfscldeleg *dp;

	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, fhp, fhlen), nfsdl_hash) {
		if (dp->nfsdl_fhlen == fhlen &&
		    !NFSBCMP(dp->nfsdl_fh, fhp, fhlen))
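/*
 * The lookup above selects a hash chain keyed on the file handle.  A
 * minimal sketch of such a bucket index computation follows; the
 * actual NFSCLDELEGHASH() macro may use a different hash function, so
 * treat this as an assumption for illustration.
 */
#if 0
static u_int32_t
example_deleghash_idx(u_int8_t *fhp, int fhlen)
{
	u_int32_t sum = 0;
	int i;

	/* Sum the file handle bytes, reduced modulo the table size. */
	for (i = 0; i < fhlen; i++)
		sum += fhp[i];
	return (sum % NFSCLDELEGHASHSIZE);
}
#endif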
/*
 * Get a stateid for an I/O operation. First, look for an open and iff
 * found, return either a lockowner stateid or the open stateid.
 * If no Open is found, just return error and the special stateid of all zeros.
 */
nfscl_getstateid(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t mode,
    int fords, struct ucred *cred, NFSPROC_T *p, nfsv4stateid_t *stateidp,
	struct nfsclclient *clp;
	struct nfsclopen *op = NULL, *top;
	struct nfsclopenhash *oph;
	struct nfscllockowner *lp;
	struct nfscldeleg *dp;
	struct nfsmount *nmp;
	u_int8_t own[NFSV4CL_LOCKNAMELEN], lockown[NFSV4CL_LOCKNAMELEN];

	/*
	 * Initially, just set the special stateid of all zeros.
	 * (Don't do this for a DS, since the special stateid can't be used.)
	 */
	stateidp->other[0] = 0;
	stateidp->other[1] = 0;
	stateidp->other[2] = 0;
	if (vp->v_type != VREG)
	nmp = VFSTONFS(vp->v_mount);

	/*
	 * For "oneopenown" mounts, first check for a cached open in the
	 * NFS vnode, that can be used as a stateid.  This can only be
	 * done if no delegations have been issued to the mount and no
	 * byte range file locking has been done for the file.
	 */
	if (NFSHASNFSV4N(nmp) && NFSHASONEOPENOWN(nmp) && fords == 0) {
		if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0 &&
		    (np->n_flag & NMIGHTBELOCKED) == 0 &&
		    np->n_openstateid != NULL) {
			    np->n_openstateid->nfso_stateid.other[0];
			    np->n_openstateid->nfso_stateid.other[1];
			    np->n_openstateid->nfso_stateid.other[2];
	clp = nfscl_findcl(nmp);

	/*
	 * Wait for recovery to complete.
	 */
	while ((clp->nfsc_flags & NFSCLFLAGS_RECVRINPROG))
		(void) nfsmsleep(&clp->nfsc_flags, NFSCLSTATEMUTEXPTR,
		    PZERO, "nfsrecvr", NULL);

	/*
	 * First, look for a delegation.
	 */
	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
		if (dp->nfsdl_fhlen == fhlen &&
		    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
			if (!(mode & NFSV4OPEN_ACCESSWRITE) ||
			    (dp->nfsdl_flags & NFSCLDL_WRITE)) {
				if (NFSHASNFSV4N(nmp))
				    dp->nfsdl_stateid.seqid;
				stateidp->other[0] = dp->nfsdl_stateid.other[0];
				stateidp->other[1] = dp->nfsdl_stateid.other[1];
				stateidp->other[2] = dp->nfsdl_stateid.other[2];
				if (!(np->n_flag & NDELEGRECALL)) {
					TAILQ_REMOVE(&clp->nfsc_deleg, dp,
					TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
					dp->nfsdl_timestamp = NFSD_MONOSEC +
					dp->nfsdl_rwlock.nfslock_usecnt++;
					*lckpp = (void *)&dp->nfsdl_rwlock;

	/*
	 * If p != NULL, we want to search the parentage tree
	 * for a matching OpenOwner and use that.
	 */
	if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount)))
		nfscl_filllockowner(NULL, own, F_POSIX);
		nfscl_filllockowner(p->td_proc, own, F_POSIX);
	nfscl_filllockowner(p->td_proc, lockown, F_POSIX);
	error = nfscl_getopen(NULL, clp->nfsc_openhash, nfhp, fhlen,
	    own, lockown, mode, &lp, &op);
	if (error == 0 && lp != NULL && fords == 0) {
		/* Don't return a lock stateid for a DS. */
		if (NFSHASNFSV4N(nmp))
		stateidp->seqid = lp->nfsl_stateid.seqid;
		    lp->nfsl_stateid.other[0];
		    lp->nfsl_stateid.other[1];
		    lp->nfsl_stateid.other[2];
	/* If not found, just look for any OpenOwner that will work. */
	oph = NFSCLOPENHASH(clp, nfhp, fhlen);
	LIST_FOREACH(op, oph, nfso_hash) {
		if (op->nfso_fhlen == fhlen &&
		    !NFSBCMP(op->nfso_fh, nfhp, fhlen)) {
			if (top == NULL && (op->nfso_mode &
			    NFSV4OPEN_ACCESSWRITE) != 0 &&
			    (mode & NFSV4OPEN_ACCESSREAD) != 0)
			if ((mode & op->nfso_mode) == mode) {
				/* LRU order the hash list. */
				LIST_REMOVE(op, nfso_hash);
				LIST_INSERT_HEAD(oph, op, nfso_hash);
	NFSCL_DEBUG(2, "openmode top=%p\n", top);
	if (top == NULL || NFSHASOPENMODE(nmp)) {
		/*
		 * For read aheads or write behinds, use the open cred.
		 * A read ahead or write behind is indicated by p == NULL.
		 */
		newnfs_copycred(&op->nfso_cred, cred);

	/*
	 * No lock stateid, so return the open stateid.
	 */
	if (NFSHASNFSV4N(nmp))
	stateidp->seqid = op->nfso_stateid.seqid;
	stateidp->other[0] = op->nfso_stateid.other[0];
	stateidp->other[1] = op->nfso_stateid.other[1];
	stateidp->other[2] = op->nfso_stateid.other[2];
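/*
 * A hedged sketch of how an I/O path might obtain a stateid before a
 * read, under the calling convention described above (p == NULL would
 * indicate a read-ahead, which uses the open cred instead).  The
 * context around the call is an assumption for illustration.
 */
#if 0
	nfsv4stateid_t stateid;
	void *lckp = NULL;

	nfscl_getstateid(vp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len,
	    NFSV4OPEN_ACCESSREAD, 0, cred, p, &stateid, &lckp);
	/* ... issue the Read with "stateid", then release "lckp" ... */
#endif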
/*
 * Search for a matching file, mode and, optionally, lockowner.
 */
static int
nfscl_getopen(struct nfsclownerhead *ohp, struct nfsclopenhash *ohashp,
    u_int8_t *nfhp, int fhlen, u_int8_t *openown, u_int8_t *lockown,
    u_int32_t mode, struct nfscllockowner **lpp, struct nfsclopen **opp)
	struct nfsclowner *owp;
	struct nfsclopen *op, *rop, *rop2;
	struct nfsclopenhash *oph;

	KASSERT(ohp == NULL || ohashp == NULL, ("nfscl_getopen: "
	    "only one of ohp and ohashp can be set"));
	/*
	 * rop will be set to the open to be returned. There are three
	 * variants of this, all for an open of the correct file:
	 * 1 - A match of lockown.
	 * 2 - A match of the openown, when no lockown match exists.
	 * 3 - A match for any open, if no openown or lockown match exists.
	 * Looking for #2 over #3 probably isn't necessary, but since
	 * RFC3530 is vague w.r.t. the relationship between openowners and
	 * lockowners, I think this is the safer way to go.
	 */
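	/*
	 * For example (hypothetical): if two opens of the correct file
	 * exist, one holding a lockowner that matches lockown and one
	 * whose openowner matches openown, the lockown match (#1) is
	 * returned in preference to the openown match (#2), and either
	 * is preferred over an arbitrary open of the file (#3).
	 */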
	/* Search the client list */
	if (ohashp == NULL) {
		/* Search the local opens on the delegation. */
		LIST_FOREACH(owp, ohp, nfsow_list) {
			/* and look for the correct open */
			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
				if (op->nfso_fhlen == fhlen &&
				    !NFSBCMP(op->nfso_fh, nfhp, fhlen)
				    && (op->nfso_mode & mode) == mode)
					keep_looping = nfscl_checkown(owp, op, openown,
					    lockown, lpp, &rop, &rop2);
		/* Search for matching opens on the hash list. */
		oph = &ohashp[NFSCLOPENHASHFUNC(nfhp, fhlen)];
		LIST_FOREACH(op, oph, nfso_hash) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, nfhp, fhlen)
			    && (op->nfso_mode & mode) == mode)
				keep_looping = nfscl_checkown(op->nfso_own, op,
				    openown, lockown, lpp, &rop, &rop2);
			/* LRU order the hash list. */
			LIST_REMOVE(op, nfso_hash);
			LIST_INSERT_HEAD(oph, op, nfso_hash);

/* Check for an owner match. */
static bool
nfscl_checkown(struct nfsclowner *owp, struct nfsclopen *op, uint8_t *openown,
    uint8_t *lockown, struct nfscllockowner **lpp, struct nfsclopen **ropp,
    struct nfsclopen **ropp2)
	struct nfscllockowner *lp;

	/* Now look for a matching lockowner. */
	LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
		if (!NFSBCMP(lp->nfsl_owner, lockown,
		    NFSV4CL_LOCKNAMELEN)) {
	if (*ropp == NULL && !NFSBCMP(owp->nfsow_owner, openown,
	    NFSV4CL_LOCKNAMELEN)) {
		keep_looping = false;
	return (keep_looping);
/*
 * Release use of an open owner. Called when open operations are done
 * with the open owner.
 */
nfscl_ownerrelease(struct nfsmount *nmp, struct nfsclowner *owp,
    __unused int error, __unused int candelete, int unlocked)
	if (NFSHASONEOPENOWN(nmp))
		nfsv4_relref(&owp->nfsow_rwlock);
		nfscl_lockunlock(&owp->nfsow_rwlock);
	nfscl_clrelease(owp->nfsow_clp);

/*
 * Release use of an open structure under an open owner.
 */
nfscl_openrelease(struct nfsmount *nmp, struct nfsclopen *op, int error,
	struct nfsclclient *clp;
	struct nfsclowner *owp;

	if (NFSHASONEOPENOWN(nmp))
		nfsv4_relref(&owp->nfsow_rwlock);
		nfscl_lockunlock(&owp->nfsow_rwlock);
	clp = owp->nfsow_clp;
	if (error && candelete && op->nfso_opencnt == 0)
		nfscl_freeopen(op, 0, true);
	nfscl_clrelease(clp);
/*
 * Called to get a clientid structure. It will optionally lock the
 * client data structures to do the SetClientId/SetClientId_confirm,
 * but will release that lock and return the clientid with a reference
 * count on it.
 * If the "cred" argument is NULL, a new clientid should not be created.
 * If the "p" argument is NULL, a SetClientID/SetClientIDConfirm cannot
 * be done.
 * It always returns *clpp with a reference count on it, unless returning
 * an error.
 */
nfscl_getcl(struct mount *mp, struct ucred *cred, NFSPROC_T *p,
    bool tryminvers, bool firstref, struct nfsclclient **clpp)
	struct nfsclclient *clp;
	struct nfsclclient *newclp = NULL;
	struct nfsmount *nmp;
	char uuid[HOSTUUIDLEN];
	int igotlock = 0, error, trystalecnt, clidinusedelay, i;

	getcredhostuuid(cred, uuid, sizeof uuid);
	idlen = strlen(uuid);
		idlen += sizeof (u_int64_t);
		idlen += sizeof (u_int64_t) + 16; /* 16 random bytes */
	    sizeof (struct nfsclclient) + idlen - 1, M_NFSCLCLIENT,

	/*
	 * If a forced dismount is already in progress, don't
	 * allocate a new clientid and get out now. For the case where
	 * clp != NULL, this is a harmless optimization.
	 */
	if (NFSCL_FORCEDISM(mp)) {
		free(newclp, M_NFSCLCLIENT);
	if (newclp == NULL) {
	clp->nfsc_idlen = idlen;
	LIST_INIT(&clp->nfsc_owner);
	TAILQ_INIT(&clp->nfsc_deleg);
	TAILQ_INIT(&clp->nfsc_layout);
	LIST_INIT(&clp->nfsc_devinfo);
	for (i = 0; i < NFSCLDELEGHASHSIZE; i++)
		LIST_INIT(&clp->nfsc_deleghash[i]);
	for (i = 0; i < NFSCLOPENHASHSIZE; i++)
		LIST_INIT(&clp->nfsc_openhash[i]);
	for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++)
		LIST_INIT(&clp->nfsc_layouthash[i]);
	clp->nfsc_flags = NFSCLFLAGS_INITED;
	clp->nfsc_clientidrev = 1;
	clp->nfsc_cbident = nfscl_nextcbident();
	nfscl_fillclid(nmp->nm_clval, uuid, clp->nfsc_id,
	LIST_INSERT_HEAD(&nfsclhead, clp, nfsc_list);
		free(newclp, M_NFSCLCLIENT);
	while ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0 && !igotlock &&
	    !NFSCL_FORCEDISM(mp))
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR, mp);

	/*
	 * Call nfsv4_lock() with "iwantlock == 0" on the firstref so
	 * that it will wait for a pending exclusive lock request.
	 * This gives the exclusive lock request priority over this
	 * shared lock request.
	 * An exclusive lock on nfsc_lock is used mainly for server
	 * crash recoveries and delegation recalls.
	 */
		nfsv4_lock(&clp->nfsc_lock, 0, NULL, NFSCLSTATEMUTEXPTR,
	nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
	if (igotlock == 0 && NFSCL_FORCEDISM(mp)) {
		/*
		 * Both nfsv4_lock() and nfsv4_getref() know to check
		 * for NFSCL_FORCEDISM() and return without sleeping to
		 * wait for the exclusive lock to be released, since it
		 * might be held by nfscl_umount() and we need to get out
		 * now for that case and not wait until nfscl_umount()
		 */

	/*
	 * If it needs a clientid, do the setclientid now.
	 */
	if ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0) {
			panic("nfscl_clget");
		if (p == NULL || cred == NULL) {
			nfsv4_unlock(&clp->nfsc_lock, 0);
		/*
		 * If RFC3530 Sec. 14.2.33 is taken literally,
		 * NFSERR_CLIDINUSE will be returned persistently for the
		 * case where a new mount of the same file system is using
		 * a different principal. In practice, NFSERR_CLIDINUSE is
		 * only returned when there is outstanding unexpired state
		 * on the clientid. As such, try for twice the lease
		 * interval, if we know what that is. Otherwise, make a
		 * The case of returning NFSERR_STALECLIENTID is far less
		 * likely, but might occur if there is a significant delay
		 * between doing the SetClientID and SetClientIDConfirm Ops,
		 * such that the server throws away the clientid before
		 * receiving the SetClientIDConfirm.
		 */
		if (clp->nfsc_renew > 0)
			clidinusedelay = NFSCL_LEASE(clp->nfsc_renew) * 2;
			clidinusedelay = 120;
			error = nfsrpc_setclient(nmp, clp, 0, NULL, cred, p);
			if (error == NFSERR_STALECLIENTID ||
			    error == NFSERR_STALEDONTRECOVER ||
			    error == NFSERR_BADSESSION ||
			    error == NFSERR_CLIDINUSE) {
				(void) nfs_catnap(PZERO, error, "nfs_setcl");
			} else if (error == NFSERR_MINORVERMISMATCH &&
				if (nmp->nm_minorvers > 0)
					nmp->nm_minorvers--;
		} while (((error == NFSERR_STALECLIENTID ||
		    error == NFSERR_BADSESSION ||
		    error == NFSERR_STALEDONTRECOVER) && --trystalecnt > 0) ||
		    (error == NFSERR_CLIDINUSE && --clidinusedelay > 0) ||
		    (error == NFSERR_MINORVERMISMATCH && tryminvers));
			nfsv4_unlock(&clp->nfsc_lock, 0);
		clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
		nfsv4_unlock(&clp->nfsc_lock, 1);
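/*
 * A hedged sketch of the calling pattern for nfscl_getcl(): acquire
 * the clientid with a reference, use the state, then drop the
 * reference via nfscl_clientrelease().  The surrounding variables are
 * assumptions for illustration.
 */
#if 0
	struct nfsclclient *clp;
	int error;

	error = nfscl_getcl(mp, cred, p, false, true, &clp);
	if (error == 0) {
		/* ... use the clientid's state here ... */
		nfscl_clientrelease(clp);
	}
#endif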
/*
 * Get a reference to a clientid and return it, if valid.
 */
struct nfsclclient *
nfscl_findcl(struct nfsmount *nmp)
	struct nfsclclient *clp;

	if (clp == NULL || !(clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID))

/*
 * Release the clientid structure. It may be locked or reference counted.
 */
static void
nfscl_clrelease(struct nfsclclient *clp)
	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
		nfsv4_unlock(&clp->nfsc_lock, 0);
		nfsv4_relref(&clp->nfsc_lock);

/*
 * External call for nfscl_clrelease.
 */
nfscl_clientrelease(struct nfsclclient *clp)
	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
		nfsv4_unlock(&clp->nfsc_lock, 0);
		nfsv4_relref(&clp->nfsc_lock);
/*
 * Called when wanting to lock a byte region.
 */
nfscl_getbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
    short type, struct ucred *cred, NFSPROC_T *p, struct nfsclclient *rclp,
    int recovery, void *id, int flags, u_int8_t *rownp, u_int8_t *ropenownp,
    struct nfscllockowner **lpp, int *newonep, int *donelocallyp)
	struct nfscllockowner *lp;
	struct nfsclopen *op;
	struct nfsclclient *clp;
	struct nfscllockowner *nlp;
	struct nfscllock *nlop, *otherlop;
	struct nfscldeleg *dp = NULL, *ldp = NULL;
	struct nfscllockownerhead *lhp = NULL;
	u_int8_t own[NFSV4CL_LOCKNAMELEN], *ownp, openown[NFSV4CL_LOCKNAMELEN];
	int error = 0, ret, donelocally = 0;

	/* For Lock Ops, the open mode doesn't matter, so use 0 to match any. */

	/*
	 * Might need these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	    sizeof (struct nfscllockowner), M_NFSCLLOCKOWNER, M_WAITOK);
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop->nfslo_type = type;
	nlop->nfslo_first = off;
	if (len == NFS64BITSSET) {
		nlop->nfslo_end = NFS64BITSSET;
		nlop->nfslo_end = off + len;
		if (nlop->nfslo_end <= nlop->nfslo_first)
			error = NFSERR_INVAL;
		error = nfscl_getcl(vp->v_mount, cred, p, false, true,
		free(nlp, M_NFSCLLOCKOWNER);
		free(otherlop, M_NFSCLLOCK);
		free(nlop, M_NFSCLLOCK);
		openownp = ropenownp;
		nfscl_filllockowner(id, own, flags);
		if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount)))
			nfscl_filllockowner(NULL, openown, F_POSIX);
			nfscl_filllockowner(p->td_proc, openown, F_POSIX);

	/*
	 * First, search for a delegation. If one exists for this file,
	 * the lock can be done locally against it, so long as there
	 * isn't a local lock conflict.
	 */
	ldp = dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
	    np->n_fhp->nfh_len);
	/* Just sanity check for correct type of delegation */
	if (dp != NULL && ((dp->nfsdl_flags &
	    (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) != 0 ||
	    (dp->nfsdl_flags & NFSCLDL_WRITE) == 0)))
		/* Now, find an open and maybe a lockowner. */
		ret = nfscl_getopen(&dp->nfsdl_owner, NULL, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len, openownp, ownp, mode, NULL, &op);
			ret = nfscl_getopen(NULL, clp->nfsc_openhash,
			    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
			    ownp, mode, NULL, &op);
			lhp = &dp->nfsdl_lock;
			TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
			TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
			dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
		/*
		 * Get the related Open and maybe lockowner.
		 */
		error = nfscl_getopen(NULL, clp->nfsc_openhash,
		    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
		    ownp, mode, &lp, &op);
			lhp = &op->nfso_lock;
	if (!error && !recovery)
		error = nfscl_localconflict(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len, nlop, ownp, ldp, NULL);
		nfscl_clrelease(clp);
		free(nlp, M_NFSCLLOCKOWNER);
		free(otherlop, M_NFSCLLOCK);
		free(nlop, M_NFSCLLOCK);

	/*
	 * Ok, see if a lockowner exists and create one, as required.
	 */
		LIST_FOREACH(lp, lhp, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, ownp, NFSV4CL_LOCKNAMELEN))
		NFSBCOPY(ownp, nlp->nfsl_owner, NFSV4CL_LOCKNAMELEN);
			NFSBCOPY(ropenownp, nlp->nfsl_openowner,
			    NFSV4CL_LOCKNAMELEN);
			NFSBCOPY(op->nfso_own->nfsow_owner, nlp->nfsl_openowner,
			    NFSV4CL_LOCKNAMELEN);
		nlp->nfsl_seqid = 0;
		nlp->nfsl_lockflags = flags;
		nlp->nfsl_inprog = NULL;
		nfscl_lockinit(&nlp->nfsl_rwlock);
		LIST_INIT(&nlp->nfsl_lock);
			nlp->nfsl_open = NULL;
			nfsstatsv1.cllocallockowners++;
			nlp->nfsl_open = op;
			nfsstatsv1.cllockowners++;
		LIST_INSERT_HEAD(lhp, nlp, nfsl_list);

	/*
	 * Now, update the byte ranges for locks.
	 */
	ret = nfscl_updatelock(lp, &nlop, &otherlop, donelocally);
		nfscl_clrelease(clp);
	/*
	 * Serialize modifications to the lock owner for multiple threads
	 * of the same process using a read/write lock.
	 */
		nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
		free(nlp, M_NFSCLLOCKOWNER);
		free(nlop, M_NFSCLLOCK);
		free(otherlop, M_NFSCLLOCK);
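/*
 * A worked example of the byte-range arithmetic used above, given the
 * convention that a range is stored as [nfslo_first, nfslo_end):
 * off = 100, len = 50 yields nfslo_first = 100 and nfslo_end = 150;
 * len == NFS64BITSSET ("to EOF") yields nfslo_end = NFS64BITSSET; and
 * an off/len pair whose sum wraps, so that nfslo_end <= nfslo_first,
 * is rejected with NFSERR_INVAL.
 */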
/*
 * Called to unlock a byte range, for LockU.
 */
nfscl_relbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
    __unused struct ucred *cred, NFSPROC_T *p, int callcnt,
    struct nfsclclient *clp, void *id, int flags,
    struct nfscllockowner **lpp, int *dorpcp)
	struct nfscllockowner *lp;
	struct nfsclopen *op;
	struct nfscllock *nlop, *other_lop = NULL;
	struct nfscldeleg *dp;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];

	/*
	 * Might need these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop->nfslo_type = F_UNLCK;
	nlop->nfslo_first = off;
	if (len == NFS64BITSSET) {
		nlop->nfslo_end = NFS64BITSSET;
		nlop->nfslo_end = off + len;
		if (nlop->nfslo_end <= nlop->nfslo_first) {
			free(nlop, M_NFSCLLOCK);
			return (NFSERR_INVAL);
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nfscl_filllockowner(id, own, flags);
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
	    np->n_fhp->nfh_len);

	/*
	 * First, unlock any local regions on a delegation.
	 */
		/* Look for this lockowner. */
		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, own,
			    NFSV4CL_LOCKNAMELEN))
			/* Use other_lop, so nlop is still available */
			(void)nfscl_updatelock(lp, &other_lop, NULL, 1);

	/*
	 * Now, find a matching open/lockowner that hasn't already been done,
	 * as marked by nfsl_inprog.
	 */
	LIST_FOREACH(op, NFSCLOPENHASH(clp, np->n_fhp->nfh_fh,
	    np->n_fhp->nfh_len), nfso_hash) {
		if (op->nfso_fhlen == np->n_fhp->nfh_len &&
		    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
			LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
				if (lp->nfsl_inprog == NULL &&
				    !NFSBCMP(lp->nfsl_owner, own,
				    NFSV4CL_LOCKNAMELEN)) {
		ret = nfscl_updatelock(lp, &nlop, NULL, 0);
		/*
		 * Serialize modifications to the lock owner for multiple
		 * threads of the same process using a read/write lock.
		 */
		lp->nfsl_inprog = p;
		nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
		free(nlop, M_NFSCLLOCK);
		free(other_lop, M_NFSCLLOCK);
/*
 * Release all lockowners marked in progress for this process and file.
 */
nfscl_releasealllocks(struct nfsclclient *clp, vnode_t vp, NFSPROC_T *p,
    void *id, int flags)
	struct nfsclopen *op;
	struct nfscllockowner *lp;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];

	nfscl_filllockowner(id, own, flags);
	LIST_FOREACH(op, NFSCLOPENHASH(clp, np->n_fhp->nfh_fh,
	    np->n_fhp->nfh_len), nfso_hash) {
		if (op->nfso_fhlen == np->n_fhp->nfh_len &&
		    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
			LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
				if (lp->nfsl_inprog == p &&
				    !NFSBCMP(lp->nfsl_owner, own,
				    NFSV4CL_LOCKNAMELEN)) {
					lp->nfsl_inprog = NULL;
					nfscl_lockunlock(&lp->nfsl_rwlock);
	nfscl_clrelease(clp);
/*
 * Called to find out if any bytes within the byte range specified are
 * write locked by the calling process. Used to determine if flushing
 * is required before a LockU.
 * If in doubt, return 1, so the flush will occur.
 */
nfscl_checkwritelocked(vnode_t vp, struct flock *fl,
    struct ucred *cred, NFSPROC_T *p, void *id, int flags)
	struct nfscllockowner *lp;
	struct nfsclopen *op;
	struct nfsclclient *clp;
	struct nfscllock *lop;
	struct nfscldeleg *dp;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];

	switch (fl->l_whence) {
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		off = np->n_size + fl->l_start;
	if (fl->l_len != 0) {
		end = off + fl->l_len;
	error = nfscl_getcl(vp->v_mount, cred, p, false, true, &clp);
	nfscl_filllockowner(id, own, flags);

	/*
	 * First check the delegation locks.
	 */
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
		/* No need to flush if it is a write delegation. */
		if ((dp->nfsdl_flags & NFSCLDL_WRITE) != 0) {
			nfscl_clrelease(clp);
		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, own,
			    NFSV4CL_LOCKNAMELEN))
			LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
				if (lop->nfslo_first >= end)
				if (lop->nfslo_end <= off)
				if (lop->nfslo_type == F_WRLCK) {
					nfscl_clrelease(clp);

	/*
	 * Now, check state against the server.
	 */
	LIST_FOREACH(op, NFSCLOPENHASH(clp, np->n_fhp->nfh_fh,
	    np->n_fhp->nfh_len), nfso_hash) {
		if (op->nfso_fhlen == np->n_fhp->nfh_len &&
		    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
			LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
				if (!NFSBCMP(lp->nfsl_owner, own,
				    NFSV4CL_LOCKNAMELEN))
				LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
					if (lop->nfslo_first >= end)
					if (lop->nfslo_end <= off)
					if (lop->nfslo_type == F_WRLCK) {
						nfscl_clrelease(clp);
	nfscl_clrelease(clp);
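/*
 * The loops above skip a lock record when lop->nfslo_first >= end or
 * lop->nfslo_end <= off.  A minimal sketch of the equivalent half-open
 * interval overlap test, with hypothetical names, is:
 */
#if 0
static int
example_ranges_overlap(u_int64_t first1, u_int64_t end1,
    u_int64_t first2, u_int64_t end2)
{

	/* Ranges are half-open: [first, end). */
	return (first1 < end2 && first2 < end1);
}
#endif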
/*
 * Release a byte range lock owner structure.
 */
nfscl_lockrelease(struct nfscllockowner *lp, int error, int candelete)
	struct nfsclclient *clp;

	clp = lp->nfsl_open->nfso_own->nfsow_clp;
	if (error != 0 && candelete &&
	    (lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED) == 0)
		nfscl_freelockowner(lp, 0);
		nfscl_lockunlock(&lp->nfsl_rwlock);
	nfscl_clrelease(clp);

/*
 * Unlink the open structure.
 */
static void
nfscl_unlinkopen(struct nfsclopen *op)
	LIST_REMOVE(op, nfso_list);
	if (op->nfso_hash.le_prev != NULL)
		LIST_REMOVE(op, nfso_hash);

/*
 * Free up an open structure and any associated byte range lock structures.
 */
nfscl_freeopen(struct nfsclopen *op, int local, bool unlink)
		nfscl_unlinkopen(op);
	nfscl_freealllocks(&op->nfso_lock, local);
	free(op, M_NFSCLOPEN);
		nfsstatsv1.cllocalopens--;
		nfsstatsv1.clopens--;

/*
 * Free up all lock owners and associated locks.
 */
static void
nfscl_freealllocks(struct nfscllockownerhead *lhp, int local)
	struct nfscllockowner *lp, *nlp;

	LIST_FOREACH_SAFE(lp, lhp, nfsl_list, nlp) {
		if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
		nfscl_freelockowner(lp, local);

/*
 * Called for an Open when NFSERR_EXPIRED is received from the server.
 * If there are no byte range locks nor a Share Deny lost, try to do a
 * fresh Open. Otherwise, free the open.
 */
static int
nfscl_expireopen(struct nfsclclient *clp, struct nfsclopen *op,
    struct nfsmount *nmp, struct ucred *cred, NFSPROC_T *p)
	struct nfscllockowner *lp;
	struct nfscldeleg *dp;
	int mustdelete = 0, error;

	/*
	 * Look for any byte range lock(s).
	 */
	LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
		if (!LIST_EMPTY(&lp->nfsl_lock)) {

	/*
	 * If no byte range lock(s) nor a Share deny, try to re-open.
	 */
	if (!mustdelete && (op->nfso_mode & NFSLCK_DENYBITS) == 0) {
		newnfs_copycred(&op->nfso_cred, cred);
		error = nfsrpc_reopen(nmp, op->nfso_fh,
		    op->nfso_fhlen, op->nfso_mode, op, &dp, cred, p);
			free(dp, M_NFSCLDELEG);
			nfscl_deleg(nmp->nm_mountp, clp, op->nfso_fh,
			    op->nfso_fhlen, cred, p, &dp);

	/*
	 * If a byte range lock or Share deny or couldn't re-open, free it.
	 */
		nfscl_freeopen(op, 0, true);
	return (mustdelete);

/*
 * Free up an open owner structure.
 */
static void
nfscl_freeopenowner(struct nfsclowner *owp, int local)
	/*
	 * Make sure the NFSCLSTATE mutex is held, to avoid races with
	 * calls in nfscl_renewthread() that do not hold a reference
	 * count on the nfsclclient and just the mutex.
	 * The mutex will not be held for calls done with the exclusive
	 * nfsclclient lock held, in particular, nfscl_hasexpired()
	 * and nfscl_recalldeleg() might do this.
	 */
	owned = mtx_owned(NFSCLSTATEMUTEXPTR);
	LIST_REMOVE(owp, nfsow_list);
	free(owp, M_NFSCLOWNER);
		nfsstatsv1.cllocalopenowners--;
		nfsstatsv1.clopenowners--;

/*
 * Free up a byte range lock owner structure.
 */
nfscl_freelockowner(struct nfscllockowner *lp, int local)
	struct nfscllock *lop, *nlop;

	/*
	 * Make sure the NFSCLSTATE mutex is held, to avoid races with
	 * calls in nfscl_renewthread() that do not hold a reference
	 * count on the nfsclclient and just the mutex.
	 * The mutex will not be held for calls done with the exclusive
	 * nfsclclient lock held, in particular, nfscl_hasexpired()
	 * and nfscl_recalldeleg() might do this.
	 */
	owned = mtx_owned(NFSCLSTATEMUTEXPTR);
	LIST_REMOVE(lp, nfsl_list);
	LIST_FOREACH_SAFE(lop, &lp->nfsl_lock, nfslo_list, nlop) {
		nfscl_freelock(lop, local);
	free(lp, M_NFSCLLOCKOWNER);
		nfsstatsv1.cllocallockowners--;
		nfsstatsv1.cllockowners--;

/*
 * Free up a byte range lock structure.
 */
nfscl_freelock(struct nfscllock *lop, int local)
	LIST_REMOVE(lop, nfslo_list);
	free(lop, M_NFSCLLOCK);
		nfsstatsv1.cllocallocks--;
		nfsstatsv1.cllocks--;

/*
 * Clean out the state related to a delegation.
 */
static void
nfscl_cleandeleg(struct nfscldeleg *dp)
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op;

	LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
		op = LIST_FIRST(&owp->nfsow_open);
			if (LIST_NEXT(op, nfso_list) != NULL)
				panic("nfscleandel");
			nfscl_freeopen(op, 1, true);
		nfscl_freeopenowner(owp, 1);
	nfscl_freealllocks(&dp->nfsdl_lock, 1);

/*
 * Free a delegation.
 */
static void
nfscl_freedeleg(struct nfscldeleghead *hdp, struct nfscldeleg *dp, bool freeit)
	TAILQ_REMOVE(hdp, dp, nfsdl_list);
	LIST_REMOVE(dp, nfsdl_hash);
		free(dp, M_NFSCLDELEG);
	nfsstatsv1.cldelegates--;
/*
 * Free up all state related to this client structure.
 */
static void
nfscl_cleanclient(struct nfsclclient *clp)
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op, *nop;
	struct nfscllayout *lyp, *nlyp;
	struct nfscldevinfo *dip, *ndip;

	TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp)
		nfscl_freelayout(lyp);
	LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip)
		nfscl_freedevinfo(dip);

	/* Now, all the OpenOwners, etc. */
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
			nfscl_freeopen(op, 0, true);
		nfscl_freeopenowner(owp, 0);

/*
 * Called when an NFSERR_EXPIRED is received from the server.
 */
static void
nfscl_expireclient(struct nfsclclient *clp, struct nfsmount *nmp,
    struct ucred *cred, NFSPROC_T *p)
	struct nfsclowner *owp, *nowp, *towp;
	struct nfsclopen *op, *nop, *top;
	struct nfscldeleg *dp, *ndp;
	int ret, printed = 0;

	/*
	 * First, merge locally issued Opens into the list for the server.
	 */
	dp = TAILQ_FIRST(&clp->nfsc_deleg);
	while (dp != NULL) {
		ndp = TAILQ_NEXT(dp, nfsdl_list);
		owp = LIST_FIRST(&dp->nfsdl_owner);
		while (owp != NULL) {
			nowp = LIST_NEXT(owp, nfsow_list);
			op = LIST_FIRST(&owp->nfsow_open);
				if (LIST_NEXT(op, nfso_list) != NULL)
				LIST_FOREACH(towp, &clp->nfsc_owner, nfsow_list) {
					if (!NFSBCMP(towp->nfsow_owner, owp->nfsow_owner,
					    NFSV4CL_LOCKNAMELEN))
					/* Merge opens in */
					LIST_FOREACH(top, &towp->nfsow_open, nfso_list) {
						if (top->nfso_fhlen == op->nfso_fhlen &&
						    !NFSBCMP(top->nfso_fh, op->nfso_fh,
							top->nfso_mode |= op->nfso_mode;
							top->nfso_opencnt += op->nfso_opencnt;
					/* Just add the open to the owner list */
					LIST_REMOVE(op, nfso_list);
					op->nfso_own = towp;
					LIST_INSERT_HEAD(&towp->nfsow_open, op, nfso_list);
					LIST_INSERT_HEAD(NFSCLOPENHASH(clp, op->nfso_fh,
					    op->nfso_fhlen), op, nfso_hash);
					nfsstatsv1.cllocalopens--;
					nfsstatsv1.clopens++;
				/* Just add the openowner to the client list */
				LIST_REMOVE(owp, nfsow_list);
				owp->nfsow_clp = clp;
				LIST_INSERT_HEAD(&clp->nfsc_owner, owp, nfsow_list);
				LIST_INSERT_HEAD(NFSCLOPENHASH(clp, op->nfso_fh,
				    op->nfso_fhlen), op, nfso_hash);
				nfsstatsv1.cllocalopenowners--;
				nfsstatsv1.clopenowners++;
				nfsstatsv1.cllocalopens--;
				nfsstatsv1.clopens++;
		if (!printed && !LIST_EMPTY(&dp->nfsdl_lock)) {
			printf("nfsv4 expired locks lost\n");
		nfscl_cleandeleg(dp);
		nfscl_freedeleg(&clp->nfsc_deleg, dp, true);
	if (!TAILQ_EMPTY(&clp->nfsc_deleg))

	/*
	 * Now, try and reopen against the server.
	 */
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		owp->nfsow_seqid = 0;
		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
			ret = nfscl_expireopen(clp, op, nmp, cred, p);
			if (ret && !printed) {
				printf("nfsv4 expired locks lost\n");
			if (LIST_EMPTY(&owp->nfsow_open))
				nfscl_freeopenowner(owp, 0);
/*
 * This function must be called after the process represented by "own" has
 * exited. Must be called with CLSTATE lock held.
 */
static void
nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own)
	struct nfsclowner *owp, *nowp;
	struct nfscllockowner *lp;
	struct nfscldeleg *dp;

	/* First, get rid of local locks on delegations. */
	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
				if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
					nfscl_freelockowner(lp, 1);
	owp = LIST_FIRST(&clp->nfsc_owner);
	while (owp != NULL) {
		nowp = LIST_NEXT(owp, nfsow_list);
		if (!NFSBCMP(owp->nfsow_owner, own,
		    NFSV4CL_LOCKNAMELEN)) {
			/*
			 * If there are children that haven't closed the
			 * file descriptors yet, the opens will still be
			 * here. For that case, let the renew thread clear
			 * out the OpenOwner later.
			 */
			if (LIST_EMPTY(&owp->nfsow_open))
				nfscl_freeopenowner(owp, 0);
				owp->nfsow_defunct = 1;

/*
 * Find open/lock owners for processes that have exited.
 */
nfscl_cleanupkext(struct nfsclclient *clp, struct nfscllockownerfhhead *lhp)
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op;
	struct nfscllockowner *lp, *nlp;
	struct nfscldeleg *dp;
	uint8_t own[NFSV4CL_LOCKNAMELEN];

	/*
	 * All the pidhash locks must be acquired, since they are sx locks
	 * and must be acquired before the mutexes. The pid(s) that will
	 * be used aren't known yet, so all the locks need to be acquired.
	 * Fortunately, this function is only performed once/sec.
	 */
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			LIST_FOREACH_SAFE(lp, &op->nfso_lock, nfsl_list, nlp) {
				if (LIST_EMPTY(&lp->nfsl_lock))
					nfscl_emptylockowner(lp, lhp);
		if (nfscl_procdoesntexist(owp->nfsow_owner)) {
			memcpy(own, owp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
			nfscl_cleanup_common(clp, own);

	/*
	 * For the single open_owner case, these lock owners need to be
	 * checked to see if they still exist separately.
	 * This is because nfscl_procdoesntexist() never returns true for
	 * the single open_owner so that the above doesn't ever call
	 * nfscl_cleanup_common().
	 */
	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
		LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
			if (nfscl_procdoesntexist(lp->nfsl_owner)) {
				memcpy(own, lp->nfsl_owner,
				    NFSV4CL_LOCKNAMELEN);
				nfscl_cleanup_common(clp, own);
	pidhash_sunlockall();
/*
 * Take the empty lock owner and move it to the local lhp list if the
 * associated process no longer exists.
 */
static void
nfscl_emptylockowner(struct nfscllockowner *lp,
    struct nfscllockownerfhhead *lhp)
	struct nfscllockownerfh *lfhp, *mylfhp;
	struct nfscllockowner *nlp;

	/* If not a Posix lock owner, just return. */
	if ((lp->nfsl_lockflags & F_POSIX) == 0)

	/*
	 * First, search to see if this lock owner is already in the list.
	 * If it is, then the associated process no longer exists.
	 */
	SLIST_FOREACH(lfhp, lhp, nfslfh_list) {
		if (lfhp->nfslfh_len == lp->nfsl_open->nfso_fhlen &&
		    !NFSBCMP(lfhp->nfslfh_fh, lp->nfsl_open->nfso_fh,
			LIST_FOREACH(nlp, &lfhp->nfslfh_lock, nfsl_list)
				if (!NFSBCMP(nlp->nfsl_owner, lp->nfsl_owner,
				    NFSV4CL_LOCKNAMELEN))

	/* If not found, check if process still exists. */
	if (fnd_it == 0 && nfscl_procdoesntexist(lp->nfsl_owner) == 0)

	/* Move the lock owner over to the local list. */
	if (mylfhp == NULL) {
		mylfhp = malloc(sizeof(struct nfscllockownerfh), M_TEMP,
		mylfhp->nfslfh_len = lp->nfsl_open->nfso_fhlen;
		NFSBCOPY(lp->nfsl_open->nfso_fh, mylfhp->nfslfh_fh,
		    mylfhp->nfslfh_len);
		LIST_INIT(&mylfhp->nfslfh_lock);
		SLIST_INSERT_HEAD(lhp, mylfhp, nfslfh_list);
	LIST_REMOVE(lp, nfsl_list);
	LIST_INSERT_HEAD(&mylfhp->nfslfh_lock, lp, nfsl_list);
static int	fake_global;	/* Used to force visibility of MNTK_UNMOUNTF */
/*
 * Called from nfs umount to free up the clientid.
 */
nfscl_umount(struct nfsmount *nmp, NFSPROC_T *p, struct nfscldeleghead *dhp)
	struct nfsclclient *clp;

	/*
	 * For the case that matters, this is the thread that set
	 * MNTK_UNMOUNTF, so it will see it set. The code that follows is
	 * done to ensure that any thread executing nfscl_getcl() after
	 * this time, will see MNTK_UNMOUNTF set. nfscl_getcl() uses the
	 * mutex for NFSLOCKCLSTATE(), so it is "m" for the following
	 * explanation, courtesy of Alan Cox.
	 * What follows is a snippet from Alan Cox's email at:
	 * https://docs.FreeBSD.org/cgi/mid.cgi?BANLkTikR3d65zPHo9==08ZfJ2vmqZucEvw
	 *
	 * 1. Set MNTK_UNMOUNTF
	 * 2. Acquire a standard FreeBSD mutex "m".
	 * 3. Update some data structures.
	 * 4. Release mutex "m".
	 *
	 * Then, other threads that acquire "m" after step 4 has occurred will
	 * see MNTK_UNMOUNTF as set. But, other threads that beat thread X to
	 * step 2 may or may not see MNTK_UNMOUNTF as set.
	 */
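	/*
	 * A minimal sketch of that ordering, with hypothetical names
	 * (thread X performs steps 1-4 as described above):
	 */
#if 0
	flags |= UNMOUNTF;		/* 1. set the flag */
	mtx_lock(&m);			/* 2. acquire mutex "m" */
	/* 3. update some data structures */
	mtx_unlock(&m);			/* 4. release mutex "m" */
	/* Any thread that acquires "m" after step 4 sees UNMOUNTF set. */
#endif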
	if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
	if ((clp->nfsc_flags & NFSCLFLAGS_INITED) == 0)
		panic("nfscl umount");

	/*
	 * First, handshake with the nfscl renew thread, to terminate
	 */
	clp->nfsc_flags |= NFSCLFLAGS_UMOUNT;
	while (clp->nfsc_flags & NFSCLFLAGS_HASTHREAD)
		(void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT,

	/*
	 * Now, get the exclusive lock on the client state, so
	 * that no uses of the state are still in progress.
	 */
	do {
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR, NULL);
	} while (!igotlock);

	/*
	 * Free up all the state. It will expire on the server, but
	 * maybe we should do a SetClientId/SetClientIdConfirm so
	 * the server throws it away?
	 */
	LIST_REMOVE(clp, nfsc_list);
	nfscl_delegreturnall(clp, p, dhp);
	cred = newnfs_getcred();
	if (NFSHASNFSV4N(nmp)) {
		nfsrpc_destroysession(nmp, NULL, cred, p);
		nfsrpc_destroyclient(nmp, clp, cred, p);
		nfsrpc_setclient(nmp, clp, 0, NULL, cred, p);
	nfscl_cleanclient(clp);
	free(clp, M_NFSCLCLIENT);
/*
 * This function is called when a server replies with NFSERR_STALECLIENTID,
 * NFSERR_STALESTATEID or NFSERR_BADSESSION. It traverses the clientid lists,
 * doing Opens and Locks with reclaim. If these fail, it deletes the
 * corresponding state.
 */
static void
nfscl_recover(struct nfsclclient *clp, bool *retokp, struct ucred *cred,
    NFSPROC_T *p)
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op, *nop;
	struct nfscllockowner *lp, *nlp;
	struct nfscllock *lop, *nlop;
	struct nfscldeleg *dp, *ndp, *tdp;
	struct nfsmount *nmp;
	struct ucred *tcred;
	struct nfsclopenhead extra_open;
	struct nfscldeleghead extra_deleg;
	u_int32_t delegtype = NFSV4OPEN_DELEGATEWRITE, mode;
	int i, igotlock = 0, error, trycnt, firstlock;
	struct nfscllayout *lyp, *nlyp;

	/*
	 * First, lock the client structure, so everyone else will
	 * block when trying to use state.
	 */
	clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG;
	do {
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR, NULL);
	} while (!igotlock);
	nmp = clp->nfsc_nmp;
		panic("nfscl recover");

	/*
	 * For now, just get rid of all layouts. There may be a need
	 * to do LayoutCommit Ops with reclaim == true later.
	 */
	TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp)
		nfscl_freelayout(lyp);
	TAILQ_INIT(&clp->nfsc_layout);
	for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++)
		LIST_INIT(&clp->nfsc_layouthash[i]);

	do {
		error = nfsrpc_setclient(nmp, clp, 1, retokp, cred, p);
	} while ((error == NFSERR_STALECLIENTID ||
	    error == NFSERR_BADSESSION ||
	    error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
		clp->nfsc_flags &= ~(NFSCLFLAGS_RECOVER |
		    NFSCLFLAGS_RECVRINPROG);
		wakeup(&clp->nfsc_flags);
		nfsv4_unlock(&clp->nfsc_lock, 0);
	clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
	clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;

	/*
	 * Mark requests already queued on the server, so that they don't
	 * initiate another recovery cycle. Any requests already in the
	 * queue that handle state information will have the old stale
	 * clientid/stateid and will get a NFSERR_STALESTATEID,
	 * NFSERR_STALECLIENTID or NFSERR_BADSESSION reply from the server.
	 * This will be translated to NFSERR_STALEDONTRECOVER when
	 * R_DONTRECOVER is set.
	 */
	TAILQ_FOREACH(rep, &nfsd_reqq, r_chain) {
		if (rep->r_nmp == nmp)
			rep->r_flags |= R_DONTRECOVER;

	/*
	 * If nfsrpc_setclient() returns *retokp == true,
	 * no more recovery is needed.
	 */

	/*
	 * Now, mark all delegations "need reclaim".
	 */
	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list)
		dp->nfsdl_flags |= NFSCLDL_NEEDRECLAIM;
	TAILQ_INIT(&extra_deleg);
	LIST_INIT(&extra_open);

	/*
	 * Now traverse the state lists, doing Open and Lock Reclaims.
	 */
	tcred = newnfs_getcred();
	recovered_one = false;
	owp = LIST_FIRST(&clp->nfsc_owner);
	while (owp != NULL) {
		nowp = LIST_NEXT(owp, nfsow_list);
		owp->nfsow_seqid = 0;
		op = LIST_FIRST(&owp->nfsow_open);
		while (op != NULL) {
			nop = LIST_NEXT(op, nfso_list);
			if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) {
				/* Search for a delegation to reclaim with the open */
				TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
					if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
					if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
						mode = NFSV4OPEN_ACCESSWRITE;
						delegtype = NFSV4OPEN_DELEGATEWRITE;
						mode = NFSV4OPEN_ACCESSREAD;
						delegtype = NFSV4OPEN_DELEGATEREAD;
					if ((op->nfso_mode & mode) == mode &&
					    op->nfso_fhlen == dp->nfsdl_fhlen &&
					    !NFSBCMP(op->nfso_fh, dp->nfsdl_fh, op->nfso_fhlen))
					delegtype = NFSV4OPEN_DELEGATENONE;
				newnfs_copycred(&op->nfso_cred, tcred);
				error = nfscl_tryopen(nmp, NULL, op->nfso_fh,
				    op->nfso_fhlen, op->nfso_fh, op->nfso_fhlen,
				    op->nfso_mode, op, NULL, 0, &ndp, 1, delegtype,
					recovered_one = true;
					/* Handle any replied delegation */
					if (ndp != NULL && ((ndp->nfsdl_flags & NFSCLDL_WRITE)
					    || NFSMNT_RDONLY(nmp->nm_mountp))) {
						if ((ndp->nfsdl_flags & NFSCLDL_WRITE))
							mode = NFSV4OPEN_ACCESSWRITE;
							mode = NFSV4OPEN_ACCESSREAD;
						TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
							if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
							if ((op->nfso_mode & mode) == mode &&
							    op->nfso_fhlen == dp->nfsdl_fhlen &&
							    !NFSBCMP(op->nfso_fh, dp->nfsdl_fh,
								dp->nfsdl_stateid = ndp->nfsdl_stateid;
								dp->nfsdl_sizelimit = ndp->nfsdl_sizelimit;
								dp->nfsdl_ace = ndp->nfsdl_ace;
								dp->nfsdl_change = ndp->nfsdl_change;
								dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
								if ((ndp->nfsdl_flags & NFSCLDL_RECALL))
									dp->nfsdl_flags |= NFSCLDL_RECALL;
								free(ndp, M_NFSCLDELEG);
						TAILQ_INSERT_HEAD(&extra_deleg, ndp, nfsdl_list);
					/* and reclaim all byte range locks */
					lp = LIST_FIRST(&op->nfso_lock);
					while (lp != NULL) {
						nlp = LIST_NEXT(lp, nfsl_list);
						lop = LIST_FIRST(&lp->nfsl_lock);
						while (lop != NULL) {
							nlop = LIST_NEXT(lop, nfslo_list);
							if (lop->nfslo_end == NFS64BITSSET)
								len = lop->nfslo_end - lop->nfslo_first;
							error = nfscl_trylock(nmp, NULL,
							    op->nfso_fh, op->nfso_fhlen, lp,
							    firstlock, 1, lop->nfslo_first, len,
							    lop->nfslo_type, tcred, p);
								nfscl_freelock(lop, 0);
						/* If no locks, but a lockowner, just delete it. */
						if (LIST_EMPTY(&lp->nfsl_lock))
							nfscl_freelockowner(lp, 0);
2305 } else if (error == NFSERR_NOGRACE && !recovered_one &&
2306 NFSHASNFSV4N(nmp)) {
2308 * For NFSv4.1/4.2, the NFSERR_EXPIRED case will
2309 * actually end up here, since the client will do
2310 * a recovery for NFSERR_BADSESSION, but will get
2311 * an NFSERR_NOGRACE reply for the first "reclaim" attempt.
2313 * So, call nfscl_expireclient() to recover the
2314 * opens as best we can and then do a reclaim
2315 * complete and return.
2317 nfsrpc_reclaimcomplete(nmp, cred, p);
2318 nfscl_expireclient(clp, nmp, tcred, p);
2322 if (error != 0 && error != NFSERR_BADSESSION)
2323 nfscl_freeopen(op, 0, true);
2330 * Now, try to get any delegations not yet reclaimed by cobbling
2331 * together an appropriate open.
2334 dp = TAILQ_FIRST(&clp->nfsc_deleg);
2335 while (dp != NULL) {
2336 ndp = TAILQ_NEXT(dp, nfsdl_list);
2337 if ((dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) {
2340 sizeof (struct nfsclowner), M_NFSCLOWNER, M_WAITOK);
2342 * Name must be as long as the largest possible
2343 * NFSV4CL_LOCKNAMELEN (12, for now).
2345 NFSBCOPY("RECLAIMDELEG", nowp->nfsow_owner,
2346 NFSV4CL_LOCKNAMELEN);
2347 LIST_INIT(&nowp->nfsow_open);
2348 nowp->nfsow_clp = clp;
2349 nowp->nfsow_seqid = 0;
2350 nowp->nfsow_defunct = 0;
2351 nfscl_lockinit(&nowp->nfsow_rwlock);
2354 if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) {
2355 nop = malloc(sizeof (struct nfsclopen) +
2356 dp->nfsdl_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
2357 nop->nfso_own = nowp;
2358 if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
2359 nop->nfso_mode = NFSV4OPEN_ACCESSWRITE;
2360 delegtype = NFSV4OPEN_DELEGATEWRITE;
2362 nop->nfso_mode = NFSV4OPEN_ACCESSREAD;
2363 delegtype = NFSV4OPEN_DELEGATEREAD;
2365 nop->nfso_opencnt = 0;
2366 nop->nfso_posixlock = 1;
2367 nop->nfso_fhlen = dp->nfsdl_fhlen;
2368 NFSBCOPY(dp->nfsdl_fh, nop->nfso_fh, dp->nfsdl_fhlen);
2369 LIST_INIT(&nop->nfso_lock);
2370 nop->nfso_stateid.seqid = 0;
2371 nop->nfso_stateid.other[0] = 0;
2372 nop->nfso_stateid.other[1] = 0;
2373 nop->nfso_stateid.other[2] = 0;
2374 newnfs_copycred(&dp->nfsdl_cred, tcred);
2375 newnfs_copyincred(tcred, &nop->nfso_cred);
2377 error = nfscl_tryopen(nmp, NULL, nop->nfso_fh,
2378 nop->nfso_fhlen, nop->nfso_fh, nop->nfso_fhlen,
2379 nop->nfso_mode, nop, NULL, 0, &tdp, 1,
2380 delegtype, tcred, p);
2382 if ((tdp->nfsdl_flags & NFSCLDL_WRITE))
2383 mode = NFSV4OPEN_ACCESSWRITE;
2385 mode = NFSV4OPEN_ACCESSREAD;
2386 if ((nop->nfso_mode & mode) == mode &&
2387 nop->nfso_fhlen == tdp->nfsdl_fhlen &&
2388 !NFSBCMP(nop->nfso_fh, tdp->nfsdl_fh,
2390 dp->nfsdl_stateid = tdp->nfsdl_stateid;
2391 dp->nfsdl_sizelimit = tdp->nfsdl_sizelimit;
2392 dp->nfsdl_ace = tdp->nfsdl_ace;
2393 dp->nfsdl_change = tdp->nfsdl_change;
2394 dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
2395 if ((tdp->nfsdl_flags & NFSCLDL_RECALL))
2396 dp->nfsdl_flags |= NFSCLDL_RECALL;
2397 free(tdp, M_NFSCLDELEG);
2399 TAILQ_INSERT_HEAD(&extra_deleg, tdp, nfsdl_list);
2405 free(nop, M_NFSCLOPEN);
2406 if (error == NFSERR_NOGRACE && !recovered_one &&
2407 NFSHASNFSV4N(nmp)) {
2409 * For NFSv4.1/4.2, the NFSERR_EXPIRED case will
2410 * actually end up here, since the client will do
2411 * a recovery for NFSERR_BADSESSION, but will get
2412 * an NFSERR_NOGRACE reply for the first "reclaim" attempt.
2414 * So, call nfscl_expireclient() to recover the
2415 * opens as best we can and then do a reclaim
2416 * complete and return.
2418 nfsrpc_reclaimcomplete(nmp, cred, p);
2419 nfscl_expireclient(clp, nmp, tcred, p);
2420 free(nowp, M_NFSCLOWNER);
2424 * Couldn't reclaim it, so throw the state away.
2427 nfscl_cleandeleg(dp);
2428 nfscl_freedeleg(&clp->nfsc_deleg, dp, true);
2430 recovered_one = true;
2431 LIST_INSERT_HEAD(&extra_open, nop, nfso_list);
2438 * Now, get rid of extra Opens and Delegations.
2440 LIST_FOREACH_SAFE(op, &extra_open, nfso_list, nop) {
2442 newnfs_copycred(&op->nfso_cred, tcred);
2443 error = nfscl_tryclose(op, tcred, nmp, p, true);
2444 if (error == NFSERR_GRACE)
2445 (void) nfs_catnap(PZERO, error, "nfsexcls");
2446 } while (error == NFSERR_GRACE);
2447 LIST_REMOVE(op, nfso_list);
2448 free(op, M_NFSCLOPEN);
2451 free(nowp, M_NFSCLOWNER);
2453 TAILQ_FOREACH_SAFE(dp, &extra_deleg, nfsdl_list, ndp) {
2455 newnfs_copycred(&dp->nfsdl_cred, tcred);
2456 error = nfscl_trydelegreturn(dp, tcred, nmp, p);
2457 if (error == NFSERR_GRACE)
2458 (void) nfs_catnap(PZERO, error, "nfsexdlg");
2459 } while (error == NFSERR_GRACE);
2460 TAILQ_REMOVE(&extra_deleg, dp, nfsdl_list);
2461 free(dp, M_NFSCLDELEG);
2464 /* For NFSv4.1 or later, do a RECLAIM_COMPLETE. */
2465 if (NFSHASNFSV4N(nmp))
2466 (void)nfsrpc_reclaimcomplete(nmp, cred, p);
2470 clp->nfsc_flags &= ~NFSCLFLAGS_RECVRINPROG;
2471 wakeup(&clp->nfsc_flags);
2472 nfsv4_unlock(&clp->nfsc_lock, 0);
2479 * This function is called when a server replies with NFSERR_EXPIRED.
2480 * It deletes all state for the client and does a fresh SetClientId/confirm.
2481 * XXX Someday it should post a signal to the process(es) that hold the
2482 * state, so they know that lock state has been lost.
2485 nfscl_hasexpired(struct nfsclclient *clp, u_int32_t clidrev, NFSPROC_T *p)
2487 struct nfsmount *nmp;
2489 int igotlock = 0, error, trycnt;
2492 * If the clientid has gone away or a new SetClientid has already
2493 * been done, just return ok.
2495 if (clp == NULL || clidrev != clp->nfsc_clientidrev)
2499 * First, lock the client structure, so everyone else will
2500 * block when trying to use state. Also, use NFSCLFLAGS_EXPIREIT so
2501 * that only one thread does the work.
2504 clp->nfsc_flags |= NFSCLFLAGS_EXPIREIT;
2506 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
2507 NFSCLSTATEMUTEXPTR, NULL);
2508 } while (!igotlock && (clp->nfsc_flags & NFSCLFLAGS_EXPIREIT));
2509 if ((clp->nfsc_flags & NFSCLFLAGS_EXPIREIT) == 0) {
2511 nfsv4_unlock(&clp->nfsc_lock, 0);
2515 clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG;
2518 nmp = clp->nfsc_nmp;
2520 panic("nfscl expired");
2521 cred = newnfs_getcred();
2524 error = nfsrpc_setclient(nmp, clp, 0, NULL, cred, p);
2525 } while ((error == NFSERR_STALECLIENTID ||
2526 error == NFSERR_BADSESSION ||
2527 error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
2530 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
2533 * Expire the state for the client.
2535 nfscl_expireclient(clp, nmp, cred, p);
2537 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
2538 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
2540 clp->nfsc_flags &= ~(NFSCLFLAGS_EXPIREIT | NFSCLFLAGS_RECVRINPROG);
2541 wakeup(&clp->nfsc_flags);
2542 nfsv4_unlock(&clp->nfsc_lock, 0);
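/*
 * The NFSCLFLAGS_EXPIREIT handling above is a "first thread in does the
 * work" gate.  The compiled-out sketch below distills that idiom; every
 * identifier in it is a hypothetical stand-in, not part of this file.
 */
#if 0
#include <stdbool.h>

struct gate { unsigned int flags; };	/* stand-in for struct nfsclclient */
#define	GATE_EXPIREIT	0x01		/* stand-in for NFSCLFLAGS_EXPIREIT */

bool try_exclusive_lock(struct gate *);	/* stand-in for nfsv4_lock() */
void exclusive_unlock(struct gate *);	/* stand-in for nfsv4_unlock() */
void do_recovery_work(struct gate *);

static void
expire_once(struct gate *g)
{
	bool locked;

	/* Announce the work, then race for the exclusive lock. */
	g->flags |= GATE_EXPIREIT;
	do {
		locked = try_exclusive_lock(g);
	} while (!locked && (g->flags & GATE_EXPIREIT) != 0);
	if ((g->flags & GATE_EXPIREIT) == 0) {
		/* Another thread already did the recovery. */
		if (locked)
			exclusive_unlock(g);
		return;
	}
	do_recovery_work(g);
	g->flags &= ~GATE_EXPIREIT;
	exclusive_unlock(g);
}
#endif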
2549 * This function inserts a lock in the list after insert_lop.
2552 nfscl_insertlock(struct nfscllockowner *lp, struct nfscllock *new_lop,
2553 struct nfscllock *insert_lop, int local)
2556 if ((struct nfscllockowner *)insert_lop == lp)
2557 LIST_INSERT_HEAD(&lp->nfsl_lock, new_lop, nfslo_list);
2559 LIST_INSERT_AFTER(insert_lop, new_lop, nfslo_list);
2561 nfsstatsv1.cllocallocks++;
2563 nfsstatsv1.cllocks++;
2567 * This function updates the locking for a lock owner and given file. It
2568 * maintains a list of lock ranges ordered on increasing file offset that
2569 * are NFSCLLOCK_READ or NFSCLLOCK_WRITE and non-overlapping (aka POSIX style).
2570 * It always adds new_lop to the list and sometimes uses the one pointed
2571 * to by other_lopp.
2572 * Returns 1 if the locks were modified, 0 otherwise.
2575 nfscl_updatelock(struct nfscllockowner *lp, struct nfscllock **new_lopp,
2576 struct nfscllock **other_lopp, int local)
2578 struct nfscllock *new_lop = *new_lopp;
2579 struct nfscllock *lop, *tlop, *ilop;
2580 struct nfscllock *other_lop;
2581 int unlock = 0, modified = 0;
2585 * Work down the list until the lock is merged.
2587 if (new_lop->nfslo_type == F_UNLCK)
2589 ilop = (struct nfscllock *)lp;
2590 lop = LIST_FIRST(&lp->nfsl_lock);
2591 while (lop != NULL) {
2593 * Only check locks for this file that aren't before the start of
2594 * the new lock's range.
2596 if (lop->nfslo_end >= new_lop->nfslo_first) {
2597 if (new_lop->nfslo_end < lop->nfslo_first) {
2599 * If the new lock ends before the start of the
2600 * current lock's range, there is no merge; just insert the new lock.
2605 if (new_lop->nfslo_type == lop->nfslo_type ||
2606 (new_lop->nfslo_first <= lop->nfslo_first &&
2607 new_lop->nfslo_end >= lop->nfslo_end)) {
2609 * This lock can be absorbed by the new lock/unlock.
2610 * This happens when it covers the entire range
2611 * of the old lock or is contiguous
2612 * with the old lock and is of the same type or an unlock.
2615 if (new_lop->nfslo_type != lop->nfslo_type ||
2616 new_lop->nfslo_first != lop->nfslo_first ||
2617 new_lop->nfslo_end != lop->nfslo_end)
2619 if (lop->nfslo_first < new_lop->nfslo_first)
2620 new_lop->nfslo_first = lop->nfslo_first;
2621 if (lop->nfslo_end > new_lop->nfslo_end)
2622 new_lop->nfslo_end = lop->nfslo_end;
2624 lop = LIST_NEXT(lop, nfslo_list);
2625 nfscl_freelock(tlop, local);
2630 * All these cases are for contiguous locks that are not the
2631 * same type, so they can't be merged.
2633 if (new_lop->nfslo_first <= lop->nfslo_first) {
2635 * This case is where the new lock overlaps with the
2636 * first part of the old lock. Move the start of the
2637 * old lock to just past the end of the new lock. The
2638 * new lock will be inserted in front of the old, since
2639 * ilop hasn't been updated. (We are done now.)
2641 if (lop->nfslo_first != new_lop->nfslo_end) {
2642 lop->nfslo_first = new_lop->nfslo_end;
2647 if (new_lop->nfslo_end >= lop->nfslo_end) {
2649 * This case is where the new lock overlaps with the
2650 * end of the old lock's range. Move the old lock's
2651 * end to just before the new lock's first and insert
2652 * the new lock after the old lock.
2653 * Might not be done yet, since the new lock could
2654 * overlap further locks with higher ranges.
2656 if (lop->nfslo_end != new_lop->nfslo_first) {
2657 lop->nfslo_end = new_lop->nfslo_first;
2661 lop = LIST_NEXT(lop, nfslo_list);
2665 * The final case is where the new lock's range is in the
2666 * middle of the current lock's and splits the current lock
2667 * up. Use *other_lopp to handle the second part of the
2668 * split old lock range. (We are done now.)
2669 * For unlock, we use new_lop as other_lop and tmp, since
2670 * other_lop and new_lop are the same for this case.
2671 * We noted the unlock case above, so we don't need
2672 * new_lop->nfslo_type any longer.
2674 tmp = new_lop->nfslo_first;
2676 other_lop = new_lop;
2679 other_lop = *other_lopp;
2682 other_lop->nfslo_first = new_lop->nfslo_end;
2683 other_lop->nfslo_end = lop->nfslo_end;
2684 other_lop->nfslo_type = lop->nfslo_type;
2685 lop->nfslo_end = tmp;
2686 nfscl_insertlock(lp, other_lop, lop, local);
2692 lop = LIST_NEXT(lop, nfslo_list);
2698 * Insert the new lock in the list at the appropriate place.
2701 nfscl_insertlock(lp, new_lop, ilop, local);
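/*
 * The compiled-out sketch below summarizes the four range cases handled
 * by nfscl_updatelock() for half-open [first, end) lock ranges kept
 * sorted and non-overlapping.  All names in the sketch are hypothetical;
 * it is an illustration, not part of the client.
 */
#if 0
struct rng { unsigned long first, end; int type; };

enum rng_case { DISJOINT, ABSORB, TRIM_FRONT, TRIM_REAR, SPLIT };

static enum rng_case
classify(const struct rng *old, const struct rng *new)
{

	if (old->end < new->first || new->end < old->first)
		return (DISJOINT);	/* no overlap and not contiguous */
	if (new->type == old->type ||
	    (new->first <= old->first && new->end >= old->end))
		return (ABSORB);	/* old is swallowed by new */
	if (new->first <= old->first)
		return (TRIM_FRONT);	/* old->first becomes new->end */
	if (new->end >= old->end)
		return (TRIM_REAR);	/* old->end becomes new->first */
	return (SPLIT);			/* new splits old into two ranges */
}
#endif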
2709 * This function must be run as a kernel thread.
2710 * It does Renew Ops and recovery, when required.
2713 nfscl_renewthread(struct nfsclclient *clp, NFSPROC_T *p)
2715 struct nfsclowner *owp, *nowp;
2716 struct nfsclopen *op;
2717 struct nfscllockowner *lp, *nlp;
2718 struct nfscldeleghead dh;
2719 struct nfscldeleg *dp, *ndp;
2722 int error, cbpathdown, islept, igotlock, ret, clearok;
2723 uint32_t recover_done_time = 0;
2725 static time_t prevsec = 0;
2726 struct nfscllockownerfh *lfhp, *nlfhp;
2727 struct nfscllockownerfhhead lfh;
2728 struct nfscllayout *lyp, *nlyp;
2729 struct nfscldevinfo *dip, *ndip;
2730 struct nfscllayouthead rlh;
2731 struct nfsclrecalllayout *recallp;
2732 struct nfsclds *dsp;
2737 cred = newnfs_getcred();
2739 clp->nfsc_flags |= NFSCLFLAGS_HASTHREAD;
2740 mp = clp->nfsc_nmp->nm_mountp;
2743 newnfs_setroot(cred);
2745 if (clp->nfsc_flags & NFSCLFLAGS_RECOVER) {
2747 * Only allow one full recovery within 1/2 of the lease
2748 * duration (nfsc_renew).
2749 * retok is value/result. If passed in as true, it indicates
2750 * that only a CreateSession operation should be done.
2752 * If it is returned true, it indicates that the
2753 * recovery only required a CreateSession.
2756 if (recover_done_time < NFSD_MONOSEC) {
2757 recover_done_time = NFSD_MONOSEC +
2761 NFSCL_DEBUG(1, "Doing recovery, only "
2762 "createsession=%d\n", retok);
2763 nfscl_recover(clp, &retok, cred, p);
2765 if (clp->nfsc_expire <= NFSD_MONOSEC &&
2766 (clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID)) {
2767 clp->nfsc_expire = NFSD_MONOSEC + clp->nfsc_renew;
2768 clidrev = clp->nfsc_clientidrev;
2769 error = nfsrpc_renew(clp, NULL, cred, p);
2770 if (error == NFSERR_CBPATHDOWN)
2772 else if (error == NFSERR_STALECLIENTID) {
2774 clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
2776 } else if (error == NFSERR_EXPIRED)
2777 (void) nfscl_hasexpired(clp, clidrev, p);
2781 if (NFSHASNFSV4N(clp->nfsc_nmp)) {
2782 /* Do renews for any DS sessions. */
2783 NFSLOCKMNT(clp->nfsc_nmp);
2784 /* Skip first entry, since the MDS is handled above. */
2785 dsp = TAILQ_FIRST(&clp->nfsc_nmp->nm_sess);
2787 dsp = TAILQ_NEXT(dsp, nfsclds_list);
2788 while (dsp != NULL) {
2789 if (dsp->nfsclds_expire <= NFSD_MONOSEC &&
2790 dsp->nfsclds_sess.nfsess_defunct == 0) {
2791 dsp->nfsclds_expire = NFSD_MONOSEC +
2793 NFSUNLOCKMNT(clp->nfsc_nmp);
2794 (void)nfsrpc_renew(clp, dsp, cred, p);
2797 dsp = TAILQ_NEXT(dsp, nfsclds_list);
2799 NFSUNLOCKMNT(clp->nfsc_nmp);
2805 /* It's a Total Recall! */
2806 nfscl_totalrecall(clp);
2809 * Now, handle defunct owners.
2811 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
2812 if (LIST_EMPTY(&owp->nfsow_open)) {
2813 if (owp->nfsow_defunct != 0)
2814 nfscl_freeopenowner(owp, 0);
2819 * Do the recall on any delegations. To avoid trouble, always
2820 * come back up here after having slept.
2824 dp = TAILQ_FIRST(&clp->nfsc_deleg);
2825 while (dp != NULL) {
2826 ndp = TAILQ_NEXT(dp, nfsdl_list);
2827 if ((dp->nfsdl_flags & NFSCLDL_RECALL)) {
2829 * Wait for outstanding I/O ops to be done.
2831 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
2833 nfsv4_unlock(&clp->nfsc_lock, 0);
2836 dp->nfsdl_rwlock.nfslock_lock |=
2838 msleep(&dp->nfsdl_rwlock,
2839 NFSCLSTATEMUTEXPTR, PVFS, "nfscld",
2841 if (NFSCL_FORCEDISM(mp))
2846 igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
2847 &islept, NFSCLSTATEMUTEXPTR, mp);
2848 if (igotlock == 0 && NFSCL_FORCEDISM(mp))
2854 newnfs_copycred(&dp->nfsdl_cred, cred);
2855 ret = nfscl_recalldeleg(clp, clp->nfsc_nmp, dp,
2856 NULL, cred, p, 1, &vp);
2858 nfscl_cleandeleg(dp);
2859 TAILQ_REMOVE(&clp->nfsc_deleg, dp,
2861 LIST_REMOVE(dp, nfsdl_hash);
2862 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
2864 nfsstatsv1.cldelegates--;
2868 * The nfsc_lock must be released before doing
2869 * vrele(), since it might call nfs_inactive().
2870 * For the unlikely case where the vnode failed
2871 * to be acquired by nfscl_recalldeleg(), a
2872 * VOP_RECLAIM() should be in progress and it
2873 * will return the delegation.
2875 nfsv4_unlock(&clp->nfsc_lock, 0);
2888 * Clear out old delegations, if we are above the high water
2889 * mark. Only clear out ones with no state related to them.
2890 * The tailq list is in LRU order.
2892 dp = TAILQ_LAST(&clp->nfsc_deleg, nfscldeleghead);
2893 while (nfscl_delegcnt > nfscl_deleghighwater && dp != NULL) {
2894 ndp = TAILQ_PREV(dp, nfscldeleghead, nfsdl_list);
2895 if (dp->nfsdl_rwlock.nfslock_usecnt == 0 &&
2896 dp->nfsdl_rwlock.nfslock_lock == 0 &&
2897 dp->nfsdl_timestamp < NFSD_MONOSEC &&
2898 (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_ZAPPED |
2899 NFSCLDL_NEEDRECLAIM | NFSCLDL_DELEGRET)) == 0) {
2901 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
2902 op = LIST_FIRST(&owp->nfsow_open);
2909 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
2910 if (!LIST_EMPTY(&lp->nfsl_lock)) {
2917 TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
2918 LIST_REMOVE(dp, nfsdl_hash);
2919 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
2921 nfsstatsv1.cldelegates--;
2927 nfsv4_unlock(&clp->nfsc_lock, 0);
2930 * Do the recall on any layouts. To avoid trouble, always
2931 * come back up here after having slept.
2935 TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp) {
2936 if ((lyp->nfsly_flags & NFSLY_RECALL) != 0) {
2938 * Wait for outstanding I/O ops to be done.
2940 if (lyp->nfsly_lock.nfslock_usecnt > 0 ||
2941 (lyp->nfsly_lock.nfslock_lock &
2942 NFSV4LOCK_LOCK) != 0) {
2943 lyp->nfsly_lock.nfslock_lock |=
2945 msleep(&lyp->nfsly_lock.nfslock_lock,
2946 NFSCLSTATEMUTEXPTR, PVFS, "nfslyp",
2948 if (NFSCL_FORCEDISM(mp))
2952 /* Move the layout to the recall list. */
2953 TAILQ_REMOVE(&clp->nfsc_layout, lyp,
2955 LIST_REMOVE(lyp, nfsly_hash);
2956 TAILQ_INSERT_HEAD(&rlh, lyp, nfsly_list);
2958 /* Handle any layout commits. */
2959 if (!NFSHASNOLAYOUTCOMMIT(clp->nfsc_nmp) &&
2960 (lyp->nfsly_flags & NFSLY_WRITTEN) != 0) {
2961 lyp->nfsly_flags &= ~NFSLY_WRITTEN;
2963 NFSCL_DEBUG(3, "do layoutcommit\n");
2964 nfscl_dolayoutcommit(clp->nfsc_nmp, lyp,
2972 /* Now, look for stale layouts. */
2973 lyp = TAILQ_LAST(&clp->nfsc_layout, nfscllayouthead);
2974 while (lyp != NULL) {
2975 nlyp = TAILQ_PREV(lyp, nfscllayouthead, nfsly_list);
2976 if (lyp->nfsly_timestamp < NFSD_MONOSEC &&
2977 (lyp->nfsly_flags & (NFSLY_RECALL |
2978 NFSLY_RETONCLOSE)) == 0 &&
2979 lyp->nfsly_lock.nfslock_usecnt == 0 &&
2980 lyp->nfsly_lock.nfslock_lock == 0) {
2981 NFSCL_DEBUG(4, "ret stale lay=%d\n",
2983 recallp = malloc(sizeof(*recallp),
2984 M_NFSLAYRECALL, M_NOWAIT);
2985 if (recallp == NULL)
2987 (void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE,
2988 lyp, NFSLAYOUTIOMODE_ANY, 0, UINT64_MAX,
2989 lyp->nfsly_stateid.seqid, 0, 0, NULL,
2996 * Free up any unreferenced device info structures.
2998 LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip) {
2999 if (dip->nfsdi_layoutrefs == 0 &&
3000 dip->nfsdi_refcnt == 0) {
3001 NFSCL_DEBUG(4, "freeing devinfo\n");
3002 LIST_REMOVE(dip, nfsdi_list);
3003 nfscl_freedevinfo(dip);
3008 /* Do layout return(s), as required. */
3009 TAILQ_FOREACH_SAFE(lyp, &rlh, nfsly_list, nlyp) {
3010 TAILQ_REMOVE(&rlh, lyp, nfsly_list);
3011 NFSCL_DEBUG(4, "ret layout\n");
3012 nfscl_layoutreturn(clp->nfsc_nmp, lyp, cred, p);
3013 if ((lyp->nfsly_flags & NFSLY_RETONCLOSE) != 0) {
3015 lyp->nfsly_flags |= NFSLY_RETURNED;
3019 nfscl_freelayout(lyp);
3023 * Delegreturn any delegations cleaned out or recalled.
3025 TAILQ_FOREACH_SAFE(dp, &dh, nfsdl_list, ndp) {
3026 newnfs_copycred(&dp->nfsdl_cred, cred);
3027 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
3028 TAILQ_REMOVE(&dh, dp, nfsdl_list);
3029 free(dp, M_NFSCLDELEG);
3034 * Call nfscl_cleanupkext() once per second to check for
3035 * open/lock owners where the process has exited.
3037 mytime = NFSD_MONOSEC;
3038 if (prevsec != mytime) {
3040 nfscl_cleanupkext(clp, &lfh);
3044 * Do a ReleaseLockOwner for all lock owners where the
3045 * associated process no longer exists, as found by
3046 * nfscl_cleanupkext().
3048 newnfs_setroot(cred);
3049 SLIST_FOREACH_SAFE(lfhp, &lfh, nfslfh_list, nlfhp) {
3050 LIST_FOREACH_SAFE(lp, &lfhp->nfslfh_lock, nfsl_list,
3052 (void)nfsrpc_rellockown(clp->nfsc_nmp, lp,
3053 lfhp->nfslfh_fh, lfhp->nfslfh_len, cred,
3055 nfscl_freelockowner(lp, 0);
3062 if ((clp->nfsc_flags & NFSCLFLAGS_RECOVER) == 0)
3063 (void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT, "nfscl",
3066 if (clp->nfsc_flags & NFSCLFLAGS_UMOUNT) {
3067 clp->nfsc_flags &= ~NFSCLFLAGS_HASTHREAD;
3070 wakeup((caddr_t)clp);
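/*
 * Renewal timing in the loop above: a Renew is sent once nfsc_expire
 * passes NFSD_MONOSEC and the timer is re-armed for nfsc_renew seconds
 * later.  A compiled-out userland sketch of that schedule, with
 * hypothetical names throughout:
 */
#if 0
#include <time.h>
#include <stdbool.h>

static time_t expire_at;		/* stand-in for nfsc_expire */

static bool
renew_due(time_t now, int renew_interval)
{

	if (now < expire_at)
		return (false);		/* lease still fresh enough */
	expire_at = now + renew_interval;	/* re-arm before sending */
	return (true);
}
#endif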
3078 * Initiate state recovery. Called when NFSERR_STALECLIENTID,
3079 * NFSERR_STALESTATEID or NFSERR_BADSESSION is received.
3082 nfscl_initiate_recovery(struct nfsclclient *clp)
3088 clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
3090 wakeup((caddr_t)clp);
3094 * Dump out the state stuff for debugging.
3097 nfscl_dumpstate(struct nfsmount *nmp, int openowner, int opens,
3098 int lockowner, int locks)
3100 struct nfsclclient *clp;
3101 struct nfsclowner *owp;
3102 struct nfsclopen *op;
3103 struct nfscllockowner *lp;
3104 struct nfscllock *lop;
3105 struct nfscldeleg *dp;
3109 printf("nfscl dumpstate NULL clp\n");
3113 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
3114 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
3115 if (openowner && !LIST_EMPTY(&owp->nfsow_open))
3116 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
3117 owp->nfsow_owner[0], owp->nfsow_owner[1],
3118 owp->nfsow_owner[2], owp->nfsow_owner[3],
3120 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3122 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
3123 op->nfso_stateid.other[0], op->nfso_stateid.other[1],
3124 op->nfso_stateid.other[2], op->nfso_opencnt,
3126 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
3128 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
3129 lp->nfsl_owner[0], lp->nfsl_owner[1],
3130 lp->nfsl_owner[2], lp->nfsl_owner[3],
3132 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
3133 lp->nfsl_stateid.other[2]);
3134 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
3137 printf("lck typ=%d fst=%ju end=%ju\n",
3138 lop->nfslo_type, (intmax_t)lop->nfslo_first,
3139 (intmax_t)lop->nfslo_end);
3141 printf("lck typ=%d fst=%qd end=%qd\n",
3142 lop->nfslo_type, lop->nfslo_first,
3150 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3151 if (openowner && !LIST_EMPTY(&owp->nfsow_open))
3152 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
3153 owp->nfsow_owner[0], owp->nfsow_owner[1],
3154 owp->nfsow_owner[2], owp->nfsow_owner[3],
3156 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3158 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
3159 op->nfso_stateid.other[0], op->nfso_stateid.other[1],
3160 op->nfso_stateid.other[2], op->nfso_opencnt,
3162 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
3164 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
3165 lp->nfsl_owner[0], lp->nfsl_owner[1],
3166 lp->nfsl_owner[2], lp->nfsl_owner[3],
3168 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
3169 lp->nfsl_stateid.other[2]);
3170 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
3173 printf("lck typ=%d fst=%ju end=%ju\n",
3174 lop->nfslo_type, (intmax_t)lop->nfslo_first,
3175 (intmax_t)lop->nfslo_end);
3177 printf("lck typ=%d fst=%qd end=%qd\n",
3178 lop->nfslo_type, lop->nfslo_first,
3189 * Check for duplicate open owners and opens.
3190 * (Only used as a diagnostic aid.)
3193 nfscl_dupopen(vnode_t vp, int dupopens)
3195 struct nfsclclient *clp;
3196 struct nfsclowner *owp, *owp2;
3197 struct nfsclopen *op, *op2;
3200 clp = VFSTONFS(vp->v_mount)->nm_clp;
3202 printf("nfscl dupopen NULL clp\n");
3205 nfhp = VTONFS(vp)->n_fhp;
3209 * First, search for duplicate owners.
3210 * These should never happen!
3212 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
3213 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3215 !NFSBCMP(owp->nfsow_owner, owp2->nfsow_owner,
3216 NFSV4CL_LOCKNAMELEN)) {
3218 printf("DUP OWNER\n");
3219 nfscl_dumpstate(VFSTONFS(vp->v_mount), 1, 1, 0, 0);
3226 * Now, search for duplicate stateids.
3227 * These shouldn't happen, either.
3229 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
3230 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
3231 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3232 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3234 (op->nfso_stateid.other[0] != 0 ||
3235 op->nfso_stateid.other[1] != 0 ||
3236 op->nfso_stateid.other[2] != 0) &&
3237 op->nfso_stateid.other[0] == op2->nfso_stateid.other[0] &&
3238 op->nfso_stateid.other[1] == op2->nfso_stateid.other[1] &&
3239 op->nfso_stateid.other[2] == op2->nfso_stateid.other[2]) {
3241 printf("DUP STATEID\n");
3242 nfscl_dumpstate(VFSTONFS(vp->v_mount), 1, 1, 0, 0);
3251 * Now search for duplicate opens.
3252 * Duplicate opens for the same owner
3253 * should never occur. Other duplicates are
3254 * possible and are checked for if "dupopens" is set non-zero.
3257 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
3258 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
3259 if (nfhp->nfh_len == op2->nfso_fhlen &&
3260 !NFSBCMP(nfhp->nfh_fh, op2->nfso_fh, nfhp->nfh_len)) {
3261 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3262 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3263 if (op != op2 && nfhp->nfh_len == op->nfso_fhlen &&
3264 !NFSBCMP(nfhp->nfh_fh, op->nfso_fh, nfhp->nfh_len) &&
3265 (!NFSBCMP(op->nfso_own->nfsow_owner,
3266 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN) ||
3268 if (!NFSBCMP(op->nfso_own->nfsow_owner,
3269 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
3271 printf("BADDUP OPEN\n");
3274 printf("DUP OPEN\n");
3276 nfscl_dumpstate(VFSTONFS(vp->v_mount), 1, 1, 0,
3289 * During close, find an open that needs to be dereferenced and
3290 * dereference it. If there are no more opens for this file,
3291 * log a message to that effect.
3292 * Opens aren't actually Close'd until VOP_INACTIVE() is performed
3293 * on the file's vnode.
3294 * This is the safe way, since it is difficult to identify
3295 * which open the close is for and I/O can be performed after the
3296 * close(2) system call when a file is mmap'd.
3297 * If it returns 0 for success, there will be a referenced
3298 * clp returned via clpp.
3301 nfscl_getclose(vnode_t vp, struct nfsclclient **clpp)
3303 struct nfsclclient *clp;
3304 struct nfsclowner *owp;
3305 struct nfsclopen *op;
3306 struct nfscldeleg *dp;
3310 error = nfscl_getcl(vp->v_mount, NULL, NULL, false, true, &clp);
3315 nfhp = VTONFS(vp)->n_fhp;
3319 * First, look for one under a delegation that was locally issued
3320 * and just decrement the opencnt for it. Since all my Opens against
3321 * the server are DENY_NONE, I don't see a problem with hanging
3322 * onto them. (It is much easier to use one of the extant Opens
3323 * that I already have on the server when a Delegation is recalled
3324 * than to do fresh Opens.) Someday, I might need to rethink this.
3326 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
3328 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
3329 op = LIST_FIRST(&owp->nfsow_open);
3332 * Since a delegation is for a file, there
3333 * should never be more than one open for the same openowner.
3336 if (LIST_NEXT(op, nfso_list) != NULL)
3337 panic("nfscdeleg opens");
3338 if (notdecr && op->nfso_opencnt > 0) {
3347 /* Now process the opens against the server. */
3348 LIST_FOREACH(op, NFSCLOPENHASH(clp, nfhp->nfh_fh, nfhp->nfh_len),
3350 if (op->nfso_fhlen == nfhp->nfh_len &&
3351 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
3353 /* Found an open, decrement cnt if possible */
3354 if (notdecr && op->nfso_opencnt > 0) {
3359 * There are more opens, so just return.
3361 if (op->nfso_opencnt > 0) {
3369 printf("nfscl: never fnd open\n");
3374 nfscl_doclose(vnode_t vp, struct nfsclclient **clpp, NFSPROC_T *p)
3376 struct nfsclclient *clp;
3377 struct nfsmount *nmp;
3378 struct nfsclowner *owp, *nowp;
3379 struct nfsclopen *op, *nop;
3380 struct nfsclopenhead delayed;
3381 struct nfscldeleg *dp;
3383 struct nfsclrecalllayout *recallp;
3384 struct nfscllayout *lyp;
3387 error = nfscl_getcl(vp->v_mount, NULL, NULL, false, true, &clp);
3392 nmp = VFSTONFS(vp->v_mount);
3393 nfhp = VTONFS(vp)->n_fhp;
3394 recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_WAITOK);
3397 * First get rid of the local Open structures, which should be no
3398 * problem.
3400 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
3402 LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
3403 op = LIST_FIRST(&owp->nfsow_open);
3405 KASSERT((op->nfso_opencnt == 0),
3406 ("nfscl: bad open cnt on deleg"));
3407 nfscl_freeopen(op, 1, true);
3409 nfscl_freeopenowner(owp, 1);
3413 /* Return any layouts marked return on close. */
3414 nfscl_retoncloselayout(vp, clp, nfhp->nfh_fh, nfhp->nfh_len, &recallp,
3417 /* Now process the opens against the server. */
3418 LIST_INIT(&delayed);
3420 LIST_FOREACH(op, NFSCLOPENHASH(clp, nfhp->nfh_fh, nfhp->nfh_len),
3422 if (op->nfso_fhlen == nfhp->nfh_len &&
3423 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
3425 /* Found an open, close it. */
3427 KASSERT((op->nfso_opencnt == 0),
3428 ("nfscl: bad open cnt on server (%d)",
3432 if (NFSHASNFSV4N(nmp))
3433 error = nfsrpc_doclose(nmp, op, p, false, true);
3435 error = nfsrpc_doclose(nmp, op, p, true, true);
3437 if (error == NFSERR_DELAY) {
3438 nfscl_unlinkopen(op);
3439 op->nfso_own = NULL;
3440 LIST_INSERT_HEAD(&delayed, op, nfso_list);
3445 nfscl_clrelease(clp);
3447 /* Now, wait for any layout that is returned upon close. */
3449 while ((lyp->nfsly_flags & NFSLY_RETURNED) == 0) {
3450 if (NFSCL_FORCEDISM(nmp->nm_mountp)) {
3454 msleep(lyp, NFSCLSTATEMUTEXPTR, PZERO, "nfslroc", hz);
3457 nfscl_freelayout(lyp);
3462 * recallp has been set NULL by nfscl_retoncloselayout() if it was
3463 * used by the function, but calling free() with a NULL pointer is ok.
3465 free(recallp, M_NFSLAYRECALL);
3467 /* Now, loop retrying the delayed closes. */
3468 LIST_FOREACH_SAFE(op, &delayed, nfso_list, nop) {
3469 nfsrpc_doclose(nmp, op, p, true, false);
3470 LIST_REMOVE(op, nfso_list);
3471 nfscl_freeopen(op, 0, false);
3477 * Return all delegations on this client.
3478 * (Must be called with client sleep lock.)
3481 nfscl_delegreturnall(struct nfsclclient *clp, NFSPROC_T *p,
3482 struct nfscldeleghead *dhp)
3484 struct nfscldeleg *dp, *ndp;
3487 cred = newnfs_getcred();
3488 TAILQ_FOREACH_SAFE(dp, &clp->nfsc_deleg, nfsdl_list, ndp) {
3489 nfscl_cleandeleg(dp);
3490 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
3492 nfscl_freedeleg(&clp->nfsc_deleg, dp, false);
3493 TAILQ_INSERT_HEAD(dhp, dp, nfsdl_list);
3495 nfscl_freedeleg(&clp->nfsc_deleg, dp, true);
3501 * Return any delegation for this vp.
3504 nfscl_delegreturnvp(vnode_t vp, NFSPROC_T *p)
3506 struct nfsclclient *clp;
3507 struct nfscldeleg *dp;
3510 struct nfsmount *nmp;
3512 nmp = VFSTONFS(vp->v_mount);
3514 if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
3520 cred = newnfs_getcred();
3525 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
3526 np->n_fhp->nfh_len);
3528 nfscl_cleandeleg(dp);
3529 nfscl_freedeleg(&clp->nfsc_deleg, dp, false);
3531 newnfs_copycred(&dp->nfsdl_cred, cred);
3532 nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
3533 free(dp, M_NFSCLDELEG);
3540 * Do a callback RPC.
3543 nfscl_docb(struct nfsrv_descript *nd, NFSPROC_T *p)
3545 int clist, gotseq_ok, i, j, k, op, rcalls;
3547 struct nfsclclient *clp;
3548 struct nfscldeleg *dp = NULL;
3549 int numops, taglen = -1, error = 0, trunc __unused;
3550 u_int32_t minorvers = 0, retops = 0, *retopsp = NULL, *repp, cbident;
3551 u_char tag[NFSV4_SMALLSTR + 1], *tagstr;
3557 nfsattrbit_t attrbits, rattrbits;
3558 nfsv4stateid_t stateid;
3559 uint32_t seqid, slotid = 0, highslot, cachethis __unused;
3560 uint8_t sessionid[NFSX_V4SESSIONID];
3562 struct nfscllayout *lyp;
3563 uint64_t filesid[2], len, off;
3564 int changed, gotone, laytype, recalltype;
3566 struct nfsclrecalllayout *recallp = NULL;
3567 struct nfsclsession *tsep;
3571 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
3572 taglen = fxdr_unsigned(int, *tl);
3573 if (taglen < 0 || taglen > NFSV4_OPAQUELIMIT) {
3578 if (taglen <= NFSV4_SMALLSTR)
3581 tagstr = malloc(taglen + 1, M_TEMP, M_WAITOK);
3582 error = nfsrv_mtostr(nd, tagstr, taglen);
3584 if (taglen > NFSV4_SMALLSTR)
3585 free(tagstr, M_TEMP);
3589 (void) nfsm_strtom(nd, tag, taglen);
3590 if (taglen > NFSV4_SMALLSTR) {
3591 free(tagstr, M_TEMP);
3593 NFSM_BUILD(retopsp, u_int32_t *, NFSX_UNSIGNED);
3594 NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
3595 minorvers = fxdr_unsigned(u_int32_t, *tl++);
3596 if (minorvers != NFSV4_MINORVERSION &&
3597 minorvers != NFSV41_MINORVERSION &&
3598 minorvers != NFSV42_MINORVERSION)
3599 nd->nd_repstat = NFSERR_MINORVERMISMATCH;
3600 cbident = fxdr_unsigned(u_int32_t, *tl++);
3604 numops = fxdr_unsigned(int, *tl);
3606 * Loop around doing the sub ops.
3608 for (i = 0; i < numops; i++) {
3609 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
3610 NFSM_BUILD(repp, u_int32_t *, 2 * NFSX_UNSIGNED);
3612 op = fxdr_unsigned(int, *tl);
3613 nd->nd_procnum = op;
3614 if (i == 0 && op != NFSV4OP_CBSEQUENCE && minorvers !=
3615 NFSV4_MINORVERSION) {
3616 nd->nd_repstat = NFSERR_OPNOTINSESS;
3617 *repp = nfscl_errmap(nd, minorvers);
3621 if (op < NFSV4OP_CBGETATTR ||
3622 (op > NFSV4OP_CBRECALL && minorvers == NFSV4_MINORVERSION) ||
3623 (op > NFSV4OP_CBNOTIFYDEVID &&
3624 minorvers == NFSV41_MINORVERSION) ||
3625 (op > NFSV4OP_CBOFFLOAD &&
3626 minorvers == NFSV42_MINORVERSION)) {
3627 nd->nd_repstat = NFSERR_OPILLEGAL;
3628 *repp = nfscl_errmap(nd, minorvers);
3632 if (op < NFSV42_CBNOPS)
3633 nfsstatsv1.cbrpccnt[nd->nd_procnum]++;
3635 case NFSV4OP_CBGETATTR:
3636 NFSCL_DEBUG(4, "cbgetattr\n");
3639 error = nfsm_getfh(nd, &nfhp);
3641 error = nfsrv_getattrbits(nd, &attrbits,
3644 mp = nfscl_getmnt(minorvers, sessionid, cbident,
3647 error = NFSERR_SERVERFAULT;
3650 error = nfscl_ngetreopen(mp, nfhp->nfh_fh,
3651 nfhp->nfh_len, p, &np);
3656 NFSZERO_ATTRBIT(&rattrbits);
3658 dp = nfscl_finddeleg(clp, nfhp->nfh_fh,
3661 if (NFSISSET_ATTRBIT(&attrbits,
3664 va.va_size = np->n_size;
3668 NFSSETBIT_ATTRBIT(&rattrbits,
3671 if (NFSISSET_ATTRBIT(&attrbits,
3672 NFSATTRBIT_CHANGE)) {
3676 (np->n_flag & NDELEGMOD))
3678 NFSSETBIT_ATTRBIT(&rattrbits,
3682 error = NFSERR_SERVERFAULT;
3690 free(nfhp, M_NFSFH);
3692 (void) nfsv4_fillattr(nd, NULL, NULL, NULL, &va,
3693 NULL, 0, &rattrbits, NULL, p, 0, 0, 0, 0,
3696 case NFSV4OP_CBRECALL:
3697 NFSCL_DEBUG(4, "cbrecall\n");
3698 NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID +
3700 stateid.seqid = *tl++;
3701 NFSBCOPY((caddr_t)tl, (caddr_t)stateid.other,
3703 tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED);
3704 trunc = fxdr_unsigned(int, *tl);
3705 error = nfsm_getfh(nd, &nfhp);
3708 if (minorvers == NFSV4_MINORVERSION)
3709 clp = nfscl_getclnt(cbident);
3711 clp = nfscl_getclntsess(sessionid);
3713 dp = nfscl_finddeleg(clp, nfhp->nfh_fh,
3715 if (dp != NULL && (dp->nfsdl_flags &
3716 NFSCLDL_DELEGRET) == 0) {
3719 wakeup((caddr_t)clp);
3722 error = NFSERR_SERVERFAULT;
3727 free(nfhp, M_NFSFH);
3729 case NFSV4OP_CBLAYOUTRECALL:
3730 NFSCL_DEBUG(4, "cblayrec\n");
3732 NFSM_DISSECT(tl, uint32_t *, 4 * NFSX_UNSIGNED);
3733 laytype = fxdr_unsigned(int, *tl++);
3734 iomode = fxdr_unsigned(uint32_t, *tl++);
3735 if (newnfs_true == *tl++)
3739 recalltype = fxdr_unsigned(int, *tl);
3740 NFSCL_DEBUG(4, "layt=%d iom=%d ch=%d rectyp=%d\n",
3741 laytype, iomode, changed, recalltype);
3742 recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL,
3744 if (laytype != NFSLAYOUT_NFSV4_1_FILES &&
3745 laytype != NFSLAYOUT_FLEXFILE)
3746 error = NFSERR_NOMATCHLAYOUT;
3747 else if (recalltype == NFSLAYOUTRETURN_FILE) {
3748 error = nfsm_getfh(nd, &nfhp);
3749 NFSCL_DEBUG(4, "retfile getfh=%d\n", error);
3752 NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_HYPER +
3754 off = fxdr_hyper(tl); tl += 2;
3755 len = fxdr_hyper(tl); tl += 2;
3756 stateid.seqid = fxdr_unsigned(uint32_t, *tl++);
3757 NFSBCOPY(tl, stateid.other, NFSX_STATEIDOTHER);
3758 if (minorvers == NFSV4_MINORVERSION)
3759 error = NFSERR_NOTSUPP;
3760 NFSCL_DEBUG(4, "off=%ju len=%ju sq=%u err=%d\n",
3761 (uintmax_t)off, (uintmax_t)len,
3762 stateid.seqid, error);
3765 clp = nfscl_getclntsess(sessionid);
3766 NFSCL_DEBUG(4, "cbly clp=%p\n", clp);
3768 lyp = nfscl_findlayout(clp,
3771 NFSCL_DEBUG(4, "cblyp=%p\n",
3776 NFSLY_FLEXFILE)) != 0 &&
3777 !NFSBCMP(stateid.other,
3778 lyp->nfsly_stateid.other,
3779 NFSX_STATEIDOTHER)) {
3789 lyp->nfsly_stateid.seqid)
3790 lyp->nfsly_stateid.seqid =
3798 lyp->nfsly_stateid.seqid);
3801 NFSERR_NOMATCHLAYOUT;
3803 error = NFSERR_NOMATCHLAYOUT;
3806 free(nfhp, M_NFSFH);
3807 } else if (recalltype == NFSLAYOUTRETURN_FSID) {
3808 NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_HYPER);
3809 filesid[0] = fxdr_hyper(tl); tl += 2;
3810 filesid[1] = fxdr_hyper(tl); tl += 2;
3813 clp = nfscl_getclntsess(sessionid);
3815 TAILQ_FOREACH(lyp, &clp->nfsc_layout,
3817 if (lyp->nfsly_filesid[0] ==
3819 lyp->nfsly_filesid[1] ==
3826 lyp->nfsly_stateid.seqid,
3836 error = NFSERR_NOMATCHLAYOUT;
3838 error = NFSERR_NOMATCHLAYOUT;
3840 } else if (recalltype == NFSLAYOUTRETURN_ALL) {
3843 clp = nfscl_getclntsess(sessionid);
3845 TAILQ_FOREACH(lyp, &clp->nfsc_layout,
3847 error = nfscl_layoutrecall(
3848 recalltype, lyp, iomode, 0,
3850 lyp->nfsly_stateid.seqid,
3851 0, 0, NULL, recallp);
3858 error = NFSERR_NOMATCHLAYOUT;
3860 error = NFSERR_NOMATCHLAYOUT;
3863 error = NFSERR_NOMATCHLAYOUT;
3864 if (recallp != NULL) {
3865 free(recallp, M_NFSLAYRECALL);
3869 case NFSV4OP_CBSEQUENCE:
3871 error = NFSERR_SEQUENCEPOS;
3874 NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID +
3876 bcopy(tl, sessionid, NFSX_V4SESSIONID);
3877 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
3878 seqid = fxdr_unsigned(uint32_t, *tl++);
3879 slotid = fxdr_unsigned(uint32_t, *tl++);
3880 highslot = fxdr_unsigned(uint32_t, *tl++);
3882 /* Throw away the referring call stuff. */
3883 clist = fxdr_unsigned(int, *tl);
3884 for (j = 0; j < clist; j++) {
3885 NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID +
3887 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
3888 rcalls = fxdr_unsigned(int, *tl);
3889 for (k = 0; k < rcalls; k++) {
3890 NFSM_DISSECT(tl, uint32_t *,
3895 clp = nfscl_getclntsess(sessionid);
3897 error = NFSERR_SERVERFAULT;
3899 tsep = nfsmnt_mdssession(clp->nfsc_nmp);
3900 error = nfsv4_seqsession(seqid, slotid,
3901 highslot, tsep->nfsess_cbslots, &rep,
3902 tsep->nfsess_backslots);
3905 if (error == 0 || error == NFSERR_REPLYFROMCACHE) {
3909 * Handle a reply for a retried
3910 * callback. The reply will be
3911 * re-inserted in the session cache
3912 * by the nfsv4_seqsess_cacherep() call below.
3915 KASSERT(error == NFSERR_REPLYFROMCACHE,
3916 ("cbsequence: non-NULL rep"));
3917 NFSCL_DEBUG(4, "Got cbretry\n");
3918 m_freem(nd->nd_mreq);
3923 NFSM_BUILD(tl, uint32_t *,
3924 NFSX_V4SESSIONID + 4 * NFSX_UNSIGNED);
3925 bcopy(sessionid, tl, NFSX_V4SESSIONID);
3926 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
3927 *tl++ = txdr_unsigned(seqid);
3928 *tl++ = txdr_unsigned(slotid);
3929 *tl++ = txdr_unsigned(NFSV4_CBSLOTS - 1);
3930 *tl = txdr_unsigned(NFSV4_CBSLOTS - 1);
3934 if (i == 0 && minorvers != NFSV4_MINORVERSION)
3935 error = NFSERR_OPNOTINSESS;
3937 NFSCL_DEBUG(1, "unsupp callback %d\n", op);
3938 error = NFSERR_NOTSUPP;
3943 if (error == EBADRPC || error == NFSERR_BADXDR) {
3944 nd->nd_repstat = NFSERR_BADXDR;
3946 nd->nd_repstat = error;
3951 if (nd->nd_repstat) {
3952 *repp = nfscl_errmap(nd, minorvers);
3955 *repp = 0; /* NFS4_OK */
3958 if (recallp != NULL)
3959 free(recallp, M_NFSLAYRECALL);
3961 if (error == EBADRPC || error == NFSERR_BADXDR)
3962 nd->nd_repstat = NFSERR_BADXDR;
3964 printf("nfsv4 comperr1=%d\n", error);
3967 NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
3971 *retopsp = txdr_unsigned(retops);
3973 *nd->nd_errp = nfscl_errmap(nd, minorvers);
3975 if (gotseq_ok != 0) {
3976 rep = m_copym(nd->nd_mreq, 0, M_COPYALL, M_WAITOK);
3978 clp = nfscl_getclntsess(sessionid);
3980 tsep = nfsmnt_mdssession(clp->nfsc_nmp);
3981 nfsv4_seqsess_cacherep(slotid, tsep->nfsess_cbslots,
3992 * Generate the next cbident value. Basically just increment a static value
3993 * and then check that it isn't already in the list, if it has wrapped around.
3996 nfscl_nextcbident(void)
3998 struct nfsclclient *clp;
4000 static u_int32_t nextcbident = 0;
4001 static int haswrapped = 0;
4004 if (nextcbident == 0)
4008 * Search the clientid list for one already using this cbident.
4013 LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
4014 if (clp->nfsc_cbident == nextcbident) {
4024 return (nextcbident);
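/*
 * Compiled-out sketch of the wrap-around rule used above: keep
 * incrementing a static counter, never hand out 0, and only bother
 * scanning for collisions once the counter has wrapped.  The names
 * below are hypothetical stand-ins.
 */
#if 0
#include <stdbool.h>

bool ident_in_use(unsigned int);	/* stand-in for the clientid scan */

static unsigned int
next_ident(void)
{
	static unsigned int nextid;
	static bool wrapped;

	for (;;) {
		if (++nextid == 0) {
			nextid = 1;	/* skip 0 and remember the wrap */
			wrapped = true;
		}
		if (!wrapped || !ident_in_use(nextid))
			return (nextid);
	}
}
#endif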
4028 * Get the mount point related to a given cbident or session and busy it.
4031 nfscl_getmnt(int minorvers, uint8_t *sessionid, u_int32_t cbident,
4032 struct nfsclclient **clpp)
4034 struct nfsclclient *clp;
4037 struct nfsclsession *tsep;
4041 LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
4042 tsep = nfsmnt_mdssession(clp->nfsc_nmp);
4043 if (minorvers == NFSV4_MINORVERSION) {
4044 if (clp->nfsc_cbident == cbident)
4046 } else if (!NFSBCMP(tsep->nfsess_sessionid, sessionid,
4054 mp = clp->nfsc_nmp->nm_mountp;
4057 error = vfs_busy(mp, 0);
4066 * Get the clientid pointer related to a given cbident.
4068 static struct nfsclclient *
4069 nfscl_getclnt(u_int32_t cbident)
4071 struct nfsclclient *clp;
4073 LIST_FOREACH(clp, &nfsclhead, nfsc_list)
4074 if (clp->nfsc_cbident == cbident)
4080 * Get the clientid pointer related to a given sessionid.
4082 static struct nfsclclient *
4083 nfscl_getclntsess(uint8_t *sessionid)
4085 struct nfsclclient *clp;
4086 struct nfsclsession *tsep;
4088 LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
4089 tsep = nfsmnt_mdssession(clp->nfsc_nmp);
4090 if (!NFSBCMP(tsep->nfsess_sessionid, sessionid,
4098 * Search for a lock conflict locally on the client. A conflict occurs if
4099 * - the owners differ, the byte ranges overlap, and at least one of
4100 * them is a write lock or this is an unlock.
4103 nfscl_localconflict(struct nfsclclient *clp, u_int8_t *fhp, int fhlen,
4104 struct nfscllock *nlop, u_int8_t *own, struct nfscldeleg *dp,
4105 struct nfscllock **lopp)
4107 struct nfsclopen *op;
4111 ret = nfscl_checkconflict(&dp->nfsdl_lock, nlop, own, lopp);
4115 LIST_FOREACH(op, NFSCLOPENHASH(clp, fhp, fhlen), nfso_hash) {
4116 if (op->nfso_fhlen == fhlen &&
4117 !NFSBCMP(op->nfso_fh, fhp, fhlen)) {
4118 ret = nfscl_checkconflict(&op->nfso_lock, nlop,
4128 nfscl_checkconflict(struct nfscllockownerhead *lhp, struct nfscllock *nlop,
4129 u_int8_t *own, struct nfscllock **lopp)
4131 struct nfscllockowner *lp;
4132 struct nfscllock *lop;
4134 LIST_FOREACH(lp, lhp, nfsl_list) {
4135 if (NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
4136 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
4137 if (lop->nfslo_first >= nlop->nfslo_end)
4139 if (lop->nfslo_end <= nlop->nfslo_first)
4141 if (lop->nfslo_type == F_WRLCK ||
4142 nlop->nfslo_type == F_WRLCK ||
4143 nlop->nfslo_type == F_UNLCK) {
4146 return (NFSERR_DENIED);
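/*
 * Distilled, compiled-out form of the conflict test just above: two
 * POSIX-style byte ranges conflict only when the owners differ, the
 * half-open [first, end) ranges overlap, and at least one side is a
 * write lock or the new operation is an unlock probe.  Hypothetical
 * names only.
 */
#if 0
#include <stdbool.h>
#include <fcntl.h>

struct blk { unsigned long first, end; short type; };

static bool
ranges_conflict(const struct blk *have, const struct blk *want,
    bool sameowner)
{

	if (sameowner)
		return (false);
	if (have->first >= want->end || have->end <= want->first)
		return (false);		/* disjoint ranges never conflict */
	return (have->type == F_WRLCK || want->type == F_WRLCK ||
	    want->type == F_UNLCK);
}
#endif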
4155 * Check for a local conflicting lock.
4158 nfscl_lockt(vnode_t vp, struct nfsclclient *clp, u_int64_t off,
4159 u_int64_t len, struct flock *fl, NFSPROC_T *p, void *id, int flags)
4161 struct nfscllock *lop, nlck;
4162 struct nfscldeleg *dp;
4164 u_int8_t own[NFSV4CL_LOCKNAMELEN];
4167 nlck.nfslo_type = fl->l_type;
4168 nlck.nfslo_first = off;
4169 if (len == NFS64BITSSET) {
4170 nlck.nfslo_end = NFS64BITSSET;
4172 nlck.nfslo_end = off + len;
4173 if (nlck.nfslo_end <= nlck.nfslo_first)
4174 return (NFSERR_INVAL);
4177 nfscl_filllockowner(id, own, flags);
4179 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4180 error = nfscl_localconflict(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len,
4181 &nlck, own, dp, &lop);
4183 fl->l_whence = SEEK_SET;
4184 fl->l_start = lop->nfslo_first;
4185 if (lop->nfslo_end == NFS64BITSSET)
4188 fl->l_len = lop->nfslo_end - lop->nfslo_first;
4189 fl->l_pid = (pid_t)0;
4190 fl->l_type = lop->nfslo_type;
4191 error = -1; /* no RPC required */
4192 } else if (dp != NULL && ((dp->nfsdl_flags & NFSCLDL_WRITE) ||
4193 fl->l_type == F_RDLCK)) {
4195 * The delegation ensures that there isn't a conflicting
4196 * lock on the server, so return -1 to indicate an RPC
4197 * isn't required.
4199 fl->l_type = F_UNLCK;
4207 * Handle Recall of a delegation.
4208 * The clp must be exclusive locked when this is called.
4211 nfscl_recalldeleg(struct nfsclclient *clp, struct nfsmount *nmp,
4212 struct nfscldeleg *dp, vnode_t vp, struct ucred *cred, NFSPROC_T *p,
4213 int called_from_renewthread, vnode_t *vpp)
4215 struct nfsclowner *owp, *lowp, *nowp;
4216 struct nfsclopen *op, *lop;
4217 struct nfscllockowner *lp;
4218 struct nfscllock *lckp;
4223 KASSERT(vpp != NULL, ("nfscl_recalldeleg: vpp NULL"));
4226 * First, get a vnode for the file. This is needed to do RPCs.
4228 ret = nfscl_ngetreopen(nmp->nm_mountp, dp->nfsdl_fh,
4229 dp->nfsdl_fhlen, p, &np);
4232 * File isn't open, so nothing to move over to the
4233 * server.
4242 dp->nfsdl_flags &= ~NFSCLDL_MODTIMESET;
4245 * Ok, if it's a write delegation, flush data to the server, so
4246 * that close/open consistency is retained.
4250 if ((dp->nfsdl_flags & NFSCLDL_WRITE) && (np->n_flag & NMODIFIED)) {
4251 np->n_flag |= NDELEGRECALL;
4253 ret = ncl_flush(vp, MNT_WAIT, p, 1, called_from_renewthread);
4255 np->n_flag &= ~NDELEGRECALL;
4257 NFSINVALATTRCACHE(np);
4259 if (ret == EIO && called_from_renewthread != 0) {
4261 * If the flush failed with EIO for the renew thread,
4262 * return now, so that the dirty buffer will be flushed
4263 * later.
4269 * Now, for each openowner with opens issued locally, move them
4270 * over to state against the server.
4272 LIST_FOREACH(lowp, &dp->nfsdl_owner, nfsow_list) {
4273 lop = LIST_FIRST(&lowp->nfsow_open);
4275 if (LIST_NEXT(lop, nfso_list) != NULL)
4276 panic("nfsdlg mult opens");
4278 * Look for the same openowner against the server.
4280 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
4281 if (!NFSBCMP(lowp->nfsow_owner,
4282 owp->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
4283 newnfs_copycred(&dp->nfsdl_cred, cred);
4284 ret = nfscl_moveopen(vp, clp, nmp, lop,
4286 if (ret == NFSERR_STALECLIENTID ||
4287 ret == NFSERR_STALEDONTRECOVER ||
4288 ret == NFSERR_BADSESSION)
4291 nfscl_freeopen(lop, 1, true);
4300 * If no openowner found, create one and get an open
4301 * for it.
4305 sizeof (struct nfsclowner), M_NFSCLOWNER,
4307 nfscl_newopen(clp, NULL, &owp, &nowp, &op,
4308 NULL, lowp->nfsow_owner, dp->nfsdl_fh,
4309 dp->nfsdl_fhlen, NULL, NULL);
4310 newnfs_copycred(&dp->nfsdl_cred, cred);
4311 ret = nfscl_moveopen(vp, clp, nmp, lop,
4314 nfscl_freeopenowner(owp, 0);
4315 if (ret == NFSERR_STALECLIENTID ||
4316 ret == NFSERR_STALEDONTRECOVER ||
4317 ret == NFSERR_BADSESSION)
4320 nfscl_freeopen(lop, 1, true);
4330 * Now, get byte range locks for any locks done locally.
4332 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
4333 LIST_FOREACH(lckp, &lp->nfsl_lock, nfslo_list) {
4334 newnfs_copycred(&dp->nfsdl_cred, cred);
4335 ret = nfscl_relock(vp, clp, nmp, lp, lckp, cred, p);
4336 if (ret == NFSERR_STALESTATEID ||
4337 ret == NFSERR_STALEDONTRECOVER ||
4338 ret == NFSERR_STALECLIENTID ||
4339 ret == NFSERR_BADSESSION)
4349 * Move a locally issued open over to an owner on the state list.
4350 * SIDE EFFECT: If it needs to sleep (do an rpc), it unlocks clstate and
4351 * returns with it unlocked.
4354 nfscl_moveopen(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
4355 struct nfsclopen *lop, struct nfsclowner *owp, struct nfscldeleg *dp,
4356 struct ucred *cred, NFSPROC_T *p)
4358 struct nfsclopen *op, *nop;
4359 struct nfscldeleg *ndp;
4361 int error = 0, newone;
4364 * First, look for an appropriate open. If found, just increment the
4365 * opencnt and return.
4367 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
4368 if ((op->nfso_mode & lop->nfso_mode) == lop->nfso_mode &&
4369 op->nfso_fhlen == lop->nfso_fhlen &&
4370 !NFSBCMP(op->nfso_fh, lop->nfso_fh, op->nfso_fhlen)) {
4371 op->nfso_opencnt += lop->nfso_opencnt;
4372 nfscl_freeopen(lop, 1, true);
4377 /* No appropriate open, so we have to do one against the server. */
4379 nop = malloc(sizeof (struct nfsclopen) +
4380 lop->nfso_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
4381 nop->nfso_hash.le_prev = NULL;
4383 nfscl_newopen(clp, NULL, &owp, NULL, &op, &nop, owp->nfsow_owner,
4384 lop->nfso_fh, lop->nfso_fhlen, cred, &newone);
4386 error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data, np->n_v4->n4_fhlen,
4387 lop->nfso_fh, lop->nfso_fhlen, lop->nfso_mode, op,
4388 NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, &ndp, 0, 0, cred, p);
4391 nfscl_freeopen(op, 0, true);
4393 op->nfso_mode |= lop->nfso_mode;
4394 op->nfso_opencnt += lop->nfso_opencnt;
4395 nfscl_freeopen(lop, 1, true);
4398 free(nop, M_NFSCLOPEN);
4401 * What should I do with the returned delegation, since the
4402 * delegation is being recalled? For now, just printf a message and
4403 * free it.
4405 printf("Moveopen returned deleg\n");
4406 free(ndp, M_NFSCLDELEG);
4412 * Recall all delegations on this client.
4415 nfscl_totalrecall(struct nfsclclient *clp)
4417 struct nfscldeleg *dp;
4419 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
4420 if ((dp->nfsdl_flags & NFSCLDL_DELEGRET) == 0)
4421 dp->nfsdl_flags |= NFSCLDL_RECALL;
4426 * Relock byte ranges. Called for delegation recall and state expiry.
4429 nfscl_relock(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
4430 struct nfscllockowner *lp, struct nfscllock *lop, struct ucred *cred,
4433 struct nfscllockowner *nlp;
4437 int error, newone, donelocally;
4439 if (NFSHASNFSV4N(nmp) && NFSHASONEOPENOWN(nmp)) {
4442 np->n_flag |= NMIGHTBELOCKED;
4446 off = lop->nfslo_first;
4447 len = lop->nfslo_end - lop->nfslo_first;
4448 error = nfscl_getbytelock(vp, off, len, lop->nfslo_type, cred, p,
4449 clp, 1, NULL, lp->nfsl_lockflags, lp->nfsl_owner,
4450 lp->nfsl_openowner, &nlp, &newone, &donelocally);
4451 if (error || donelocally)
4453 nfhp = VTONFS(vp)->n_fhp;
4454 error = nfscl_trylock(nmp, vp, nfhp->nfh_fh,
4455 nfhp->nfh_len, nlp, newone, 0, off,
4456 len, lop->nfslo_type, cred, p);
4458 nfscl_freelockowner(nlp, 0);
4463 * Called to re-open a file. Basically get a vnode for the file handle
4464 * and then call nfsrpc_openrpc() to do the rest.
4467 nfsrpc_reopen(struct nfsmount *nmp, u_int8_t *fhp, int fhlen,
4468 u_int32_t mode, struct nfsclopen *op, struct nfscldeleg **dpp,
4469 struct ucred *cred, NFSPROC_T *p)
4475 error = nfscl_ngetreopen(nmp->nm_mountp, fhp, fhlen, p, &np);
4479 if (np->n_v4 != NULL) {
4480 error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data,
4481 np->n_v4->n4_fhlen, fhp, fhlen, mode, op,
4482 NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, dpp, 0, 0,
4492 * Try an open against the server. Just call nfsrpc_openrpc(), retrying while
4493 * NFSERR_DELAY. Also, try system credentials, if the passed in credentials
4494 * fail.
4497 nfscl_tryopen(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen,
4498 u_int8_t *newfhp, int newfhlen, u_int32_t mode, struct nfsclopen *op,
4499 u_int8_t *name, int namelen, struct nfscldeleg **ndpp,
4500 int reclaim, u_int32_t delegtype, struct ucred *cred, NFSPROC_T *p)
4505 error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, newfhlen,
4506 mode, op, name, namelen, ndpp, reclaim, delegtype, cred, p,
4508 if (error == NFSERR_DELAY)
4509 (void) nfs_catnap(PZERO, error, "nfstryop");
4510 } while (error == NFSERR_DELAY);
4511 if (error == EAUTH || error == EACCES) {
4512 /* Try again using system credentials */
4513 newnfs_setroot(cred);
4515 error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp,
4516 newfhlen, mode, op, name, namelen, ndpp, reclaim,
4517 delegtype, cred, p, 1, 0);
4518 if (error == NFSERR_DELAY)
4519 (void) nfs_catnap(PZERO, error, "nfstryop");
4520 } while (error == NFSERR_DELAY);
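/*
 * The same retry shape recurs in nfscl_trylock(), nfscl_trydelegreturn()
 * and nfscl_tryclose() below: loop on NFSERR_DELAY with a catnap, then
 * repeat the whole exchange once with system credentials if the caller's
 * credentials were rejected.  A compiled-out sketch of the pattern;
 * do_rpc() and backoff() are hypothetical stand-ins for the RPC and
 * nfs_catnap() calls.
 */
#if 0
#include <errno.h>

#define	NFSERR_DELAY	10008		/* NFS4ERR_DELAY (RFC 7530) */

static int
retry_rpc(int (*do_rpc)(int syscred), void (*backoff)(void))
{
	int error;

	do {
		error = do_rpc(0);	/* caller's credentials */
		if (error == NFSERR_DELAY)
			backoff();
	} while (error == NFSERR_DELAY);
	if (error == EAUTH || error == EACCES) {
		do {
			error = do_rpc(1);	/* system credentials */
			if (error == NFSERR_DELAY)
				backoff();
		} while (error == NFSERR_DELAY);
	}
	return (error);
}
#endif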
4526 * Try a byte range lock. Just loop on nfsrpc_lock() while it returns
4527 * NFSERR_DELAY. Also, retry with system credentials, if the provided
4528 * ones fail.
4531 nfscl_trylock(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp,
4532 int fhlen, struct nfscllockowner *nlp, int newone, int reclaim,
4533 u_int64_t off, u_int64_t len, short type, struct ucred *cred, NFSPROC_T *p)
4535 struct nfsrv_descript nfsd, *nd = &nfsd;
4539 error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, newone,
4540 reclaim, off, len, type, cred, p, 0);
4541 if (!error && nd->nd_repstat == NFSERR_DELAY)
4542 (void) nfs_catnap(PZERO, (int)nd->nd_repstat,
4544 } while (!error && nd->nd_repstat == NFSERR_DELAY);
4546 error = nd->nd_repstat;
4547 if (error == EAUTH || error == EACCES) {
4548 /* Try again using root credentials */
4549 newnfs_setroot(cred);
4551 error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp,
4552 newone, reclaim, off, len, type, cred, p, 1);
4553 if (!error && nd->nd_repstat == NFSERR_DELAY)
4554 (void) nfs_catnap(PZERO, (int)nd->nd_repstat,
4556 } while (!error && nd->nd_repstat == NFSERR_DELAY);
4558 error = nd->nd_repstat;
4564 * Try a delegreturn against the server. Just call nfsrpc_delegreturn(),
4565 * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in
4566 * credentials fail.
4569 nfscl_trydelegreturn(struct nfscldeleg *dp, struct ucred *cred,
4570 struct nfsmount *nmp, NFSPROC_T *p)
4575 error = nfsrpc_delegreturn(dp, cred, nmp, p, 0);
4576 if (error == NFSERR_DELAY)
4577 (void) nfs_catnap(PZERO, error, "nfstrydp");
4578 } while (error == NFSERR_DELAY);
4579 if (error == EAUTH || error == EACCES) {
4580 /* Try again using system credentials */
4581 newnfs_setroot(cred);
4583 error = nfsrpc_delegreturn(dp, cred, nmp, p, 1);
4584 if (error == NFSERR_DELAY)
4585 (void) nfs_catnap(PZERO, error, "nfstrydp");
4586 } while (error == NFSERR_DELAY);
4592 * Try a close against the server. Just call nfsrpc_closerpc(),
4593 * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in
4594 * credentials fail.
4597 nfscl_tryclose(struct nfsclopen *op, struct ucred *cred,
4598 struct nfsmount *nmp, NFSPROC_T *p, bool loop_on_delayed)
4600 struct nfsrv_descript nfsd, *nd = &nfsd;
4604 error = nfsrpc_closerpc(nd, nmp, op, cred, p, 0);
4605 if (loop_on_delayed && error == NFSERR_DELAY)
4606 (void) nfs_catnap(PZERO, error, "nfstrycl");
4607 } while (loop_on_delayed && error == NFSERR_DELAY);
4608 if (error == EAUTH || error == EACCES) {
4609 /* Try again using system credentials */
4610 newnfs_setroot(cred);
4612 error = nfsrpc_closerpc(nd, nmp, op, cred, p, 1);
4613 if (loop_on_delayed && error == NFSERR_DELAY)
4614 (void) nfs_catnap(PZERO, error, "nfstrycl");
4615 } while (loop_on_delayed && error == NFSERR_DELAY);
4621 * Decide if a delegation on a file permits close without flushing writes
4622 * to the server. This might be a big performance win in some environments.
4623 * (Not useful until the client does caching on local stable storage.)
4626 nfscl_mustflush(vnode_t vp)
4628 struct nfsclclient *clp;
4629 struct nfscldeleg *dp;
4631 struct nfsmount *nmp;
4634 nmp = VFSTONFS(vp->v_mount);
4635 if (!NFSHASNFSV4(nmp))
4638 if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
4644 clp = nfscl_findcl(nmp);
4649 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4650 if (dp != NULL && (dp->nfsdl_flags &
4651 (NFSCLDL_WRITE | NFSCLDL_RECALL | NFSCLDL_DELEGRET)) ==
4653 (dp->nfsdl_sizelimit >= np->n_size ||
4654 !NFSHASSTRICT3530(nmp))) {
4663 * See if a (write) delegation exists for this file.
4666 nfscl_nodeleg(vnode_t vp, int writedeleg)
4668 struct nfsclclient *clp;
4669 struct nfscldeleg *dp;
4671 struct nfsmount *nmp;
4674 nmp = VFSTONFS(vp->v_mount);
4675 if (!NFSHASNFSV4(nmp))
4678 if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
4684 clp = nfscl_findcl(nmp);
4689 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4691 (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == 0 &&
4692 (writedeleg == 0 || (dp->nfsdl_flags & NFSCLDL_WRITE) ==
4702 * Look for an associated delegation that should be DelegReturned.
4705 nfscl_removedeleg(vnode_t vp, NFSPROC_T *p, nfsv4stateid_t *stp)
4707 struct nfsclclient *clp;
4708 struct nfscldeleg *dp;
4709 struct nfsclowner *owp;
4710 struct nfscllockowner *lp;
4711 struct nfsmount *nmp;
4715 int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;
4717 nmp = VFSTONFS(vp->v_mount);
4718 if (NFSHASPNFS(nmp))
4721 if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
4727 mp = nmp->nm_mountp;
4730 * Loop around waiting for:
4731 * - outstanding I/O operations on delegations to complete
4732 * - for a delegation on vp that has state, lock the client and
4733 * do a recall
4734 * - return delegation with no state
4737 clp = nfscl_findcl(nmp);
4742 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
4743 np->n_fhp->nfh_len);
4746 * Wait for outstanding I/O ops to be done.
4748 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
4750 nfsv4_unlock(&clp->nfsc_lock, 0);
4753 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
4754 msleep(&dp->nfsdl_rwlock, NFSCLSTATEMUTEXPTR, PZERO,
4756 if (NFSCL_FORCEDISM(mp)) {
4757 dp->nfsdl_flags &= ~NFSCLDL_DELEGRET;
4764 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
4765 if (!LIST_EMPTY(&owp->nfsow_open)) {
4771 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
4772 if (!LIST_EMPTY(&lp->nfsl_lock)) {
4778 if (needsrecall && !triedrecall) {
4779 dp->nfsdl_flags |= NFSCLDL_DELEGRET;
4782 igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
4783 &islept, NFSCLSTATEMUTEXPTR, mp);
4784 if (NFSCL_FORCEDISM(mp)) {
4785 dp->nfsdl_flags &= ~NFSCLDL_DELEGRET;
4787 nfsv4_unlock(&clp->nfsc_lock, 0);
4797 cred = newnfs_getcred();
4798 newnfs_copycred(&dp->nfsdl_cred, cred);
4799 nfscl_recalldeleg(clp, nmp, dp, vp, cred, p, 0, NULL);
4803 nfsv4_unlock(&clp->nfsc_lock, 0);
4807 *stp = dp->nfsdl_stateid;
4809 nfscl_cleandeleg(dp);
4810 nfscl_freedeleg(&clp->nfsc_deleg, dp, true);
4813 nfsv4_unlock(&clp->nfsc_lock, 0);
/*
 * Look for associated delegation(s) that should be DelegReturned.
 */
nfscl_renamedeleg(vnode_t fvp, nfsv4stateid_t *fstp, int *gotfdp, vnode_t tvp,
    nfsv4stateid_t *tstp, int *gottdp, NFSPROC_T *p)
        struct nfsclclient *clp;
        struct nfscldeleg *dp;
        struct nfsclowner *owp;
        struct nfscllockowner *lp;
        struct nfsmount *nmp;
        int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;

        nmp = VFSTONFS(fvp->v_mount);
        if (NFSHASPNFS(nmp))
        if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
        mp = nmp->nm_mountp;
        /*
         * Loop around waiting for:
         * - outstanding I/O operations on delegations to complete
         * - for a delegation on fvp that has state, lock the client and
         *   do a recall
         * - return delegation(s) with no state.
         */
        clp = nfscl_findcl(nmp);
        dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
            np->n_fhp->nfh_len);
        if (dp != NULL && *gotfdp == 0) {
                /*
                 * Wait for outstanding I/O ops to be done.
                 */
                if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
                        nfsv4_unlock(&clp->nfsc_lock, 0);
                        dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
                        msleep(&dp->nfsdl_rwlock, NFSCLSTATEMUTEXPTR, PZERO,
                        if (NFSCL_FORCEDISM(mp)) {
                                dp->nfsdl_flags &= ~NFSCLDL_DELEGRET;
                LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
                        if (!LIST_EMPTY(&owp->nfsow_open)) {
                LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
                        if (!LIST_EMPTY(&lp->nfsl_lock)) {
                if (needsrecall && !triedrecall) {
                        dp->nfsdl_flags |= NFSCLDL_DELEGRET;
                        igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
                            &islept, NFSCLSTATEMUTEXPTR, mp);
                        if (NFSCL_FORCEDISM(mp)) {
                                dp->nfsdl_flags &= ~NFSCLDL_DELEGRET;
                                nfsv4_unlock(&clp->nfsc_lock, 0);
                        cred = newnfs_getcred();
                        newnfs_copycred(&dp->nfsdl_cred, cred);
                        nfscl_recalldeleg(clp, nmp, dp, fvp, cred, p, 0, NULL);
                        nfsv4_unlock(&clp->nfsc_lock, 0);
                *fstp = dp->nfsdl_stateid;
                nfscl_cleandeleg(dp);
                nfscl_freedeleg(&clp->nfsc_deleg, dp, true);
        nfsv4_unlock(&clp->nfsc_lock, 0);
        dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
            np->n_fhp->nfh_len);
        if (dp != NULL && *gottdp == 0) {
                /*
                 * Wait for outstanding I/O ops to be done.
                 */
                if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
                        dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
                        msleep(&dp->nfsdl_rwlock, NFSCLSTATEMUTEXPTR, PZERO,
                        if (NFSCL_FORCEDISM(mp)) {
                LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
                        if (!LIST_EMPTY(&owp->nfsow_open)) {
                LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
                        if (!LIST_EMPTY(&lp->nfsl_lock)) {
                *tstp = dp->nfsdl_stateid;
                nfscl_cleandeleg(dp);
                nfscl_freedeleg(&clp->nfsc_deleg, dp, true);
/*
 * Get a reference on the clientid associated with the mount point.
 * Return 1 on success, 0 otherwise.
 */
nfscl_getref(struct nfsmount *nmp)
        struct nfsclclient *clp;

        clp = nfscl_findcl(nmp);
        nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, nmp->nm_mountp);
        if (NFSCL_FORCEDISM(nmp->nm_mountp))

/*
 * Release a reference on a clientid acquired with the above call.
 */
nfscl_relref(struct nfsmount *nmp)
        struct nfsclclient *clp;

        clp = nfscl_findcl(nmp);
        nfsv4_relref(&clp->nfsc_lock);
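
/*
 * Illustrative sketch (hypothetical caller): nfscl_getref() and
 * nfscl_relref() bracket work that must not run concurrently with
 * clientid recovery.  The EIO failure errno is an assumption.
 */
static int
example_with_clientid(struct nfsmount *nmp)
{

        if (nfscl_getref(nmp) == 0)
                return (EIO);   /* No clientid or forced dismount. */
        /* ... do work that relies on the clientid here ... */
        nfscl_relref(nmp);
        return (0);
}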
/*
 * Save the size attribute in the delegation, since the nfsnode
 * is going away.
 */
nfscl_reclaimnode(vnode_t vp)
        struct nfsclclient *clp;
        struct nfscldeleg *dp;
        struct nfsnode *np = VTONFS(vp);
        struct nfsmount *nmp;

        nmp = VFSTONFS(vp->v_mount);
        if (!NFSHASNFSV4(nmp))
        clp = nfscl_findcl(nmp);
        dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
        if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
                dp->nfsdl_size = np->n_size;
/*
 * Get the saved size attribute in the delegation, since it is a
 * newly allocated nfsnode.
 */
nfscl_newnode(vnode_t vp)
        struct nfsclclient *clp;
        struct nfscldeleg *dp;
        struct nfsnode *np = VTONFS(vp);
        struct nfsmount *nmp;

        nmp = VFSTONFS(vp->v_mount);
        if (!NFSHASNFSV4(nmp))
        clp = nfscl_findcl(nmp);
        dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
        if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
                np->n_size = dp->nfsdl_size;
/*
 * If there is a valid write delegation for this file, set the modtime
 * to the local clock time.
 */
nfscl_delegmodtime(vnode_t vp)
        struct nfsclclient *clp;
        struct nfscldeleg *dp;
        struct nfsnode *np = VTONFS(vp);
        struct nfsmount *nmp;

        nmp = VFSTONFS(vp->v_mount);
        if (!NFSHASNFSV4(nmp))
        if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
        clp = nfscl_findcl(nmp);
        dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
        if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) {
                nanotime(&dp->nfsdl_modtime);
                dp->nfsdl_flags |= NFSCLDL_MODTIMESET;
/*
 * If there is a valid write delegation for this file with a modtime set,
 * put that modtime in mtime.
 */
nfscl_deleggetmodtime(vnode_t vp, struct timespec *mtime)
        struct nfsclclient *clp;
        struct nfscldeleg *dp;
        struct nfsnode *np = VTONFS(vp);
        struct nfsmount *nmp;

        nmp = VFSTONFS(vp->v_mount);
        if (!NFSHASNFSV4(nmp))
        if ((nmp->nm_privflag & NFSMNTP_DELEGISSUED) == 0) {
        clp = nfscl_findcl(nmp);
        dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
        (dp->nfsdl_flags & (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) ==
            (NFSCLDL_WRITE | NFSCLDL_MODTIMESET))
                *mtime = dp->nfsdl_modtime;
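
/*
 * Illustrative sketch (hypothetical getattr-path caller): after filling
 * in attributes, the locally maintained modtime from a write delegation
 * overrides the server-supplied value.
 */
static void
example_fixup_mtime(vnode_t vp, struct vattr *vap)
{

        /* Only overwrites va_mtime if a write delegation set a modtime. */
        nfscl_deleggetmodtime(vp, &vap->va_mtime);
}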
nfscl_errmap(struct nfsrv_descript *nd, u_int32_t minorvers)
        short *defaulterrp, *errp;

        if (!nd->nd_repstat)
        if (nd->nd_procnum == NFSPROC_NOOP)
                return (txdr_unsigned(nd->nd_repstat & 0xffff));
        if (nd->nd_repstat == EBADRPC)
                return (txdr_unsigned(NFSERR_BADXDR));
        if (nd->nd_repstat == NFSERR_MINORVERMISMATCH ||
            nd->nd_repstat == NFSERR_OPILLEGAL)
                return (txdr_unsigned(nd->nd_repstat));
        if (nd->nd_repstat >= NFSERR_BADIOMODE && nd->nd_repstat < 20000 &&
            minorvers > NFSV4_MINORVERSION) {
                /* NFSv4.n error. */
                return (txdr_unsigned(nd->nd_repstat));
        if (nd->nd_procnum < NFSV4OP_CBNOPS)
                errp = defaulterrp = nfscl_cberrmap[nd->nd_procnum];
                return (txdr_unsigned(nd->nd_repstat));
                if (*errp == (short)nd->nd_repstat)
                        return (txdr_unsigned(nd->nd_repstat));
        return (txdr_unsigned(*defaulterrp));
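
/*
 * Minimal standalone sketch of the error-map convention assumed above
 * (inferred from the scan in nfscl_errmap(): entry 0 of each map is the
 * default error and the list is zero-terminated; the table values below
 * are made up for illustration):
 */
static short example_map[] = { 10008, 10014, 10020, 0 };

static short
example_maperr(short stat)
{
        short *errp = example_map;

        /* Return stat if it is in the list, else the default entry. */
        while (*++errp)
                if (*errp == stat)
                        return (stat);
        return (example_map[0]);
}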
/*
 * Called to find/add a layout to a client.
 * This function returns the layout with a refcnt (shared lock) upon
 * success (returns 0) or with no lock/refcnt on the layout when an
 * error is returned.
 * If a layout is passed in via lypp, it is locked (exclusively locked).
 */
nfscl_layout(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen,
    nfsv4stateid_t *stateidp, int layouttype, int retonclose,
    struct nfsclflayouthead *fhlp, struct nfscllayout **lypp,
    struct ucred *cred, NFSPROC_T *p)
        struct nfsclclient *clp;
        struct nfscllayout *lyp, *tlyp;
        struct nfsclflayout *flp;
        struct nfsnode *np = VTONFS(vp);
        int layout_passed_in;

        mp = nmp->nm_mountp;
        layout_passed_in = 1;
        layout_passed_in = 0;
        tlyp = malloc(sizeof(*tlyp) + fhlen - 1, M_NFSLAYOUT,
        if (layout_passed_in != 0)
                nfsv4_unlock(&lyp->nfsly_lock, 0);
        free(tlyp, M_NFSLAYOUT);
        /*
         * Although no lyp was passed in, another thread might have
         * allocated one. If one is found, just increment its ref
         * count and return it.
         */
        lyp = nfscl_findlayout(clp, fhp, fhlen);
        lyp->nfsly_stateid.seqid = stateidp->seqid;
        lyp->nfsly_stateid.other[0] = stateidp->other[0];
        lyp->nfsly_stateid.other[1] = stateidp->other[1];
        lyp->nfsly_stateid.other[2] = stateidp->other[2];
        lyp->nfsly_lastbyte = 0;
        LIST_INIT(&lyp->nfsly_flayread);
        LIST_INIT(&lyp->nfsly_flayrw);
        LIST_INIT(&lyp->nfsly_recall);
        lyp->nfsly_filesid[0] = np->n_vattr.na_filesid[0];
        lyp->nfsly_filesid[1] = np->n_vattr.na_filesid[1];
        lyp->nfsly_clp = clp;
        if (layouttype == NFSLAYOUT_FLEXFILE)
                lyp->nfsly_flags = NFSLY_FLEXFILE;
                lyp->nfsly_flags = NFSLY_FILES;
        if (retonclose != 0)
                lyp->nfsly_flags |= NFSLY_RETONCLOSE;
        lyp->nfsly_fhlen = fhlen;
        NFSBCOPY(fhp, lyp->nfsly_fh, fhlen);
        TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
        LIST_INSERT_HEAD(NFSCLLAYOUTHASH(clp, fhp, fhlen), lyp,
        lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
        nfsstatsv1.cllayouts++;
        if (retonclose != 0)
                lyp->nfsly_flags |= NFSLY_RETONCLOSE;
        if (stateidp->seqid > lyp->nfsly_stateid.seqid)
                lyp->nfsly_stateid.seqid = stateidp->seqid;
        TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list);
        TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
        lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
        nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
        if (NFSCL_FORCEDISM(mp)) {
                free(tlyp, M_NFSLAYOUT);
        } else if (stateidp->seqid > lyp->nfsly_stateid.seqid)
                lyp->nfsly_stateid.seqid = stateidp->seqid;
        /* Merge the new list of File Layouts into the list. */
        flp = LIST_FIRST(fhlp);
        if (flp->nfsfl_iomode == NFSLAYOUTIOMODE_READ)
                nfscl_mergeflayouts(&lyp->nfsly_flayread, fhlp);
                nfscl_mergeflayouts(&lyp->nfsly_flayrw, fhlp);
        if (layout_passed_in != 0)
                nfsv4_unlock(&lyp->nfsly_lock, 1);
        free(tlyp, M_NFSLAYOUT);
/*
 * Search for a layout by MDS file handle.
 * If one is found, it is returned with a refcnt (shared lock) iff
 * retflpp is non-NULL and exclusively locked iff retflpp is NULL.
 */
struct nfscllayout *
nfscl_getlayout(struct nfsclclient *clp, uint8_t *fhp, int fhlen,
    uint64_t off, uint32_t rwaccess, struct nfsclflayout **retflpp,
        struct nfscllayout *lyp;
        int error, igotlock;

        mp = clp->nfsc_nmp->nm_mountp;
        lyp = nfscl_findlayout(clp, fhp, fhlen);
        if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) {
                TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list);
                TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
                lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
                error = nfscl_findlayoutforio(lyp, off, rwaccess,
                nfsv4_getref(&lyp->nfsly_lock, NULL,
                    NFSCLSTATEMUTEXPTR, mp);
                do {
                        igotlock = nfsv4_lock(&lyp->nfsly_lock,
                            1, NULL, NFSCLSTATEMUTEXPTR, mp);
                } while (igotlock == 0 && !NFSCL_FORCEDISM(mp));
                if (NFSCL_FORCEDISM(mp)) {
/*
 * Search for a layout by MDS file handle. If one is found, mark it to be
 * recalled, if it is already marked "return on close".
 */
nfscl_retoncloselayout(vnode_t vp, struct nfsclclient *clp, uint8_t *fhp,
    int fhlen, struct nfsclrecalllayout **recallpp, struct nfscllayout **lypp)
        struct nfscllayout *lyp;

        if (vp->v_type != VREG || !NFSHASPNFS(VFSTONFS(vp->v_mount)) ||
            nfscl_enablecallb == 0 || nfs_numnfscbd == 0 ||
            (VTONFS(vp)->n_flag & NNOLAYOUT) != 0)
        lyp = nfscl_findlayout(clp, fhp, fhlen);
        if (lyp != NULL && (lyp->nfsly_flags & NFSLY_RETONCLOSE) != 0) {
                if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) {
                        if (!LIST_EMPTY(&lyp->nfsly_flayread))
                                iomode |= NFSLAYOUTIOMODE_READ;
                        if (!LIST_EMPTY(&lyp->nfsly_flayrw))
                                iomode |= NFSLAYOUTIOMODE_RW;
                        nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, lyp, iomode,
                            0, UINT64_MAX, lyp->nfsly_stateid.seqid, 0, 0, NULL,
                        NFSCL_DEBUG(4, "retoncls recall iomode=%d\n", iomode);
        /* Now, wake up renew thread to do LayoutReturn. */
/*
 * Mark the layout to be recalled, recording the error.
 * Also, disable the dsp from further use.
 */
nfscl_dserr(uint32_t op, uint32_t stat, struct nfscldevinfo *dp,
    struct nfscllayout *lyp, struct nfsclds *dsp)
        struct nfsclrecalllayout *recallp;

        printf("DS being disabled, error=%d\n", stat);
        /* Set up the return of the layout. */
        recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_WAITOK);
        if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) {
                if (!LIST_EMPTY(&lyp->nfsly_flayread))
                        iomode |= NFSLAYOUTIOMODE_READ;
                if (!LIST_EMPTY(&lyp->nfsly_flayrw))
                        iomode |= NFSLAYOUTIOMODE_RW;
                (void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, lyp, iomode,
                    0, UINT64_MAX, lyp->nfsly_stateid.seqid, stat, op,
                    dp->nfsdi_deviceid, recallp);
                NFSCL_DEBUG(4, "nfscl_dserr recall iomode=%d\n", iomode);
        free(recallp, M_NFSLAYRECALL);
        /* And shut the TCP connection down. */
        nfscl_cancelreqs(dsp);
/*
 * Cancel all RPCs for this "dsp" by closing the connection.
 * Also, mark the session as defunct.
 * If NFSCLDS_SAMECONN is set, the connection is shared with other DSs and
 * cannot be shut down.
 */
nfscl_cancelreqs(struct nfsclds *dsp)
        struct __rpc_client *cl;
        static int non_event;

        if ((dsp->nfsclds_flags & (NFSCLDS_CLOSED | NFSCLDS_SAMECONN)) == 0 &&
            dsp->nfsclds_sockp != NULL &&
            dsp->nfsclds_sockp->nr_client != NULL) {
                dsp->nfsclds_flags |= NFSCLDS_CLOSED;
                cl = dsp->nfsclds_sockp->nr_client;
                dsp->nfsclds_sess.nfsess_defunct = 1;
                /*
                 * This one-second sleep reduces the number of reconnect
                 * attempts made to the DS while it is down.
                 */
                tsleep(&non_event, PVFS, "ndscls", hz);
/*
 * Dereference a layout.
 */
nfscl_rellayout(struct nfscllayout *lyp, int exclocked)

        nfsv4_unlock(&lyp->nfsly_lock, 0);
        nfsv4_relref(&lyp->nfsly_lock);
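
/*
 * Illustrative sketch (hypothetical caller): a layout obtained from
 * nfscl_layout() or nfscl_getlayout() holds either a shared refcnt or
 * an exclusive lock, and nfscl_rellayout()'s second argument must say
 * which of the two is being dropped.
 */
static void
example_release_layout(struct nfscllayout *lyp, bool exclusive)
{

        /* 1 drops an exclusive lock, 0 drops a shared refcnt. */
        nfscl_rellayout(lyp, exclusive ? 1 : 0);
}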
/*
 * Search for a devinfo by deviceid. If one is found, return it after
 * acquiring a reference count on it.
 */
struct nfscldevinfo *
nfscl_getdevinfo(struct nfsclclient *clp, uint8_t *deviceid,
    struct nfscldevinfo *dip)

        dip = nfscl_finddevinfo(clp, deviceid);
        dip->nfsdi_refcnt++;

/*
 * Dereference a devinfo structure.
 */
nfscl_reldevinfo_locked(struct nfscldevinfo *dip)

        dip->nfsdi_refcnt--;
        if (dip->nfsdi_refcnt == 0)
                wakeup(&dip->nfsdi_refcnt);

/*
 * Dereference a devinfo structure.
 */
nfscl_reldevinfo(struct nfscldevinfo *dip)

        nfscl_reldevinfo_locked(dip);
/*
 * Find a layout for this file handle. Return NULL upon failure.
 */
static struct nfscllayout *
nfscl_findlayout(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
        struct nfscllayout *lyp;

        LIST_FOREACH(lyp, NFSCLLAYOUTHASH(clp, fhp, fhlen), nfsly_hash)
                if (lyp->nfsly_fhlen == fhlen &&
                    !NFSBCMP(lyp->nfsly_fh, fhp, fhlen))

/*
 * Find a devinfo for this deviceid. Return NULL upon failure.
 */
static struct nfscldevinfo *
nfscl_finddevinfo(struct nfsclclient *clp, uint8_t *deviceid)
        struct nfscldevinfo *dip;

        LIST_FOREACH(dip, &clp->nfsc_devinfo, nfsdi_list)
                if (NFSBCMP(dip->nfsdi_deviceid, deviceid, NFSX_V4DEVICEID)
/*
 * Merge the new file layout list into the main one, maintaining it in
 * increasing offset order.
 */
nfscl_mergeflayouts(struct nfsclflayouthead *fhlp,
    struct nfsclflayouthead *newfhlp)
        struct nfsclflayout *flp, *nflp, *prevflp, *tflp;

        flp = LIST_FIRST(fhlp);
        LIST_FOREACH_SAFE(nflp, newfhlp, nfsfl_list, tflp) {
                while (flp != NULL && flp->nfsfl_off < nflp->nfsfl_off) {
                        flp = LIST_NEXT(flp, nfsfl_list);
                if (prevflp == NULL)
                        LIST_INSERT_HEAD(fhlp, nflp, nfsfl_list);
                        LIST_INSERT_AFTER(prevflp, nflp, nfsfl_list);
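
/*
 * Standalone sketch of the merge above, using a made-up element type:
 * each element of the source list is spliced into the destination so
 * that the destination stays in increasing offset order; the source
 * list head is consumed and must be discarded afterwards.
 */
#include <sys/queue.h>
#include <stdint.h>

struct ex_fl {
        uint64_t off;
        LIST_ENTRY(ex_fl) link;
};
LIST_HEAD(ex_flhead, ex_fl);

static void
ex_merge(struct ex_flhead *dst, struct ex_flhead *src)
{
        struct ex_fl *flp, *nflp, *prev, *tflp;

        flp = LIST_FIRST(dst);
        prev = NULL;
        LIST_FOREACH_SAFE(nflp, src, link, tflp) {
                /* Advance past destination entries with smaller offsets. */
                while (flp != NULL && flp->off < nflp->off) {
                        prev = flp;
                        flp = LIST_NEXT(flp, link);
                }
                if (prev == NULL)
                        LIST_INSERT_HEAD(dst, nflp, link);
                else
                        LIST_INSERT_AFTER(prev, nflp, link);
                prev = nflp;
        }
}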
/*
 * Add this nfscldevinfo to the client, if it doesn't already exist.
 * This function consumes the structure pointed at by dip, if not NULL.
 */
nfscl_adddevinfo(struct nfsmount *nmp, struct nfscldevinfo *dip, int ind,
    struct nfsclflayout *flp)
        struct nfsclclient *clp;
        struct nfscldevinfo *tdip;

        free(dip, M_NFSDEVINFO);
        if ((flp->nfsfl_flags & NFSFL_FILE) != 0)
                dev = flp->nfsfl_dev;
                dev = flp->nfsfl_ffm[ind].dev;
        tdip = nfscl_finddevinfo(clp, dev);
        tdip->nfsdi_layoutrefs++;
        if ((flp->nfsfl_flags & NFSFL_FILE) != 0)
                flp->nfsfl_devp = tdip;
                flp->nfsfl_ffm[ind].devp = tdip;
        nfscl_reldevinfo_locked(tdip);
        free(dip, M_NFSDEVINFO);
        LIST_INSERT_HEAD(&clp->nfsc_devinfo, dip, nfsdi_list);
        dip->nfsdi_layoutrefs = 1;
        if ((flp->nfsfl_flags & NFSFL_FILE) != 0)
                flp->nfsfl_devp = dip;
                flp->nfsfl_ffm[ind].devp = dip;
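
/*
 * Illustrative sketch (hypothetical caller): because nfscl_adddevinfo()
 * consumes *dip on every path, the pointer must not be used or freed
 * after the call.
 */
static void
example_add_devinfo(struct nfsmount *nmp, struct nfscldevinfo *dip,
    struct nfsclflayout *flp)
{

        nfscl_adddevinfo(nmp, dip, 0, flp);     /* Ownership passes here. */
        dip = NULL;     /* Defensive: the structure is no longer ours. */
}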
/*
 * Free up a layout structure and associated file layout structure(s).
 */
nfscl_freelayout(struct nfscllayout *layp)
        struct nfsclflayout *flp, *nflp;
        struct nfsclrecalllayout *rp, *nrp;

        LIST_FOREACH_SAFE(flp, &layp->nfsly_flayread, nfsfl_list, nflp) {
                LIST_REMOVE(flp, nfsfl_list);
                nfscl_freeflayout(flp);
        LIST_FOREACH_SAFE(flp, &layp->nfsly_flayrw, nfsfl_list, nflp) {
                LIST_REMOVE(flp, nfsfl_list);
                nfscl_freeflayout(flp);
        LIST_FOREACH_SAFE(rp, &layp->nfsly_recall, nfsrecly_list, nrp) {
                LIST_REMOVE(rp, nfsrecly_list);
                free(rp, M_NFSLAYRECALL);
        nfsstatsv1.cllayouts--;
        free(layp, M_NFSLAYOUT);

/*
 * Free up a file layout structure.
 */
nfscl_freeflayout(struct nfsclflayout *flp)

        if ((flp->nfsfl_flags & NFSFL_FILE) != 0) {
                for (i = 0; i < flp->nfsfl_fhcnt; i++)
                        free(flp->nfsfl_fh[i], M_NFSFH);
                if (flp->nfsfl_devp != NULL)
                        flp->nfsfl_devp->nfsdi_layoutrefs--;
        if ((flp->nfsfl_flags & NFSFL_FLEXFILE) != 0)
                for (i = 0; i < flp->nfsfl_mirrorcnt; i++) {
                        for (j = 0; j < flp->nfsfl_ffm[i].fhcnt; j++)
                                free(flp->nfsfl_ffm[i].fh[j], M_NFSFH);
                        if (flp->nfsfl_ffm[i].devp != NULL)
                                flp->nfsfl_ffm[i].devp->nfsdi_layoutrefs--;
        free(flp, M_NFSFLAYOUT);

/*
 * Free up a file layout devinfo structure.
 */
nfscl_freedevinfo(struct nfscldevinfo *dip)

        free(dip, M_NFSDEVINFO);
/*
 * Mark any layouts that match as recalled.
 */
nfscl_layoutrecall(int recalltype, struct nfscllayout *lyp, uint32_t iomode,
    uint64_t off, uint64_t len, uint32_t stateseqid, uint32_t stat, uint32_t op,
    char *devid, struct nfsclrecalllayout *recallp)
        struct nfsclrecalllayout *rp, *orp;

        recallp->nfsrecly_recalltype = recalltype;
        recallp->nfsrecly_iomode = iomode;
        recallp->nfsrecly_stateseqid = stateseqid;
        recallp->nfsrecly_off = off;
        recallp->nfsrecly_len = len;
        recallp->nfsrecly_stat = stat;
        recallp->nfsrecly_op = op;
        NFSBCOPY(devid, recallp->nfsrecly_devid, NFSX_V4DEVICEID);
        /*
         * Order the list as file returns first, followed by fsid and any
         * returns, both in increasing stateseqid order.
         * Note that the seqids wrap around, so 1 is after 0xffffffff.
         * (I'm not sure this is correct because I find RFC5661 confusing
         * on this, but hopefully it will work ok.)
         */
        LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) {
                if ((recalltype == NFSLAYOUTRETURN_FILE &&
                    (rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE ||
                    nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) ||
                    (recalltype != NFSLAYOUTRETURN_FILE &&
                    rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE &&
                    nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) {
                        LIST_INSERT_BEFORE(rp, recallp, nfsrecly_list);
                /*
                 * Put any error return on all the file returns that will
                 * be done before this one.
                 */
                if (rp->nfsrecly_recalltype == NFSLAYOUTRETURN_FILE &&
                    stat != 0 && rp->nfsrecly_stat == 0) {
                        rp->nfsrecly_stat = stat;
                        rp->nfsrecly_op = op;
                        NFSBCOPY(devid, rp->nfsrecly_devid,
        LIST_INSERT_HEAD(&lyp->nfsly_recall, recallp,
        LIST_INSERT_AFTER(orp, recallp, nfsrecly_list);
        lyp->nfsly_flags |= NFSLY_RECALL;
        wakeup(lyp->nfsly_clp);
/*
 * Compare the two seqids for ordering. The trick is that the seqids can
 * wrap around from 0xffffffff->0, so check for the cases where one
 * has wrapped around.
 * Return 1 if seqid1 comes before seqid2, 0 otherwise.
 */
nfscl_seq(uint32_t seqid1, uint32_t seqid2)

        if (seqid2 > seqid1 && (seqid2 - seqid1) >= 0x7fffffff)
                /* seqid2 has wrapped around. */
        if (seqid1 > seqid2 && (seqid1 - seqid2) >= 0x7fffffff)
                /* seqid1 has wrapped around. */
        if (seqid1 <= seqid2)
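
/*
 * Worked examples of the wraparound ordering above (results derived
 * from the contract "return 1 if seqid1 comes before seqid2"):
 *   nfscl_seq(1, 2)          == 1  - 1 comes before 2
 *   nfscl_seq(5, 5)          == 1  - equal counts as "before"
 *   nfscl_seq(0xfffffffe, 3) == 1  - 3 lies just past the wrap to 0
 *   nfscl_seq(3, 0xfffffffe) == 0  - here 3 is the newer, post-wrap value
 */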
/*
 * Do a layout return for each of the recalls.
 */
nfscl_layoutreturn(struct nfsmount *nmp, struct nfscllayout *lyp,
    struct ucred *cred, NFSPROC_T *p)
        struct nfsclrecalllayout *rp;
        nfsv4stateid_t stateid;

        NFSBCOPY(lyp->nfsly_stateid.other, stateid.other, NFSX_STATEIDOTHER);
        stateid.seqid = lyp->nfsly_stateid.seqid;
        if ((lyp->nfsly_flags & NFSLY_FILES) != 0)
                layouttype = NFSLAYOUT_NFSV4_1_FILES;
                layouttype = NFSLAYOUT_FLEXFILE;
        LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) {
                (void)nfsrpc_layoutreturn(nmp, lyp->nfsly_fh,
                    lyp->nfsly_fhlen, 0, layouttype,
                    rp->nfsrecly_iomode, rp->nfsrecly_recalltype,
                    rp->nfsrecly_off, rp->nfsrecly_len,
                    &stateid, cred, p, rp->nfsrecly_stat, rp->nfsrecly_op,
                    rp->nfsrecly_devid);
/*
 * Do the layout commit for a file layout.
 */
nfscl_dolayoutcommit(struct nfsmount *nmp, struct nfscllayout *lyp,
    struct ucred *cred, NFSPROC_T *p)
        struct nfsclflayout *flp;
        int error, layouttype;

        if ((lyp->nfsly_flags & NFSLY_FILES) != 0)
                layouttype = NFSLAYOUT_NFSV4_1_FILES;
                layouttype = NFSLAYOUT_FLEXFILE;
        LIST_FOREACH(flp, &lyp->nfsly_flayrw, nfsfl_list) {
                if (layouttype == NFSLAYOUT_FLEXFILE &&
                    (flp->nfsfl_fflags & NFSFLEXFLAG_NO_LAYOUTCOMMIT) != 0) {
                        NFSCL_DEBUG(4, "Flex file: no layoutcommit\n");
                        /* If not supported, don't bother doing it. */
                        nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT;
                } else if (flp->nfsfl_off <= lyp->nfsly_lastbyte) {
                        len = flp->nfsfl_end - flp->nfsfl_off;
                        error = nfsrpc_layoutcommit(nmp, lyp->nfsly_fh,
                            lyp->nfsly_fhlen, 0, flp->nfsfl_off, len,
                            lyp->nfsly_lastbyte, &lyp->nfsly_stateid,
                            layouttype, cred, p);
                        NFSCL_DEBUG(4, "layoutcommit err=%d\n", error);
                        if (error == NFSERR_NOTSUPP) {
                                /* If not supported, don't bother doing it. */
                                nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT;
/*
 * Commit all layouts for a file (vnode).
 */
nfscl_layoutcommit(vnode_t vp, NFSPROC_T *p)
        struct nfsclclient *clp;
        struct nfscllayout *lyp;
        struct nfsnode *np = VTONFS(vp);
        struct nfsmount *nmp;

        if (NFSHASNOLAYOUTCOMMIT(nmp))
        lyp = nfscl_findlayout(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
        nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
        if (NFSCL_FORCEDISM(mp)) {
        if ((lyp->nfsly_flags & NFSLY_WRITTEN) != 0) {
                lyp->nfsly_flags &= ~NFSLY_WRITTEN;
                NFSCL_DEBUG(4, "do layoutcommit2\n");
                nfscl_dolayoutcommit(clp->nfsc_nmp, lyp, NFSPROCCRED(p), p);
        nfsv4_relref(&lyp->nfsly_lock);
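
/*
 * Illustrative sketch (hypothetical caller): paths that finish writing,
 * such as close or fsync, can commit written layouts before releasing
 * the file.  The guarding checks shown here are assumptions.
 */
static void
example_commit_on_close(vnode_t vp, NFSPROC_T *p)
{
        struct nfsmount *nmp = VFSTONFS(vp->v_mount);

        if (NFSHASPNFS(nmp) && !NFSHASNOLAYOUTCOMMIT(nmp))
                nfscl_layoutcommit(vp, p);
}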