/*
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009 Rick Macklem, University of Guelph
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
 * These functions implement the client side state handling for NFSv4.
 * NFSv4 state handling:
 * - A lockowner is used to determine lock contention, so it
 *   corresponds directly to a Posix pid. (1 to 1 mapping)
 * - The correct granularity of an OpenOwner is not nearly so
 *   obvious. An OpenOwner does the following:
 *   - provides a serial sequencing of Open/Close/Lock-with-new-lockowner
 *   - is used to check for Open/Share contention (not applicable to
 *     this client, since all Opens are Deny_None)
 *   As such, I considered both extremes:
 *   1 OpenOwner per ClientID - Simple to manage, but fully serializes
 *     all Open, Close and Lock (with a new lockowner) Ops.
 *   1 OpenOwner for each Open - This one results in an OpenConfirm for
 *     every Open, for most servers.
 *   So, I chose to use the same mapping as I did for LockOwners.
 *   The main concern here is that you can end up with multiple Opens
 *   for the same File Handle, but on different OpenOwners (opens
 *   inherited from parents, grandparents...) and you do not know
 *   which of these the vnodeop close applies to. This is handled by
 *   delaying the Close Op(s) until all of the Opens have been closed.
 *   (It is not yet obvious if this is the correct granularity.)
 * - How the code handles serialization:
 *   - For the ClientId, it uses an exclusive lock while getting its
 *     SetClientId and during recovery. Otherwise, it uses a shared
 *     lock via a reference count.
 *   - For the rest of the data structures, it uses an SMP mutex
 *     (once the nfs client is SMP safe) and doesn't sleep while
 *     manipulating the linked lists.
 *   - The serialization of Open/Close/Lock/LockU falls out in the
 *     "wash", since OpenOwners and LockOwners are both mapped from
 *     Posix pid. In other words, there is only one Posix pid using
 *     any given owner, so that owner is serialized. (If you change
 *     the granularity of the OpenOwner, then code must be added to
 *     serialize Ops on the OpenOwner.)
 * - When to get rid of OpenOwners and LockOwners.
 *   - The function nfscl_cleanup_common() is executed after a process exits.
 *     It goes through the client list looking for all Open and Lock Owners.
 *     When one is found, it is marked "defunct" or, in the case of
 *     an OpenOwner without any Opens, freed.
 *     The renew thread scans for defunct Owners and gets rid of them,
 *     if it can. The LockOwners will also be deleted when the
 *     associated Open is closed.
 *   - If the LockU or Close Op(s) fail during close in a way
 *     that could be recovered upon retry, they are relinked to the
 *     ClientId's defunct open list and retried by the renew thread
 *     until they succeed or an unmount/recovery occurs.
 *     (Since we are done with them, they do not need to be recovered.)
 */
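
/*
 * Illustrative sketch (an exposition aid, not code from this file): the
 * 1-to-1 mapping above amounts to encoding the Posix pid into a
 * fixed-size opaque owner string, conceptually:
 *
 *	u_int8_t own[NFSV4CL_LOCKNAMELEN];
 *	memset(own, 0, sizeof(own));
 *	memcpy(own, &pid, sizeof(pid));
 *
 * (The encoding shown is hypothetical; the real one is produced by
 * nfscl_filllockowner(), used throughout this file for both OpenOwners
 * and LockOwners.)
 */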
#include <fs/nfs/nfsport.h>

extern struct nfsstatsv1 nfsstatsv1;
extern struct nfsreqhead nfsd_reqq;
extern u_int32_t newnfs_false, newnfs_true;
extern int nfscl_debuglevel;
extern int nfscl_enablecallb;
extern int nfs_numnfscbd;

struct nfsclhead nfsclhead;	/* Head of clientid list */
int nfscl_deleghighwater = NFSCLDELEGHIGHWATER;
int nfscl_layouthighwater = NFSCLLAYOUTHIGHWATER;
#endif	/* !APPLEKEXT */
static int nfscl_delegcnt = 0;
static int nfscl_layoutcnt = 0;
static int nfscl_getopen(struct nfsclownerhead *, u_int8_t *, int, u_int8_t *,
    u_int8_t *, u_int32_t, struct nfscllockowner **, struct nfsclopen **);
static void nfscl_clrelease(struct nfsclclient *);
static void nfscl_cleanclient(struct nfsclclient *);
static void nfscl_expireclient(struct nfsclclient *, struct nfsmount *,
    struct ucred *, NFSPROC_T *);
static int nfscl_expireopen(struct nfsclclient *, struct nfsclopen *,
    struct nfsmount *, struct ucred *, NFSPROC_T *);
static void nfscl_recover(struct nfsclclient *, struct ucred *, NFSPROC_T *);
static void nfscl_insertlock(struct nfscllockowner *, struct nfscllock *,
    struct nfscllock *, int);
static int nfscl_updatelock(struct nfscllockowner *, struct nfscllock **,
    struct nfscllock **, int);
static void nfscl_delegreturnall(struct nfsclclient *, NFSPROC_T *);
static u_int32_t nfscl_nextcbident(void);
static mount_t nfscl_getmnt(int, uint8_t *, u_int32_t, struct nfsclclient **);
static struct nfsclclient *nfscl_getclnt(u_int32_t);
static struct nfsclclient *nfscl_getclntsess(uint8_t *);
static struct nfscldeleg *nfscl_finddeleg(struct nfsclclient *, u_int8_t *,
    int);
static void nfscl_retoncloselayout(vnode_t, struct nfsclclient *, uint8_t *,
    int, struct nfsclrecalllayout **);
static void nfscl_reldevinfo_locked(struct nfscldevinfo *);
static struct nfscllayout *nfscl_findlayout(struct nfsclclient *, u_int8_t *,
    int);
static struct nfscldevinfo *nfscl_finddevinfo(struct nfsclclient *, uint8_t *);
static int nfscl_checkconflict(struct nfscllockownerhead *, struct nfscllock *,
    u_int8_t *, struct nfscllock **);
static void nfscl_freealllocks(struct nfscllockownerhead *, int);
static int nfscl_localconflict(struct nfsclclient *, u_int8_t *, int,
    struct nfscllock *, u_int8_t *, struct nfscldeleg *, struct nfscllock **);
static void nfscl_newopen(struct nfsclclient *, struct nfscldeleg *,
    struct nfsclowner **, struct nfsclowner **, struct nfsclopen **,
    struct nfsclopen **, u_int8_t *, u_int8_t *, int, struct ucred *, int *);
static int nfscl_moveopen(vnode_t , struct nfsclclient *,
    struct nfsmount *, struct nfsclopen *, struct nfsclowner *,
    struct nfscldeleg *, struct ucred *, NFSPROC_T *);
static void nfscl_totalrecall(struct nfsclclient *);
static int nfscl_relock(vnode_t , struct nfsclclient *, struct nfsmount *,
    struct nfscllockowner *, struct nfscllock *, struct ucred *, NFSPROC_T *);
static int nfscl_tryopen(struct nfsmount *, vnode_t , u_int8_t *, int,
    u_int8_t *, int, u_int32_t, struct nfsclopen *, u_int8_t *, int,
    struct nfscldeleg **, int, u_int32_t, struct ucred *, NFSPROC_T *);
static int nfscl_trylock(struct nfsmount *, vnode_t , u_int8_t *,
    int, struct nfscllockowner *, int, int, u_int64_t, u_int64_t, short,
    struct ucred *, NFSPROC_T *);
static int nfsrpc_reopen(struct nfsmount *, u_int8_t *, int, u_int32_t,
    struct nfsclopen *, struct nfscldeleg **, struct ucred *, NFSPROC_T *);
static void nfscl_freedeleg(struct nfscldeleghead *, struct nfscldeleg *);
static int nfscl_errmap(struct nfsrv_descript *, u_int32_t);
static void nfscl_cleanup_common(struct nfsclclient *, u_int8_t *);
static int nfscl_recalldeleg(struct nfsclclient *, struct nfsmount *,
    struct nfscldeleg *, vnode_t, struct ucred *, NFSPROC_T *, int);
static void nfscl_freeopenowner(struct nfsclowner *, int);
static void nfscl_cleandeleg(struct nfscldeleg *);
static int nfscl_trydelegreturn(struct nfscldeleg *, struct ucred *,
    struct nfsmount *, NFSPROC_T *);
static void nfscl_emptylockowner(struct nfscllockowner *,
    struct nfscllockownerfhhead *);
static void nfscl_mergeflayouts(struct nfsclflayouthead *,
    struct nfsclflayouthead *);
static int nfscl_layoutrecall(int, struct nfscllayout *, uint32_t, uint64_t,
    uint64_t, uint32_t, uint32_t, uint32_t, char *, struct nfsclrecalllayout *);
static int nfscl_seq(uint32_t, uint32_t);
static void nfscl_layoutreturn(struct nfsmount *, struct nfscllayout *,
    struct ucred *, NFSPROC_T *);
static void nfscl_dolayoutcommit(struct nfsmount *, struct nfscllayout *,
    struct ucred *, NFSPROC_T *);
static short nfscberr_null[] = {
static short nfscberr_getattr[] = {
static short nfscberr_recall[] = {
static short *nfscl_cberrmap[] = {

#define	NETFAMILY(clp) \
    (((clp)->nfsc_flags & NFSCLFLAGS_AFINET6) ? AF_INET6 : AF_INET)
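
/*
 * Example (illustrative): NETFAMILY(clp) evaluates to AF_INET6 for a
 * clientid established over IPv6 (the NFSCLFLAGS_AFINET6 flag is set)
 * and to AF_INET otherwise.
 */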
/*
 * Called for an open operation.
 * If the nfhp argument is NULL, just get an openowner.
 */
nfscl_open(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t amode, int usedeleg,
    struct ucred *cred, NFSPROC_T *p, struct nfsclowner **owpp,
    struct nfsclopen **opp, int *newonep, int *retp, int lockit)
	struct nfsclclient *clp;
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op = NULL, *nop = NULL;
	struct nfscldeleg *dp;
	struct nfsclownerhead *ohp;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];

	/*
	 * Might need one or both of these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	nowp = malloc(sizeof (struct nfsclowner),
	    M_NFSCLOWNER, M_WAITOK);
	nop = malloc(sizeof (struct nfsclopen) +
	    fhlen - 1, M_NFSCLOPEN, M_WAITOK);
	ret = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp);
	free(nowp, M_NFSCLOWNER);
	free(nop, M_NFSCLOPEN);

	/*
	 * Get the Open iff it already exists.
	 * If none found, add the new one or return error, depending upon
	 */
	/* First check the delegation list */
	if (nfhp != NULL && usedeleg) {
		LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
			if (dp->nfsdl_fhlen == fhlen &&
			    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
				if (!(amode & NFSV4OPEN_ACCESSWRITE) ||
				    (dp->nfsdl_flags & NFSCLDL_WRITE))
		nfscl_filllockowner(p->td_proc, own, F_POSIX);
		ohp = &dp->nfsdl_owner;
	/* For NFSv4.1 and this option, use a single open_owner. */
	if (NFSHASONEOPENOWN(VFSTONFS(vnode_mount(vp))))
		nfscl_filllockowner(NULL, own, F_POSIX);
		nfscl_filllockowner(p->td_proc, own, F_POSIX);
		ohp = &clp->nfsc_owner;
	/* Now, search for an openowner */
	LIST_FOREACH(owp, ohp, nfsow_list) {
		if (!NFSBCMP(owp->nfsow_owner, own, NFSV4CL_LOCKNAMELEN))

	/*
	 * Create a new open, as required.
	 */
	nfscl_newopen(clp, dp, &owp, &nowp, &op, &nop, own, nfhp, fhlen,

	/*
	 * Now, check the mode on the open and return the appropriate
	 */
	if (nfhp != NULL && dp != NULL && nop == NULL)
		/* new local open on delegation */
		*retp = NFSCLOPEN_SETCRED;
		*retp = NFSCLOPEN_OK;
	if (op != NULL && (amode & ~(op->nfso_mode))) {
		op->nfso_mode |= amode;
		if (retp != NULL && dp == NULL)
			*retp = NFSCLOPEN_DOOPEN;

	/*
	 * Serialize modifications to the open owner for multiple threads
	 * within the same process using a read/write sleep lock.
	 * For NFSv4.1 and a single OpenOwner, allow concurrent open operations
	 * by acquiring a shared lock.  The close operations still use an
	 * exclusive lock for this case.
	 */
	if (NFSHASONEOPENOWN(VFSTONFS(vnode_mount(vp)))) {
		/*
		 * Get a shared lock on the OpenOwner, but first
		 * wait for any pending exclusive lock, so that the
		 * exclusive locker gets priority.
		 */
		nfsv4_lock(&owp->nfsow_rwlock, 0, NULL,
		    NFSCLSTATEMUTEXPTR, NULL);
		nfsv4_getref(&owp->nfsow_rwlock, NULL,
		    NFSCLSTATEMUTEXPTR, NULL);
		nfscl_lockexcl(&owp->nfsow_rwlock, NFSCLSTATEMUTEXPTR);
	free(nowp, M_NFSCLOWNER);
	free(nop, M_NFSCLOPEN);
/*
 * Create a new open, as required.
 */
nfscl_newopen(struct nfsclclient *clp, struct nfscldeleg *dp,
    struct nfsclowner **owpp, struct nfsclowner **nowpp, struct nfsclopen **opp,
    struct nfsclopen **nopp, u_int8_t *own, u_int8_t *fhp, int fhlen,
    struct ucred *cred, int *newonep)
	struct nfsclowner *owp = *owpp, *nowp;
	struct nfsclopen *op, *nop;

	if (owp == NULL && nowp != NULL) {
		NFSBCOPY(own, nowp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
		LIST_INIT(&nowp->nfsow_open);
		nowp->nfsow_clp = clp;
		nowp->nfsow_seqid = 0;
		nowp->nfsow_defunct = 0;
		nfscl_lockinit(&nowp->nfsow_rwlock);
			nfsstatsv1.cllocalopenowners++;
			LIST_INSERT_HEAD(&dp->nfsdl_owner, nowp, nfsow_list);
			nfsstatsv1.clopenowners++;
			LIST_INSERT_HEAD(&clp->nfsc_owner, nowp, nfsow_list);

	/* If an fhp has been specified, create an Open as well. */
		/* and look for the correct open, based upon FH */
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, fhp, fhlen))
		if (op == NULL && nop != NULL) {
			nop->nfso_opencnt = 0;
			nop->nfso_posixlock = 1;
			nop->nfso_fhlen = fhlen;
			NFSBCOPY(fhp, nop->nfso_fh, fhlen);
			LIST_INIT(&nop->nfso_lock);
			nop->nfso_stateid.seqid = 0;
			nop->nfso_stateid.other[0] = 0;
			nop->nfso_stateid.other[1] = 0;
			nop->nfso_stateid.other[2] = 0;
			KASSERT(cred != NULL, ("%s: cred NULL\n", __func__));
			newnfs_copyincred(cred, &nop->nfso_cred);
				TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
				TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
				dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
				nfsstatsv1.cllocalopens++;
				nfsstatsv1.clopens++;
			LIST_INSERT_HEAD(&owp->nfsow_open, nop, nfso_list);
/*
 * Called to find/add a delegation to a client.
 */
nfscl_deleg(mount_t mp, struct nfsclclient *clp, u_int8_t *nfhp,
    int fhlen, struct ucred *cred, NFSPROC_T *p, struct nfscldeleg **dpp)
	struct nfscldeleg *dp = *dpp, *tdp;

	/*
	 * First, if we have received a Read delegation for a file on a
	 * read/write file system, just return it, because they aren't
	 */
	if (mp != NULL && dp != NULL && !NFSMNT_RDONLY(mp) &&
	    (dp->nfsdl_flags & NFSCLDL_READ)) {
		(void) nfscl_trydelegreturn(dp, cred, VFSTONFS(mp), p);
		free(dp, M_NFSCLDELEG);

	/* Look for the correct deleg, based upon FH */
	tdp = nfscl_finddeleg(clp, nfhp, fhlen);
		return (NFSERR_BADSTATEID);
		TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
		LIST_INSERT_HEAD(NFSCLDELEGHASH(clp, nfhp, fhlen), dp,
		dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
		nfsstatsv1.cldelegates++;
		/*
		 * Delegation already exists. What should we do if a new
		 * one arrives?
		 */
		printf("Deleg already exists!\n");
		free(dp, M_NFSCLDELEG);
/*
 * Find a delegation for this file handle. Return NULL upon failure.
 */
static struct nfscldeleg *
nfscl_finddeleg(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
	struct nfscldeleg *dp;

	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, fhp, fhlen), nfsdl_hash) {
		if (dp->nfsdl_fhlen == fhlen &&
		    !NFSBCMP(dp->nfsdl_fh, fhp, fhlen))
/*
 * Get a stateid for an I/O operation. First, look for an open and iff
 * found, return either a lockowner stateid or the open stateid.
 * If no Open is found, just return error and the special stateid of all zeros.
 */
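
/*
 * Illustrative summary of the search order implemented below: a usable
 * delegation stateid is preferred first; failing that, a matching
 * lockowner's lock stateid (never for a DS); failing that, the open
 * stateid; and if no open at all is found, the all-zeros special
 * stateid remains set and an error is returned.
 */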
nfscl_getstateid(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t mode,
    int fords, struct ucred *cred, NFSPROC_T *p, nfsv4stateid_t *stateidp,
	struct nfsclclient *clp;
	struct nfsclowner *owp;
	struct nfsclopen *op = NULL, *top;
	struct nfscllockowner *lp;
	struct nfscldeleg *dp;
	struct nfsmount *nmp;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];

	/*
	 * Initially, just set the special stateid of all zeros.
	 * (Don't do this for a DS, since the special stateid can't be used.)
	 */
	stateidp->other[0] = 0;
	stateidp->other[1] = 0;
	stateidp->other[2] = 0;
	if (vnode_vtype(vp) != VREG)
	nmp = VFSTONFS(vnode_mount(vp));
	clp = nfscl_findcl(nmp);

	/*
	 * Wait for recovery to complete.
	 */
	while ((clp->nfsc_flags & NFSCLFLAGS_RECVRINPROG))
		(void) nfsmsleep(&clp->nfsc_flags, NFSCLSTATEMUTEXPTR,
		    PZERO, "nfsrecvr", NULL);

	/*
	 * First, look for a delegation.
	 */
	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
		if (dp->nfsdl_fhlen == fhlen &&
		    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
			if (!(mode & NFSV4OPEN_ACCESSWRITE) ||
			    (dp->nfsdl_flags & NFSCLDL_WRITE)) {
				stateidp->seqid = dp->nfsdl_stateid.seqid;
				stateidp->other[0] = dp->nfsdl_stateid.other[0];
				stateidp->other[1] = dp->nfsdl_stateid.other[1];
				stateidp->other[2] = dp->nfsdl_stateid.other[2];
				if (!(np->n_flag & NDELEGRECALL)) {
					TAILQ_REMOVE(&clp->nfsc_deleg, dp,
					TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
					dp->nfsdl_timestamp = NFSD_MONOSEC +
					dp->nfsdl_rwlock.nfslock_usecnt++;
					*lckpp = (void *)&dp->nfsdl_rwlock;

	/*
	 * If p != NULL, we want to search the parentage tree
	 * for a matching OpenOwner and use that.
	 */
	if (NFSHASONEOPENOWN(VFSTONFS(vnode_mount(vp))))
		nfscl_filllockowner(NULL, own, F_POSIX);
		nfscl_filllockowner(p->td_proc, own, F_POSIX);
	error = nfscl_getopen(&clp->nfsc_owner, nfhp, fhlen, own, own,
	if (error == 0 && lp != NULL && fords == 0) {
		/* Don't return a lock stateid for a DS. */
		    lp->nfsl_stateid.seqid;
		    lp->nfsl_stateid.other[0];
		    lp->nfsl_stateid.other[1];
		    lp->nfsl_stateid.other[2];
	/* If not found, just look for any OpenOwner that will work. */
	owp = LIST_FIRST(&clp->nfsc_owner);
	while (!done && owp != NULL) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, nfhp, fhlen)) {
				if (top == NULL && (op->nfso_mode &
				    NFSV4OPEN_ACCESSWRITE) != 0 &&
				    (mode & NFSV4OPEN_ACCESSREAD) != 0)
				if ((mode & op->nfso_mode) == mode) {
		owp = LIST_NEXT(owp, nfsow_list);
	NFSCL_DEBUG(2, "openmode top=%p\n", top);
	if (top == NULL || NFSHASOPENMODE(nmp)) {
	/*
	 * For read aheads or write behinds, use the open cred.
	 * A read ahead or write behind is indicated by p == NULL.
	 */
		newnfs_copycred(&op->nfso_cred, cred);

	/*
	 * No lock stateid, so return the open stateid.
	 */
	stateidp->seqid = op->nfso_stateid.seqid;
	stateidp->other[0] = op->nfso_stateid.other[0];
	stateidp->other[1] = op->nfso_stateid.other[1];
	stateidp->other[2] = op->nfso_stateid.other[2];
/*
 * Search for a matching file, mode and, optionally, lockowner.
 */
nfscl_getopen(struct nfsclownerhead *ohp, u_int8_t *nfhp, int fhlen,
    u_int8_t *openown, u_int8_t *lockown, u_int32_t mode,
    struct nfscllockowner **lpp, struct nfsclopen **opp)
	struct nfsclowner *owp;
	struct nfsclopen *op, *rop, *rop2;
	struct nfscllockowner *lp;

	/*
	 * rop will be set to the open to be returned. There are three
	 * variants of this, all for an open of the correct file:
	 * 1 - A match of lockown.
	 * 2 - A match of the openown, when no lockown match exists.
	 * 3 - A match for any open, if no openown or lockown match exists.
	 * Looking for #2 over #3 probably isn't necessary, but since
	 * RFC3530 is vague w.r.t. the relationship between openowners and
	 * lockowners, I think this is the safer way to go.
	 */
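
/*
 * Worked example (illustrative, not from the original source): suppose
 * open O1 belongs to openowner A and has lockowner L1, while open O2
 * belongs to openowner B with no lockowners. A request with
 * lockown == L1 returns O1 (case 1). A request whose lockown matches
 * nothing but whose openown == B returns O2 (case 2). If neither
 * matches, any open of the correct file is returned (case 3).
 */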
	/* Search the client list */
	owp = LIST_FIRST(ohp);
	while (owp != NULL && keep_looping != 0) {
		/* and look for the correct open */
		op = LIST_FIRST(&owp->nfsow_open);
		while (op != NULL && keep_looping != 0) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, nfhp, fhlen)
			    && (op->nfso_mode & mode) == mode) {
				/* Now look for a matching lockowner. */
				LIST_FOREACH(lp, &op->nfso_lock,
					if (!NFSBCMP(lp->nfsl_owner,
					    NFSV4CL_LOCKNAMELEN)) {
				if (rop == NULL && !NFSBCMP(owp->nfsow_owner,
				    openown, NFSV4CL_LOCKNAMELEN)) {
			op = LIST_NEXT(op, nfso_list);
		owp = LIST_NEXT(owp, nfsow_list);
/*
 * Release use of an open owner. Called when open operations are done
 * with the open owner.
 */
nfscl_ownerrelease(struct nfsmount *nmp, struct nfsclowner *owp,
    __unused int error, __unused int candelete, int unlocked)

	if (NFSHASONEOPENOWN(nmp))
		nfsv4_relref(&owp->nfsow_rwlock);
		nfscl_lockunlock(&owp->nfsow_rwlock);
	nfscl_clrelease(owp->nfsow_clp);

/*
 * Release use of an open structure under an open owner.
 */
nfscl_openrelease(struct nfsmount *nmp, struct nfsclopen *op, int error,
	struct nfsclclient *clp;
	struct nfsclowner *owp;

	if (NFSHASONEOPENOWN(nmp))
		nfsv4_relref(&owp->nfsow_rwlock);
		nfscl_lockunlock(&owp->nfsow_rwlock);
	clp = owp->nfsow_clp;
	if (error && candelete && op->nfso_opencnt == 0)
		nfscl_freeopen(op, 0);
	nfscl_clrelease(clp);
/*
 * Called to get a clientid structure. It will optionally lock the
 * client data structures to do the SetClientId/SetClientId_confirm,
 * but will release that lock and return the clientid with a reference
 * count on it.
 * If the "cred" argument is NULL, a new clientid should not be created.
 * If the "p" argument is NULL, a SetClientID/SetClientIDConfirm cannot
 * be done.
 * The start_renewthread argument tells nfscl_getcl() to start a renew
 * thread if this creates a new clp.
 * It always returns *clpp with a reference count on it, unless returning
 * an error.
 */
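
/*
 * Typical usage (an illustrative sketch based on calls elsewhere in
 * this file, not a new API):
 *
 *	struct nfsclclient *clp;
 *	error = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp);
 *	if (error == 0) {
 *		... examine/modify state under the client state mutex ...
 *		nfscl_clrelease(clp);
 *	}
 */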
nfscl_getcl(struct mount *mp, struct ucred *cred, NFSPROC_T *p,
    int start_renewthread, struct nfsclclient **clpp)
	struct nfsclclient *clp;
	struct nfsclclient *newclp = NULL;
	struct nfsmount *nmp;
	char uuid[HOSTUUIDLEN];
	int igotlock = 0, error, trystalecnt, clidinusedelay, i;

	getcredhostuuid(cred, uuid, sizeof uuid);
	idlen = strlen(uuid);
		idlen += sizeof (u_int64_t);
		idlen += sizeof (u_int64_t) + 16; /* 16 random bytes */
	    sizeof (struct nfsclclient) + idlen - 1, M_NFSCLCLIENT,

	/*
	 * If a forced dismount is already in progress, don't
	 * allocate a new clientid and get out now. For the case where
	 * clp != NULL, this is a harmless optimization.
	 */
	if (NFSCL_FORCEDISM(mp)) {
		free(newclp, M_NFSCLCLIENT);
	if (newclp == NULL) {
		clp->nfsc_idlen = idlen;
		LIST_INIT(&clp->nfsc_owner);
		TAILQ_INIT(&clp->nfsc_deleg);
		TAILQ_INIT(&clp->nfsc_layout);
		LIST_INIT(&clp->nfsc_devinfo);
		for (i = 0; i < NFSCLDELEGHASHSIZE; i++)
			LIST_INIT(&clp->nfsc_deleghash[i]);
		for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++)
			LIST_INIT(&clp->nfsc_layouthash[i]);
		clp->nfsc_flags = NFSCLFLAGS_INITED;
		clp->nfsc_clientidrev = 1;
		clp->nfsc_cbident = nfscl_nextcbident();
		nfscl_fillclid(nmp->nm_clval, uuid, clp->nfsc_id,
		LIST_INSERT_HEAD(&nfsclhead, clp, nfsc_list);
		if (start_renewthread != 0)
			nfscl_start_renewthread(clp);
		free(newclp, M_NFSCLCLIENT);
	while ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0 && !igotlock &&
	    !NFSCL_FORCEDISM(mp))
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR, mp);
	/*
	 * Call nfsv4_lock() with "iwantlock == 0" so that it will
	 * wait for a pending exclusive lock request. This gives the
	 * exclusive lock request priority over this shared lock
	 * An exclusive lock on nfsc_lock is used mainly for server
	 */
	nfsv4_lock(&clp->nfsc_lock, 0, NULL, NFSCLSTATEMUTEXPTR, mp);
	nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
	if (igotlock == 0 && NFSCL_FORCEDISM(mp)) {
		/*
		 * Both nfsv4_lock() and nfsv4_getref() know to check
		 * for NFSCL_FORCEDISM() and return without sleeping to
		 * wait for the exclusive lock to be released, since it
		 * might be held by nfscl_umount() and we need to get out
		 * now for that case and not wait until nfscl_umount()
		 */

	/*
	 * If it needs a clientid, do the setclientid now.
	 */
	if ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0) {
			panic("nfscl_clget");
		if (p == NULL || cred == NULL) {
			nfsv4_unlock(&clp->nfsc_lock, 0);
		/*
		 * If RFC3530 Sec. 14.2.33 is taken literally,
		 * NFSERR_CLIDINUSE will be returned persistently for the
		 * case where a new mount of the same file system is using
		 * a different principal. In practice, NFSERR_CLIDINUSE is
		 * only returned when there is outstanding unexpired state
		 * on the clientid. As such, try for twice the lease
		 * interval, if we know what that is. Otherwise, make a
		 * The case of returning NFSERR_STALECLIENTID is far less
		 * likely, but might occur if there is a significant delay
		 * between doing the SetClientID and SetClientIDConfirm Ops,
		 * such that the server throws away the clientid before
		 * receiving the SetClientIDConfirm.
		 */
		if (clp->nfsc_renew > 0)
			clidinusedelay = NFSCL_LEASE(clp->nfsc_renew) * 2;
			clidinusedelay = 120;
		do {
			error = nfsrpc_setclient(nmp, clp, 0, cred, p);
			if (error == NFSERR_STALECLIENTID ||
			    error == NFSERR_STALEDONTRECOVER ||
			    error == NFSERR_BADSESSION ||
			    error == NFSERR_CLIDINUSE) {
				(void) nfs_catnap(PZERO, error, "nfs_setcl");
		} while (((error == NFSERR_STALECLIENTID ||
		     error == NFSERR_BADSESSION ||
		     error == NFSERR_STALEDONTRECOVER) && --trystalecnt > 0) ||
		    (error == NFSERR_CLIDINUSE && --clidinusedelay > 0));
			nfsv4_unlock(&clp->nfsc_lock, 0);
		clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
	nfsv4_unlock(&clp->nfsc_lock, 1);
/*
 * Get a reference to a clientid and return it, if valid.
 */
APPLESTATIC struct nfsclclient *
nfscl_findcl(struct nfsmount *nmp)
	struct nfsclclient *clp;

	if (clp == NULL || !(clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID))

/*
 * Release the clientid structure. It may be locked or reference counted.
 */
nfscl_clrelease(struct nfsclclient *clp)

	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
		nfsv4_unlock(&clp->nfsc_lock, 0);
		nfsv4_relref(&clp->nfsc_lock);

/*
 * External call for nfscl_clrelease.
 */
nfscl_clientrelease(struct nfsclclient *clp)

	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
		nfsv4_unlock(&clp->nfsc_lock, 0);
		nfsv4_relref(&clp->nfsc_lock);
/*
 * Called when wanting to lock a byte region.
 */
nfscl_getbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
    short type, struct ucred *cred, NFSPROC_T *p, struct nfsclclient *rclp,
    int recovery, void *id, int flags, u_int8_t *rownp, u_int8_t *ropenownp,
    struct nfscllockowner **lpp, int *newonep, int *donelocallyp)
	struct nfscllockowner *lp;
	struct nfsclopen *op;
	struct nfsclclient *clp;
	struct nfscllockowner *nlp;
	struct nfscllock *nlop, *otherlop;
	struct nfscldeleg *dp = NULL, *ldp = NULL;
	struct nfscllockownerhead *lhp = NULL;
	u_int8_t own[NFSV4CL_LOCKNAMELEN], *ownp, openown[NFSV4CL_LOCKNAMELEN];
	int error = 0, ret, donelocally = 0;

	/* For Lock Ops, the open mode doesn't matter, so use 0 to match any. */

	/*
	 * Might need these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	    sizeof (struct nfscllockowner), M_NFSCLLOCKOWNER, M_WAITOK);
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop->nfslo_type = type;
	nlop->nfslo_first = off;
	if (len == NFS64BITSSET) {
		nlop->nfslo_end = NFS64BITSSET;
		nlop->nfslo_end = off + len;
		if (nlop->nfslo_end <= nlop->nfslo_first)
			error = NFSERR_INVAL;
	error = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp);
		free(nlp, M_NFSCLLOCKOWNER);
		free(otherlop, M_NFSCLLOCK);
		free(nlop, M_NFSCLLOCK);
		openownp = ropenownp;
		nfscl_filllockowner(id, own, flags);
		if (NFSHASONEOPENOWN(VFSTONFS(vnode_mount(vp))))
			nfscl_filllockowner(NULL, openown, F_POSIX);
			nfscl_filllockowner(p->td_proc, openown, F_POSIX);

	/*
	 * First, search for a delegation. If one exists for this file,
	 * the lock can be done locally against it, so long as there
	 * isn't a local lock conflict.
	 */
	ldp = dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
	    np->n_fhp->nfh_len);
	/* Just sanity check for correct type of delegation */
	if (dp != NULL && ((dp->nfsdl_flags &
	    (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) != 0 ||
	    (dp->nfsdl_flags & NFSCLDL_WRITE) == 0)))
		/* Now, find an open and maybe a lockowner. */
		ret = nfscl_getopen(&dp->nfsdl_owner, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len, openownp, ownp, mode, NULL, &op);
			ret = nfscl_getopen(&clp->nfsc_owner,
			    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
			    ownp, mode, NULL, &op);
			lhp = &dp->nfsdl_lock;
			TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
			TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
			dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
		/*
		 * Get the related Open and maybe lockowner.
		 */
		error = nfscl_getopen(&clp->nfsc_owner,
		    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
		    ownp, mode, &lp, &op);
			lhp = &op->nfso_lock;
	if (!error && !recovery)
		error = nfscl_localconflict(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len, nlop, ownp, ldp, NULL);
		nfscl_clrelease(clp);
		free(nlp, M_NFSCLLOCKOWNER);
		free(otherlop, M_NFSCLLOCK);
		free(nlop, M_NFSCLLOCK);

	/*
	 * Ok, see if a lockowner exists and create one, as required.
	 */
	LIST_FOREACH(lp, lhp, nfsl_list) {
		if (!NFSBCMP(lp->nfsl_owner, ownp, NFSV4CL_LOCKNAMELEN))
		NFSBCOPY(ownp, nlp->nfsl_owner, NFSV4CL_LOCKNAMELEN);
			NFSBCOPY(ropenownp, nlp->nfsl_openowner,
			    NFSV4CL_LOCKNAMELEN);
			NFSBCOPY(op->nfso_own->nfsow_owner, nlp->nfsl_openowner,
			    NFSV4CL_LOCKNAMELEN);
		nlp->nfsl_seqid = 0;
		nlp->nfsl_lockflags = flags;
		nlp->nfsl_inprog = NULL;
		nfscl_lockinit(&nlp->nfsl_rwlock);
		LIST_INIT(&nlp->nfsl_lock);
			nlp->nfsl_open = NULL;
			nfsstatsv1.cllocallockowners++;
			nlp->nfsl_open = op;
			nfsstatsv1.cllockowners++;
		LIST_INSERT_HEAD(lhp, nlp, nfsl_list);

	/*
	 * Now, update the byte ranges for locks.
	 */
	ret = nfscl_updatelock(lp, &nlop, &otherlop, donelocally);
		nfscl_clrelease(clp);
		/*
		 * Serialize modifications on the lock owner for multiple
		 * threads of the same process using a read/write lock.
		 */
		nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
		free(nlp, M_NFSCLLOCKOWNER);
		free(nlop, M_NFSCLLOCK);
		free(otherlop, M_NFSCLLOCK);
/*
 * Called to unlock a byte range, for LockU.
 */
nfscl_relbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
    __unused struct ucred *cred, NFSPROC_T *p, int callcnt,
    struct nfsclclient *clp, void *id, int flags,
    struct nfscllockowner **lpp, int *dorpcp)
	struct nfscllockowner *lp;
	struct nfsclowner *owp;
	struct nfsclopen *op;
	struct nfscllock *nlop, *other_lop = NULL;
	struct nfscldeleg *dp;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];

	/*
	 * Might need these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop->nfslo_type = F_UNLCK;
	nlop->nfslo_first = off;
	if (len == NFS64BITSSET) {
		nlop->nfslo_end = NFS64BITSSET;
		nlop->nfslo_end = off + len;
		if (nlop->nfslo_end <= nlop->nfslo_first) {
			free(nlop, M_NFSCLLOCK);
			return (NFSERR_INVAL);
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nfscl_filllockowner(id, own, flags);
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
	    np->n_fhp->nfh_len);

	/*
	 * First, unlock any local regions on a delegation.
	 */
		/* Look for this lockowner. */
		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, own,
			    NFSV4CL_LOCKNAMELEN))
			/* Use other_lop, so nlop is still available */
			(void)nfscl_updatelock(lp, &other_lop, NULL, 1);

	/*
	 * Now, find a matching open/lockowner that hasn't already been done,
	 * as marked by nfsl_inprog.
	 */
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == np->n_fhp->nfh_len &&
			    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
				LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
					if (lp->nfsl_inprog == NULL &&
					    !NFSBCMP(lp->nfsl_owner, own,
					    NFSV4CL_LOCKNAMELEN)) {
		ret = nfscl_updatelock(lp, &nlop, NULL, 0);
		/*
		 * Serialize modifications on the lock owner for multiple
		 * threads of the same process using a read/write lock.
		 */
		lp->nfsl_inprog = p;
		nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
		free(nlop, M_NFSCLLOCK);
		free(other_lop, M_NFSCLLOCK);
/*
 * Release all lockowners marked in progress for this process and file.
 */
nfscl_releasealllocks(struct nfsclclient *clp, vnode_t vp, NFSPROC_T *p,
    void *id, int flags)
	struct nfsclowner *owp;
	struct nfsclopen *op;
	struct nfscllockowner *lp;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];

	nfscl_filllockowner(id, own, flags);
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == np->n_fhp->nfh_len &&
			    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
				LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
					if (lp->nfsl_inprog == p &&
					    !NFSBCMP(lp->nfsl_owner, own,
					    NFSV4CL_LOCKNAMELEN)) {
						lp->nfsl_inprog = NULL;
						nfscl_lockunlock(&lp->nfsl_rwlock);
	nfscl_clrelease(clp);
/*
 * Called to find out if any bytes within the byte range specified are
 * write locked by the calling process. Used to determine if flushing
 * is required before a LockU.
 * If in doubt, return 1, so the flush will occur.
 */
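
/*
 * Illustrative restatement of the range test used below: a recorded
 * lock [nfslo_first, nfslo_end) overlaps the queried range [off, end)
 * only when nfslo_first < end && nfslo_end > off; ranges failing
 * either check are skipped.
 */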
nfscl_checkwritelocked(vnode_t vp, struct flock *fl,
    struct ucred *cred, NFSPROC_T *p, void *id, int flags)
	struct nfsclowner *owp;
	struct nfscllockowner *lp;
	struct nfsclopen *op;
	struct nfsclclient *clp;
	struct nfscllock *lop;
	struct nfscldeleg *dp;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];

	switch (fl->l_whence) {
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		off = np->n_size + fl->l_start;
	if (fl->l_len != 0) {
		end = off + fl->l_len;
	error = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp);
	nfscl_filllockowner(id, own, flags);

	/*
	 * First check the delegation locks.
	 */
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, own,
			    NFSV4CL_LOCKNAMELEN))
		LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
			if (lop->nfslo_first >= end)
			if (lop->nfslo_end <= off)
			if (lop->nfslo_type == F_WRLCK) {
				nfscl_clrelease(clp);

	/*
	 * Now, check state against the server.
	 */
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == np->n_fhp->nfh_len &&
			    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
				LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
					if (!NFSBCMP(lp->nfsl_owner, own,
					    NFSV4CL_LOCKNAMELEN))
				LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
					if (lop->nfslo_first >= end)
					if (lop->nfslo_end <= off)
					if (lop->nfslo_type == F_WRLCK) {
						nfscl_clrelease(clp);
	nfscl_clrelease(clp);
/*
 * Release a byte range lock owner structure.
 */
nfscl_lockrelease(struct nfscllockowner *lp, int error, int candelete)
	struct nfsclclient *clp;

	clp = lp->nfsl_open->nfso_own->nfsow_clp;
	if (error != 0 && candelete &&
	    (lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED) == 0)
		nfscl_freelockowner(lp, 0);
		nfscl_lockunlock(&lp->nfsl_rwlock);
	nfscl_clrelease(clp);

/*
 * Free up an open structure and any associated byte range lock structures.
 */
nfscl_freeopen(struct nfsclopen *op, int local)

	LIST_REMOVE(op, nfso_list);
	nfscl_freealllocks(&op->nfso_lock, local);
	free(op, M_NFSCLOPEN);
		nfsstatsv1.cllocalopens--;
		nfsstatsv1.clopens--;

/*
 * Free up all lock owners and associated locks.
 */
nfscl_freealllocks(struct nfscllockownerhead *lhp, int local)
	struct nfscllockowner *lp, *nlp;

	LIST_FOREACH_SAFE(lp, lhp, nfsl_list, nlp) {
		if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
		nfscl_freelockowner(lp, local);
/*
 * Called for an Open when NFSERR_EXPIRED is received from the server.
 * If there are no byte range locks nor a Share Deny lost, try to do a
 * fresh Open. Otherwise, free the open.
 */
nfscl_expireopen(struct nfsclclient *clp, struct nfsclopen *op,
    struct nfsmount *nmp, struct ucred *cred, NFSPROC_T *p)
	struct nfscllockowner *lp;
	struct nfscldeleg *dp;
	int mustdelete = 0, error;

	/*
	 * Look for any byte range lock(s).
	 */
	LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
		if (!LIST_EMPTY(&lp->nfsl_lock)) {

	/*
	 * If no byte range lock(s) nor a Share deny, try to re-open.
	 */
	if (!mustdelete && (op->nfso_mode & NFSLCK_DENYBITS) == 0) {
		newnfs_copycred(&op->nfso_cred, cred);
		error = nfsrpc_reopen(nmp, op->nfso_fh,
		    op->nfso_fhlen, op->nfso_mode, op, &dp, cred, p);
			free(dp, M_NFSCLDELEG);
			nfscl_deleg(nmp->nm_mountp, clp, op->nfso_fh,
			    op->nfso_fhlen, cred, p, &dp);

	/*
	 * If a byte range lock or Share deny or couldn't re-open, free it.
	 */
		nfscl_freeopen(op, 0);
	return (mustdelete);

/*
 * Free up an open owner structure.
 */
nfscl_freeopenowner(struct nfsclowner *owp, int local)

	LIST_REMOVE(owp, nfsow_list);
	free(owp, M_NFSCLOWNER);
		nfsstatsv1.cllocalopenowners--;
		nfsstatsv1.clopenowners--;

/*
 * Free up a byte range lock owner structure.
 */
nfscl_freelockowner(struct nfscllockowner *lp, int local)
	struct nfscllock *lop, *nlop;

	LIST_REMOVE(lp, nfsl_list);
	LIST_FOREACH_SAFE(lop, &lp->nfsl_lock, nfslo_list, nlop) {
		nfscl_freelock(lop, local);
	free(lp, M_NFSCLLOCKOWNER);
		nfsstatsv1.cllocallockowners--;
		nfsstatsv1.cllockowners--;

/*
 * Free up a byte range lock structure.
 */
nfscl_freelock(struct nfscllock *lop, int local)

	LIST_REMOVE(lop, nfslo_list);
	free(lop, M_NFSCLLOCK);
		nfsstatsv1.cllocallocks--;
		nfsstatsv1.cllocks--;
/*
 * Clean out the state related to a delegation.
 */
nfscl_cleandeleg(struct nfscldeleg *dp)
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op;

	LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
		op = LIST_FIRST(&owp->nfsow_open);
			if (LIST_NEXT(op, nfso_list) != NULL)
				panic("nfscleandel");
			nfscl_freeopen(op, 1);
		nfscl_freeopenowner(owp, 1);
	nfscl_freealllocks(&dp->nfsdl_lock, 1);

/*
 * Free a delegation.
 */
nfscl_freedeleg(struct nfscldeleghead *hdp, struct nfscldeleg *dp)

	TAILQ_REMOVE(hdp, dp, nfsdl_list);
	LIST_REMOVE(dp, nfsdl_hash);
	free(dp, M_NFSCLDELEG);
	nfsstatsv1.cldelegates--;

/*
 * Free up all state related to this client structure.
 */
nfscl_cleanclient(struct nfsclclient *clp)
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op, *nop;
	struct nfscllayout *lyp, *nlyp;
	struct nfscldevinfo *dip, *ndip;

	TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp)
		nfscl_freelayout(lyp);

	LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip)
		nfscl_freedevinfo(dip);

	/* Now, all the OpenOwners, etc. */
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
			nfscl_freeopen(op, 0);
		nfscl_freeopenowner(owp, 0);
/*
 * Called when an NFSERR_EXPIRED is received from the server.
 */
nfscl_expireclient(struct nfsclclient *clp, struct nfsmount *nmp,
    struct ucred *cred, NFSPROC_T *p)
	struct nfsclowner *owp, *nowp, *towp;
	struct nfsclopen *op, *nop, *top;
	struct nfscldeleg *dp, *ndp;
	int ret, printed = 0;

	/*
	 * First, merge locally issued Opens into the list for the server.
	 */
	dp = TAILQ_FIRST(&clp->nfsc_deleg);
	while (dp != NULL) {
		ndp = TAILQ_NEXT(dp, nfsdl_list);
		owp = LIST_FIRST(&dp->nfsdl_owner);
		while (owp != NULL) {
			nowp = LIST_NEXT(owp, nfsow_list);
			op = LIST_FIRST(&owp->nfsow_open);
				if (LIST_NEXT(op, nfso_list) != NULL)
				LIST_FOREACH(towp, &clp->nfsc_owner, nfsow_list) {
					if (!NFSBCMP(towp->nfsow_owner, owp->nfsow_owner,
					    NFSV4CL_LOCKNAMELEN))
					/* Merge opens in */
					LIST_FOREACH(top, &towp->nfsow_open, nfso_list) {
						if (top->nfso_fhlen == op->nfso_fhlen &&
						    !NFSBCMP(top->nfso_fh, op->nfso_fh,
							top->nfso_mode |= op->nfso_mode;
							top->nfso_opencnt += op->nfso_opencnt;
					/* Just add the open to the owner list */
					LIST_REMOVE(op, nfso_list);
					op->nfso_own = towp;
					LIST_INSERT_HEAD(&towp->nfsow_open, op, nfso_list);
					nfsstatsv1.cllocalopens--;
					nfsstatsv1.clopens++;
				/* Just add the openowner to the client list */
				LIST_REMOVE(owp, nfsow_list);
				owp->nfsow_clp = clp;
				LIST_INSERT_HEAD(&clp->nfsc_owner, owp, nfsow_list);
				nfsstatsv1.cllocalopenowners--;
				nfsstatsv1.clopenowners++;
				nfsstatsv1.cllocalopens--;
				nfsstatsv1.clopens++;
		if (!printed && !LIST_EMPTY(&dp->nfsdl_lock)) {
			printf("nfsv4 expired locks lost\n");
		nfscl_cleandeleg(dp);
		nfscl_freedeleg(&clp->nfsc_deleg, dp);
	if (!TAILQ_EMPTY(&clp->nfsc_deleg))

	/*
	 * Now, try and reopen against the server.
	 */
	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		owp->nfsow_seqid = 0;
		LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
			ret = nfscl_expireopen(clp, op, nmp, cred, p);
			if (ret && !printed) {
				printf("nfsv4 expired locks lost\n");
			if (LIST_EMPTY(&owp->nfsow_open))
				nfscl_freeopenowner(owp, 0);
/*
 * This function must be called after the process represented by "own" has
 * exited. Must be called with CLSTATE lock held.
 */
nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own)
	struct nfsclowner *owp, *nowp;
	struct nfscllockowner *lp, *nlp;
	struct nfscldeleg *dp;

	/* First, get rid of local locks on delegations. */
	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
		LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
			if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
				if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
				nfscl_freelockowner(lp, 1);
	owp = LIST_FIRST(&clp->nfsc_owner);
	while (owp != NULL) {
		nowp = LIST_NEXT(owp, nfsow_list);
		if (!NFSBCMP(owp->nfsow_owner, own,
		    NFSV4CL_LOCKNAMELEN)) {
			/*
			 * If there are children that haven't closed the
			 * file descriptors yet, the opens will still be
			 * here. For that case, let the renew thread clear
			 * out the OpenOwner later.
			 */
			if (LIST_EMPTY(&owp->nfsow_open))
				nfscl_freeopenowner(owp, 0);
				owp->nfsow_defunct = 1;
/*
 * Find open/lock owners for processes that have exited.
 */
nfscl_cleanupkext(struct nfsclclient *clp, struct nfscllockownerfhhead *lhp)
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op;
	struct nfscllockowner *lp, *nlp;
	struct nfscldeleg *dp;

	LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			LIST_FOREACH_SAFE(lp, &op->nfso_lock, nfsl_list, nlp) {
				if (LIST_EMPTY(&lp->nfsl_lock))
					nfscl_emptylockowner(lp, lhp);
		if (nfscl_procdoesntexist(owp->nfsow_owner))
			nfscl_cleanup_common(clp, owp->nfsow_owner);

	/*
	 * For the single open_owner case, these lock owners need to be
	 * checked to see if they still exist separately.
	 * This is because nfscl_procdoesntexist() never returns true for
	 * the single open_owner so that the above doesn't ever call
	 * nfscl_cleanup_common().
	 */
	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
		LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
			if (nfscl_procdoesntexist(lp->nfsl_owner))
				nfscl_cleanup_common(clp, lp->nfsl_owner);
	NFSPROCLISTUNLOCK();
/*
 * Take the empty lock owner and move it to the local lhp list if the
 * associated process no longer exists.
 */
nfscl_emptylockowner(struct nfscllockowner *lp,
    struct nfscllockownerfhhead *lhp)
	struct nfscllockownerfh *lfhp, *mylfhp;
	struct nfscllockowner *nlp;

	/* If not a Posix lock owner, just return. */
	if ((lp->nfsl_lockflags & F_POSIX) == 0)

	/*
	 * First, search to see if this lock owner is already in the list.
	 * If it is, then the associated process no longer exists.
	 */
	SLIST_FOREACH(lfhp, lhp, nfslfh_list) {
		if (lfhp->nfslfh_len == lp->nfsl_open->nfso_fhlen &&
		    !NFSBCMP(lfhp->nfslfh_fh, lp->nfsl_open->nfso_fh,
			LIST_FOREACH(nlp, &lfhp->nfslfh_lock, nfsl_list)
				if (!NFSBCMP(nlp->nfsl_owner, lp->nfsl_owner,
				    NFSV4CL_LOCKNAMELEN))

	/* If not found, check if process still exists. */
	if (fnd_it == 0 && nfscl_procdoesntexist(lp->nfsl_owner) == 0)

	/* Move the lock owner over to the local list. */
	if (mylfhp == NULL) {
		mylfhp = malloc(sizeof(struct nfscllockownerfh), M_TEMP,
		mylfhp->nfslfh_len = lp->nfsl_open->nfso_fhlen;
		NFSBCOPY(lp->nfsl_open->nfso_fh, mylfhp->nfslfh_fh,
		    mylfhp->nfslfh_len);
		LIST_INIT(&mylfhp->nfslfh_lock);
		SLIST_INSERT_HEAD(lhp, mylfhp, nfslfh_list);
	LIST_REMOVE(lp, nfsl_list);
	LIST_INSERT_HEAD(&mylfhp->nfslfh_lock, lp, nfsl_list);
static int	fake_global;	/* Used to force visibility of MNTK_UNMOUNTF */
/*
 * Called from nfs umount to free up the clientid.
 */
nfscl_umount(struct nfsmount *nmp, NFSPROC_T *p)
	struct nfsclclient *clp;

	/*
	 * For the case that matters, this is the thread that set
	 * MNTK_UNMOUNTF, so it will see it set. The code that follows is
	 * done to ensure that any thread executing nfscl_getcl() after
	 * this time, will see MNTK_UNMOUNTF set. nfscl_getcl() uses the
	 * mutex for NFSLOCKCLSTATE(), so it is "m" for the following
	 * explanation, courtesy of Alan Cox.
	 * What follows is a snippet from Alan Cox's email at:
	 * http://docs.FreeBSD.org/cgi/
	 * mid.cgi?BANLkTikR3d65zPHo9==08ZfJ2vmqZucEvw
	 *
	 * 1. Set MNTK_UNMOUNTF
	 * 2. Acquire a standard FreeBSD mutex "m".
	 * 3. Update some data structures.
	 * 4. Release mutex "m".
	 *
	 * Then, other threads that acquire "m" after step 4 has occurred will
	 * see MNTK_UNMOUNTF as set. But, other threads that beat thread X to
	 * step 2 may or may not see MNTK_UNMOUNTF as set.
	 */
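
/*
 * Minimal sketch of that pattern (illustrative only; the direct flag
 * assignment is hypothetical and ignores the usual mount interlock):
 *
 *	mp->mnt_kern_flag |= MNTK_UNMOUNTF;	(step 1)
 *	mtx_lock(&m);				(step 2)
 *	fake_global++;				(step 3)
 *	mtx_unlock(&m);				(step 4)
 *
 * The mutex's release/acquire memory ordering guarantees that any
 * thread acquiring "m" after step 4 also observes the step 1 store.
 */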
	if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
	if ((clp->nfsc_flags & NFSCLFLAGS_INITED) == 0)
		panic("nfscl umount");

	/*
	 * First, handshake with the nfscl renew thread, to terminate
	 */
	clp->nfsc_flags |= NFSCLFLAGS_UMOUNT;
	while (clp->nfsc_flags & NFSCLFLAGS_HASTHREAD)
		(void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT,

	/*
	 * Now, get the exclusive lock on the client state, so
	 * that no uses of the state are still in progress.
	 */
	do {
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR, NULL);
	} while (!igotlock);

	/*
	 * Free up all the state. It will expire on the server, but
	 * maybe we should do a SetClientId/SetClientIdConfirm so
	 * the server throws it away?
	 */
	LIST_REMOVE(clp, nfsc_list);
	nfscl_delegreturnall(clp, p);
	cred = newnfs_getcred();
	if (NFSHASNFSV4N(nmp)) {
		(void)nfsrpc_destroysession(nmp, clp, cred, p);
		(void)nfsrpc_destroyclient(nmp, clp, cred, p);
		(void)nfsrpc_setclient(nmp, clp, 0, cred, p);
	nfscl_cleanclient(clp);
	free(clp, M_NFSCLCLIENT);
/*
 * This function is called when a server replies with NFSERR_STALECLIENTID,
 * NFSERR_STALESTATEID or NFSERR_BADSESSION. It traverses the clientid lists,
 * doing Opens and Locks with reclaim. If these fail, it deletes the
 * corresponding state.
 */
nfscl_recover(struct nfsclclient *clp, struct ucred *cred, NFSPROC_T *p)
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op, *nop;
	struct nfscllockowner *lp, *nlp;
	struct nfscllock *lop, *nlop;
	struct nfscldeleg *dp, *ndp, *tdp;
	struct nfsmount *nmp;
	struct ucred *tcred;
	struct nfsclopenhead extra_open;
	struct nfscldeleghead extra_deleg;
	u_int32_t delegtype = NFSV4OPEN_DELEGATEWRITE, mode;
	int i, igotlock = 0, error, trycnt, firstlock;
	struct nfscllayout *lyp, *nlyp;

	/*
	 * First, lock the client structure, so everyone else will
	 * block when trying to use state.
	 */
	clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG;
	do {
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR, NULL);
	} while (!igotlock);

	nmp = clp->nfsc_nmp;
		panic("nfscl recover");

	/*
	 * For now, just get rid of all layouts. There may be a need
	 * to do LayoutCommit Ops with reclaim == true later.
	 */
	TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp)
		nfscl_freelayout(lyp);
	TAILQ_INIT(&clp->nfsc_layout);
	for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++)
		LIST_INIT(&clp->nfsc_layouthash[i]);

	do {
		error = nfsrpc_setclient(nmp, clp, 1, cred, p);
	} while ((error == NFSERR_STALECLIENTID ||
	    error == NFSERR_BADSESSION ||
	    error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
		clp->nfsc_flags &= ~(NFSCLFLAGS_RECOVER |
		    NFSCLFLAGS_RECVRINPROG);
		wakeup(&clp->nfsc_flags);
		nfsv4_unlock(&clp->nfsc_lock, 0);
	clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
	clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;

	/*
	 * Mark requests already queued on the server, so that they don't
	 * initiate another recovery cycle. Any requests already in the
	 * queue that handle state information will have the old stale
	 * clientid/stateid and will get a NFSERR_STALESTATEID,
	 * NFSERR_STALECLIENTID or NFSERR_BADSESSION reply from the server.
	 * This will be translated to NFSERR_STALEDONTRECOVER when
	 * R_DONTRECOVER is set.
	 */
	TAILQ_FOREACH(rep, &nfsd_reqq, r_chain) {
		if (rep->r_nmp == nmp)
			rep->r_flags |= R_DONTRECOVER;

	/*
	 * Now, mark all delegations "need reclaim".
	 */
	TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list)
		dp->nfsdl_flags |= NFSCLDL_NEEDRECLAIM;

	TAILQ_INIT(&extra_deleg);
	LIST_INIT(&extra_open);
	/*
	 * Now traverse the state lists, doing Open and Lock Reclaims.
	 */
	tcred = newnfs_getcred();
	owp = LIST_FIRST(&clp->nfsc_owner);
	while (owp != NULL) {
		nowp = LIST_NEXT(owp, nfsow_list);
		owp->nfsow_seqid = 0;
		op = LIST_FIRST(&owp->nfsow_open);
		while (op != NULL) {
			nop = LIST_NEXT(op, nfso_list);
			if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) {
				/* Search for a delegation to reclaim with the open */
				TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
					if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
					if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
						mode = NFSV4OPEN_ACCESSWRITE;
						delegtype = NFSV4OPEN_DELEGATEWRITE;
						mode = NFSV4OPEN_ACCESSREAD;
						delegtype = NFSV4OPEN_DELEGATEREAD;
					if ((op->nfso_mode & mode) == mode &&
					    op->nfso_fhlen == dp->nfsdl_fhlen &&
					    !NFSBCMP(op->nfso_fh, dp->nfsdl_fh, op->nfso_fhlen))
					delegtype = NFSV4OPEN_DELEGATENONE;
				newnfs_copycred(&op->nfso_cred, tcred);
				error = nfscl_tryopen(nmp, NULL, op->nfso_fh,
				    op->nfso_fhlen, op->nfso_fh, op->nfso_fhlen,
				    op->nfso_mode, op, NULL, 0, &ndp, 1, delegtype,
				/* Handle any replied delegation */
				if (ndp != NULL && ((ndp->nfsdl_flags & NFSCLDL_WRITE)
				    || NFSMNT_RDONLY(nmp->nm_mountp))) {
					if ((ndp->nfsdl_flags & NFSCLDL_WRITE))
						mode = NFSV4OPEN_ACCESSWRITE;
						mode = NFSV4OPEN_ACCESSREAD;
					TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
						if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
						if ((op->nfso_mode & mode) == mode &&
						    op->nfso_fhlen == dp->nfsdl_fhlen &&
						    !NFSBCMP(op->nfso_fh, dp->nfsdl_fh,
							dp->nfsdl_stateid = ndp->nfsdl_stateid;
							dp->nfsdl_sizelimit = ndp->nfsdl_sizelimit;
							dp->nfsdl_ace = ndp->nfsdl_ace;
							dp->nfsdl_change = ndp->nfsdl_change;
							dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
							if ((ndp->nfsdl_flags & NFSCLDL_RECALL))
								dp->nfsdl_flags |= NFSCLDL_RECALL;
							free(ndp, M_NFSCLDELEG);
					TAILQ_INSERT_HEAD(&extra_deleg, ndp, nfsdl_list);
				/* and reclaim all byte range locks */
				lp = LIST_FIRST(&op->nfso_lock);
				while (lp != NULL) {
					nlp = LIST_NEXT(lp, nfsl_list);
					lop = LIST_FIRST(&lp->nfsl_lock);
					while (lop != NULL) {
						nlop = LIST_NEXT(lop, nfslo_list);
						if (lop->nfslo_end == NFS64BITSSET)
							len = lop->nfslo_end - lop->nfslo_first;
						error = nfscl_trylock(nmp, NULL,
						    op->nfso_fh, op->nfso_fhlen, lp,
						    firstlock, 1, lop->nfslo_first, len,
						    lop->nfslo_type, tcred, p);
							nfscl_freelock(lop, 0);
					/* If no locks, but a lockowner, just delete it. */
					if (LIST_EMPTY(&lp->nfsl_lock))
						nfscl_freelockowner(lp, 0);
			if (error != 0 && error != NFSERR_BADSESSION)
				nfscl_freeopen(op, 0);
	/*
	 * Now, try and get any delegations not yet reclaimed by cobbling
	 * together an appropriate open.
	 */
	dp = TAILQ_FIRST(&clp->nfsc_deleg);
	while (dp != NULL) {
		ndp = TAILQ_NEXT(dp, nfsdl_list);
		if ((dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) {
			    sizeof (struct nfsclowner), M_NFSCLOWNER, M_WAITOK);
			/*
			 * Name must be as long as the largest possible
			 * NFSV4CL_LOCKNAMELEN (12 for now).
			 */
			NFSBCOPY("RECLAIMDELEG", nowp->nfsow_owner,
			    NFSV4CL_LOCKNAMELEN);
			LIST_INIT(&nowp->nfsow_open);
			nowp->nfsow_clp = clp;
			nowp->nfsow_seqid = 0;
			nowp->nfsow_defunct = 0;
			nfscl_lockinit(&nowp->nfsow_rwlock);
			if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) {
				nop = malloc(sizeof (struct nfsclopen) +
				    dp->nfsdl_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
				nop->nfso_own = nowp;
				if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
					nop->nfso_mode = NFSV4OPEN_ACCESSWRITE;
					delegtype = NFSV4OPEN_DELEGATEWRITE;
					nop->nfso_mode = NFSV4OPEN_ACCESSREAD;
					delegtype = NFSV4OPEN_DELEGATEREAD;
				nop->nfso_opencnt = 0;
				nop->nfso_posixlock = 1;
				nop->nfso_fhlen = dp->nfsdl_fhlen;
				NFSBCOPY(dp->nfsdl_fh, nop->nfso_fh, dp->nfsdl_fhlen);
				LIST_INIT(&nop->nfso_lock);
				nop->nfso_stateid.seqid = 0;
				nop->nfso_stateid.other[0] = 0;
				nop->nfso_stateid.other[1] = 0;
				nop->nfso_stateid.other[2] = 0;
				newnfs_copycred(&dp->nfsdl_cred, tcred);
				newnfs_copyincred(tcred, &nop->nfso_cred);
				error = nfscl_tryopen(nmp, NULL, nop->nfso_fh,
				    nop->nfso_fhlen, nop->nfso_fh, nop->nfso_fhlen,
				    nop->nfso_mode, nop, NULL, 0, &tdp, 1,
				    delegtype, tcred, p);
					if ((tdp->nfsdl_flags & NFSCLDL_WRITE))
						mode = NFSV4OPEN_ACCESSWRITE;
						mode = NFSV4OPEN_ACCESSREAD;
					if ((nop->nfso_mode & mode) == mode &&
					    nop->nfso_fhlen == tdp->nfsdl_fhlen &&
					    !NFSBCMP(nop->nfso_fh, tdp->nfsdl_fh,
						dp->nfsdl_stateid = tdp->nfsdl_stateid;
						dp->nfsdl_sizelimit = tdp->nfsdl_sizelimit;
						dp->nfsdl_ace = tdp->nfsdl_ace;
						dp->nfsdl_change = tdp->nfsdl_change;
						dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
						if ((tdp->nfsdl_flags & NFSCLDL_RECALL))
							dp->nfsdl_flags |= NFSCLDL_RECALL;
						free(tdp, M_NFSCLDELEG);
						TAILQ_INSERT_HEAD(&extra_deleg, tdp, nfsdl_list);
				free(nop, M_NFSCLOPEN);
				/*
				 * Couldn't reclaim it, so throw the state
				 */
				nfscl_cleandeleg(dp);
				nfscl_freedeleg(&clp->nfsc_deleg, dp);
				LIST_INSERT_HEAD(&extra_open, nop, nfso_list);

	/*
	 * Now, get rid of extra Opens and Delegations.
	 */
	LIST_FOREACH_SAFE(op, &extra_open, nfso_list, nop) {
		do {
			newnfs_copycred(&op->nfso_cred, tcred);
			error = nfscl_tryclose(op, tcred, nmp, p);
			if (error == NFSERR_GRACE)
				(void) nfs_catnap(PZERO, error, "nfsexcls");
		} while (error == NFSERR_GRACE);
		LIST_REMOVE(op, nfso_list);
		free(op, M_NFSCLOPEN);
	free(nowp, M_NFSCLOWNER);
	TAILQ_FOREACH_SAFE(dp, &extra_deleg, nfsdl_list, ndp) {
		do {
			newnfs_copycred(&dp->nfsdl_cred, tcred);
			error = nfscl_trydelegreturn(dp, tcred, nmp, p);
			if (error == NFSERR_GRACE)
				(void) nfs_catnap(PZERO, error, "nfsexdlg");
		} while (error == NFSERR_GRACE);
		TAILQ_REMOVE(&extra_deleg, dp, nfsdl_list);
		free(dp, M_NFSCLDELEG);

	/* For NFSv4.1 or later, do a RECLAIM_COMPLETE. */
	if (NFSHASNFSV4N(nmp))
		(void)nfsrpc_reclaimcomplete(nmp, cred, p);

	clp->nfsc_flags &= ~NFSCLFLAGS_RECVRINPROG;
	wakeup(&clp->nfsc_flags);
	nfsv4_unlock(&clp->nfsc_lock, 0);
/*
 * This function is called when a server replies with NFSERR_EXPIRED.
 * It deletes all state for the client and does a fresh SetClientId/confirm.
 * XXX Someday it should post a signal to the process(es) that hold the
 * state, so they know that lock state has been lost.
 */
nfscl_hasexpired(struct nfsclclient *clp, u_int32_t clidrev, NFSPROC_T *p)
	struct nfsmount *nmp;
	int igotlock = 0, error, trycnt;

	/*
	 * If the clientid has gone away or a new SetClientid has already
	 * been done, just return ok.
	 */
	if (clp == NULL || clidrev != clp->nfsc_clientidrev)

	/*
	 * First, lock the client structure, so everyone else will
	 * block when trying to use state. Also, use NFSCLFLAGS_EXPIREIT so
	 * that only one thread does the work.
	 */
	clp->nfsc_flags |= NFSCLFLAGS_EXPIREIT;
	do {
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR, NULL);
	} while (!igotlock && (clp->nfsc_flags & NFSCLFLAGS_EXPIREIT));
	if ((clp->nfsc_flags & NFSCLFLAGS_EXPIREIT) == 0) {
		nfsv4_unlock(&clp->nfsc_lock, 0);
	clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG;

	nmp = clp->nfsc_nmp;
		panic("nfscl expired");
	cred = newnfs_getcred();
	do {
		error = nfsrpc_setclient(nmp, clp, 0, cred, p);
	} while ((error == NFSERR_STALECLIENTID ||
	    error == NFSERR_BADSESSION ||
	    error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
		clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;

	/*
	 * Expire the state for the client.
	 */
	nfscl_expireclient(clp, nmp, cred, p);
	clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
	clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
	clp->nfsc_flags &= ~(NFSCLFLAGS_EXPIREIT | NFSCLFLAGS_RECVRINPROG);
	wakeup(&clp->nfsc_flags);
	nfsv4_unlock(&clp->nfsc_lock, 0);
2353 * This function inserts a lock in the list after insert_lop.
2356 nfscl_insertlock(struct nfscllockowner *lp, struct nfscllock *new_lop,
2357 struct nfscllock *insert_lop, int local)
2360 if ((struct nfscllockowner *)insert_lop == lp)
2361 LIST_INSERT_HEAD(&lp->nfsl_lock, new_lop, nfslo_list);
2363 LIST_INSERT_AFTER(insert_lop, new_lop, nfslo_list);
2365 nfsstatsv1.cllocallocks++;
2367 nfsstatsv1.cllocks++;
2371 * This function updates the locking for a lock owner and given file. It
2372 * maintains a list of lock ranges ordered on increasing file offset that
2373 * are NFSCLLOCK_READ or NFSCLLOCK_WRITE and non-overlapping (aka POSIX style).
2374 * It always adds new_lop to the list and sometimes uses the one pointed at by other_lopp.
2376 * Returns 1 if the locks were modified, 0 otherwise.
2379 nfscl_updatelock(struct nfscllockowner *lp, struct nfscllock **new_lopp,
2380 struct nfscllock **other_lopp, int local)
2382 struct nfscllock *new_lop = *new_lopp;
2383 struct nfscllock *lop, *tlop, *ilop;
2384 struct nfscllock *other_lop;
2385 int unlock = 0, modified = 0;
2389 * Work down the list until the lock is merged.
2391 if (new_lop->nfslo_type == F_UNLCK)
2393 ilop = (struct nfscllock *)lp;
2394 lop = LIST_FIRST(&lp->nfsl_lock);
2395 while (lop != NULL) {
2397 		 * Only check locks for this file that aren't before the start of the new lock's range.
2400 if (lop->nfslo_end >= new_lop->nfslo_first) {
2401 if (new_lop->nfslo_end < lop->nfslo_first) {
2403 * If the new lock ends before the start of the
2404 				 * current lock's range, no merge, just insert the new lock.
2409 if (new_lop->nfslo_type == lop->nfslo_type ||
2410 (new_lop->nfslo_first <= lop->nfslo_first &&
2411 new_lop->nfslo_end >= lop->nfslo_end)) {
2413 * This lock can be absorbed by the new lock/unlock.
2414 * This happens when it covers the entire range
2415 * of the old lock or is contiguous
2416 				 * with the old lock and is of the same type or an unlock.
2419 if (new_lop->nfslo_type != lop->nfslo_type ||
2420 new_lop->nfslo_first != lop->nfslo_first ||
2421 new_lop->nfslo_end != lop->nfslo_end)
2423 if (lop->nfslo_first < new_lop->nfslo_first)
2424 new_lop->nfslo_first = lop->nfslo_first;
2425 if (lop->nfslo_end > new_lop->nfslo_end)
2426 new_lop->nfslo_end = lop->nfslo_end;
2428 lop = LIST_NEXT(lop, nfslo_list);
2429 nfscl_freelock(tlop, local);
2434 * All these cases are for contiguous locks that are not the
2435 * same type, so they can't be merged.
2437 if (new_lop->nfslo_first <= lop->nfslo_first) {
2439 * This case is where the new lock overlaps with the
2440 * first part of the old lock. Move the start of the
2441 * old lock to just past the end of the new lock. The
2442 * new lock will be inserted in front of the old, since
2443 * ilop hasn't been updated. (We are done now.)
2445 if (lop->nfslo_first != new_lop->nfslo_end) {
2446 lop->nfslo_first = new_lop->nfslo_end;
2451 if (new_lop->nfslo_end >= lop->nfslo_end) {
2453 * This case is where the new lock overlaps with the
2454 * end of the old lock's range. Move the old lock's
2455 * end to just before the new lock's first and insert
2456 * the new lock after the old lock.
2457 * Might not be done yet, since the new lock could
2458 * overlap further locks with higher ranges.
2460 if (lop->nfslo_end != new_lop->nfslo_first) {
2461 lop->nfslo_end = new_lop->nfslo_first;
2465 lop = LIST_NEXT(lop, nfslo_list);
2469 * The final case is where the new lock's range is in the
2470 * middle of the current lock's and splits the current lock
2471 * up. Use *other_lopp to handle the second part of the
2472 * split old lock range. (We are done now.)
2473 * For unlock, we use new_lop as other_lop and tmp, since
2474 * other_lop and new_lop are the same for this case.
2475 * We noted the unlock case above, so we don't need
2476 * new_lop->nfslo_type any longer.
2478 tmp = new_lop->nfslo_first;
2480 other_lop = new_lop;
2483 other_lop = *other_lopp;
2486 other_lop->nfslo_first = new_lop->nfslo_end;
2487 other_lop->nfslo_end = lop->nfslo_end;
2488 other_lop->nfslo_type = lop->nfslo_type;
2489 lop->nfslo_end = tmp;
2490 nfscl_insertlock(lp, other_lop, lop, local);
2496 lop = LIST_NEXT(lop, nfslo_list);
2502 * Insert the new lock in the list at the appropriate place.
2505 nfscl_insertlock(lp, new_lop, ilop, local);
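/*
 * A minimal userland sketch (hypothetical; not part of this file) of the
 * "split" case handled above, where a new lock lands in the middle of an
 * existing range and the old range must be cut in two. The struct and
 * helper names below are illustrative assumptions only.
 */
#if 0
#include <stdio.h>

struct range {
	unsigned long first;	/* inclusive start */
	unsigned long end;	/* exclusive end */
};

/* Cut "old" around "new", leaving the high part in "tail". */
static void
split_range(struct range *old, const struct range *new, struct range *tail)
{
	tail->first = new->end;
	tail->end = old->end;
	old->end = new->first;
}

int
main(void)
{
	struct range old = { 0, 100 }, new = { 40, 60 }, tail;

	split_range(&old, &new, &tail);
	printf("old=[%lu,%lu) new=[%lu,%lu) tail=[%lu,%lu)\n",
	    old.first, old.end, new.first, new.end, tail.first, tail.end);
	return (0);
}
#endif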
2513 * This function must be run as a kernel thread.
2514 * It does Renew Ops and recovery, when required.
2517 nfscl_renewthread(struct nfsclclient *clp, NFSPROC_T *p)
2519 struct nfsclowner *owp, *nowp;
2520 struct nfsclopen *op;
2521 struct nfscllockowner *lp, *nlp;
2522 struct nfscldeleghead dh;
2523 struct nfscldeleg *dp, *ndp;
2526 int error, cbpathdown, islept, igotlock, ret, clearok;
2527 uint32_t recover_done_time = 0;
2529 static time_t prevsec = 0;
2530 struct nfscllockownerfh *lfhp, *nlfhp;
2531 struct nfscllockownerfhhead lfh;
2532 struct nfscllayout *lyp, *nlyp;
2533 struct nfscldevinfo *dip, *ndip;
2534 struct nfscllayouthead rlh;
2535 struct nfsclrecalllayout *recallp;
2536 struct nfsclds *dsp;
2538 cred = newnfs_getcred();
2540 clp->nfsc_flags |= NFSCLFLAGS_HASTHREAD;
2543 newnfs_setroot(cred);
2545 if (clp->nfsc_flags & NFSCLFLAGS_RECOVER) {
2547 			 * Only allow one recovery within 1/2 of the lease
2548 * duration (nfsc_renew).
2550 if (recover_done_time < NFSD_MONOSEC) {
2551 recover_done_time = NFSD_MONOSEC +
2553 NFSCL_DEBUG(1, "Doing recovery..\n");
2554 nfscl_recover(clp, cred, p);
2556 NFSCL_DEBUG(1, "Clear Recovery dt=%u ms=%jd\n",
2557 recover_done_time, (intmax_t)NFSD_MONOSEC);
2559 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
2563 if (clp->nfsc_expire <= NFSD_MONOSEC &&
2564 (clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID)) {
2565 clp->nfsc_expire = NFSD_MONOSEC + clp->nfsc_renew;
2566 clidrev = clp->nfsc_clientidrev;
2567 error = nfsrpc_renew(clp, NULL, cred, p);
2568 if (error == NFSERR_CBPATHDOWN)
2570 else if (error == NFSERR_STALECLIENTID ||
2571 error == NFSERR_BADSESSION) {
2573 clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
2575 } else if (error == NFSERR_EXPIRED)
2576 (void) nfscl_hasexpired(clp, clidrev, p);
2580 if (NFSHASNFSV4N(clp->nfsc_nmp)) {
2581 /* Do renews for any DS sessions. */
2582 NFSLOCKMNT(clp->nfsc_nmp);
2583 /* Skip first entry, since the MDS is handled above. */
2584 dsp = TAILQ_FIRST(&clp->nfsc_nmp->nm_sess);
2586 dsp = TAILQ_NEXT(dsp, nfsclds_list);
2587 while (dsp != NULL) {
2588 if (dsp->nfsclds_expire <= NFSD_MONOSEC &&
2589 dsp->nfsclds_sess.nfsess_defunct == 0) {
2590 dsp->nfsclds_expire = NFSD_MONOSEC +
2592 NFSUNLOCKMNT(clp->nfsc_nmp);
2593 (void)nfsrpc_renew(clp, dsp, cred, p);
2596 dsp = TAILQ_NEXT(dsp, nfsclds_list);
2598 NFSUNLOCKMNT(clp->nfsc_nmp);
2604 /* It's a Total Recall! */
2605 nfscl_totalrecall(clp);
2608 * Now, handle defunct owners.
2610 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
2611 if (LIST_EMPTY(&owp->nfsow_open)) {
2612 if (owp->nfsow_defunct != 0)
2613 nfscl_freeopenowner(owp, 0);
2618 * Do the recall on any delegations. To avoid trouble, always
2619 * come back up here after having slept.
2623 dp = TAILQ_FIRST(&clp->nfsc_deleg);
2624 while (dp != NULL) {
2625 ndp = TAILQ_NEXT(dp, nfsdl_list);
2626 if ((dp->nfsdl_flags & NFSCLDL_RECALL)) {
2628 * Wait for outstanding I/O ops to be done.
2630 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
2632 nfsv4_unlock(&clp->nfsc_lock, 0);
2635 dp->nfsdl_rwlock.nfslock_lock |=
2637 (void) nfsmsleep(&dp->nfsdl_rwlock,
2638 NFSCLSTATEMUTEXPTR, PZERO, "nfscld",
2643 igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
2644 &islept, NFSCLSTATEMUTEXPTR, NULL);
2649 newnfs_copycred(&dp->nfsdl_cred, cred);
2650 ret = nfscl_recalldeleg(clp, clp->nfsc_nmp, dp,
2653 nfscl_cleandeleg(dp);
2654 TAILQ_REMOVE(&clp->nfsc_deleg, dp,
2656 LIST_REMOVE(dp, nfsdl_hash);
2657 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
2659 nfsstatsv1.cldelegates--;
2667 * Clear out old delegations, if we are above the high water
2668 * mark. Only clear out ones with no state related to them.
2669 * The tailq list is in LRU order.
2671 dp = TAILQ_LAST(&clp->nfsc_deleg, nfscldeleghead);
2672 while (nfscl_delegcnt > nfscl_deleghighwater && dp != NULL) {
2673 ndp = TAILQ_PREV(dp, nfscldeleghead, nfsdl_list);
2674 if (dp->nfsdl_rwlock.nfslock_usecnt == 0 &&
2675 dp->nfsdl_rwlock.nfslock_lock == 0 &&
2676 dp->nfsdl_timestamp < NFSD_MONOSEC &&
2677 (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_ZAPPED |
2678 NFSCLDL_NEEDRECLAIM | NFSCLDL_DELEGRET)) == 0) {
2680 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
2681 op = LIST_FIRST(&owp->nfsow_open);
2688 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
2689 if (!LIST_EMPTY(&lp->nfsl_lock)) {
2696 TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
2697 LIST_REMOVE(dp, nfsdl_hash);
2698 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
2700 nfsstatsv1.cldelegates--;
2706 nfsv4_unlock(&clp->nfsc_lock, 0);
2709 * Do the recall on any layouts. To avoid trouble, always
2710 * come back up here after having slept.
2714 TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp) {
2715 if ((lyp->nfsly_flags & NFSLY_RECALL) != 0) {
2717 * Wait for outstanding I/O ops to be done.
2719 if (lyp->nfsly_lock.nfslock_usecnt > 0 ||
2720 (lyp->nfsly_lock.nfslock_lock &
2721 NFSV4LOCK_LOCK) != 0) {
2722 lyp->nfsly_lock.nfslock_lock |=
2724 nfsmsleep(&lyp->nfsly_lock.nfslock_lock,
2725 NFSCLSTATEMUTEXPTR, PZERO, "nfslyp",
2729 /* Move the layout to the recall list. */
2730 TAILQ_REMOVE(&clp->nfsc_layout, lyp,
2732 LIST_REMOVE(lyp, nfsly_hash);
2733 TAILQ_INSERT_HEAD(&rlh, lyp, nfsly_list);
2735 /* Handle any layout commits. */
2736 if (!NFSHASNOLAYOUTCOMMIT(clp->nfsc_nmp) &&
2737 (lyp->nfsly_flags & NFSLY_WRITTEN) != 0) {
2738 lyp->nfsly_flags &= ~NFSLY_WRITTEN;
2740 NFSCL_DEBUG(3, "do layoutcommit\n");
2741 nfscl_dolayoutcommit(clp->nfsc_nmp, lyp,
2749 /* Now, look for stale layouts. */
2750 lyp = TAILQ_LAST(&clp->nfsc_layout, nfscllayouthead);
2751 while (lyp != NULL) {
2752 nlyp = TAILQ_PREV(lyp, nfscllayouthead, nfsly_list);
2753 if (lyp->nfsly_timestamp < NFSD_MONOSEC &&
2754 (lyp->nfsly_flags & NFSLY_RECALL) == 0 &&
2755 lyp->nfsly_lock.nfslock_usecnt == 0 &&
2756 lyp->nfsly_lock.nfslock_lock == 0) {
2757 NFSCL_DEBUG(4, "ret stale lay=%d\n",
2759 recallp = malloc(sizeof(*recallp),
2760 M_NFSLAYRECALL, M_NOWAIT);
2761 if (recallp == NULL)
2763 (void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE,
2764 lyp, NFSLAYOUTIOMODE_ANY, 0, UINT64_MAX,
2765 lyp->nfsly_stateid.seqid, 0, 0, NULL,
2772 * Free up any unreferenced device info structures.
2774 LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip) {
2775 if (dip->nfsdi_layoutrefs == 0 &&
2776 dip->nfsdi_refcnt == 0) {
2777 NFSCL_DEBUG(4, "freeing devinfo\n");
2778 LIST_REMOVE(dip, nfsdi_list);
2779 nfscl_freedevinfo(dip);
2784 /* Do layout return(s), as required. */
2785 TAILQ_FOREACH_SAFE(lyp, &rlh, nfsly_list, nlyp) {
2786 TAILQ_REMOVE(&rlh, lyp, nfsly_list);
2787 NFSCL_DEBUG(4, "ret layout\n");
2788 nfscl_layoutreturn(clp->nfsc_nmp, lyp, cred, p);
2789 nfscl_freelayout(lyp);
2793 * Delegreturn any delegations cleaned out or recalled.
2795 TAILQ_FOREACH_SAFE(dp, &dh, nfsdl_list, ndp) {
2796 newnfs_copycred(&dp->nfsdl_cred, cred);
2797 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
2798 TAILQ_REMOVE(&dh, dp, nfsdl_list);
2799 free(dp, M_NFSCLDELEG);
2804 * Call nfscl_cleanupkext() once per second to check for
2805 * open/lock owners where the process has exited.
2807 mytime = NFSD_MONOSEC;
2808 if (prevsec != mytime) {
2810 nfscl_cleanupkext(clp, &lfh);
2814 * Do a ReleaseLockOwner for all lock owners where the
2815 * associated process no longer exists, as found by
2816 * nfscl_cleanupkext().
2818 newnfs_setroot(cred);
2819 SLIST_FOREACH_SAFE(lfhp, &lfh, nfslfh_list, nlfhp) {
2820 LIST_FOREACH_SAFE(lp, &lfhp->nfslfh_lock, nfsl_list,
2822 (void)nfsrpc_rellockown(clp->nfsc_nmp, lp,
2823 lfhp->nfslfh_fh, lfhp->nfslfh_len, cred,
2825 nfscl_freelockowner(lp, 0);
2832 if ((clp->nfsc_flags & NFSCLFLAGS_RECOVER) == 0)
2833 (void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT, "nfscl",
2835 if (clp->nfsc_flags & NFSCLFLAGS_UMOUNT) {
2836 clp->nfsc_flags &= ~NFSCLFLAGS_HASTHREAD;
2839 wakeup((caddr_t)clp);
2847 * Initiate state recovery. Called when NFSERR_STALECLIENTID,
2848 * NFSERR_STALESTATEID or NFSERR_BADSESSION is received.
2851 nfscl_initiate_recovery(struct nfsclclient *clp)
2857 clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
2859 wakeup((caddr_t)clp);
2863 * Dump out the state stuff for debugging.
2866 nfscl_dumpstate(struct nfsmount *nmp, int openowner, int opens,
2867 int lockowner, int locks)
2869 struct nfsclclient *clp;
2870 struct nfsclowner *owp;
2871 struct nfsclopen *op;
2872 struct nfscllockowner *lp;
2873 struct nfscllock *lop;
2874 struct nfscldeleg *dp;
2878 printf("nfscl dumpstate NULL clp\n");
2882 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
2883 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
2884 if (openowner && !LIST_EMPTY(&owp->nfsow_open))
2885 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
2886 owp->nfsow_owner[0], owp->nfsow_owner[1],
2887 owp->nfsow_owner[2], owp->nfsow_owner[3],
2889 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
2891 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
2892 op->nfso_stateid.other[0], op->nfso_stateid.other[1],
2893 op->nfso_stateid.other[2], op->nfso_opencnt,
2895 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
2897 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
2898 lp->nfsl_owner[0], lp->nfsl_owner[1],
2899 lp->nfsl_owner[2], lp->nfsl_owner[3],
2901 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
2902 lp->nfsl_stateid.other[2]);
2903 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
2906 printf("lck typ=%d fst=%ju end=%ju\n",
2907 lop->nfslo_type, (intmax_t)lop->nfslo_first,
2908 (intmax_t)lop->nfslo_end);
2910 printf("lck typ=%d fst=%qd end=%qd\n",
2911 lop->nfslo_type, lop->nfslo_first,
2919 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
2920 if (openowner && !LIST_EMPTY(&owp->nfsow_open))
2921 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
2922 owp->nfsow_owner[0], owp->nfsow_owner[1],
2923 owp->nfsow_owner[2], owp->nfsow_owner[3],
2925 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
2927 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
2928 op->nfso_stateid.other[0], op->nfso_stateid.other[1],
2929 op->nfso_stateid.other[2], op->nfso_opencnt,
2931 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
2933 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
2934 lp->nfsl_owner[0], lp->nfsl_owner[1],
2935 lp->nfsl_owner[2], lp->nfsl_owner[3],
2937 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
2938 lp->nfsl_stateid.other[2]);
2939 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
2942 printf("lck typ=%d fst=%ju end=%ju\n",
2943 lop->nfslo_type, (intmax_t)lop->nfslo_first,
2944 (intmax_t)lop->nfslo_end);
2946 printf("lck typ=%d fst=%qd end=%qd\n",
2947 lop->nfslo_type, lop->nfslo_first,
2958 * Check for duplicate open owners and opens.
2959 * (Only used as a diagnostic aid.)
2962 nfscl_dupopen(vnode_t vp, int dupopens)
2964 struct nfsclclient *clp;
2965 struct nfsclowner *owp, *owp2;
2966 struct nfsclopen *op, *op2;
2969 clp = VFSTONFS(vnode_mount(vp))->nm_clp;
2971 printf("nfscl dupopen NULL clp\n");
2974 nfhp = VTONFS(vp)->n_fhp;
2978 * First, search for duplicate owners.
2979 * These should never happen!
2981 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
2982 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
2984 !NFSBCMP(owp->nfsow_owner, owp2->nfsow_owner,
2985 NFSV4CL_LOCKNAMELEN)) {
2987 printf("DUP OWNER\n");
2988 nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0, 0);
2995 * Now, search for duplicate stateids.
2996 * These shouldn't happen, either.
2998 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
2999 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
3000 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3001 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3003 (op->nfso_stateid.other[0] != 0 ||
3004 op->nfso_stateid.other[1] != 0 ||
3005 op->nfso_stateid.other[2] != 0) &&
3006 op->nfso_stateid.other[0] == op2->nfso_stateid.other[0] &&
3007 op->nfso_stateid.other[1] == op2->nfso_stateid.other[1] &&
3008 op->nfso_stateid.other[2] == op2->nfso_stateid.other[2]) {
3010 printf("DUP STATEID\n");
3011 nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0,
3021 * Now search for duplicate opens.
3022 * Duplicate opens for the same owner
3023 * should never occur. Other duplicates are
3024 * possible and are checked for if "dupopens" is set.
3027 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
3028 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
3029 if (nfhp->nfh_len == op2->nfso_fhlen &&
3030 !NFSBCMP(nfhp->nfh_fh, op2->nfso_fh, nfhp->nfh_len)) {
3031 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3032 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3033 if (op != op2 && nfhp->nfh_len == op->nfso_fhlen &&
3034 !NFSBCMP(nfhp->nfh_fh, op->nfso_fh, nfhp->nfh_len) &&
3035 (!NFSBCMP(op->nfso_own->nfsow_owner,
3036 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN) ||
3038 if (!NFSBCMP(op->nfso_own->nfsow_owner,
3039 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
3041 printf("BADDUP OPEN\n");
3044 printf("DUP OPEN\n");
3046 nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1,
3059 * During close, find an open that needs to be dereferenced and
3060 * dereference it. If there are no more opens for this file,
3061 * log a message to that effect.
3062 * Opens aren't actually Close'd until VOP_INACTIVE() is performed
3063 * on the file's vnode.
3064 * This is the safe way, since it is difficult to identify
3065 * which open the close is for and I/O can be performed after the
3066 * close(2) system call when a file is mmap'd.
3067 * If it returns 0 for success, there will be a referenced
3068 * clp returned via clpp.
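 * (Note: this function only dereferences an Open; the NFSv4 Close RPC
 * itself is issued later, by nfscl_doclose() from VOP_INACTIVE(), as
 * described above.)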
3071 nfscl_getclose(vnode_t vp, struct nfsclclient **clpp)
3073 struct nfsclclient *clp;
3074 struct nfsclowner *owp;
3075 struct nfsclopen *op;
3076 struct nfscldeleg *dp;
3080 error = nfscl_getcl(vnode_mount(vp), NULL, NULL, 1, &clp);
3085 nfhp = VTONFS(vp)->n_fhp;
3089 * First, look for one under a delegation that was locally issued
3090 * and just decrement the opencnt for it. Since all my Opens against
3091 * the server are DENY_NONE, I don't see a problem with hanging
3092 * onto them. (It is much easier to use one of the extant Opens
3093 * that I already have on the server when a Delegation is recalled
3094 	 * than to do fresh Opens.) Someday, I might need to rethink this.
3096 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
3098 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
3099 op = LIST_FIRST(&owp->nfsow_open);
3102 * Since a delegation is for a file, there
3103 			 * should never be more than one open for the same file.
3106 if (LIST_NEXT(op, nfso_list) != NULL)
3107 panic("nfscdeleg opens");
3108 if (notdecr && op->nfso_opencnt > 0) {
3117 /* Now process the opens against the server. */
3118 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3119 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3120 if (op->nfso_fhlen == nfhp->nfh_len &&
3121 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
3123 /* Found an open, decrement cnt if possible */
3124 if (notdecr && op->nfso_opencnt > 0) {
3129 * There are more opens, so just return.
3131 if (op->nfso_opencnt > 0) {
3140 printf("nfscl: never fnd open\n");
3145 nfscl_doclose(vnode_t vp, struct nfsclclient **clpp, NFSPROC_T *p)
3147 struct nfsclclient *clp;
3148 struct nfsclowner *owp, *nowp;
3149 struct nfsclopen *op;
3150 struct nfscldeleg *dp;
3152 struct nfsclrecalllayout *recallp;
3155 error = nfscl_getcl(vnode_mount(vp), NULL, NULL, 1, &clp);
3160 nfhp = VTONFS(vp)->n_fhp;
3161 recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_WAITOK);
3164 	 * First get rid of the local Open structures, which should no longer be in use.
3167 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
3169 LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
3170 op = LIST_FIRST(&owp->nfsow_open);
3172 KASSERT((op->nfso_opencnt == 0),
3173 ("nfscl: bad open cnt on deleg"));
3174 nfscl_freeopen(op, 1);
3176 nfscl_freeopenowner(owp, 1);
3180 /* Return any layouts marked return on close. */
3181 nfscl_retoncloselayout(vp, clp, nfhp->nfh_fh, nfhp->nfh_len, &recallp);
3183 /* Now process the opens against the server. */
3185 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3186 op = LIST_FIRST(&owp->nfsow_open);
3187 while (op != NULL) {
3188 if (op->nfso_fhlen == nfhp->nfh_len &&
3189 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
3191 /* Found an open, close it. */
3192 KASSERT((op->nfso_opencnt == 0),
3193 ("nfscl: bad open cnt on server"));
3195 nfsrpc_doclose(VFSTONFS(vnode_mount(vp)), op,
3200 op = LIST_NEXT(op, nfso_list);
3205 * recallp has been set NULL by nfscl_retoncloselayout() if it was
3206 * used by the function, but calling free() with a NULL pointer is ok.
3208 free(recallp, M_NFSLAYRECALL);
3213 * Return all delegations on this client.
3214 * (Must be called with client sleep lock.)
3217 nfscl_delegreturnall(struct nfsclclient *clp, NFSPROC_T *p)
3219 struct nfscldeleg *dp, *ndp;
3222 cred = newnfs_getcred();
3223 TAILQ_FOREACH_SAFE(dp, &clp->nfsc_deleg, nfsdl_list, ndp) {
3224 nfscl_cleandeleg(dp);
3225 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
3226 nfscl_freedeleg(&clp->nfsc_deleg, dp);
3232 * Do a callback RPC.
3235 nfscl_docb(struct nfsrv_descript *nd, NFSPROC_T *p)
3237 int clist, gotseq_ok, i, j, k, op, rcalls;
3239 struct nfsclclient *clp;
3240 struct nfscldeleg *dp = NULL;
3241 int numops, taglen = -1, error = 0, trunc;
3242 u_int32_t minorvers = 0, retops = 0, *retopsp = NULL, *repp, cbident;
3243 u_char tag[NFSV4_SMALLSTR + 1], *tagstr;
3249 nfsattrbit_t attrbits, rattrbits;
3250 nfsv4stateid_t stateid;
3251 uint32_t seqid, slotid = 0, highslot, cachethis;
3252 uint8_t sessionid[NFSX_V4SESSIONID];
3254 struct nfscllayout *lyp;
3255 uint64_t filesid[2], len, off;
3256 int changed, gotone, laytype, recalltype;
3258 struct nfsclrecalllayout *recallp = NULL;
3259 struct nfsclsession *tsep;
3263 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
3264 taglen = fxdr_unsigned(int, *tl);
3269 if (taglen <= NFSV4_SMALLSTR)
3272 tagstr = malloc(taglen + 1, M_TEMP, M_WAITOK);
3273 error = nfsrv_mtostr(nd, tagstr, taglen);
3275 if (taglen > NFSV4_SMALLSTR)
3276 free(tagstr, M_TEMP);
3280 (void) nfsm_strtom(nd, tag, taglen);
3281 if (taglen > NFSV4_SMALLSTR) {
3282 free(tagstr, M_TEMP);
3284 NFSM_BUILD(retopsp, u_int32_t *, NFSX_UNSIGNED);
3285 NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
3286 minorvers = fxdr_unsigned(u_int32_t, *tl++);
3287 if (minorvers != NFSV4_MINORVERSION && minorvers != NFSV41_MINORVERSION)
3288 nd->nd_repstat = NFSERR_MINORVERMISMATCH;
3289 cbident = fxdr_unsigned(u_int32_t, *tl++);
3293 numops = fxdr_unsigned(int, *tl);
3295 * Loop around doing the sub ops.
3297 for (i = 0; i < numops; i++) {
3298 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
3299 NFSM_BUILD(repp, u_int32_t *, 2 * NFSX_UNSIGNED);
3301 op = fxdr_unsigned(int, *tl);
3302 if (op < NFSV4OP_CBGETATTR ||
3303 (op > NFSV4OP_CBRECALL && minorvers == NFSV4_MINORVERSION) ||
3304 (op > NFSV4OP_CBNOTIFYDEVID &&
3305 minorvers == NFSV41_MINORVERSION)) {
3306 nd->nd_repstat = NFSERR_OPILLEGAL;
3307 *repp = nfscl_errmap(nd, minorvers);
3311 nd->nd_procnum = op;
3312 if (op < NFSV41_CBNOPS)
3313 nfsstatsv1.cbrpccnt[nd->nd_procnum]++;
3315 case NFSV4OP_CBGETATTR:
3316 NFSCL_DEBUG(4, "cbgetattr\n");
3319 error = nfsm_getfh(nd, &nfhp);
3321 error = nfsrv_getattrbits(nd, &attrbits,
3323 if (error == 0 && i == 0 &&
3324 minorvers != NFSV4_MINORVERSION)
3325 error = NFSERR_OPNOTINSESS;
3327 mp = nfscl_getmnt(minorvers, sessionid, cbident,
3330 error = NFSERR_SERVERFAULT;
3333 error = nfscl_ngetreopen(mp, nfhp->nfh_fh,
3334 nfhp->nfh_len, p, &np);
3339 NFSZERO_ATTRBIT(&rattrbits);
3341 dp = nfscl_finddeleg(clp, nfhp->nfh_fh,
3344 if (NFSISSET_ATTRBIT(&attrbits,
3347 va.va_size = np->n_size;
3351 NFSSETBIT_ATTRBIT(&rattrbits,
3354 if (NFSISSET_ATTRBIT(&attrbits,
3355 NFSATTRBIT_CHANGE)) {
3359 (np->n_flag & NDELEGMOD))
3361 NFSSETBIT_ATTRBIT(&rattrbits,
3365 error = NFSERR_SERVERFAULT;
3373 free(nfhp, M_NFSFH);
3375 (void) nfsv4_fillattr(nd, NULL, NULL, NULL, &va,
3376 NULL, 0, &rattrbits, NULL, p, 0, 0, 0, 0,
3379 case NFSV4OP_CBRECALL:
3380 NFSCL_DEBUG(4, "cbrecall\n");
3381 NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID +
3383 stateid.seqid = *tl++;
3384 NFSBCOPY((caddr_t)tl, (caddr_t)stateid.other,
3386 tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED);
3387 trunc = fxdr_unsigned(int, *tl);
3388 error = nfsm_getfh(nd, &nfhp);
3389 if (error == 0 && i == 0 &&
3390 minorvers != NFSV4_MINORVERSION)
3391 error = NFSERR_OPNOTINSESS;
3394 if (minorvers == NFSV4_MINORVERSION)
3395 clp = nfscl_getclnt(cbident);
3397 clp = nfscl_getclntsess(sessionid);
3399 dp = nfscl_finddeleg(clp, nfhp->nfh_fh,
3401 if (dp != NULL && (dp->nfsdl_flags &
3402 NFSCLDL_DELEGRET) == 0) {
3405 wakeup((caddr_t)clp);
3408 error = NFSERR_SERVERFAULT;
3413 free(nfhp, M_NFSFH);
3415 case NFSV4OP_CBLAYOUTRECALL:
3416 NFSCL_DEBUG(4, "cblayrec\n");
3418 NFSM_DISSECT(tl, uint32_t *, 4 * NFSX_UNSIGNED);
3419 laytype = fxdr_unsigned(int, *tl++);
3420 iomode = fxdr_unsigned(uint32_t, *tl++);
3421 if (newnfs_true == *tl++)
3425 recalltype = fxdr_unsigned(int, *tl);
3426 NFSCL_DEBUG(4, "layt=%d iom=%d ch=%d rectyp=%d\n",
3427 laytype, iomode, changed, recalltype);
3428 recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL,
3430 if (laytype != NFSLAYOUT_NFSV4_1_FILES &&
3431 laytype != NFSLAYOUT_FLEXFILE)
3432 error = NFSERR_NOMATCHLAYOUT;
3433 else if (recalltype == NFSLAYOUTRETURN_FILE) {
3434 error = nfsm_getfh(nd, &nfhp);
3435 NFSCL_DEBUG(4, "retfile getfh=%d\n", error);
3438 NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_HYPER +
3440 off = fxdr_hyper(tl); tl += 2;
3441 len = fxdr_hyper(tl); tl += 2;
3442 stateid.seqid = fxdr_unsigned(uint32_t, *tl++);
3443 NFSBCOPY(tl, stateid.other, NFSX_STATEIDOTHER);
3444 if (minorvers == NFSV4_MINORVERSION)
3445 error = NFSERR_NOTSUPP;
3447 error = NFSERR_OPNOTINSESS;
3448 NFSCL_DEBUG(4, "off=%ju len=%ju sq=%u err=%d\n",
3449 (uintmax_t)off, (uintmax_t)len,
3450 stateid.seqid, error);
3453 clp = nfscl_getclntsess(sessionid);
3454 NFSCL_DEBUG(4, "cbly clp=%p\n", clp);
3456 lyp = nfscl_findlayout(clp,
3459 NFSCL_DEBUG(4, "cblyp=%p\n",
3464 NFSLY_FLEXFILE)) != 0 &&
3465 !NFSBCMP(stateid.other,
3466 lyp->nfsly_stateid.other,
3467 NFSX_STATEIDOTHER)) {
3482 NFSERR_NOMATCHLAYOUT;
3484 error = NFSERR_NOMATCHLAYOUT;
3487 free(nfhp, M_NFSFH);
3488 } else if (recalltype == NFSLAYOUTRETURN_FSID) {
3489 NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_HYPER);
3490 filesid[0] = fxdr_hyper(tl); tl += 2;
3491 filesid[1] = fxdr_hyper(tl); tl += 2;
3494 clp = nfscl_getclntsess(sessionid);
3496 TAILQ_FOREACH(lyp, &clp->nfsc_layout,
3498 if (lyp->nfsly_filesid[0] ==
3500 lyp->nfsly_filesid[1] ==
3507 lyp->nfsly_stateid.seqid,
3517 error = NFSERR_NOMATCHLAYOUT;
3519 error = NFSERR_NOMATCHLAYOUT;
3521 } else if (recalltype == NFSLAYOUTRETURN_ALL) {
3524 clp = nfscl_getclntsess(sessionid);
3526 TAILQ_FOREACH(lyp, &clp->nfsc_layout,
3528 error = nfscl_layoutrecall(
3529 recalltype, lyp, iomode, 0,
3531 lyp->nfsly_stateid.seqid,
3532 0, 0, NULL, recallp);
3539 error = NFSERR_NOMATCHLAYOUT;
3541 error = NFSERR_NOMATCHLAYOUT;
3544 error = NFSERR_NOMATCHLAYOUT;
3545 if (recallp != NULL) {
3546 free(recallp, M_NFSLAYRECALL);
3550 case NFSV4OP_CBSEQUENCE:
3551 NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID +
3553 bcopy(tl, sessionid, NFSX_V4SESSIONID);
3554 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
3555 seqid = fxdr_unsigned(uint32_t, *tl++);
3556 slotid = fxdr_unsigned(uint32_t, *tl++);
3557 highslot = fxdr_unsigned(uint32_t, *tl++);
3559 /* Throw away the referring call stuff. */
3560 clist = fxdr_unsigned(int, *tl);
3561 for (j = 0; j < clist; j++) {
3562 NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID +
3564 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
3565 rcalls = fxdr_unsigned(int, *tl);
3566 for (k = 0; k < rcalls; k++) {
3567 NFSM_DISSECT(tl, uint32_t *,
3573 clp = nfscl_getclntsess(sessionid);
3575 error = NFSERR_SERVERFAULT;
3577 error = NFSERR_SEQUENCEPOS;
3579 tsep = nfsmnt_mdssession(clp->nfsc_nmp);
3580 error = nfsv4_seqsession(seqid, slotid,
3581 highslot, tsep->nfsess_cbslots, &rep,
3582 tsep->nfsess_backslots);
3585 if (error == 0 || error == NFSERR_REPLYFROMCACHE) {
3589 * Handle a reply for a retried
3590 * callback. The reply will be
3591 * re-inserted in the session cache
3592 * by the nfsv4_seqsess_cacherep() call
3595 KASSERT(error == NFSERR_REPLYFROMCACHE,
3596 ("cbsequence: non-NULL rep"));
3597 NFSCL_DEBUG(4, "Got cbretry\n");
3598 m_freem(nd->nd_mreq);
3603 NFSM_BUILD(tl, uint32_t *,
3604 NFSX_V4SESSIONID + 4 * NFSX_UNSIGNED);
3605 bcopy(sessionid, tl, NFSX_V4SESSIONID);
3606 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
3607 *tl++ = txdr_unsigned(seqid);
3608 *tl++ = txdr_unsigned(slotid);
3609 *tl++ = txdr_unsigned(NFSV4_CBSLOTS - 1);
3610 *tl = txdr_unsigned(NFSV4_CBSLOTS - 1);
3614 if (i == 0 && minorvers == NFSV41_MINORVERSION)
3615 error = NFSERR_OPNOTINSESS;
3617 NFSCL_DEBUG(1, "unsupp callback %d\n", op);
3618 error = NFSERR_NOTSUPP;
3623 if (error == EBADRPC || error == NFSERR_BADXDR) {
3624 nd->nd_repstat = NFSERR_BADXDR;
3626 nd->nd_repstat = error;
3631 if (nd->nd_repstat) {
3632 *repp = nfscl_errmap(nd, minorvers);
3635 *repp = 0; /* NFS4_OK */
3638 if (recallp != NULL)
3639 free(recallp, M_NFSLAYRECALL);
3641 if (error == EBADRPC || error == NFSERR_BADXDR)
3642 nd->nd_repstat = NFSERR_BADXDR;
3644 printf("nfsv4 comperr1=%d\n", error);
3647 NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
3651 *retopsp = txdr_unsigned(retops);
3653 *nd->nd_errp = nfscl_errmap(nd, minorvers);
3655 if (gotseq_ok != 0) {
3656 rep = m_copym(nd->nd_mreq, 0, M_COPYALL, M_WAITOK);
3658 clp = nfscl_getclntsess(sessionid);
3660 tsep = nfsmnt_mdssession(clp->nfsc_nmp);
3661 nfsv4_seqsess_cacherep(slotid, tsep->nfsess_cbslots,
3672 * Generate the next cbident value. Basically just increment a static value
3673 * and then check that it isn't already in the list, if it has wrapped around.
3676 nfscl_nextcbident(void)
3678 struct nfsclclient *clp;
3680 static u_int32_t nextcbident = 0;
3681 static int haswrapped = 0;
3684 if (nextcbident == 0)
3688 * Search the clientid list for one already using this cbident.
3693 LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
3694 if (clp->nfsc_cbident == nextcbident) {
3704 return (nextcbident);
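/*
 * (Note on the wrap check above: once the static counter wraps back to
 * zero, "haswrapped" is set and each candidate value is compared against
 * the cbidents already in use on the clientid list before being
 * returned.)
 */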
3708 * Get the mount point related to a given cbident or session and busy it.
3711 nfscl_getmnt(int minorvers, uint8_t *sessionid, u_int32_t cbident,
3712 struct nfsclclient **clpp)
3714 struct nfsclclient *clp;
3717 struct nfsclsession *tsep;
3721 LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
3722 tsep = nfsmnt_mdssession(clp->nfsc_nmp);
3723 if (minorvers == NFSV4_MINORVERSION) {
3724 if (clp->nfsc_cbident == cbident)
3726 } else if (!NFSBCMP(tsep->nfsess_sessionid, sessionid,
3734 mp = clp->nfsc_nmp->nm_mountp;
3737 error = vfs_busy(mp, 0);
3746 * Get the clientid pointer related to a given cbident.
3748 static struct nfsclclient *
3749 nfscl_getclnt(u_int32_t cbident)
3751 struct nfsclclient *clp;
3753 LIST_FOREACH(clp, &nfsclhead, nfsc_list)
3754 if (clp->nfsc_cbident == cbident)
3760 * Get the clientid pointer related to a given sessionid.
3762 static struct nfsclclient *
3763 nfscl_getclntsess(uint8_t *sessionid)
3765 struct nfsclclient *clp;
3766 struct nfsclsession *tsep;
3768 LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
3769 tsep = nfsmnt_mdssession(clp->nfsc_nmp);
3770 if (!NFSBCMP(tsep->nfsess_sessionid, sessionid,
3778 * Search for a lock conflict locally on the client. A conflict occurs if
3779 * - not same owner and overlapping byte range and at least one of them is
3780 * a write lock or this is an unlock.
3783 nfscl_localconflict(struct nfsclclient *clp, u_int8_t *fhp, int fhlen,
3784 struct nfscllock *nlop, u_int8_t *own, struct nfscldeleg *dp,
3785 struct nfscllock **lopp)
3787 struct nfsclowner *owp;
3788 struct nfsclopen *op;
3792 ret = nfscl_checkconflict(&dp->nfsdl_lock, nlop, own, lopp);
3796 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3797 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3798 if (op->nfso_fhlen == fhlen &&
3799 !NFSBCMP(op->nfso_fh, fhp, fhlen)) {
3800 ret = nfscl_checkconflict(&op->nfso_lock, nlop,
3811 nfscl_checkconflict(struct nfscllockownerhead *lhp, struct nfscllock *nlop,
3812 u_int8_t *own, struct nfscllock **lopp)
3814 struct nfscllockowner *lp;
3815 struct nfscllock *lop;
3817 LIST_FOREACH(lp, lhp, nfsl_list) {
3818 if (NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
3819 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
3820 if (lop->nfslo_first >= nlop->nfslo_end)
3822 if (lop->nfslo_end <= nlop->nfslo_first)
3824 if (lop->nfslo_type == F_WRLCK ||
3825 nlop->nfslo_type == F_WRLCK ||
3826 nlop->nfslo_type == F_UNLCK) {
3829 return (NFSERR_DENIED);
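/*
 * A standalone sketch (hypothetical, userland-style) of the conflict
 * predicate applied above: a conflict needs overlapping byte ranges,
 * different owners, and at least one write lock (or an unlock request).
 * The helper name and signature below are illustrative assumptions.
 */
#if 0
#include <fcntl.h>	/* F_WRLCK, F_UNLCK */
#include <stdbool.h>

static bool
lock_conflicts(unsigned long first1, unsigned long end1, short type1,
    unsigned long first2, unsigned long end2, short type2, bool same_owner)
{
	if (same_owner)
		return (false);
	if (first1 >= end2 || end1 <= first2)
		return (false);		/* byte ranges are disjoint */
	return (type1 == F_WRLCK || type2 == F_WRLCK || type2 == F_UNLCK);
}
#endif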
3838 * Check for a local conflicting lock.
3841 nfscl_lockt(vnode_t vp, struct nfsclclient *clp, u_int64_t off,
3842 u_int64_t len, struct flock *fl, NFSPROC_T *p, void *id, int flags)
3844 struct nfscllock *lop, nlck;
3845 struct nfscldeleg *dp;
3847 u_int8_t own[NFSV4CL_LOCKNAMELEN];
3850 nlck.nfslo_type = fl->l_type;
3851 nlck.nfslo_first = off;
3852 if (len == NFS64BITSSET) {
3853 nlck.nfslo_end = NFS64BITSSET;
3855 nlck.nfslo_end = off + len;
3856 if (nlck.nfslo_end <= nlck.nfslo_first)
3857 return (NFSERR_INVAL);
3860 nfscl_filllockowner(id, own, flags);
3862 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
3863 error = nfscl_localconflict(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len,
3864 &nlck, own, dp, &lop);
3866 fl->l_whence = SEEK_SET;
3867 fl->l_start = lop->nfslo_first;
3868 if (lop->nfslo_end == NFS64BITSSET)
3871 fl->l_len = lop->nfslo_end - lop->nfslo_first;
3872 fl->l_pid = (pid_t)0;
3873 fl->l_type = lop->nfslo_type;
3874 error = -1; /* no RPC required */
3875 } else if (dp != NULL && ((dp->nfsdl_flags & NFSCLDL_WRITE) ||
3876 fl->l_type == F_RDLCK)) {
3878 * The delegation ensures that there isn't a conflicting
3879 		 * lock on the server, so return -1 to indicate an RPC isn't required.
3882 fl->l_type = F_UNLCK;
3890 * Handle Recall of a delegation.
3891 * The clp must be exclusively locked when this is called.
3894 nfscl_recalldeleg(struct nfsclclient *clp, struct nfsmount *nmp,
3895 struct nfscldeleg *dp, vnode_t vp, struct ucred *cred, NFSPROC_T *p,
3896 int called_from_renewthread)
3898 struct nfsclowner *owp, *lowp, *nowp;
3899 struct nfsclopen *op, *lop;
3900 struct nfscllockowner *lp;
3901 struct nfscllock *lckp;
3903 int error = 0, ret, gotvp = 0;
3907 * First, get a vnode for the file. This is needed to do RPCs.
3909 ret = nfscl_ngetreopen(nmp->nm_mountp, dp->nfsdl_fh,
3910 dp->nfsdl_fhlen, p, &np);
3913 		 * File isn't open, so nothing to move over to the server.
3923 dp->nfsdl_flags &= ~NFSCLDL_MODTIMESET;
3926 * Ok, if it's a write delegation, flush data to the server, so
3927 * that close/open consistency is retained.
3931 if ((dp->nfsdl_flags & NFSCLDL_WRITE) && (np->n_flag & NMODIFIED)) {
3932 np->n_flag |= NDELEGRECALL;
3934 ret = ncl_flush(vp, MNT_WAIT, p, 1, called_from_renewthread);
3936 np->n_flag &= ~NDELEGRECALL;
3938 NFSINVALATTRCACHE(np);
3940 if (ret == EIO && called_from_renewthread != 0) {
3942 * If the flush failed with EIO for the renew thread,
3943 			 * return now, so that the dirty buffer will be flushed later.
3952 * Now, for each openowner with opens issued locally, move them
3953 * over to state against the server.
3955 LIST_FOREACH(lowp, &dp->nfsdl_owner, nfsow_list) {
3956 lop = LIST_FIRST(&lowp->nfsow_open);
3958 if (LIST_NEXT(lop, nfso_list) != NULL)
3959 panic("nfsdlg mult opens");
3961 * Look for the same openowner against the server.
3963 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3964 if (!NFSBCMP(lowp->nfsow_owner,
3965 owp->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
3966 newnfs_copycred(&dp->nfsdl_cred, cred);
3967 ret = nfscl_moveopen(vp, clp, nmp, lop,
3969 if (ret == NFSERR_STALECLIENTID ||
3970 ret == NFSERR_STALEDONTRECOVER ||
3971 ret == NFSERR_BADSESSION) {
3977 nfscl_freeopen(lop, 1);
3986 			 * If no openowner found, create one and get an open for it.
3991 sizeof (struct nfsclowner), M_NFSCLOWNER,
3993 nfscl_newopen(clp, NULL, &owp, &nowp, &op,
3994 NULL, lowp->nfsow_owner, dp->nfsdl_fh,
3995 dp->nfsdl_fhlen, NULL, NULL);
3996 newnfs_copycred(&dp->nfsdl_cred, cred);
3997 ret = nfscl_moveopen(vp, clp, nmp, lop,
4000 nfscl_freeopenowner(owp, 0);
4001 if (ret == NFSERR_STALECLIENTID ||
4002 ret == NFSERR_STALEDONTRECOVER ||
4003 ret == NFSERR_BADSESSION) {
4009 nfscl_freeopen(lop, 1);
4019 * Now, get byte range locks for any locks done locally.
4021 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
4022 LIST_FOREACH(lckp, &lp->nfsl_lock, nfslo_list) {
4023 newnfs_copycred(&dp->nfsdl_cred, cred);
4024 ret = nfscl_relock(vp, clp, nmp, lp, lckp, cred, p);
4025 if (ret == NFSERR_STALESTATEID ||
4026 ret == NFSERR_STALEDONTRECOVER ||
4027 ret == NFSERR_STALECLIENTID ||
4028 ret == NFSERR_BADSESSION) {
4043 * Move a locally issued open over to an owner on the state list.
4044 * SIDE EFFECT: If it needs to sleep (do an rpc), it unlocks clstate and
4045 * returns with it unlocked.
4048 nfscl_moveopen(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
4049 struct nfsclopen *lop, struct nfsclowner *owp, struct nfscldeleg *dp,
4050 struct ucred *cred, NFSPROC_T *p)
4052 struct nfsclopen *op, *nop;
4053 struct nfscldeleg *ndp;
4055 int error = 0, newone;
4058 	 * First, look for an appropriate open. If found, just increment the opencnt in it.
4061 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
4062 if ((op->nfso_mode & lop->nfso_mode) == lop->nfso_mode &&
4063 op->nfso_fhlen == lop->nfso_fhlen &&
4064 !NFSBCMP(op->nfso_fh, lop->nfso_fh, op->nfso_fhlen)) {
4065 op->nfso_opencnt += lop->nfso_opencnt;
4066 nfscl_freeopen(lop, 1);
4071 /* No appropriate open, so we have to do one against the server. */
4073 nop = malloc(sizeof (struct nfsclopen) +
4074 lop->nfso_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
4076 nfscl_newopen(clp, NULL, &owp, NULL, &op, &nop, owp->nfsow_owner,
4077 lop->nfso_fh, lop->nfso_fhlen, cred, &newone);
4079 error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data, np->n_v4->n4_fhlen,
4080 lop->nfso_fh, lop->nfso_fhlen, lop->nfso_mode, op,
4081 NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, &ndp, 0, 0, cred, p);
4084 nfscl_freeopen(op, 0);
4086 op->nfso_mode |= lop->nfso_mode;
4087 op->nfso_opencnt += lop->nfso_opencnt;
4088 nfscl_freeopen(lop, 1);
4091 free(nop, M_NFSCLOPEN);
4094 * What should I do with the returned delegation, since the
4095 		 * delegation is being recalled? For now, just printf and throw it away.
4098 printf("Moveopen returned deleg\n");
4099 free(ndp, M_NFSCLDELEG);
4105 * Recall all delegations on this client.
4108 nfscl_totalrecall(struct nfsclclient *clp)
4110 struct nfscldeleg *dp;
4112 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
4113 if ((dp->nfsdl_flags & NFSCLDL_DELEGRET) == 0)
4114 dp->nfsdl_flags |= NFSCLDL_RECALL;
4119 * Relock byte ranges. Called for delegation recall and state expiry.
4122 nfscl_relock(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
4123 struct nfscllockowner *lp, struct nfscllock *lop, struct ucred *cred,
4126 struct nfscllockowner *nlp;
4129 int error, newone, donelocally;
4131 off = lop->nfslo_first;
4132 len = lop->nfslo_end - lop->nfslo_first;
4133 error = nfscl_getbytelock(vp, off, len, lop->nfslo_type, cred, p,
4134 clp, 1, NULL, lp->nfsl_lockflags, lp->nfsl_owner,
4135 lp->nfsl_openowner, &nlp, &newone, &donelocally);
4136 if (error || donelocally)
4138 nfhp = VTONFS(vp)->n_fhp;
4139 error = nfscl_trylock(nmp, vp, nfhp->nfh_fh,
4140 nfhp->nfh_len, nlp, newone, 0, off,
4141 len, lop->nfslo_type, cred, p);
4143 nfscl_freelockowner(nlp, 0);
4148 * Called to re-open a file. Basically get a vnode for the file handle
4149 * and then call nfsrpc_openrpc() to do the rest.
4152 nfsrpc_reopen(struct nfsmount *nmp, u_int8_t *fhp, int fhlen,
4153 u_int32_t mode, struct nfsclopen *op, struct nfscldeleg **dpp,
4154 struct ucred *cred, NFSPROC_T *p)
4160 error = nfscl_ngetreopen(nmp->nm_mountp, fhp, fhlen, p, &np);
4164 if (np->n_v4 != NULL) {
4165 error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data,
4166 np->n_v4->n4_fhlen, fhp, fhlen, mode, op,
4167 NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, dpp, 0, 0,
4177 * Try an open against the server. Just call nfsrpc_openrpc(), retrying while
4178 * NFSERR_DELAY. Also, try system credentials, if the passed in credentials fail.
4182 nfscl_tryopen(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen,
4183 u_int8_t *newfhp, int newfhlen, u_int32_t mode, struct nfsclopen *op,
4184 u_int8_t *name, int namelen, struct nfscldeleg **ndpp,
4185 int reclaim, u_int32_t delegtype, struct ucred *cred, NFSPROC_T *p)
4190 error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, newfhlen,
4191 mode, op, name, namelen, ndpp, reclaim, delegtype, cred, p,
4193 if (error == NFSERR_DELAY)
4194 (void) nfs_catnap(PZERO, error, "nfstryop");
4195 } while (error == NFSERR_DELAY);
4196 if (error == EAUTH || error == EACCES) {
4197 /* Try again using system credentials */
4198 newnfs_setroot(cred);
4200 error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp,
4201 newfhlen, mode, op, name, namelen, ndpp, reclaim,
4202 delegtype, cred, p, 1, 0);
4203 if (error == NFSERR_DELAY)
4204 (void) nfs_catnap(PZERO, error, "nfstryop");
4205 } while (error == NFSERR_DELAY);
4211 * Try a byte range lock. Just loop on nfsrpc_lock() while it returns
4212 * NFSERR_DELAY. Also, retry with system credentials, if the provided ones fail.
4216 nfscl_trylock(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp,
4217 int fhlen, struct nfscllockowner *nlp, int newone, int reclaim,
4218 u_int64_t off, u_int64_t len, short type, struct ucred *cred, NFSPROC_T *p)
4220 struct nfsrv_descript nfsd, *nd = &nfsd;
4224 error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, newone,
4225 reclaim, off, len, type, cred, p, 0);
4226 if (!error && nd->nd_repstat == NFSERR_DELAY)
4227 (void) nfs_catnap(PZERO, (int)nd->nd_repstat,
4229 } while (!error && nd->nd_repstat == NFSERR_DELAY);
4231 error = nd->nd_repstat;
4232 if (error == EAUTH || error == EACCES) {
4233 /* Try again using root credentials */
4234 newnfs_setroot(cred);
4236 error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp,
4237 newone, reclaim, off, len, type, cred, p, 1);
4238 if (!error && nd->nd_repstat == NFSERR_DELAY)
4239 (void) nfs_catnap(PZERO, (int)nd->nd_repstat,
4241 } while (!error && nd->nd_repstat == NFSERR_DELAY);
4243 error = nd->nd_repstat;
4249 * Try a delegreturn against the server. Just call nfsrpc_delegreturn(),
4250 * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in ones fail.
4254 nfscl_trydelegreturn(struct nfscldeleg *dp, struct ucred *cred,
4255 struct nfsmount *nmp, NFSPROC_T *p)
4260 error = nfsrpc_delegreturn(dp, cred, nmp, p, 0);
4261 if (error == NFSERR_DELAY)
4262 (void) nfs_catnap(PZERO, error, "nfstrydp");
4263 } while (error == NFSERR_DELAY);
4264 if (error == EAUTH || error == EACCES) {
4265 /* Try again using system credentials */
4266 newnfs_setroot(cred);
4268 error = nfsrpc_delegreturn(dp, cred, nmp, p, 1);
4269 if (error == NFSERR_DELAY)
4270 (void) nfs_catnap(PZERO, error, "nfstrydp");
4271 } while (error == NFSERR_DELAY);
4277 * Try a close against the server. Just call nfsrpc_closerpc(),
4278 * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in ones fail.
4282 nfscl_tryclose(struct nfsclopen *op, struct ucred *cred,
4283 struct nfsmount *nmp, NFSPROC_T *p)
4285 struct nfsrv_descript nfsd, *nd = &nfsd;
4289 error = nfsrpc_closerpc(nd, nmp, op, cred, p, 0);
4290 if (error == NFSERR_DELAY)
4291 (void) nfs_catnap(PZERO, error, "nfstrycl");
4292 } while (error == NFSERR_DELAY);
4293 if (error == EAUTH || error == EACCES) {
4294 /* Try again using system credentials */
4295 newnfs_setroot(cred);
4297 error = nfsrpc_closerpc(nd, nmp, op, cred, p, 1);
4298 if (error == NFSERR_DELAY)
4299 (void) nfs_catnap(PZERO, error, "nfstrycl");
4300 } while (error == NFSERR_DELAY);
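/*
 * The nfscl_try*() helpers above all share one shape: loop on the RPC
 * while the server replies NFSERR_DELAY (napping between attempts), then
 * retry once more with system credentials on EAUTH/EACCES. A condensed
 * sketch of that pattern ("do_rpc" is a hypothetical callback, not a
 * function in this file):
 */
#if 0
static int
nfscl_tryrpc(int (*do_rpc)(struct ucred *, int), struct ucred *cred,
    const char *wmesg)
{
	int error;

	do {
		error = do_rpc(cred, 0);
		if (error == NFSERR_DELAY)
			(void) nfs_catnap(PZERO, error, wmesg);
	} while (error == NFSERR_DELAY);
	if (error == EAUTH || error == EACCES) {
		/* Try again using system credentials. */
		newnfs_setroot(cred);
		do {
			error = do_rpc(cred, 1);
			if (error == NFSERR_DELAY)
				(void) nfs_catnap(PZERO, error, wmesg);
		} while (error == NFSERR_DELAY);
	}
	return (error);
}
#endif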
4306 * Decide if a delegation on a file permits close without flushing writes
4307 * to the server. This might be a big performance win in some environments.
4308 * (Not useful until the client does caching on local stable storage.)
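 * (Summary of the test below: the flush may be skipped when a write
 * delegation is held that is not being recalled or returned, and either
 * the file size is within nfsdl_sizelimit or the mount does not require
 * strict RFC 3530 handling.)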
4311 nfscl_mustflush(vnode_t vp)
4313 struct nfsclclient *clp;
4314 struct nfscldeleg *dp;
4316 struct nfsmount *nmp;
4319 nmp = VFSTONFS(vnode_mount(vp));
4320 if (!NFSHASNFSV4(nmp))
4323 clp = nfscl_findcl(nmp);
4328 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4329 if (dp != NULL && (dp->nfsdl_flags &
4330 (NFSCLDL_WRITE | NFSCLDL_RECALL | NFSCLDL_DELEGRET)) ==
4332 (dp->nfsdl_sizelimit >= np->n_size ||
4333 !NFSHASSTRICT3530(nmp))) {
4342 * See if a (write) delegation exists for this file.
4345 nfscl_nodeleg(vnode_t vp, int writedeleg)
4347 struct nfsclclient *clp;
4348 struct nfscldeleg *dp;
4350 struct nfsmount *nmp;
4353 nmp = VFSTONFS(vnode_mount(vp));
4354 if (!NFSHASNFSV4(nmp))
4357 clp = nfscl_findcl(nmp);
4362 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4364 (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == 0 &&
4365 (writedeleg == 0 || (dp->nfsdl_flags & NFSCLDL_WRITE) ==
4375 * Look for an associated delegation that should be DelegReturned.
4378 nfscl_removedeleg(vnode_t vp, NFSPROC_T *p, nfsv4stateid_t *stp)
4380 struct nfsclclient *clp;
4381 struct nfscldeleg *dp;
4382 struct nfsclowner *owp;
4383 struct nfscllockowner *lp;
4384 struct nfsmount *nmp;
4387 int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;
4389 nmp = VFSTONFS(vnode_mount(vp));
4393 * Loop around waiting for:
4394 * - outstanding I/O operations on delegations to complete
4395 	 * - for a delegation on vp that has state, lock the client and do a recall
4397 * - return delegation with no state
4400 clp = nfscl_findcl(nmp);
4405 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
4406 np->n_fhp->nfh_len);
4409 * Wait for outstanding I/O ops to be done.
4411 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
4413 nfsv4_unlock(&clp->nfsc_lock, 0);
4416 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
4417 (void) nfsmsleep(&dp->nfsdl_rwlock,
4418 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
4422 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
4423 if (!LIST_EMPTY(&owp->nfsow_open)) {
4429 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
4430 if (!LIST_EMPTY(&lp->nfsl_lock)) {
4436 if (needsrecall && !triedrecall) {
4437 dp->nfsdl_flags |= NFSCLDL_DELEGRET;
4440 igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
4441 &islept, NFSCLSTATEMUTEXPTR, NULL);
4448 cred = newnfs_getcred();
4449 newnfs_copycred(&dp->nfsdl_cred, cred);
4450 (void) nfscl_recalldeleg(clp, nmp, dp, vp, cred, p, 0);
4454 nfsv4_unlock(&clp->nfsc_lock, 0);
4458 *stp = dp->nfsdl_stateid;
4460 nfscl_cleandeleg(dp);
4461 nfscl_freedeleg(&clp->nfsc_deleg, dp);
4464 nfsv4_unlock(&clp->nfsc_lock, 0);
4471 * Look for associated delegation(s) that should be DelegReturned.
4474 nfscl_renamedeleg(vnode_t fvp, nfsv4stateid_t *fstp, int *gotfdp, vnode_t tvp,
4475 nfsv4stateid_t *tstp, int *gottdp, NFSPROC_T *p)
4477 struct nfsclclient *clp;
4478 struct nfscldeleg *dp;
4479 struct nfsclowner *owp;
4480 struct nfscllockowner *lp;
4481 struct nfsmount *nmp;
4484 int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;
4486 nmp = VFSTONFS(vnode_mount(fvp));
4491 * Loop around waiting for:
4492 * - outstanding I/O operations on delegations to complete
4493 	 * - for a delegation on fvp that has state, lock the client and do a recall
4495 * - return delegation(s) with no state.
4498 clp = nfscl_findcl(nmp);
4504 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
4505 np->n_fhp->nfh_len);
4506 if (dp != NULL && *gotfdp == 0) {
4508 * Wait for outstanding I/O ops to be done.
4510 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
4512 nfsv4_unlock(&clp->nfsc_lock, 0);
4515 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
4516 (void) nfsmsleep(&dp->nfsdl_rwlock,
4517 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
4521 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
4522 if (!LIST_EMPTY(&owp->nfsow_open)) {
4528 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
4529 if (!LIST_EMPTY(&lp->nfsl_lock)) {
4535 if (needsrecall && !triedrecall) {
4536 dp->nfsdl_flags |= NFSCLDL_DELEGRET;
4539 igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
4540 &islept, NFSCLSTATEMUTEXPTR, NULL);
4547 cred = newnfs_getcred();
4548 newnfs_copycred(&dp->nfsdl_cred, cred);
4549 (void) nfscl_recalldeleg(clp, nmp, dp, fvp, cred, p, 0);
4553 nfsv4_unlock(&clp->nfsc_lock, 0);
4557 *fstp = dp->nfsdl_stateid;
4560 nfscl_cleandeleg(dp);
4561 nfscl_freedeleg(&clp->nfsc_deleg, dp);
4564 nfsv4_unlock(&clp->nfsc_lock, 0);
4569 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
4570 np->n_fhp->nfh_len);
4571 if (dp != NULL && *gottdp == 0) {
4573 * Wait for outstanding I/O ops to be done.
4575 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
4576 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
4577 (void) nfsmsleep(&dp->nfsdl_rwlock,
4578 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
4581 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
4582 if (!LIST_EMPTY(&owp->nfsow_open)) {
4587 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
4588 if (!LIST_EMPTY(&lp->nfsl_lock)) {
4593 *tstp = dp->nfsdl_stateid;
4596 nfscl_cleandeleg(dp);
4597 nfscl_freedeleg(&clp->nfsc_deleg, dp);
4606 * Get a reference on the clientid associated with the mount point.
4607 * Return 1 if success, 0 otherwise.
4610 nfscl_getref(struct nfsmount *nmp)
4612 struct nfsclclient *clp;
4615 clp = nfscl_findcl(nmp);
4620 nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, NULL);
4626 * Release a reference on a clientid acquired with the above call.
4629 nfscl_relref(struct nfsmount *nmp)
4631 struct nfsclclient *clp;
4634 clp = nfscl_findcl(nmp);
4639 nfsv4_relref(&clp->nfsc_lock);
4644 * Save the size attribute in the delegation, since the nfsnode is going away.
4648 nfscl_reclaimnode(vnode_t vp)
4650 struct nfsclclient *clp;
4651 struct nfscldeleg *dp;
4652 struct nfsnode *np = VTONFS(vp);
4653 struct nfsmount *nmp;
4655 nmp = VFSTONFS(vnode_mount(vp));
4656 if (!NFSHASNFSV4(nmp))
4659 clp = nfscl_findcl(nmp);
4664 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4665 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
4666 dp->nfsdl_size = np->n_size;
4671 * Get the saved size attribute in the delegation, since it is a
4672 * newly allocated nfsnode.
4675 nfscl_newnode(vnode_t vp)
4677 struct nfsclclient *clp;
4678 struct nfscldeleg *dp;
4679 struct nfsnode *np = VTONFS(vp);
4680 struct nfsmount *nmp;
4682 nmp = VFSTONFS(vnode_mount(vp));
4683 if (!NFSHASNFSV4(nmp))
4686 clp = nfscl_findcl(nmp);
4691 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4692 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
4693 np->n_size = dp->nfsdl_size;
4698 * If there is a valid write delegation for this file, set the modtime
4699 * to the local clock time.
4702 nfscl_delegmodtime(vnode_t vp)
4704 struct nfsclclient *clp;
4705 struct nfscldeleg *dp;
4706 struct nfsnode *np = VTONFS(vp);
4707 struct nfsmount *nmp;
4709 nmp = VFSTONFS(vnode_mount(vp));
4710 if (!NFSHASNFSV4(nmp))
4713 clp = nfscl_findcl(nmp);
4718 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4719 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) {
4720 nanotime(&dp->nfsdl_modtime);
4721 dp->nfsdl_flags |= NFSCLDL_MODTIMESET;
4727 * If there is a valid write delegation for this file with a modtime set,
4728 * put that modtime in mtime.
4731 nfscl_deleggetmodtime(vnode_t vp, struct timespec *mtime)
4733 struct nfsclclient *clp;
4734 struct nfscldeleg *dp;
4735 struct nfsnode *np = VTONFS(vp);
4736 struct nfsmount *nmp;
4738 nmp = VFSTONFS(vnode_mount(vp));
4739 if (!NFSHASNFSV4(nmp))
4742 clp = nfscl_findcl(nmp);
4747 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4749 (dp->nfsdl_flags & (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) ==
4750 (NFSCLDL_WRITE | NFSCLDL_MODTIMESET))
4751 *mtime = dp->nfsdl_modtime;
4756 nfscl_errmap(struct nfsrv_descript *nd, u_int32_t minorvers)
4758 short *defaulterrp, *errp;
4760 if (!nd->nd_repstat)
4762 if (nd->nd_procnum == NFSPROC_NOOP)
4763 return (txdr_unsigned(nd->nd_repstat & 0xffff));
4764 if (nd->nd_repstat == EBADRPC)
4765 return (txdr_unsigned(NFSERR_BADXDR));
4766 if (nd->nd_repstat == NFSERR_MINORVERMISMATCH ||
4767 nd->nd_repstat == NFSERR_OPILLEGAL)
4768 return (txdr_unsigned(nd->nd_repstat));
4769 if (nd->nd_repstat >= NFSERR_BADIOMODE && nd->nd_repstat < 20000 &&
4770 minorvers > NFSV4_MINORVERSION) {
4771 /* NFSv4.n error. */
4772 return (txdr_unsigned(nd->nd_repstat));
4774 if (nd->nd_procnum < NFSV4OP_CBNOPS)
4775 errp = defaulterrp = nfscl_cberrmap[nd->nd_procnum];
4777 return (txdr_unsigned(nd->nd_repstat));
4779 if (*errp == (short)nd->nd_repstat)
4780 return (txdr_unsigned(nd->nd_repstat));
4781 return (txdr_unsigned(*defaulterrp));
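/*
 * (Note: every status returned above has already been run through
 * txdr_unsigned(), so callers can store the result of nfscl_errmap()
 * directly into the XDR reply, as the "*repp = nfscl_errmap(...)"
 * call sites above do.)
 */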
4785 * Called to find/add a layout to a client.
4786 * This function returns the layout with a refcnt (shared lock) upon
4787 * success (returns 0) or with no lock/refcnt on the layout when an
4788 * error is returned.
4789 * If a layout is passed in via lypp, it is locked (exclusively locked).
4792 nfscl_layout(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen,
4793 nfsv4stateid_t *stateidp, int layouttype, int retonclose,
4794 struct nfsclflayouthead *fhlp, struct nfscllayout **lypp,
4795 struct ucred *cred, NFSPROC_T *p)
4797 struct nfsclclient *clp;
4798 struct nfscllayout *lyp, *tlyp;
4799 struct nfsclflayout *flp;
4800 struct nfsnode *np = VTONFS(vp);
4802 int layout_passed_in;
4804 mp = nmp->nm_mountp;
4805 layout_passed_in = 1;
4809 layout_passed_in = 0;
4810 tlyp = malloc(sizeof(*tlyp) + fhlen - 1, M_NFSLAYOUT,
4817 if (layout_passed_in != 0)
4818 nfsv4_unlock(&lyp->nfsly_lock, 0);
4821 free(tlyp, M_NFSLAYOUT);
4826 * Although no lyp was passed in, another thread might have
4827 		 * allocated one. If one is found, just increment its ref
4828 * count and return it.
4830 lyp = nfscl_findlayout(clp, fhp, fhlen);
4834 lyp->nfsly_stateid.seqid = stateidp->seqid;
4835 lyp->nfsly_stateid.other[0] = stateidp->other[0];
4836 lyp->nfsly_stateid.other[1] = stateidp->other[1];
4837 lyp->nfsly_stateid.other[2] = stateidp->other[2];
4838 lyp->nfsly_lastbyte = 0;
4839 LIST_INIT(&lyp->nfsly_flayread);
4840 LIST_INIT(&lyp->nfsly_flayrw);
4841 LIST_INIT(&lyp->nfsly_recall);
4842 lyp->nfsly_filesid[0] = np->n_vattr.na_filesid[0];
4843 lyp->nfsly_filesid[1] = np->n_vattr.na_filesid[1];
4844 lyp->nfsly_clp = clp;
4845 if (layouttype == NFSLAYOUT_FLEXFILE)
4846 lyp->nfsly_flags = NFSLY_FLEXFILE;
4848 lyp->nfsly_flags = NFSLY_FILES;
4849 if (retonclose != 0)
4850 lyp->nfsly_flags |= NFSLY_RETONCLOSE;
4851 lyp->nfsly_fhlen = fhlen;
4852 NFSBCOPY(fhp, lyp->nfsly_fh, fhlen);
4853 TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
4854 LIST_INSERT_HEAD(NFSCLLAYOUTHASH(clp, fhp, fhlen), lyp,
4856 lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
4859 if (retonclose != 0)
4860 lyp->nfsly_flags |= NFSLY_RETONCLOSE;
4861 TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list);
4862 TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
4863 lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
4865 nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
4866 if (NFSCL_FORCEDISM(mp)) {
4869 free(tlyp, M_NFSLAYOUT);
4874 lyp->nfsly_stateid.seqid = stateidp->seqid;
4876 /* Merge the new list of File Layouts into the list. */
4877 flp = LIST_FIRST(fhlp);
4879 if (flp->nfsfl_iomode == NFSLAYOUTIOMODE_READ)
4880 nfscl_mergeflayouts(&lyp->nfsly_flayread, fhlp);
4882 nfscl_mergeflayouts(&lyp->nfsly_flayrw, fhlp);
4884 if (layout_passed_in != 0)
4885 nfsv4_unlock(&lyp->nfsly_lock, 1);
4888 free(tlyp, M_NFSLAYOUT);
/*
 * Search for a layout by MDS file handle.
 * If one is found, it is returned with a refcnt (shared lock) when
 * retflpp is non-NULL, or exclusively locked when retflpp is NULL.
 */
struct nfscllayout *
nfscl_getlayout(struct nfsclclient *clp, uint8_t *fhp, int fhlen,
    uint64_t off, struct nfsclflayout **retflpp, int *recalledp)
{
	struct nfscllayout *lyp;
	mount_t mp;
	int error, igotlock;

	mp = clp->nfsc_nmp->nm_mountp;
	*recalledp = 0;
	*retflpp = NULL;
	NFSLOCKCLSTATE();
	lyp = nfscl_findlayout(clp, fhp, fhlen);
	if (lyp != NULL) {
		if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) {
			TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list);
			TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
			lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
			error = nfscl_findlayoutforio(lyp, off,
			    NFSV4OPEN_ACCESSREAD, retflpp);
			if (error == 0)
				nfsv4_getref(&lyp->nfsly_lock, NULL,
				    NFSCLSTATEMUTEXPTR, mp);
			else {
				do {
					igotlock = nfsv4_lock(&lyp->nfsly_lock,
					    1, NULL, NFSCLSTATEMUTEXPTR, mp);
				} while (igotlock == 0 && !NFSCL_FORCEDISM(mp));
				*retflpp = NULL;
			}
			if (NFSCL_FORCEDISM(mp)) {
				lyp = NULL;
				*recalledp = 1;
			}
		} else {
			lyp = NULL;
			*recalledp = 1;
		}
	}
	NFSUNLOCKCLSTATE();
	return (lyp);
}

/*
 * Search for a layout by MDS file handle. If one is found, mark it to be
 * recalled, if it is already marked "return on close".
 */
static void
nfscl_retoncloselayout(vnode_t vp, struct nfsclclient *clp, uint8_t *fhp,
    int fhlen, struct nfsclrecalllayout **recallpp)
{
	struct nfscllayout *lyp;
	uint32_t iomode;

	if (vp->v_type != VREG || !NFSHASPNFS(VFSTONFS(vnode_mount(vp))) ||
	    nfscl_enablecallb == 0 || nfs_numnfscbd == 0 ||
	    (VTONFS(vp)->n_flag & NNOLAYOUT) != 0)
		return;
	lyp = nfscl_findlayout(clp, fhp, fhlen);
	if (lyp != NULL && (lyp->nfsly_flags & (NFSLY_RETONCLOSE |
	    NFSLY_RECALL)) == NFSLY_RETONCLOSE) {
		iomode = 0;
		if (!LIST_EMPTY(&lyp->nfsly_flayread))
			iomode |= NFSLAYOUTIOMODE_READ;
		if (!LIST_EMPTY(&lyp->nfsly_flayrw))
			iomode |= NFSLAYOUTIOMODE_RW;
		(void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, lyp, iomode,
		    0, UINT64_MAX, lyp->nfsly_stateid.seqid, 0, 0, NULL,
		    *recallpp);
		NFSCL_DEBUG(4, "retoncls recall iomode=%d\n", iomode);
		*recallpp = NULL;
	}
}

/*
 * Mark the layout to be recalled and with an error.
 * Also, disable the dsp from further use.
 */
void
nfscl_dserr(uint32_t op, uint32_t stat, struct nfscldevinfo *dp,
    struct nfscllayout *lyp, struct nfsclds *dsp)
{
	struct nfsclrecalllayout *recallp;
	uint32_t iomode;

	printf("DS being disabled, error=%d\n", stat);
	/* Set up the return of the layout. */
	recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_WAITOK);
	iomode = 0;
	NFSLOCKCLSTATE();
	if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) {
		if (!LIST_EMPTY(&lyp->nfsly_flayread))
			iomode |= NFSLAYOUTIOMODE_READ;
		if (!LIST_EMPTY(&lyp->nfsly_flayrw))
			iomode |= NFSLAYOUTIOMODE_RW;
		(void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, lyp, iomode,
		    0, UINT64_MAX, lyp->nfsly_stateid.seqid, stat, op,
		    dp->nfsdi_deviceid, recallp);
		NFSUNLOCKCLSTATE();
		NFSCL_DEBUG(4, "nfscl_dserr recall iomode=%d\n", iomode);
	} else {
		NFSUNLOCKCLSTATE();
		free(recallp, M_NFSLAYRECALL);
	}

	/* And shut the TCP connection down. */
	nfscl_cancelreqs(dsp);
}

/*
 * Cancel all RPCs for this "dsp" by closing the connection.
 * Also, mark the session as defunct.
 * If NFSCLDS_SAMECONN is set, the connection is shared with other DSs and
 * cannot be shut down.
 */
void
nfscl_cancelreqs(struct nfsclds *dsp)
{
	struct __rpc_client *cl;
	static int non_event;

	NFSLOCKDS(dsp);
	if ((dsp->nfsclds_flags & (NFSCLDS_CLOSED | NFSCLDS_SAMECONN)) == 0 &&
	    dsp->nfsclds_sockp != NULL &&
	    dsp->nfsclds_sockp->nr_client != NULL) {
		dsp->nfsclds_flags |= NFSCLDS_CLOSED;
		cl = dsp->nfsclds_sockp->nr_client;
		dsp->nfsclds_sess.nfsess_defunct = 1;
		NFSUNLOCKDS(dsp);
		CLNT_CLOSE(cl);
		/*
		 * This 1sec sleep is done to reduce the number of reconnect
		 * attempts made on the DS while it has failed.
		 */
		tsleep(&non_event, PVFS, "ndscls", hz);
		return;
	}
	NFSUNLOCKDS(dsp);
}

/*
 * Dereference a layout.
 */
void
nfscl_rellayout(struct nfscllayout *lyp, int exclocked)
{

	NFSLOCKCLSTATE();
	if (exclocked != 0)
		nfsv4_unlock(&lyp->nfsly_lock, 0);
	else
		nfsv4_relref(&lyp->nfsly_lock);
	NFSUNLOCKCLSTATE();
}

/*
 * Search for a devinfo by deviceid. If one is found, return it after
 * acquiring a reference count on it.
 */
struct nfscldevinfo *
nfscl_getdevinfo(struct nfsclclient *clp, uint8_t *deviceid,
    struct nfscldevinfo *dip)
{

	NFSLOCKCLSTATE();
	if (dip == NULL)
		dip = nfscl_finddevinfo(clp, deviceid);
	if (dip != NULL)
		dip->nfsdi_refcnt++;
	NFSUNLOCKCLSTATE();
	return (dip);
}

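/*
 * Note: a non-NULL return from nfscl_getdevinfo() holds a reference
 * (nfsdi_refcnt) that the caller must drop when done, normally via
 * nfscl_reldevinfo() below.
 */
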
/*
 * Dereference a devinfo structure.
 */
static void
nfscl_reldevinfo_locked(struct nfscldevinfo *dip)
{

	dip->nfsdi_refcnt--;
	if (dip->nfsdi_refcnt == 0)
		wakeup(&dip->nfsdi_refcnt);
}

/*
 * Dereference a devinfo structure.
 */
void
nfscl_reldevinfo(struct nfscldevinfo *dip)
{

	NFSLOCKCLSTATE();
	nfscl_reldevinfo_locked(dip);
	NFSUNLOCKCLSTATE();
}

/*
 * Find a layout for this file handle. Return NULL upon failure.
 */
static struct nfscllayout *
nfscl_findlayout(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
{
	struct nfscllayout *lyp;

	LIST_FOREACH(lyp, NFSCLLAYOUTHASH(clp, fhp, fhlen), nfsly_hash)
		if (lyp->nfsly_fhlen == fhlen &&
		    !NFSBCMP(lyp->nfsly_fh, fhp, fhlen))
			break;
	return (lyp);
}

/*
 * Find a devinfo for this deviceid. Return NULL upon failure.
 */
static struct nfscldevinfo *
nfscl_finddevinfo(struct nfsclclient *clp, uint8_t *deviceid)
{
	struct nfscldevinfo *dip;

	LIST_FOREACH(dip, &clp->nfsc_devinfo, nfsdi_list)
		if (NFSBCMP(dip->nfsdi_deviceid, deviceid, NFSX_V4DEVICEID)
		    == 0)
			break;
	return (dip);
}

/*
 * Merge the new file layout list into the main one, maintaining it in
 * increasing offset order.
 */
static void
nfscl_mergeflayouts(struct nfsclflayouthead *fhlp,
    struct nfsclflayouthead *newfhlp)
{
	struct nfsclflayout *flp, *nflp, *prevflp, *tflp;

	flp = LIST_FIRST(fhlp);
	prevflp = NULL;
	LIST_FOREACH_SAFE(nflp, newfhlp, nfsfl_list, tflp) {
		while (flp != NULL && flp->nfsfl_off < nflp->nfsfl_off) {
			prevflp = flp;
			flp = LIST_NEXT(flp, nfsfl_list);
		}
		if (prevflp == NULL)
			LIST_INSERT_HEAD(fhlp, nflp, nfsfl_list);
		else
			LIST_INSERT_AFTER(prevflp, nflp, nfsfl_list);
		prevflp = nflp;
	}
}

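/*
 * Worked example for the merge above (illustrative offsets): merging
 * a new list with offset { 4096 } into a main list holding { 0, 8192 }
 * yields { 0, 4096, 8192 }, since each new entry is inserted after the
 * last existing entry whose nfsfl_off is smaller.
 */
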
/*
 * Add this nfscldevinfo to the client, if it doesn't already exist.
 * This function consumes the structure pointed at by dip, if not NULL.
 */
void
nfscl_adddevinfo(struct nfsmount *nmp, struct nfscldevinfo *dip, int ind,
    struct nfsclflayout *flp)
{
	struct nfsclclient *clp;
	struct nfscldevinfo *tdip;
	uint8_t *dev;

	NFSLOCKCLSTATE();
	clp = nmp->nm_clp;
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		if (dip != NULL)
			free(dip, M_NFSDEVINFO);
		return;
	}
	if ((flp->nfsfl_flags & NFSFL_FILE) != 0)
		dev = flp->nfsfl_dev;
	else
		dev = flp->nfsfl_ffm[ind].dev;
	tdip = nfscl_finddevinfo(clp, dev);
	if (tdip != NULL) {
		tdip->nfsdi_layoutrefs++;
		if ((flp->nfsfl_flags & NFSFL_FILE) != 0)
			flp->nfsfl_devp = tdip;
		else
			flp->nfsfl_ffm[ind].devp = tdip;
		nfscl_reldevinfo_locked(tdip);
		NFSUNLOCKCLSTATE();
		if (dip != NULL)
			free(dip, M_NFSDEVINFO);
		return;
	}
	if (dip != NULL) {
		LIST_INSERT_HEAD(&clp->nfsc_devinfo, dip, nfsdi_list);
		dip->nfsdi_layoutrefs = 1;
		if ((flp->nfsfl_flags & NFSFL_FILE) != 0)
			flp->nfsfl_devp = dip;
		else
			flp->nfsfl_ffm[ind].devp = dip;
	}
	NFSUNLOCKCLSTATE();
}

/*
 * Free up a layout structure and associated file layout structure(s).
 */
void
nfscl_freelayout(struct nfscllayout *layp)
{
	struct nfsclflayout *flp, *nflp;
	struct nfsclrecalllayout *rp, *nrp;

	LIST_FOREACH_SAFE(flp, &layp->nfsly_flayread, nfsfl_list, nflp) {
		LIST_REMOVE(flp, nfsfl_list);
		nfscl_freeflayout(flp);
	}
	LIST_FOREACH_SAFE(flp, &layp->nfsly_flayrw, nfsfl_list, nflp) {
		LIST_REMOVE(flp, nfsfl_list);
		nfscl_freeflayout(flp);
	}
	LIST_FOREACH_SAFE(rp, &layp->nfsly_recall, nfsrecly_list, nrp) {
		LIST_REMOVE(rp, nfsrecly_list);
		free(rp, M_NFSLAYRECALL);
	}
	nfscl_layoutcnt--;
	free(layp, M_NFSLAYOUT);
}

/*
 * Free up a file layout structure.
 */
void
nfscl_freeflayout(struct nfsclflayout *flp)
{
	int i, j;

	if ((flp->nfsfl_flags & NFSFL_FILE) != 0) {
		for (i = 0; i < flp->nfsfl_fhcnt; i++)
			free(flp->nfsfl_fh[i], M_NFSFH);
		if (flp->nfsfl_devp != NULL)
			flp->nfsfl_devp->nfsdi_layoutrefs--;
	}
	if ((flp->nfsfl_flags & NFSFL_FLEXFILE) != 0)
		for (i = 0; i < flp->nfsfl_mirrorcnt; i++) {
			for (j = 0; j < flp->nfsfl_ffm[i].fhcnt; j++)
				free(flp->nfsfl_ffm[i].fh[j], M_NFSFH);
			if (flp->nfsfl_ffm[i].devp != NULL)
				flp->nfsfl_ffm[i].devp->nfsdi_layoutrefs--;
		}
	free(flp, M_NFSFLAYOUT);
}

/*
 * Free up a file layout devinfo structure.
 */
void
nfscl_freedevinfo(struct nfscldevinfo *dip)
{

	free(dip, M_NFSDEVINFO);
}

/*
 * Mark any layouts that match as recalled.
 */
static int
nfscl_layoutrecall(int recalltype, struct nfscllayout *lyp, uint32_t iomode,
    uint64_t off, uint64_t len, uint32_t stateseqid, uint32_t stat, uint32_t op,
    char *devid, struct nfsclrecalllayout *recallp)
{
	struct nfsclrecalllayout *rp, *orp;

	recallp->nfsrecly_recalltype = recalltype;
	recallp->nfsrecly_iomode = iomode;
	recallp->nfsrecly_stateseqid = stateseqid;
	recallp->nfsrecly_off = off;
	recallp->nfsrecly_len = len;
	recallp->nfsrecly_stat = stat;
	recallp->nfsrecly_op = op;
	if (devid != NULL)
		NFSBCOPY(devid, recallp->nfsrecly_devid, NFSX_V4DEVICEID);
	/*
	 * Order the list as file returns first, followed by fsid and "all"
	 * returns, both in increasing stateseqid order.
	 * Note that the seqids wrap around, so 1 is after 0xffffffff.
	 * (I'm not sure this is correct because I find RFC5661 confusing
	 *  on this, but hopefully it will work ok.)
	 */
	orp = NULL;
	LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) {
		orp = rp;
		if ((recalltype == NFSLAYOUTRETURN_FILE &&
		     (rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE ||
		      nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) ||
		    (recalltype != NFSLAYOUTRETURN_FILE &&
		     rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE &&
		     nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) {
			LIST_INSERT_BEFORE(rp, recallp, nfsrecly_list);
			break;
		}

		/*
		 * Put any error return on all the file returns that will
		 * precede this one.
		 */
		if (rp->nfsrecly_recalltype == NFSLAYOUTRETURN_FILE &&
		    stat != 0 && rp->nfsrecly_stat == 0) {
			rp->nfsrecly_stat = stat;
			rp->nfsrecly_op = op;
			if (devid != NULL)
				NFSBCOPY(devid, rp->nfsrecly_devid,
				    NFSX_V4DEVICEID);
		}
	}
	if (rp == NULL) {
		if (orp == NULL)
			LIST_INSERT_HEAD(&lyp->nfsly_recall, recallp,
			    nfsrecly_list);
		else
			LIST_INSERT_AFTER(orp, recallp, nfsrecly_list);
	}
	lyp->nfsly_flags |= NFSLY_RECALL;
	wakeup(lyp->nfsly_clp);
	return (0);
}

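/*
 * Ordering example for the insertion above (hypothetical entries):
 * queueing a FILE return with stateseqid 5 onto a list holding
 * [ FILE seqid 3, FSID seqid 2 ] inserts it after the FILE entry and
 * before the FSID one, giving [ FILE 3, FILE 5, FSID 2 ], since FILE
 * returns sort first and, within a type, in wrap-aware seqid order.
 */
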
/*
 * Compare the two seqids for ordering. The trick is that the seqids can
 * wrap around from 0xffffffff->0, so check for the cases where one
 * has wrapped around.
 * Return 1 if seqid1 comes before seqid2, 0 otherwise.
 */
static int
nfscl_seq(uint32_t seqid1, uint32_t seqid2)
{

	if (seqid2 > seqid1 && (seqid2 - seqid1) >= 0x7fffffff)
		/* seqid2 has wrapped around. */
		return (0);
	if (seqid1 > seqid2 && (seqid1 - seqid2) >= 0x7fffffff)
		/* seqid1 has wrapped around. */
		return (1);
	if (seqid1 <= seqid2)
		return (1);
	return (0);
}

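/*
 * Examples of the wrap-around handling above:
 *	nfscl_seq(3, 5) == 1		 3 comes before 5
 *	nfscl_seq(0xffffffff, 1) == 1	 after a wrap, 1 follows 0xffffffff
 *	nfscl_seq(1, 0xffffffff) == 0	 here 1 is logically the later seqid
 */
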
/*
 * Do a layout return for each of the recalls.
 */
static void
nfscl_layoutreturn(struct nfsmount *nmp, struct nfscllayout *lyp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclrecalllayout *rp;
	nfsv4stateid_t stateid;
	int layouttype;

	NFSBCOPY(lyp->nfsly_stateid.other, stateid.other, NFSX_STATEIDOTHER);
	stateid.seqid = lyp->nfsly_stateid.seqid;
	if ((lyp->nfsly_flags & NFSLY_FILES) != 0)
		layouttype = NFSLAYOUT_NFSV4_1_FILES;
	else
		layouttype = NFSLAYOUT_FLEXFILE;
	LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) {
		(void)nfsrpc_layoutreturn(nmp, lyp->nfsly_fh,
		    lyp->nfsly_fhlen, 0, layouttype,
		    rp->nfsrecly_iomode, rp->nfsrecly_recalltype,
		    rp->nfsrecly_off, rp->nfsrecly_len,
		    &stateid, cred, p, rp->nfsrecly_stat, rp->nfsrecly_op,
		    rp->nfsrecly_devid);
	}
}

/*
 * Do the layout commit for a file layout.
 */
static void
nfscl_dolayoutcommit(struct nfsmount *nmp, struct nfscllayout *lyp,
    struct ucred *cred, NFSPROC_T *p)
{
	struct nfsclflayout *flp;
	uint64_t len;
	int error, layouttype;

	if ((lyp->nfsly_flags & NFSLY_FILES) != 0)
		layouttype = NFSLAYOUT_NFSV4_1_FILES;
	else
		layouttype = NFSLAYOUT_FLEXFILE;
	LIST_FOREACH(flp, &lyp->nfsly_flayrw, nfsfl_list) {
		if (layouttype == NFSLAYOUT_FLEXFILE &&
		    (flp->nfsfl_fflags & NFSFLEXFLAG_NO_LAYOUTCOMMIT) != 0) {
			NFSCL_DEBUG(4, "Flex file: no layoutcommit\n");
			/* If not supported, don't bother doing it. */
			NFSLOCKMNT(nmp);
			nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT;
			NFSUNLOCKMNT(nmp);
			break;
		} else if (flp->nfsfl_off <= lyp->nfsly_lastbyte) {
			len = flp->nfsfl_end - flp->nfsfl_off;
			error = nfsrpc_layoutcommit(nmp, lyp->nfsly_fh,
			    lyp->nfsly_fhlen, 0, flp->nfsfl_off, len,
			    lyp->nfsly_lastbyte, &lyp->nfsly_stateid,
			    layouttype, cred, p, NULL);
			NFSCL_DEBUG(4, "layoutcommit err=%d\n", error);
			if (error == NFSERR_NOTSUPP) {
				/* If not supported, don't bother doing it. */
				NFSLOCKMNT(nmp);
				nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT;
				NFSUNLOCKMNT(nmp);
				break;
			}
		}
	}
}

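/*
 * For example (illustrative numbers): with nfsly_lastbyte == 65535, a
 * rw file layout with nfsfl_off 0 and nfsfl_end 65536 is committed
 * with len 65536, while a segment starting at nfsfl_off 131072 is
 * skipped, since nothing at or beyond its offset has been written.
 */
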
/*
 * Commit all layouts for a file (vnode).
 */
int
nfscl_layoutcommit(vnode_t vp, NFSPROC_T *p)
{
	struct nfsclclient *clp;
	struct nfscllayout *lyp;
	struct nfsnode *np = VTONFS(vp);
	mount_t mp;
	struct nfsmount *nmp;

	mp = vnode_mount(vp);
	nmp = VFSTONFS(mp);
	if (NFSHASNOLAYOUTCOMMIT(nmp))
		return (0);
	NFSLOCKCLSTATE();
	clp = nmp->nm_clp;
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (EPERM);
	}
	lyp = nfscl_findlayout(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (lyp == NULL) {
		NFSUNLOCKCLSTATE();
		return (EPERM);
	}
	nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
	if (NFSCL_FORCEDISM(mp)) {
		NFSUNLOCKCLSTATE();
		return (EPERM);
	}
tryagain:
	if ((lyp->nfsly_flags & NFSLY_WRITTEN) != 0) {
		lyp->nfsly_flags &= ~NFSLY_WRITTEN;
		NFSUNLOCKCLSTATE();
		NFSCL_DEBUG(4, "do layoutcommit2\n");
		nfscl_dolayoutcommit(clp->nfsc_nmp, lyp, NFSPROCCRED(p), p);
		NFSLOCKCLSTATE();
		goto tryagain;
	}
	nfsv4_relref(&lyp->nfsly_lock);
	NFSUNLOCKCLSTATE();
	return (0);
}