1 /* $NetBSD: lockd_lock.c,v 1.5 2000/11/21 03:47:41 enami Exp $ */
4 * SPDX-License-Identifier: BSD-4-Clause
6 * Copyright (c) 2001 Andrew P. Lentvorski, Jr.
7 * Copyright (c) 2000 Manuel Bouyer.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
56 #include <sys/types.h>
58 #include <sys/socket.h>
59 #include <sys/param.h>
60 #include <sys/mount.h>
62 #include <rpcsvc/sm_inter.h>
63 #include <rpcsvc/nlm_prot.h>
64 #include "lockd_lock.h"
67 #define MAXOBJECTSIZE 64
68 #define MAXBUFFERSIZE 1024
71 * A set of utilities for managing file locking
73 * XXX: All locks are in a linked list, a better structure should be used
74 * to improve search/access efficiency.
77 /* struct describing a lock */
/*
 * NOTE(review): the "struct file_lock {" opener and closing "};" are
 * absent from this truncated listing (embedded source-line numbers 78
 * and 91+ are missing); fields are documented as-is.
 */
79 LIST_ENTRY(file_lock) nfslocklist; /* linkage on nfslocklist_head or blockedlocklist_head */
80 fhandle_t filehandle; /* NFS filehandle */
81 struct sockaddr *addr; /* client transport address (heap copy, see allocate_file_lock) */
82 struct nlm4_holder client; /* lock holder */
83 /* XXX: client_cookie used *only* in send_granted */
84 netobj client_cookie; /* cookie sent by the client */
85 int nsm_status; /* status from the remote lock manager */
86 int status; /* lock status, see below */
87 int flags; /* lock flags, see lockd_lock.h */
88 int blocking; /* blocking lock or not */
89 char client_name[SM_MAXSTRLEN]; /* client_name is really variable
90 length and must be last! */
/* Global list of currently granted NFS locks. */
93 LIST_HEAD(nfslocklist_head, file_lock);
94 struct nfslocklist_head nfslocklist_head = LIST_HEAD_INITIALIZER(nfslocklist_head);
/* Global list of lock requests currently blocked, retried by
 * retry_blockingfilelocklist(). */
96 LIST_HEAD(blockedlocklist_head, file_lock);
97 struct blockedlocklist_head blockedlocklist_head = LIST_HEAD_INITIALIZER(blockedlocklist_head);
/* Lock-state values kept in file_lock.status. */
100 #define LKST_LOCKED 1 /* lock is locked */
101 /* XXX: Is this flag file specific or lock specific? */
102 #define LKST_WAITING 2 /* file is already locked by another host */
103 #define LKST_PROCESSING 3 /* child is trying to acquire the lock */
104 #define LKST_DYING 4 /* must die when we get news from the child */
106 /* struct describing a monitored host */
/*
 * NOTE(review): truncated listing -- the "struct host {" opener, any
 * refcount field, and the closing "};" are not visible here.
 */
108 LIST_ENTRY(host) hostlst;
110 char name[SM_MAXSTRLEN]; /* name is really variable length and
113 /* list of hosts we monitor */
114 LIST_HEAD(hostlst_head, host);
115 struct hostlst_head hostlst_head = LIST_HEAD_INITIALIZER(hostlst_head);
118 * File monitoring handlers
119 * XXX: These might be able to be removed when kevent support
120 * is placed into the hardware lock/unlock routines. (ie.
121 * let the kernel do all the file monitoring)
124 /* Struct describing a monitored file */
/*
 * NOTE(review): truncated listing -- the "struct monfile {" opener,
 * the "exclusive" and "refcount" fields (referenced by lock_hwlock and
 * unlock_hwlock below), and the closing "};" are not visible here.
 */
126 LIST_ENTRY(monfile) monfilelist;
127 fhandle_t filehandle; /* Local access filehandle */
128 int fd; /* file descriptor: remains open until unlock! */
133 /* List of files we monitor */
134 LIST_HEAD(monfilelist_head, monfile);
135 struct monfilelist_head monfilelist_head = LIST_HEAD_INITIALIZER(monfilelist_head);
/* Artificial delay (seconds) injected by debuglog; 0 disables it. */
137 static int debugdelay = 0;
/* Result codes for the NFS (advisory list) locking layer.
 * NOTE(review): truncated -- the remaining enumerators and closing
 * "};" of nfslock_status are missing from this listing. */
139 enum nfslock_status { NFS_GRANTED = 0, NFS_GRANTED_DUPLICATE,
140 NFS_DENIED, NFS_DENIED_NOLOCK,
/* Result codes for the hardware (flock on local fd) locking layer. */
143 enum hwlock_status { HW_GRANTED = 0, HW_GRANTED_DUPLICATE,
144 HW_DENIED, HW_DENIED_NOLOCK,
145 HW_STALEFH, HW_READONLY, HW_RESERR };
/* Combined result codes for the partial-file-lock layer that drives
 * both the NFS list and the hardware lock. */
147 enum partialfilelock_status { PFL_GRANTED=0, PFL_GRANTED_DUPLICATE, PFL_DENIED,
148 PFL_NFSDENIED, PFL_NFSBLOCKED, PFL_NFSDENIED_NOLOCK, PFL_NFSRESERR,
149 PFL_HWDENIED, PFL_HWBLOCKED, PFL_HWDENIED_NOLOCK, PFL_HWRESERR};
/* Classification of the unlock region's left/right edges relative to
 * an established lock, used by region_compare(). */
151 enum LFLAGS {LEDGE_LEFT, LEDGE_LBOUNDARY, LEDGE_INSIDE, LEDGE_RBOUNDARY, LEDGE_RIGHT};
152 enum RFLAGS {REDGE_LEFT, REDGE_LBOUNDARY, REDGE_INSIDE, REDGE_RBOUNDARY, REDGE_RIGHT};
153 /* XXX: WARNING! I HAVE OVERLOADED THIS STATUS ENUM! SPLIT IT APART INTO TWO */
154 enum split_status {SPL_DISJOINT=0, SPL_LOCK1=1, SPL_LOCK2=2, SPL_CONTAINED=4, SPL_RESERR=8};
/* Forward declarations for the locking layers defined below. */
156 enum partialfilelock_status lock_partialfilelock(struct file_lock *fl);
158 void send_granted(struct file_lock *fl, int opcode);
160 void sigunlock(void);
161 void monitor_lock_host(const char *hostname);
162 void unmonitor_lock_host(char *hostname);
164 void copy_nlm4_lock_to_nlm4_holder(const struct nlm4_lock *src,
165 const bool_t exclusive, struct nlm4_holder *dest);
166 struct file_lock * allocate_file_lock(const netobj *lockowner,
167 const netobj *matchcookie,
168 const struct sockaddr *addr,
169 const char *caller_name);
170 void deallocate_file_lock(struct file_lock *fl);
171 void fill_file_lock(struct file_lock *fl, const fhandle_t *fh,
172 const bool_t exclusive, const int32_t svid,
173 const u_int64_t offset, const u_int64_t len,
174 const int state, const int status, const int flags, const int blocking);
175 int regions_overlap(const u_int64_t start1, const u_int64_t len1,
176 const u_int64_t start2, const u_int64_t len2);
177 enum split_status region_compare(const u_int64_t starte, const u_int64_t lene,
178 const u_int64_t startu, const u_int64_t lenu,
179 u_int64_t *start1, u_int64_t *len1, u_int64_t *start2, u_int64_t *len2);
180 int same_netobj(const netobj *n0, const netobj *n1);
181 int same_filelock_identity(const struct file_lock *fl0,
182 const struct file_lock *fl2);
/* Debugging helpers. */
184 static void debuglog(char const *fmt, ...);
185 void dump_static_object(const unsigned char* object, const int sizeof_object,
186 unsigned char* hbuff, const int sizeof_hbuff,
187 unsigned char* cbuff, const int sizeof_cbuff);
188 void dump_netobj(const struct netobj *nobj);
189 void dump_filelock(const struct file_lock *fl);
/* NFS (advisory list) lock layer. */
190 struct file_lock * get_lock_matching_unlock(const struct file_lock *fl);
191 enum nfslock_status test_nfslock(const struct file_lock *fl,
192 struct file_lock **conflicting_fl);
193 enum nfslock_status lock_nfslock(struct file_lock *fl);
194 enum nfslock_status delete_nfslock(struct file_lock *fl);
195 enum nfslock_status unlock_nfslock(const struct file_lock *fl,
196 struct file_lock **released_lock, struct file_lock **left_lock,
197 struct file_lock **right_lock);
198 enum hwlock_status lock_hwlock(struct file_lock *fl);
199 enum split_status split_nfslock(const struct file_lock *exist_lock,
200 const struct file_lock *unlock_lock, struct file_lock **left_lock,
201 struct file_lock **right_lock);
202 int duplicate_block(struct file_lock *fl);
203 void add_blockingfilelock(struct file_lock *fl);
204 enum hwlock_status unlock_hwlock(const struct file_lock *fl);
205 enum hwlock_status test_hwlock(const struct file_lock *fl,
206 struct file_lock **conflicting_fl);
207 void remove_blockingfilelock(struct file_lock *fl);
208 void clear_blockingfilelock(const char *hostname);
209 void retry_blockingfilelocklist(void);
210 enum partialfilelock_status unlock_partialfilelock(
211 const struct file_lock *fl);
212 void clear_partialfilelock(const char *hostname);
213 enum partialfilelock_status test_partialfilelock(
214 const struct file_lock *fl, struct file_lock **conflicting_fl);
/* Entry points called by the NLM RPC dispatch code. */
215 enum nlm_stats do_test(struct file_lock *fl,
216 struct file_lock **conflicting_fl);
217 enum nlm_stats do_unlock(struct file_lock *fl);
218 enum nlm_stats do_lock(struct file_lock *fl);
219 void do_clear(const char *hostname);
220 size_t strnlen(const char *, size_t);
/*
 * debuglog: printf-style logger to syslog(LOG_DEBUG), active only when
 * debug_level >= 1.
 * NOTE(review): truncated listing -- the va_list setup (va_start /
 * va_end), the early-return body, any debugdelay sleep, and the braces
 * are missing from the visible lines; documented as-is.
 */
223 debuglog(char const *fmt, ...)
227 if (debug_level < 1) {
234 vsyslog(LOG_DEBUG, fmt, ap);
/*
 * dump_static_object: render up to MAXOBJECTSIZE bytes of "object"
 * into hbuff (hex, two chars per byte) and cbuff (printable ASCII),
 * for debug output; no-op unless debug_level >= 2.
 * NOTE(review): truncated listing -- return type, braces, several
 * early-return bodies, and the non-printable-byte else branch are
 * missing from the visible lines; documented as-is.
 */
239 dump_static_object(object, size_object, hbuff, size_hbuff, cbuff, size_cbuff)
240 const unsigned char *object;
241 const int size_object;
242 unsigned char *hbuff;
243 const int size_hbuff;
244 unsigned char *cbuff;
245 const int size_cbuff;
249 if (debug_level < 2) {
253 objectsize = size_object;
255 if (objectsize == 0) {
256 debuglog("object is size 0\n");
/* Clamp oversized objects rather than overflowing the caller's buffers. */
258 if (objectsize > MAXOBJECTSIZE) {
259 debuglog("Object of size %d being clamped"
260 "to size %d\n", objectsize, MAXOBJECTSIZE);
261 objectsize = MAXOBJECTSIZE;
/* Hex dump: needs 2 chars per byte plus NUL. */
265 if (size_hbuff < objectsize*2+1) {
266 debuglog("Hbuff not large enough."
269 for(i=0;i<objectsize;i++) {
270 sprintf(hbuff+i*2,"%02x",*(object+i));
/* Character dump: one char per byte plus NUL. */
277 if (size_cbuff < objectsize+1) {
278 debuglog("Cbuff not large enough."
282 for(i=0;i<objectsize;i++) {
283 if (*(object+i) >= 32 && *(object+i) <= 127) {
284 *(cbuff+i) = *(object+i);
/*
 * dump_netobj: debug-print a netobj (length plus hex/character dump of
 * its bytes); no-op unless debug_level >= 2.
 * NOTE(review): truncated listing -- return type, braces, and the
 * "nobj == NULL" test preceding the null-pointer branch are missing
 * from the visible lines; documented as-is.
 */
295 dump_netobj(const struct netobj *nobj)
297 char hbuff[MAXBUFFERSIZE*2];
298 char cbuff[MAXBUFFERSIZE];
300 if (debug_level < 2) {
305 debuglog("Null netobj pointer\n");
307 else if (nobj->n_len == 0) {
308 debuglog("Size zero netobj\n");
310 dump_static_object(nobj->n_bytes, nobj->n_len,
311 hbuff, sizeof(hbuff), cbuff, sizeof(cbuff));
312 debuglog("netobj: len: %d data: %s ::: %s\n",
313 nobj->n_len, hbuff, cbuff);
317 /* #define DUMP_FILELOCK_VERBOSE */
/*
 * dump_filelock: debug-print a file_lock's holder, identity and state;
 * the filehandle/owner/cookie dumps are compiled in only when
 * DUMP_FILELOCK_VERBOSE is defined. No-op unless debug_level >= 2.
 * NOTE(review): truncated listing -- return type, braces, #endif
 * lines, and the "fl == NULL" test preceding the final branch are
 * missing from the visible lines; documented as-is.
 */
319 dump_filelock(const struct file_lock *fl)
321 #ifdef DUMP_FILELOCK_VERBOSE
322 char hbuff[MAXBUFFERSIZE*2];
323 char cbuff[MAXBUFFERSIZE];
326 if (debug_level < 2) {
331 debuglog("Dumping file lock structure @ %p\n", fl);
333 #ifdef DUMP_FILELOCK_VERBOSE
334 dump_static_object((unsigned char *)&fl->filehandle,
335 sizeof(fl->filehandle), hbuff, sizeof(hbuff),
336 cbuff, sizeof(cbuff));
337 debuglog("Filehandle: %8s ::: %8s\n", hbuff, cbuff);
340 debuglog("Dumping nlm4_holder:\n"
341 "exc: %x svid: %x offset:len %llx:%llx\n",
342 fl->client.exclusive, fl->client.svid,
343 fl->client.l_offset, fl->client.l_len);
345 #ifdef DUMP_FILELOCK_VERBOSE
346 debuglog("Dumping client identity:\n");
347 dump_netobj(&fl->client.oh);
349 debuglog("Dumping client cookie:\n");
350 dump_netobj(&fl->client_cookie);
352 debuglog("nsm: %d status: %d flags: %d svid: %x"
353 " client_name: %s\n", fl->nsm_status, fl->status,
354 fl->flags, fl->client.svid, fl->client_name);
357 debuglog("NULL file lock structure\n");
362 copy_nlm4_lock_to_nlm4_holder(src, exclusive, dest)
363 const struct nlm4_lock *src;
364 const bool_t exclusive;
365 struct nlm4_holder *dest;
368 dest->exclusive = exclusive;
369 dest->oh.n_len = src->oh.n_len;
370 dest->oh.n_bytes = src->oh.n_bytes;
371 dest->svid = src->svid;
372 dest->l_offset = src->l_offset;
373 dest->l_len = src->l_len;
/*
 * strnlen: bounded string length -- the number of bytes in "s" before
 * the first NUL, or "len" if no NUL occurs in the first "len" bytes.
 * Local fallback used to sanity-check caller-supplied host names.
 *
 * Fix(review): the original condition evaluated "s[n] != 0" before
 * "n < len", dereferencing s[len] (one byte past the permitted bound)
 * when the buffer holds no NUL; the bounds check must come first.
 * The truncated listing also lacked the return type, braces, and
 * return statement; restored here.
 */
size_t
strnlen(const char *s, size_t len)
{
	size_t n;

	for (n = 0; n < len && s[n] != 0; n++)
		;
	return n;
}
388 * allocate_file_lock: Create a lock with the given parameters
/*
 * Heap-allocates a file_lock sized to the actual caller_name length
 * (client_name is the flexible trailing member) and deep-copies the
 * owner netobj, the match cookie, and the client sockaddr. On any
 * allocation failure the partial allocations are freed.
 * NOTE(review): truncated listing -- return type, braces, the
 * "return NULL" failure bodies, and the final "return newfl" are
 * missing from the visible lines; documented as-is.
 */
392 allocate_file_lock(const netobj *lockowner, const netobj *matchcookie,
393 const struct sockaddr *addr, const char *caller_name)
395 struct file_lock *newfl;
398 /* Beware of rubbish input! */
399 n = strnlen(caller_name, SM_MAXSTRLEN);
400 if (n == SM_MAXSTRLEN) {
/* Allocate only as much client_name space as the name needs (+NUL). */
404 newfl = malloc(sizeof(*newfl) - sizeof(newfl->client_name) + n + 1);
408 bzero(newfl, sizeof(*newfl) - sizeof(newfl->client_name));
409 memcpy(newfl->client_name, caller_name, n);
410 newfl->client_name[n] = 0;
/* Deep-copy the lock owner handle. */
412 newfl->client.oh.n_bytes = malloc(lockowner->n_len);
413 if (newfl->client.oh.n_bytes == NULL) {
417 newfl->client.oh.n_len = lockowner->n_len;
418 bcopy(lockowner->n_bytes, newfl->client.oh.n_bytes, lockowner->n_len);
/* Deep-copy the client cookie; unwind the owner copy on failure. */
420 newfl->client_cookie.n_bytes = malloc(matchcookie->n_len);
421 if (newfl->client_cookie.n_bytes == NULL) {
422 free(newfl->client.oh.n_bytes);
426 newfl->client_cookie.n_len = matchcookie->n_len;
427 bcopy(matchcookie->n_bytes, newfl->client_cookie.n_bytes, matchcookie->n_len);
/* Deep-copy the client address; unwind earlier copies on failure. */
429 newfl->addr = malloc(addr->sa_len);
430 if (newfl->addr == NULL) {
431 free(newfl->client_cookie.n_bytes);
432 free(newfl->client.oh.n_bytes);
436 memcpy(newfl->addr, addr, addr->sa_len);
442 * file_file_lock: Force creation of a valid file lock
445 fill_file_lock(struct file_lock *fl, const fhandle_t *fh,
446 const bool_t exclusive, const int32_t svid,
447 const u_int64_t offset, const u_int64_t len,
448 const int state, const int status, const int flags, const int blocking)
450 bcopy(fh, &fl->filehandle, sizeof(fhandle_t));
452 fl->client.exclusive = exclusive;
453 fl->client.svid = svid;
454 fl->client.l_offset = offset;
455 fl->client.l_len = len;
457 fl->nsm_status = state;
460 fl->blocking = blocking;
464 * deallocate_file_lock: Free all storage associated with a file lock
467 deallocate_file_lock(struct file_lock *fl)
470 free(fl->client.oh.n_bytes);
471 free(fl->client_cookie.n_bytes);
476 * regions_overlap(): This function examines the two provided regions for
/*
 * Returns non-zero when the two [start, start+len) regions intersect,
 * by delegating to region_compare() and checking for SPL_DISJOINT.
 * NOTE(review): truncated listing -- return type, braces, the scratch
 * output arguments passed to region_compare, and the return
 * statements are missing from the visible lines; documented as-is.
 */
480 regions_overlap(start1, len1, start2, len2)
481 const u_int64_t start1, len1, start2, len2;
483 u_int64_t d1,d2,d3,d4;
484 enum split_status result;
486 debuglog("Entering region overlap with vals: %llu:%llu--%llu:%llu\n",
487 start1, len1, start2, len2);
489 result = region_compare(start1, len1, start2, len2,
492 debuglog("Exiting region overlap with val: %d\n",result);
494 if (result == SPL_DISJOINT) {
502 * region_compare(): Examine lock regions and split appropriately
/*
 * Compares an established lock region (starte, lene) against an unlock
 * region (startu, lenu); len == 0 means "to infinity". Classifies the
 * unlocker's edges (LFLAGS/RFLAGS), reports the relationship as a
 * split_status, and when the unlock leaves a remainder on either side
 * writes the surviving sub-region(s) to (start1,len1) / (start2,len2).
 * NOTE(review): truncated listing -- return type, braces, the
 * lflags/rflags declarations, several branch bodies, SPL_LOCK1 /
 * SPL_LOCK2 accumulation into retval, and the final return are
 * missing from the visible lines; the order-sensitive if/else
 * chains below are documented as-is.
 */
504 * XXX: Fix 64 bit overflow problems
505 * XXX: Check to make sure I got *ALL* the cases.
506 * XXX: This DESPERATELY needs a regression test.
509 region_compare(starte, lene, startu, lenu,
510 start1, len1, start2, len2)
511 const u_int64_t starte, lene, startu, lenu;
512 u_int64_t *start1, *len1, *start2, *len2;
515 * Please pay attention to the sequential exclusions
516 * of the if statements!!!
520 enum split_status retval;
522 retval = SPL_DISJOINT;
/* Case 1: both regions extend to infinity. */
524 if (lene == 0 && lenu == 0) {
525 /* Examine left edge of locker */
526 lflags = LEDGE_INSIDE;
527 if (startu < starte) {
529 } else if (startu == starte) {
530 lflags = LEDGE_LBOUNDARY;
533 rflags = REDGE_RBOUNDARY; /* Both are infinite */
535 if (lflags == LEDGE_INSIDE) {
537 *len1 = startu - starte;
540 if (lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) {
541 retval = SPL_CONTAINED;
/* Case 2: established lock infinite, unlocker finite. */
545 } else if (lene == 0 && lenu != 0) {
546 /* Established lock is infinite */
547 /* Examine left edge of unlocker */
548 lflags = LEDGE_INSIDE;
549 if (startu < starte) {
551 } else if (startu == starte) {
552 lflags = LEDGE_LBOUNDARY;
555 /* Examine right edge of unlocker */
556 if (startu + lenu < starte) {
557 /* Right edge of unlocker left of established lock */
560 } else if (startu + lenu == starte) {
561 /* Right edge of unlocker on start of established lock */
562 rflags = REDGE_LBOUNDARY;
564 } else { /* Infinity is right of finity */
565 /* Right edge of unlocker inside established lock */
566 rflags = REDGE_INSIDE;
569 if (lflags == LEDGE_INSIDE) {
571 *len1 = startu - starte;
575 if (rflags == REDGE_INSIDE) {
576 /* Create right lock */
577 *start2 = startu+lenu;
/* Case 3: established lock finite, unlocker infinite. */
581 } else if (lene != 0 && lenu == 0) {
582 /* Unlocker is infinite */
583 /* Examine left edge of unlocker */
584 lflags = LEDGE_RIGHT;
585 if (startu < starte) {
587 retval = SPL_CONTAINED;
589 } else if (startu == starte) {
590 lflags = LEDGE_LBOUNDARY;
591 retval = SPL_CONTAINED;
593 } else if ((startu > starte) && (startu < starte + lene - 1)) {
594 lflags = LEDGE_INSIDE;
595 } else if (startu == starte + lene - 1) {
596 lflags = LEDGE_RBOUNDARY;
597 } else { /* startu > starte + lene -1 */
598 lflags = LEDGE_RIGHT;
602 rflags = REDGE_RIGHT; /* Infinity is right of finity */
604 if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
606 *len1 = startu - starte;
/* Case 4: both regions finite. */
611 /* Both locks are finite */
613 /* Examine left edge of unlocker */
614 lflags = LEDGE_RIGHT;
615 if (startu < starte) {
617 } else if (startu == starte) {
618 lflags = LEDGE_LBOUNDARY;
619 } else if ((startu > starte) && (startu < starte + lene - 1)) {
620 lflags = LEDGE_INSIDE;
621 } else if (startu == starte + lene - 1) {
622 lflags = LEDGE_RBOUNDARY;
623 } else { /* startu > starte + lene -1 */
624 lflags = LEDGE_RIGHT;
628 /* Examine right edge of unlocker */
629 if (startu + lenu < starte) {
630 /* Right edge of unlocker left of established lock */
633 } else if (startu + lenu == starte) {
634 /* Right edge of unlocker on start of established lock */
635 rflags = REDGE_LBOUNDARY;
637 } else if (startu + lenu < starte + lene) {
638 /* Right edge of unlocker inside established lock */
639 rflags = REDGE_INSIDE;
640 } else if (startu + lenu == starte + lene) {
641 /* Right edge of unlocker on right edge of established lock */
642 rflags = REDGE_RBOUNDARY;
643 } else { /* startu + lenu > starte + lene */
644 /* Right edge of unlocker is right of established lock */
645 rflags = REDGE_RIGHT;
648 if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
649 /* Create left lock */
651 *len1 = (startu - starte);
655 if (rflags == REDGE_INSIDE) {
656 /* Create right lock */
657 *start2 = startu+lenu;
658 *len2 = starte+lene-(startu+lenu);
662 if ((lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) &&
663 (rflags == REDGE_RBOUNDARY || rflags == REDGE_RIGHT)) {
664 retval = SPL_CONTAINED;
671 * same_netobj: Compares the appropriate bits of a netobj for identity
/*
 * Returns non-zero when the two netobjs have equal length and equal
 * byte contents.
 * NOTE(review): truncated listing -- return type, braces, the retval
 * declaration/initialization, and the return statement are missing
 * from the visible lines; documented as-is.
 */
674 same_netobj(const netobj *n0, const netobj *n1)
680 debuglog("Entering netobj identity check\n");
682 if (n0->n_len == n1->n_len) {
683 debuglog("Preliminary length check passed\n");
684 retval = !bcmp(n0->n_bytes, n1->n_bytes, n0->n_len);
685 debuglog("netobj %smatch\n", retval ? "" : "mis");
692 * same_filelock_identity: Compares the appropriate bits of a file_lock
/*
 * Two locks share an identity when they carry the same svid (client
 * process id) and the same owner handle netobj.
 * NOTE(review): truncated listing -- return type, braces, the retval
 * declaration, and the return statement are missing from the visible
 * lines. The K&R parameter is named fl1 here but fl2 in the forward
 * declaration above -- legal C, but worth unifying.
 */
695 same_filelock_identity(fl0, fl1)
696 const struct file_lock *fl0, *fl1;
702 debuglog("Checking filelock identity\n");
705 * Check process ids and host information.
707 retval = (fl0->client.svid == fl1->client.svid &&
708 same_netobj(&(fl0->client.oh), &(fl1->client.oh)));
710 debuglog("Exiting checking filelock identity: retval: %d\n",retval);
716 * Below here are routines associated with manipulating the NFS
721 * get_lock_matching_unlock: Return a lock which matches the given unlock lock
/*
 * Walks nfslocklist_head and returns the first granted lock whose
 * filehandle, region, and lock identity all match the unlock request;
 * returns NULL (per the flow implied by the trailing debuglog) when
 * nothing matches.
 * NOTE(review): truncated listing -- return type, braces, "continue"
 * statements after the mismatch tests, and the return statements are
 * missing from the visible lines. The final debuglog string says
 * "bet_lock_matching_unlock" (runtime string typo for "get_...").
 */
723 * XXX: It is a shame that this duplicates so much code from test_nfslock.
726 get_lock_matching_unlock(const struct file_lock *fl)
728 struct file_lock *ifl; /* Iterator */
730 debuglog("Entering get_lock_matching_unlock\n");
731 debuglog("********Dump of fl*****************\n");
734 LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
735 debuglog("Pointer to file lock: %p\n",ifl);
737 debuglog("****Dump of ifl****\n");
739 debuglog("*******************\n");
742 * XXX: It is conceivable that someone could use the NLM RPC
743 * system to directly access filehandles. This may be a
744 * security hazard as the filehandle code may bypass normal
745 * file access controls
/* Mismatched filehandle: not the same file, skip this entry. */
747 if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t)))
750 debuglog("get_lock_matching_unlock: Filehandles match, "
751 "checking regions\n");
753 /* Filehandles match, check for region overlap */
754 if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
755 ifl->client.l_offset, ifl->client.l_len))
758 debuglog("get_lock_matching_unlock: Region overlap"
759 " found %llu : %llu -- %llu : %llu\n",
760 fl->client.l_offset,fl->client.l_len,
761 ifl->client.l_offset,ifl->client.l_len);
763 /* Regions overlap, check the identity */
764 if (!same_filelock_identity(fl,ifl))
767 debuglog("get_lock_matching_unlock: Duplicate lock id. Granting\n");
771 debuglog("Exiting bet_lock_matching_unlock\n");
777 * test_nfslock: check for NFS lock in lock list
/*
 * Scans the granted-lock list for a conflict with "fl". Returns
 * NFS_GRANTED when no conflict, NFS_GRANTED_DUPLICATE when the same
 * identity already holds an overlapping lock, NFS_DENIED on a genuine
 * conflict; *conflicting_fl is set to the lock that decided the
 * outcome (NULL when none).
 * NOTE(review): truncated listing -- return type, braces, "continue"
 * statements after the mismatch tests, the NFS_DENIED assignment, and
 * the final return are missing from the visible lines.
 */
779 * This routine makes the following assumptions:
780 * 1) Nothing will adjust the lock list during a lookup
782 * This routine has an interesting quirk which bit me hard.
783 * The conflicting_fl is the pointer to the conflicting lock.
784 * However, to modify the "*pointer* to the conflicting lock" rather
785 * than the "conflicting lock itself" one must pass in a "pointer to
786 * the pointer of the conflicting lock". Gross.
790 test_nfslock(const struct file_lock *fl, struct file_lock **conflicting_fl)
792 struct file_lock *ifl; /* Iterator */
793 enum nfslock_status retval;
795 debuglog("Entering test_nfslock\n");
797 retval = NFS_GRANTED;
798 (*conflicting_fl) = NULL;
800 debuglog("Entering lock search loop\n");
802 debuglog("***********************************\n");
803 debuglog("Dumping match filelock\n");
804 debuglog("***********************************\n");
806 debuglog("***********************************\n");
808 LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
/* A denial is final -- stop scanning once one is found. */
809 if (retval == NFS_DENIED)
812 debuglog("Top of lock loop\n");
813 debuglog("Pointer to file lock: %p\n",ifl);
815 debuglog("***********************************\n");
816 debuglog("Dumping test filelock\n");
817 debuglog("***********************************\n");
819 debuglog("***********************************\n");
822 * XXX: It is conceivable that someone could use the NLM RPC
823 * system to directly access filehandles. This may be a
824 * security hazard as the filehandle code may bypass normal
825 * file access controls
827 if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t)))
830 debuglog("test_nfslock: filehandle match found\n");
832 /* Filehandles match, check for region overlap */
833 if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
834 ifl->client.l_offset, ifl->client.l_len))
837 debuglog("test_nfslock: Region overlap found"
838 " %llu : %llu -- %llu : %llu\n",
839 fl->client.l_offset,fl->client.l_len,
840 ifl->client.l_offset,ifl->client.l_len);
842 /* Regions overlap, check the exclusivity */
/* Two shared (read) locks never conflict with each other. */
843 if (!(fl->client.exclusive || ifl->client.exclusive))
846 debuglog("test_nfslock: Exclusivity failure: %d %d\n",
847 fl->client.exclusive,
848 ifl->client.exclusive);
850 if (same_filelock_identity(fl,ifl)) {
851 debuglog("test_nfslock: Duplicate id. Granting\n");
852 (*conflicting_fl) = ifl;
853 retval = NFS_GRANTED_DUPLICATE;
855 /* locking attempt fails */
856 debuglog("test_nfslock: Lock attempt failed\n");
857 debuglog("Desired lock\n");
859 debuglog("Conflicting lock\n");
861 (*conflicting_fl) = ifl;
866 debuglog("Dumping file locks\n");
867 debuglog("Exiting test_nfslock\n");
873 * lock_nfslock: attempt to create a lock in the NFS lock list
/*
 * Tests the request with test_nfslock() and, when granted (including
 * the deliberate duplicate-grant case), inserts fl at the head of
 * nfslocklist_head. Returns the test result.
 * NOTE(review): truncated listing -- return type, braces, and the
 * final return statement are missing from the visible lines.
 */
875 * This routine tests whether the lock will be granted and then adds
876 * the entry to the lock list if so.
878 * Argument fl gets modified as its list housekeeping entries get modified
879 * upon insertion into the NFS lock list
881 * This routine makes several assumptions:
882 * 1) It is perfectly happy to grant a duplicate lock from the same pid.
883 * While this seems to be intuitively wrong, it is required for proper
884 * Posix semantics during unlock. It is absolutely imperative to not
885 * unlock the main lock before the two child locks are established. Thus,
886 * one has to be able to create duplicate locks over an existing lock
887 * 2) It currently accepts duplicate locks from the same id,pid
891 lock_nfslock(struct file_lock *fl)
893 enum nfslock_status retval;
894 struct file_lock *dummy_fl;
898 debuglog("Entering lock_nfslock...\n");
900 retval = test_nfslock(fl,&dummy_fl);
902 if (retval == NFS_GRANTED || retval == NFS_GRANTED_DUPLICATE) {
903 debuglog("Inserting lock...\n");
905 LIST_INSERT_HEAD(&nfslocklist_head, fl, nfslocklist);
908 debuglog("Exiting lock_nfslock...\n");
914 * delete_nfslock: delete an NFS lock list entry
916 * This routine is used to delete a lock out of the NFS lock list
917 * without regard to status, underlying locks, regions or anything else
919 * Note that this routine *does not deallocate memory* of the lock.
920 * It just disconnects it from the list. The lock can then be used
921 * by other routines without fear of trashing the list.
925 delete_nfslock(struct file_lock *fl)
928 LIST_REMOVE(fl, nfslocklist);
930 return (NFS_GRANTED);
/*
 * split_nfslock: compute how an unlock carves up an established lock.
 * Uses region_compare() to find the surviving left/right sub-regions
 * and allocates+fills a new file_lock (cloned from exist_lock) for
 * each surviving side.
 * NOTE(review): truncated listing -- return type, braces, the
 * SPL_RESERR returns in the allocation-failure paths, the offset/len
 * arguments (start1/len1, start2/len2) in the fill_file_lock calls,
 * and the final return are missing from the visible lines. Also, the
 * second allocation-failure message says "split 1" where "split 2"
 * was evidently intended (runtime string, left untouched here).
 */
934 split_nfslock(exist_lock, unlock_lock, left_lock, right_lock)
935 const struct file_lock *exist_lock, *unlock_lock;
936 struct file_lock **left_lock, **right_lock;
938 u_int64_t start1, len1, start2, len2;
939 enum split_status spstatus;
941 spstatus = region_compare(exist_lock->client.l_offset, exist_lock->client.l_len,
942 unlock_lock->client.l_offset, unlock_lock->client.l_len,
943 &start1, &len1, &start2, &len2);
/* A remainder survives on the left of the unlocked region. */
945 if ((spstatus & SPL_LOCK1) != 0) {
946 *left_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->client_cookie, exist_lock->addr, exist_lock->client_name);
947 if (*left_lock == NULL) {
948 debuglog("Unable to allocate resource for split 1\n");
952 fill_file_lock(*left_lock, &exist_lock->filehandle,
953 exist_lock->client.exclusive, exist_lock->client.svid,
955 exist_lock->nsm_status,
956 exist_lock->status, exist_lock->flags, exist_lock->blocking);
/* A remainder survives on the right of the unlocked region. */
959 if ((spstatus & SPL_LOCK2) != 0) {
960 *right_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->client_cookie, exist_lock->addr, exist_lock->client_name);
961 if (*right_lock == NULL) {
962 debuglog("Unable to allocate resource for split 1\n");
963 if (*left_lock != NULL) {
964 deallocate_file_lock(*left_lock);
969 fill_file_lock(*right_lock, &exist_lock->filehandle,
970 exist_lock->client.exclusive, exist_lock->client.svid,
972 exist_lock->nsm_status,
973 exist_lock->status, exist_lock->flags, exist_lock->blocking);
/*
 * unlock_nfslock: remove the NFS-list lock matching "fl", splitting it
 * when the unlock covers only part of its region. On success the
 * matched lock is unlinked and handed back via *released_lock (caller
 * owns/frees it), and any surviving left/right fragments are inserted
 * into the list and returned via *left_lock / *right_lock. Returns
 * NFS_DENIED_NOLOCK when no matching lock exists.
 * NOTE(review): truncated listing -- return type, braces, the
 * "*left_lock/*right_lock = NULL" initializations, the "mfl != NULL"
 * test around the split path, the NFS_RESERR handling after the
 * SPL_RESERR cleanup, and the final return are missing from the
 * visible lines.
 */
980 unlock_nfslock(fl, released_lock, left_lock, right_lock)
981 const struct file_lock *fl;
982 struct file_lock **released_lock;
983 struct file_lock **left_lock;
984 struct file_lock **right_lock;
986 struct file_lock *mfl; /* Matching file lock */
987 enum nfslock_status retval;
988 enum split_status spstatus;
990 debuglog("Entering unlock_nfslock\n");
992 *released_lock = NULL;
996 retval = NFS_DENIED_NOLOCK;
998 debuglog("Attempting to match lock...\n");
999 mfl = get_lock_matching_unlock(fl);
1002 debuglog("Unlock matched. Querying for split\n");
1004 spstatus = split_nfslock(mfl, fl, left_lock, right_lock);
1006 debuglog("Split returned %d %p %p %p %p\n",spstatus,mfl,fl,*left_lock,*right_lock);
1007 debuglog("********Split dumps********");
1010 dump_filelock(*left_lock);
1011 dump_filelock(*right_lock);
1012 debuglog("********End Split dumps********");
/* Allocation failure during split: discard any partial fragments. */
1014 if (spstatus == SPL_RESERR) {
1015 if (*left_lock != NULL) {
1016 deallocate_file_lock(*left_lock);
1020 if (*right_lock != NULL) {
1021 deallocate_file_lock(*right_lock);
1028 /* Insert new locks from split if required */
1029 if (*left_lock != NULL) {
1030 debuglog("Split left activated\n");
1031 LIST_INSERT_HEAD(&nfslocklist_head, *left_lock, nfslocklist);
1034 if (*right_lock != NULL) {
1035 debuglog("Split right activated\n");
1036 LIST_INSERT_HEAD(&nfslocklist_head, *right_lock, nfslocklist);
1039 /* Unlock the lock since it matches identity */
1040 LIST_REMOVE(mfl, nfslocklist);
1041 *released_lock = mfl;
1042 retval = NFS_GRANTED;
1045 debuglog("Exiting unlock_nfslock\n");
1051 * Below here are the routines for manipulating the file lock directly
1052 * on the disk hardware itself
/*
 * lock_hwlock: take (or share) the local flock() on the file behind
 * fl->filehandle. If the file is already monitored, the existing lock
 * is granted and refcounted; otherwise the file is opened via
 * fhopen(), flocked non-blocking, and added to monfilelist_head.
 * Maps errno from fhopen/flock to HW_STALEFH / HW_READONLY / HW_RESERR
 * (per the visible return statements).
 * NOTE(review): truncated listing -- return type, braces, the
 * refcount increment, the malloc NULL check, the errno switch bodies,
 * the cleanup (close/free) on flock failure, and nmf->refcount
 * initialization are missing from the visible lines.
 */
1055 lock_hwlock(struct file_lock *fl)
1057 struct monfile *imf,*nmf;
1058 int lflags, flerror;
1060 /* Scan to see if filehandle already present */
1061 LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
1062 if (bcmp(&fl->filehandle, &imf->filehandle,
1063 sizeof(fl->filehandle)) == 0) {
1064 /* imf is the correct filehandle */
1070 * Filehandle already exists (we control the file)
1071 * *AND* NFS has already cleared the lock for availability
1072 * Grant it and bump the refcount.
1076 return (HW_GRANTED);
1079 /* No filehandle found, create and go */
1080 nmf = malloc(sizeof(struct monfile));
1082 debuglog("hwlock resource allocation failure\n");
1086 /* XXX: Is O_RDWR always the correct mode? */
1087 nmf->fd = fhopen(&fl->filehandle, O_RDWR);
1089 debuglog("fhopen failed (from %16s): %32s\n",
1090 fl->client_name, strerror(errno));
1094 return (HW_STALEFH);
1096 return (HW_READONLY);
1102 /* File opened correctly, fill the monitor struct */
1103 bcopy(&fl->filehandle, &nmf->filehandle, sizeof(fl->filehandle));
1105 nmf->exclusive = fl->client.exclusive;
/* Non-blocking flock: exclusive lock for write requests, shared otherwise. */
1107 lflags = (nmf->exclusive == 1) ?
1108 (LOCK_EX | LOCK_NB) : (LOCK_SH | LOCK_NB);
1110 flerror = flock(nmf->fd, lflags);
1113 debuglog("flock failed (from %16s): %32s\n",
1114 fl->client_name, strerror(errno));
1121 return (HW_STALEFH);
1123 return (HW_READONLY);
1130 /* File opened and locked */
1131 LIST_INSERT_HEAD(&monfilelist_head, nmf, monfilelist);
1133 debuglog("flock succeeded (from %16s)\n", fl->client_name);
1134 return (HW_GRANTED);
/*
 * unlock_hwlock: drop one reference on the monitored file behind
 * fl->filehandle; when the reference count reaches zero the monfile is
 * unlinked (and, per the flow, the fd -- and its flock -- released).
 * Returns HW_DENIED_NOLOCK when the filehandle is not monitored.
 * NOTE(review): truncated listing -- return type, braces, the
 * "imf == NULL" test before the NOLOCK return, the refcount
 * decrement, the close(imf->fd)/free(imf) teardown, and "break" in
 * the scan loop are missing from the visible lines. Also note the
 * "loop interation" runtime-string typo (left untouched here).
 */
1138 unlock_hwlock(const struct file_lock *fl)
1140 struct monfile *imf;
1142 debuglog("Entering unlock_hwlock\n");
1143 debuglog("Entering loop interation\n");
1145 /* Scan to see if filehandle already present */
1146 LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
1147 if (bcmp(&fl->filehandle, &imf->filehandle,
1148 sizeof(fl->filehandle)) == 0) {
1149 /* imf is the correct filehandle */
1154 debuglog("Completed iteration. Proceeding\n");
1158 debuglog("Exiting unlock_hwlock (HW_DENIED_NOLOCK)\n");
1159 return (HW_DENIED_NOLOCK);
/* Underflow here indicates an unlock without a matching lock. */
1165 if (imf->refcount < 0) {
1166 debuglog("Negative hardware reference count\n");
1169 if (imf->refcount <= 0) {
1171 LIST_REMOVE(imf, monfilelist);
1174 debuglog("Exiting unlock_hwlock (HW_GRANTED)\n");
1175 return (HW_GRANTED);
/*
 * test_hwlock: placeholder -- hardware-level lock testing is not
 * implemented (both parameters are marked __unused).
 * NOTE(review): truncated listing -- return type, braces, and the
 * return statement are missing from the visible lines.
 */
1179 test_hwlock(fl, conflicting_fl)
1180 const struct file_lock *fl __unused;
1181 struct file_lock **conflicting_fl __unused;
1185 * XXX: lock tests on hardware are not required until
1186 * true partial file testing is done on the underlying file
1194 * Below here are routines for manipulating blocked lock requests
1195 * They should only be called from the XXX_partialfilelock routines
1196 * if at all possible
/*
 * duplicate_block: return non-zero when an equivalent request (same
 * filehandle, offset, length, exclusivity, and lock identity) is
 * already on the blocked-lock list.
 * NOTE(review): truncated listing -- return type, braces, the retval
 * declaration/assignment inside the match branch, and the return
 * statement are missing from the visible lines. The entry debuglog
 * string lacks a trailing "\n" (runtime string, left untouched).
 */
1200 duplicate_block(struct file_lock *fl)
1202 struct file_lock *ifl;
1205 debuglog("Entering duplicate_block");
1208 * Is this lock request already on the blocking list?
1209 * Consider it a dupe if the file handles, offset, length,
1210 * exclusivity and client match.
1212 LIST_FOREACH(ifl, &blockedlocklist_head, nfslocklist) {
1213 if (!bcmp(&fl->filehandle, &ifl->filehandle,
1214 sizeof(fhandle_t)) &&
1215 fl->client.exclusive == ifl->client.exclusive &&
1216 fl->client.l_offset == ifl->client.l_offset &&
1217 fl->client.l_len == ifl->client.l_len &&
1218 same_filelock_identity(fl, ifl)) {
1224 debuglog("Exiting duplicate_block: %s\n", retval ? "already blocked"
1225 : "not already blocked");
/*
 * add_blockingfilelock: queue a denied blocking request on
 * blockedlocklist_head for later retry, unless an equivalent request
 * is already queued (buggy clients re-send the same blocking lock).
 * NOTE(review): truncated listing -- return type, braces, the early
 * return in the duplicate branch, and the blocking-flag clear
 * mentioned by the comment at 1247-1248 are missing from the visible
 * lines.
 */
1230 add_blockingfilelock(struct file_lock *fl)
1232 debuglog("Entering add_blockingfilelock\n");
1235 * A blocking lock request _should_ never be duplicated as a client
1236 * that is already blocked shouldn't be able to request another
1237 * lock. Alas, there are some buggy clients that do request the same
1238 * lock repeatedly. Make sure only unique locks are on the blocked
1241 if (duplicate_block(fl)) {
1242 debuglog("Exiting add_blockingfilelock: already blocked\n");
1247 * Clear the blocking flag so that it can be reused without
1248 * adding it to the blocking queue a second time
1252 LIST_INSERT_HEAD(&blockedlocklist_head, fl, nfslocklist);
1254 debuglog("Exiting add_blockingfilelock: added blocked lock\n");
1258 remove_blockingfilelock(struct file_lock *fl)
1261 debuglog("Entering remove_blockingfilelock\n");
1263 LIST_REMOVE(fl, nfslocklist);
1265 debuglog("Exiting remove_blockingfilelock\n");
/*
 * clear_blockingfilelock: drop every blocked lock request owned by the
 * named host, removing each entry from the list and freeing it.
 *
 * Uses an explicit while loop with a saved "next" pointer because the
 * current element is deleted during iteration (LIST_FOREACH would walk
 * freed memory).
 *
 * NOTE(review): incomplete excerpt -- closing braces and the
 * ifl-advance statement are missing from this fragment.
 */
1269 clear_blockingfilelock(const char *hostname)
1271 struct file_lock *ifl,*nfl;
1274 * Normally, LIST_FOREACH is called for, but since
1275 * the current element *is* the iterator, deleting it
1276 * would mess up the iteration. Thus, a next element
1277 * must be used explicitly
1280 ifl = LIST_FIRST(&blockedlocklist_head);
1282 while (ifl != NULL) {
1283 nfl = LIST_NEXT(ifl, nfslocklist);
1285 if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
1286 remove_blockingfilelock(ifl);
1287 deallocate_file_lock(ifl);
/*
 * retry_blockingfilelocklist: re-attempt every queued blocking lock.
 *
 * Each entry is first removed from the blocked list (its list linkage
 * must be disconnected before lock_partialfilelock may re-insert it),
 * then retried. Granted locks get a GRANTED callback sent to the
 * client; still-blocked locks are re-inserted at the head.
 *
 * NOTE(review): incomplete excerpt -- the return type, braces, and the
 * else path introducing the re-insert are missing from this fragment.
 */
1295 retry_blockingfilelocklist(void)
1297 /* Retry all locks in the blocked list */
1298 struct file_lock *ifl, *nfl; /* Iterator */
1299 enum partialfilelock_status pflstatus;
1301 debuglog("Entering retry_blockingfilelocklist\n");
1303 LIST_FOREACH_SAFE(ifl, &blockedlocklist_head, nfslocklist, nfl) {
1304 debuglog("Iterator choice %p\n",ifl);
1305 debuglog("Next iterator choice %p\n",nfl);
1308 * SUBTLE BUG: The file_lock must be removed from the
1309 * old list so that it's list pointers get disconnected
1310 * before being allowed to participate in the new list
1311 * which will automatically add it in if necessary.
1314 LIST_REMOVE(ifl, nfslocklist);
1315 pflstatus = lock_partialfilelock(ifl);
1317 if (pflstatus == PFL_GRANTED || pflstatus == PFL_GRANTED_DUPLICATE) {
1318 debuglog("Granted blocked lock\n");
1319 /* lock granted and is now being used */
1320 send_granted(ifl,0);
1322 /* Reinsert lock back into blocked list */
1323 debuglog("Replacing blocked lock\n");
1324 LIST_INSERT_HEAD(&blockedlocklist_head, ifl, nfslocklist);
1328 debuglog("Exiting retry_blockingfilelocklist\n");
1332 * Below here are routines associated with manipulating all
1333 * aspects of the partial file locking system (list, hardware, etc.)
1337 * Please note that lock monitoring must be done at this level which
1338 * keeps track of *individual* lock requests on lock and unlock
1340 * XXX: Split unlocking is going to make the unlock code miserable
1344 * lock_partialfilelock:
1346 * Argument fl gets modified as its list housekeeping entries get modified
1347 * upon insertion into the NFS lock list
1349 * This routine makes several assumptions:
1350 * 1) It (will) pass locks through to flock to lock the entire underlying file
1351 * and then parcel out NFS locks if it gets control of the file.
1352 * This matches the old rpc.lockd file semantics (except where it
1353 * is now more correct). It is the safe solution, but will cause
1354 * overly restrictive blocking if someone is trying to use the
1355 * underlying files without using NFS. This appears to be an
1356 * acceptable tradeoff since most people use standalone NFS servers.
1357 * XXX: The right solution is probably kevent combined with fcntl
1359 * 2) Nothing modifies the lock lists between testing and granting
1360 * I have no idea whether this is a useful assumption or not
/*
 * lock_partialfilelock: acquire the NFS-level lock first, then the
 * hardware (filesystem) lock; back out the NFS lock if the hardware
 * step fails. On denial of a blocking request the lock is queued via
 * add_blockingfilelock() and PFL_NFSBLOCKED is returned.
 *
 * FIX(review): the "Unmatched lnlstatus" debuglog used a %d conversion
 * with no matching argument -- undefined behavior for a printf-style
 * variadic call; lnlstatus is now passed.
 *
 * NOTE(review): incomplete excerpt -- case labels, breaks, and closing
 * braces are missing from this fragment; only the format-string defect
 * was changed, every other visible token is untouched.
 */
1363 enum partialfilelock_status
1364 lock_partialfilelock(struct file_lock *fl)
1366 enum partialfilelock_status retval;
1367 enum nfslock_status lnlstatus;
1368 enum hwlock_status hwstatus;
1370 debuglog("Entering lock_partialfilelock\n");
1372 retval = PFL_DENIED;
1375 * Execute the NFS lock first, if possible, as it is significantly
1376 * easier and less expensive to undo than the filesystem lock
1379 lnlstatus = lock_nfslock(fl);
1381 switch (lnlstatus) {
1383 case NFS_GRANTED_DUPLICATE:
1385 * At this point, the NFS lock is allocated and active.
1386 * Remember to clean it up if the hardware lock fails
1388 hwstatus = lock_hwlock(fl);
1392 case HW_GRANTED_DUPLICATE:
1393 debuglog("HW GRANTED\n");
1395 * XXX: Fixme: Check hwstatus for duplicate when
1396 * true partial file locking and accounting is
1397 * done on the hardware.
1399 if (lnlstatus == NFS_GRANTED_DUPLICATE) {
1400 retval = PFL_GRANTED_DUPLICATE;
1402 retval = PFL_GRANTED;
1404 monitor_lock_host(fl->client_name);
1407 debuglog("HW RESERR\n");
1408 retval = PFL_HWRESERR;
1411 debuglog("HW DENIED\n");
1412 retval = PFL_HWDENIED;
1415 debuglog("Unmatched hwstatus %d\n",hwstatus);
1419 if (retval != PFL_GRANTED &&
1420 retval != PFL_GRANTED_DUPLICATE) {
1421 /* Clean up the NFS lock */
1422 debuglog("Deleting trial NFS lock\n");
1427 retval = PFL_NFSDENIED;
1430 retval = PFL_NFSRESERR;
1433 debuglog("Unmatched lnlstatus %d\n", lnlstatus);
1434 retval = PFL_NFSDENIED_NOLOCK;
1439 * By the time fl reaches here, it is completely free again on
1440 * failure. The NFS lock done before attempting the
1441 * hardware lock has been backed out
1444 if (retval == PFL_NFSDENIED || retval == PFL_HWDENIED) {
1445 /* Once last chance to check the lock */
1446 if (fl->blocking == 1) {
1447 if (retval == PFL_NFSDENIED) {
1448 /* Queue the lock */
1449 debuglog("BLOCKING LOCK RECEIVED\n");
1450 retval = PFL_NFSBLOCKED;
1451 add_blockingfilelock(fl);
1454 /* retval is okay as PFL_HWDENIED */
1455 debuglog("BLOCKING LOCK DENIED IN HARDWARE\n");
1459 /* Leave retval alone, it's already correct */
1460 debuglog("Lock denied. Non-blocking failure\n");
1465 debuglog("Exiting lock_partialfilelock\n");
1471 * unlock_partialfilelock:
1473 * Given a file_lock, unlock all locks which match.
1475 * Note that a given lock might have to unlock ITSELF! See
1476 * clear_partialfilelock for example.
/*
 * unlock_partialfilelock: release every NFS lock matching fl, splitting
 * existing locks into left/right remainders (lfl/rfl) as needed, and
 * mirror the result onto the hardware lock layer.
 *
 * The do/while loop repeats while unlock_nfslock keeps granting,
 * because one unlock request can dissolve several smaller locks.
 * Split remainders are hardware-locked BEFORE the hardware unlock so no
 * outside process can sneak a lock onto the uncovered range. If fl
 * matches a lock in the list (clear_partialfilelock path), the matched
 * entry is remembered in selffl and freed only after the loop, since
 * fl itself may alias it.
 *
 * NOTE(review): incomplete excerpt -- braces, case labels, breaks, the
 * selffl initialization, and the final return are missing from this
 * fragment; behavior notes above reflect only the visible lines.
 */
1479 enum partialfilelock_status
1480 unlock_partialfilelock(const struct file_lock *fl)
1482 struct file_lock *lfl,*rfl,*releasedfl,*selffl;
1483 enum partialfilelock_status retval;
1484 enum nfslock_status unlstatus;
1485 enum hwlock_status unlhwstatus, lhwstatus;
1487 debuglog("Entering unlock_partialfilelock\n");
1493 retval = PFL_DENIED;
1496 * There are significant overlap and atomicity issues
1497 * with partially releasing a lock. For example, releasing
1498 * part of an NFS shared lock does *not* always release the
1499 * corresponding part of the file since there is only one
1500 * rpc.lockd UID but multiple users could be requesting it
1501 * from NFS. Also, an unlock request should never allow
1502 * another process to gain a lock on the remaining parts.
1503 * ie. Always apply the new locks before releasing the
1508 * Loop is required since multiple little locks
1509 * can be allocated and then deallocated with one
1512 * The loop is required to be here so that the nfs &
1513 * hw subsystems do not need to communicate with one
1518 debuglog("Value of releasedfl: %p\n",releasedfl);
1519 /* lfl&rfl are created *AND* placed into the NFS lock list if required */
1520 unlstatus = unlock_nfslock(fl, &releasedfl, &lfl, &rfl);
1521 debuglog("Value of releasedfl: %p\n",releasedfl);
1524 /* XXX: This is grungy. It should be refactored to be cleaner */
1526 lhwstatus = lock_hwlock(lfl);
1527 if (lhwstatus != HW_GRANTED &&
1528 lhwstatus != HW_GRANTED_DUPLICATE) {
1529 debuglog("HW duplicate lock failure for left split\n");
1531 monitor_lock_host(lfl->client_name);
1535 lhwstatus = lock_hwlock(rfl);
1536 if (lhwstatus != HW_GRANTED &&
1537 lhwstatus != HW_GRANTED_DUPLICATE) {
1538 debuglog("HW duplicate lock failure for right split\n");
1540 monitor_lock_host(rfl->client_name);
1543 switch (unlstatus) {
1545 /* Attempt to unlock on the hardware */
1546 debuglog("NFS unlock granted. Attempting hardware unlock\n");
1548 /* This call *MUST NOT* unlock the two newly allocated locks */
1549 unlhwstatus = unlock_hwlock(fl);
1550 debuglog("HW unlock returned with code %d\n",unlhwstatus);
1552 switch (unlhwstatus) {
1554 debuglog("HW unlock granted\n");
1555 unmonitor_lock_host(releasedfl->client_name);
1556 retval = PFL_GRANTED;
1558 case HW_DENIED_NOLOCK:
1559 /* Huh?!?! This shouldn't happen */
1560 debuglog("HW unlock denied no lock\n");
1561 retval = PFL_HWRESERR;
1562 /* Break out of do-while */
1563 unlstatus = NFS_RESERR;
1566 debuglog("HW unlock failed\n");
1567 retval = PFL_HWRESERR;
1568 /* Break out of do-while */
1569 unlstatus = NFS_RESERR;
1573 debuglog("Exiting with status retval: %d\n",retval);
1575 retry_blockingfilelocklist();
1577 case NFS_DENIED_NOLOCK:
1578 retval = PFL_GRANTED;
1579 debuglog("All locks cleaned out\n");
1582 retval = PFL_NFSRESERR;
1583 debuglog("NFS unlock failure\n");
1588 if (releasedfl != NULL) {
1589 if (fl == releasedfl) {
1591 * XXX: YECHHH!!! Attempt to unlock self succeeded
1592 * but we can't deallocate the space yet. This is what
1593 * happens when you don't write malloc and free together
1595 debuglog("Attempt to unlock self\n");
1596 selffl = releasedfl;
1599 * XXX: this deallocation *still* needs to migrate closer
1600 * to the allocation code way up in get_lock or the allocation
1601 * code needs to migrate down (violation of "When you write
1602 * malloc you must write free")
1605 deallocate_file_lock(releasedfl);
1610 } while (unlstatus == NFS_GRANTED);
1612 if (selffl != NULL) {
1614 * This statement wipes out the incoming file lock (fl)
1615 * in spite of the fact that it is declared const
1617 debuglog("WARNING! Destroying incoming lock pointer\n");
1618 deallocate_file_lock(selffl);
1621 debuglog("Exiting unlock_partialfilelock\n");
1627 * clear_partialfilelock
1629 * Normally called in response to statd state number change.
1630 * Wipe out all locks held by a host. As a bonus, the act of
1631 * doing so should automatically clear their statd entries and
1632 * unmonitor the host.
/*
 * clear_partialfilelock: wipe all locks held by a host (blocked queue
 * first, then active NFS locks). Called on statd state change.
 *
 * The explicit while/next-pointer walk is needed because
 * unlock_partialfilelock destroys the current list element.
 *
 * NOTE(review): incomplete excerpt -- the return type, braces, and the
 * ifl-advance statement are missing from this fragment.
 */
1636 clear_partialfilelock(const char *hostname)
1638 struct file_lock *ifl, *nfl;
1640 /* Clear blocking file lock list */
1641 clear_blockingfilelock(hostname);
1643 /* do all required unlocks */
1644 /* Note that unlock can smash the current pointer to a lock */
1647 * Normally, LIST_FOREACH is called for, but since
1648 * the current element *is* the iterator, deleting it
1649 * would mess up the iteration. Thus, a next element
1650 * must be used explicitly
1653 ifl = LIST_FIRST(&nfslocklist_head);
1655 while (ifl != NULL) {
1656 nfl = LIST_NEXT(ifl, nfslocklist);
1658 if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
1659 /* Unlock destroys ifl out from underneath */
1660 unlock_partialfilelock(ifl);
1661 /* ifl is NO LONGER VALID AT THIS POINT */
1668 * test_partialfilelock:
/*
 * test_partialfilelock: check whether fl would be granted without
 * actually taking the lock. On conflict, *conflicting_fl points at the
 * blocking lock. Only the NFS-level list is consulted; the underlying
 * filesystem check is still a XXX/TODO per the comment below.
 *
 * NOTE(review): incomplete excerpt -- braces, else path, and the final
 * return are missing from this fragment.
 */
1670 enum partialfilelock_status
1671 test_partialfilelock(const struct file_lock *fl,
1672 struct file_lock **conflicting_fl)
1674 enum partialfilelock_status retval;
1675 enum nfslock_status teststatus;
1677 debuglog("Entering testpartialfilelock...\n");
1679 retval = PFL_DENIED;
1681 teststatus = test_nfslock(fl, conflicting_fl);
1682 debuglog("test_partialfilelock: teststatus %d\n",teststatus);
1684 if (teststatus == NFS_GRANTED || teststatus == NFS_GRANTED_DUPLICATE) {
1685 /* XXX: Add the underlying filesystem locking code */
1686 retval = (teststatus == NFS_GRANTED) ?
1687 PFL_GRANTED : PFL_GRANTED_DUPLICATE;
1688 debuglog("Dumping locks...\n");
1690 dump_filelock(*conflicting_fl);
1691 debuglog("Done dumping locks...\n");
1693 retval = PFL_NFSDENIED;
1694 debuglog("NFS test denied.\n");
1696 debuglog("Conflicting.\n");
1697 dump_filelock(*conflicting_fl);
1700 debuglog("Exiting testpartialfilelock...\n");
1706 * Below here are routines associated with translating the partial file locking
1707 * codes into useful codes to send back to the NFS RPC messaging system
1711 * These routines translate the (relatively) useful return codes back onto
1712 * the few return codes which the nlm subsystems wishes to trasmit
/*
 * do_test: translate a test_partialfilelock() PFL_* status into the
 * nlm/nlm4 status code to send back over RPC, choosing the v1 or v4
 * constant based on fl->flags & LOCK_V4.
 *
 * NOTE(review): incomplete excerpt -- the return type line, case
 * labels, breaks, and the final return are missing from this fragment.
 */
1716 do_test(struct file_lock *fl, struct file_lock **conflicting_fl)
1718 enum partialfilelock_status pfsret;
1719 enum nlm_stats retval;
1721 debuglog("Entering do_test...\n");
1723 pfsret = test_partialfilelock(fl,conflicting_fl);
1727 debuglog("PFL test lock granted\n");
1729 dump_filelock(*conflicting_fl);
1730 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1732 case PFL_GRANTED_DUPLICATE:
1733 debuglog("PFL test lock granted--duplicate id detected\n");
1735 dump_filelock(*conflicting_fl);
1736 debuglog("Clearing conflicting_fl for call semantics\n");
1737 *conflicting_fl = NULL;
1738 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1742 debuglog("PFL test lock denied\n");
1744 dump_filelock(*conflicting_fl);
1745 retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
1749 debuglog("PFL test lock resource fail\n");
1751 dump_filelock(*conflicting_fl);
1752 retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
1755 debuglog("PFL test lock *FAILED*\n");
1757 dump_filelock(*conflicting_fl);
1758 retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
1762 debuglog("Exiting do_test...\n");
1768 * do_lock: Try to acquire a lock
1770 * This routine makes a distinction between NLM versions. I am pretty
1771 * convinced that this should be abstracted out and bounced up a level
/*
 * do_lock: acquire a lock via lock_partialfilelock() and map the PFL_*
 * result to an nlm/nlm4 status (v1 vs v4 chosen by fl->flags & LOCK_V4).
 * A blocked request maps to nlm(4)_blocked after being queued.
 *
 * NOTE(review): incomplete excerpt -- the return type line, case
 * labels, breaks, and the final return are missing from this fragment.
 */
1775 do_lock(struct file_lock *fl)
1777 enum partialfilelock_status pfsret;
1778 enum nlm_stats retval;
1780 debuglog("Entering do_lock...\n");
1782 pfsret = lock_partialfilelock(fl);
1786 debuglog("PFL lock granted");
1788 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1790 case PFL_GRANTED_DUPLICATE:
1791 debuglog("PFL lock granted--duplicate id detected");
1793 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1797 debuglog("PFL_NFS lock denied");
1799 retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
1801 case PFL_NFSBLOCKED:
1803 debuglog("PFL_NFS blocking lock denied. Queued.\n");
1805 retval = (fl->flags & LOCK_V4) ? nlm4_blocked : nlm_blocked;
1809 debuglog("PFL lock resource alocation fail\n");
1811 retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
1814 debuglog("PFL lock *FAILED*");
1816 retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
1820 debuglog("Exiting do_lock...\n");
/*
 * do_unlock: release matching locks via unlock_partialfilelock() and
 * map the PFL_* result to an nlm/nlm4 status. "No lock found" is
 * reported as granted -- the desired end state already holds.
 *
 * NOTE(review): incomplete excerpt -- the return type line, case
 * labels, breaks, and the final return are missing from this fragment.
 */
1826 do_unlock(struct file_lock *fl)
1828 enum partialfilelock_status pfsret;
1829 enum nlm_stats retval;
1831 debuglog("Entering do_unlock...\n");
1832 pfsret = unlock_partialfilelock(fl);
1836 debuglog("PFL unlock granted");
1838 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1842 debuglog("PFL_NFS unlock denied");
1844 retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
1846 case PFL_NFSDENIED_NOLOCK:
1847 case PFL_HWDENIED_NOLOCK:
1848 debuglog("PFL_NFS no lock found\n");
1849 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1853 debuglog("PFL unlock resource failure");
1855 retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
1858 debuglog("PFL unlock *FAILED*");
1860 retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
1864 debuglog("Exiting do_unlock...\n");
1872 * This routine is non-existent because it doesn't have a return code.
1873 * It is here for completeness in case someone *does* need to do return
1874 * codes later. A decent compiler should optimize this away.
/*
 * do_clear: thin pass-through to clear_partialfilelock(); exists only
 * for symmetry with the other do_* translation routines (no status to
 * translate, per the comment above in the original file).
 */
1878 do_clear(const char *hostname)
1881 clear_partialfilelock(hostname);
1885 * The following routines are all called from the code which the
1890 * testlock(): inform the caller if the requested lock would be granted
1892 * returns NULL if lock would granted
1893 * returns pointer to a conflicting nlm4_holder if not
/*
 * testlock: NLM entry point -- report whether the requested lock would
 * be granted. Returns NULL when it would be granted, otherwise a
 * pointer to the holder of a conflicting lock.
 *
 * NOTE(review): incomplete excerpt -- braces and the NULL-return path
 * are missing from this fragment. The returned pointer refers into the
 * conflicting file_lock; presumably valid only until the lock list
 * changes -- verify against callers.
 */
struct nlm4_holder *
1897 testlock(struct nlm4_lock *lock, bool_t exclusive, int flags __unused)
1899 struct file_lock test_fl, *conflicting_fl;
1901 bzero(&test_fl, sizeof(test_fl));
1903 bcopy(lock->fh.n_bytes, &(test_fl.filehandle), sizeof(fhandle_t));
1904 copy_nlm4_lock_to_nlm4_holder(lock, exclusive, &test_fl.client);
1907 do_test(&test_fl, &conflicting_fl);
1909 if (conflicting_fl == NULL) {
1910 debuglog("No conflicting lock found\n");
1914 debuglog("Found conflicting lock\n");
1915 dump_filelock(conflicting_fl);
1917 return (&conflicting_fl->client);
1922 * getlock: try to acquire the lock.
1923 * If file is already locked and we can sleep, put the lock in the list with
1924 * status LKST_WAITING; it'll be processed later.
1925 * Otherwise try to lock. If we're allowed to block, fork a child which
1926 * will do the blocking lock.
/*
 * getlock: NLM entry point for lock requests. Rejects non-reclaim
 * requests during the grace period, allocates and fills a file_lock
 * from the RPC arguments, and hands it to do_lock(). The file_lock is
 * freed here unless do_lock granted or queued it (those paths keep
 * ownership in the lock lists).
 *
 * NOTE(review): incomplete excerpt -- braces, the switch statement on
 * retval, and the final return are missing from this fragment.
 */
1930 getlock(nlm4_lockargs *lckarg, struct svc_req *rqstp, const int flags)
1932 struct file_lock *newfl;
1933 enum nlm_stats retval;
1935 debuglog("Entering getlock...\n");
1937 if (grace_expired == 0 && lckarg->reclaim == 0)
1938 return (flags & LOCK_V4) ?
1939 nlm4_denied_grace_period : nlm_denied_grace_period;
1941 /* allocate new file_lock for this request */
1942 newfl = allocate_file_lock(&lckarg->alock.oh, &lckarg->cookie,
1943 (struct sockaddr *)svc_getrpccaller(rqstp->rq_xprt)->buf, lckarg->alock.caller_name);
1944 if (newfl == NULL) {
1945 syslog(LOG_NOTICE, "lock allocate failed: %s", strerror(errno));
1947 return (flags & LOCK_V4) ?
1948 nlm4_denied_nolocks : nlm_denied_nolocks;
1951 if (lckarg->alock.fh.n_len != sizeof(fhandle_t)) {
1952 debuglog("received fhandle size %d, local size %d",
1953 lckarg->alock.fh.n_len, (int)sizeof(fhandle_t));
1956 fill_file_lock(newfl, (fhandle_t *)lckarg->alock.fh.n_bytes,
1957 lckarg->exclusive, lckarg->alock.svid, lckarg->alock.l_offset,
1958 lckarg->alock.l_len,
1959 lckarg->state, 0, flags, lckarg->block);
1962 * newfl is now fully constructed and deallocate_file_lock
1963 * can now be used to delete it
1967 debuglog("Pointer to new lock is %p\n",newfl);
1969 retval = do_lock(newfl);
1971 debuglog("Pointer to new lock is %p\n",newfl);
1977 /* case nlm_granted: is the same as nlm4_granted */
1978 /* do_mon(lckarg->alock.caller_name); */
1981 /* case nlm_blocked: is the same as nlm4_blocked */
1982 /* do_mon(lckarg->alock.caller_name); */
1985 deallocate_file_lock(newfl);
1989 debuglog("Exiting getlock...\n");
1995 /* unlock a filehandle */
/*
 * unlock: NLM entry point for unlock requests. Builds a stack-local
 * file_lock from the RPC arguments (exclusive forced to 0) and passes
 * it to do_unlock().
 *
 * NOTE(review): incomplete excerpt -- the return type, the declaration
 * of err, braces, and the final return are missing from this fragment.
 */
1997 unlock(nlm4_lock *lock, const int flags __unused)
1999 struct file_lock fl;
2004 debuglog("Entering unlock...\n");
2006 bzero(&fl,sizeof(struct file_lock));
2007 bcopy(lock->fh.n_bytes, &fl.filehandle, sizeof(fhandle_t));
2009 copy_nlm4_lock_to_nlm4_holder(lock, 0, &fl.client);
2011 err = do_unlock(&fl);
2015 debuglog("Exiting unlock...\n");
2021 * XXX: The following monitor/unmonitor routines
2022 * have not been extensively tested (ie. no regression
2023 * script exists like for the locking sections
2027 * monitor_lock_host: monitor lock hosts locally with a ref count and
/*
 * monitor_lock_host: reference-counted local host monitoring plus a
 * statd SM_MON registration. If the host is already on hostlst_head
 * its refcount is bumped; otherwise a new entry is allocated (with the
 * name stored inline via a trailing array) and statd is told to
 * monitor the host. The entry is only inserted into the list when the
 * statd call succeeded (statflag == 1).
 *
 * NOTE(review): incomplete excerpt -- braces, refcount increments,
 * early returns, the statflag assignments, and the free() on failure
 * are missing from this fragment.
 */
2031 monitor_lock_host(const char *hostname)
2033 struct host *ihp, *nhp;
2035 struct sm_stat_res sres;
2036 int rpcret, statflag;
2042 LIST_FOREACH(ihp, &hostlst_head, hostlst) {
2043 if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) {
2044 /* Host is already monitored, bump refcount */
2046 /* Host should only be in the monitor list once */
2051 /* Host is not yet monitored, add it */
2052 n = strnlen(hostname, SM_MAXSTRLEN);
2053 if (n == SM_MAXSTRLEN) {
2056 nhp = malloc(sizeof(*nhp) - sizeof(nhp->name) + n + 1);
2058 debuglog("Unable to allocate entry for statd mon\n");
2062 /* Allocated new host entry, now fill the fields */
2063 memcpy(nhp->name, hostname, n);
2066 debuglog("Locally Monitoring host %16s\n",hostname);
2068 debuglog("Attempting to tell statd\n");
2070 bzero(&smon,sizeof(smon));
2072 smon.mon_id.mon_name = nhp->name;
2073 smon.mon_id.my_id.my_name = "localhost";
2074 smon.mon_id.my_id.my_prog = NLM_PROG;
2075 smon.mon_id.my_id.my_vers = NLM_SM;
2076 smon.mon_id.my_id.my_proc = NLM_SM_NOTIFY;
2078 rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_MON,
2079 (xdrproc_t)xdr_mon, &smon,
2080 (xdrproc_t)xdr_sm_stat_res, &sres);
2083 if (sres.res_stat == stat_fail) {
2084 debuglog("Statd call failed\n");
2090 debuglog("Rpc call to statd failed with return value: %d\n",
2095 if (statflag == 1) {
2096 LIST_INSERT_HEAD(&hostlst_head, nhp, hostlst);
2104 * unmonitor_lock_host: clear monitor ref counts and inform statd when gone
/*
 * unmonitor_lock_host: decrement the monitor refcount for a host; when
 * it reaches zero, tell statd (SM_UNMON) to stop monitoring and remove
 * the entry from hostlst_head. A refcount still above zero returns
 * early; a negative refcount is logged as an internal error.
 *
 * NOTE(review): incomplete excerpt -- braces, the refcount decrement,
 * the not-found return, and the free of the list entry are missing
 * from this fragment.
 */
2107 unmonitor_lock_host(char *hostname)
2110 struct mon_id smon_id;
2111 struct sm_stat smstat;
2116 for( ihp=LIST_FIRST(&hostlst_head); ihp != NULL;
2117 ihp=LIST_NEXT(ihp, hostlst)) {
2118 if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) {
2119 /* Host is monitored, bump refcount */
2121 /* Host should only be in the monitor list once */
2127 debuglog("Could not find host %16s in mon list\n", hostname);
2131 if (ihp->refcnt > 0)
2134 if (ihp->refcnt < 0) {
2135 debuglog("Negative refcount!: %d\n",
2139 debuglog("Attempting to unmonitor host %16s\n", hostname);
2141 bzero(&smon_id,sizeof(smon_id));
2143 smon_id.mon_name = hostname;
2144 smon_id.my_id.my_name = "localhost";
2145 smon_id.my_id.my_prog = NLM_PROG;
2146 smon_id.my_id.my_vers = NLM_SM;
2147 smon_id.my_id.my_proc = NLM_SM_NOTIFY;
2149 rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_UNMON,
2150 (xdrproc_t)xdr_mon_id, &smon_id,
2151 (xdrproc_t)xdr_sm_stat, &smstat);
2154 debuglog("Rpc call to unmonitor statd failed with "
2155 " return value: %d\n", rpcret);
2158 LIST_REMOVE(ihp, hostlst);
2163 * notify: Clear all locks from a host if statd complains
2165 * XXX: This routine has not been thoroughly tested. However, neither
2166 * had the old one been. It used to compare the statd crash state counter
2167 * to the current lock state. The upshot of this was that it basically
2168 * cleared all locks from the specified host 99% of the time (with the
2169 * other 1% being a bug). Consequently, the assumption is that clearing
2170 * all locks from a host when notified by statd is acceptable.
2172 * Please note that this routine skips the usual level of redirection
2173 * through a do_* type routine. This introduces a possible level of
2174 * error and might better be written as do_notify and take this one out.
/*
 * notify: statd callback on a host state-number change -- clears all
 * locks held by that host (see the rationale comment above in the
 * original file).
 *
 * NOTE(review): incomplete excerpt -- the body between the two
 * debuglog calls (presumably the clear_partialfilelock call guarded by
 * siglock/sigunlock -- confirm against the full source) is missing.
 */
2179 notify(const char *hostname, const int state)
2181 debuglog("notify from %s, new state %d", hostname, state);
2187 debuglog("Leaving notify\n");
/*
 * send_granted: notify a blocked client that its lock has been granted,
 * via an NLM(4)_GRANTED (or _MSG for async) RPC call built from the
 * saved file_lock. Uses a 0.5s reply timeout for synchronous calls and
 * 0 for async ones. On failure to obtain a CLIENT handle the client is
 * left to time out and retry on its own.
 *
 * Written as a K&R (old-style) definition; the testargs/res RPC
 * buffers are static, so this routine is not reentrant.
 *
 * NOTE(review): incomplete excerpt -- braces, the cli NULL check,
 * timeo.tv_sec setup, and else keywords are missing from this fragment.
 */
2191 send_granted(fl, opcode)
2192 struct file_lock *fl;
2193 int opcode __unused;
2197 struct timeval timeo;
2199 static struct nlm_res retval;
2200 static struct nlm4_res retval4;
2202 debuglog("About to send granted on blocked lock\n");
2204 cli = get_client(fl->addr,
2205 (fl->flags & LOCK_V4) ? NLM_VERS4 : NLM_VERS);
2207 syslog(LOG_NOTICE, "failed to get CLIENT for %s",
2210 * We fail to notify remote that the lock has been granted.
2211 * The client will timeout and retry, the lock will be
2212 * granted at this time.
2217 timeo.tv_usec = (fl->flags & LOCK_ASYNC) ? 0 : 500000; /* 0.5s */
2219 if (fl->flags & LOCK_V4) {
2220 static nlm4_testargs res;
2221 res.cookie = fl->client_cookie;
2222 res.exclusive = fl->client.exclusive;
2223 res.alock.caller_name = fl->client_name;
2224 res.alock.fh.n_len = sizeof(fhandle_t);
2225 res.alock.fh.n_bytes = (char*)&fl->filehandle;
2226 res.alock.oh = fl->client.oh;
2227 res.alock.svid = fl->client.svid;
2228 res.alock.l_offset = fl->client.l_offset;
2229 res.alock.l_len = fl->client.l_len;
2230 debuglog("sending v4 reply%s",
2231 (fl->flags & LOCK_ASYNC) ? " (async)":"");
2232 if (fl->flags & LOCK_ASYNC) {
2233 success = clnt_call(cli, NLM4_GRANTED_MSG,
2234 (xdrproc_t)xdr_nlm4_testargs, &res,
2235 (xdrproc_t)xdr_void, &dummy, timeo);
2237 success = clnt_call(cli, NLM4_GRANTED,
2238 (xdrproc_t)xdr_nlm4_testargs, &res,
2239 (xdrproc_t)xdr_nlm4_res, &retval4, timeo);
2242 static nlm_testargs res;
2244 res.cookie = fl->client_cookie;
2245 res.exclusive = fl->client.exclusive;
2246 res.alock.caller_name = fl->client_name;
2247 res.alock.fh.n_len = sizeof(fhandle_t);
2248 res.alock.fh.n_bytes = (char*)&fl->filehandle;
2249 res.alock.oh = fl->client.oh;
2250 res.alock.svid = fl->client.svid;
2251 res.alock.l_offset = fl->client.l_offset;
2252 res.alock.l_len = fl->client.l_len;
2253 debuglog("sending v1 reply%s",
2254 (fl->flags & LOCK_ASYNC) ? " (async)":"");
2255 if (fl->flags & LOCK_ASYNC) {
2256 success = clnt_call(cli, NLM_GRANTED_MSG,
2257 (xdrproc_t)xdr_nlm_testargs, &res,
2258 (xdrproc_t)xdr_void, &dummy, timeo);
2260 success = clnt_call(cli, NLM_GRANTED,
2261 (xdrproc_t)xdr_nlm_testargs, &res,
2262 (xdrproc_t)xdr_nlm_res, &retval, timeo);
2265 if (debug_level > 2)
2266 debuglog("clnt_call returns %d(%s) for granted",
2267 success, clnt_sperrno(success));
2272 * Routines below here have not been modified in the overhaul
2276 * Are these two routines still required since lockd is not spawning off
2277 * children to service locks anymore? Presumably they were originally
2278 * put in place to prevent a one child from changing the lock list out
2279 * from under another one.
2287 sigemptyset(&block);
2288 sigaddset(&block, SIGCHLD);
2290 if (sigprocmask(SIG_BLOCK, &block, NULL) < 0) {
2291 syslog(LOG_WARNING, "siglock failed: %s", strerror(errno));
2300 sigemptyset(&block);
2301 sigaddset(&block, SIGCHLD);
2303 if (sigprocmask(SIG_UNBLOCK, &block, NULL) < 0) {
2304 syslog(LOG_WARNING, "sigunlock failed: %s", strerror(errno));