FreeBSD releng/10.2 - sys/kern/kern_lockf.c
1 /*-
2  * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
3  * Authors: Doug Rabson <dfr@rabson.org>
4  * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 /*-
28  * Copyright (c) 1982, 1986, 1989, 1993
29  *      The Regents of the University of California.  All rights reserved.
30  *
31  * This code is derived from software contributed to Berkeley by
32  * Scooter Morris at Genentech Inc.
33  *
34  * Redistribution and use in source and binary forms, with or without
35  * modification, are permitted provided that the following conditions
36  * are met:
37  * 1. Redistributions of source code must retain the above copyright
38  *    notice, this list of conditions and the following disclaimer.
39  * 2. Redistributions in binary form must reproduce the above copyright
40  *    notice, this list of conditions and the following disclaimer in the
41  *    documentation and/or other materials provided with the distribution.
42  * 4. Neither the name of the University nor the names of its contributors
43  *    may be used to endorse or promote products derived from this software
44  *    without specific prior written permission.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
47  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
50  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
51  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
52  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56  * SUCH DAMAGE.
57  *
58  *      @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94
59  */
60
61 #include <sys/cdefs.h>
62 __FBSDID("$FreeBSD$");
63
64 #include "opt_debug_lockf.h"
65
66 #include <sys/param.h>
67 #include <sys/systm.h>
68 #include <sys/hash.h>
69 #include <sys/kernel.h>
70 #include <sys/limits.h>
71 #include <sys/lock.h>
72 #include <sys/mount.h>
73 #include <sys/mutex.h>
74 #include <sys/proc.h>
75 #include <sys/sx.h>
76 #include <sys/unistd.h>
77 #include <sys/vnode.h>
78 #include <sys/malloc.h>
79 #include <sys/fcntl.h>
80 #include <sys/lockf.h>
81 #include <sys/taskqueue.h>
82
83 #ifdef LOCKF_DEBUG
84 #include <sys/sysctl.h>
85
86 #include <ufs/ufs/quota.h>
87 #include <ufs/ufs/inode.h>
88
89 static int      lockf_debug = 0; /* control debug output */
90 SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
91 #endif
92
93 static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");
94
95 struct owner_edge;
96 struct owner_vertex;
97 struct owner_vertex_list;
98 struct owner_graph;
99
100 #define NOLOCKF (struct lockf_entry *)0
101 #define SELF    0x1
102 #define OTHERS  0x2
103 static void      lf_init(void *);
104 static int       lf_hash_owner(caddr_t, struct flock *, int);
105 static int       lf_owner_matches(struct lock_owner *, caddr_t, struct flock *,
106     int);
107 static struct lockf_entry *
108                  lf_alloc_lock(struct lock_owner *);
109 static int       lf_free_lock(struct lockf_entry *);
110 static int       lf_clearlock(struct lockf *, struct lockf_entry *);
111 static int       lf_overlaps(struct lockf_entry *, struct lockf_entry *);
112 static int       lf_blocks(struct lockf_entry *, struct lockf_entry *);
113 static void      lf_free_edge(struct lockf_edge *);
114 static struct lockf_edge *
115                  lf_alloc_edge(void);
116 static void      lf_alloc_vertex(struct lockf_entry *);
117 static int       lf_add_edge(struct lockf_entry *, struct lockf_entry *);
118 static void      lf_remove_edge(struct lockf_edge *);
119 static void      lf_remove_outgoing(struct lockf_entry *);
120 static void      lf_remove_incoming(struct lockf_entry *);
121 static int       lf_add_outgoing(struct lockf *, struct lockf_entry *);
122 static int       lf_add_incoming(struct lockf *, struct lockf_entry *);
123 static int       lf_findoverlap(struct lockf_entry **, struct lockf_entry *,
124     int);
125 static struct lockf_entry *
126                  lf_getblock(struct lockf *, struct lockf_entry *);
127 static int       lf_getlock(struct lockf *, struct lockf_entry *, struct flock *);
128 static void      lf_insert_lock(struct lockf *, struct lockf_entry *);
129 static void      lf_wakeup_lock(struct lockf *, struct lockf_entry *);
130 static void      lf_update_dependancies(struct lockf *, struct lockf_entry *,
131     int all, struct lockf_entry_list *);
132 static void      lf_set_start(struct lockf *, struct lockf_entry *, off_t,
133         struct lockf_entry_list*);
134 static void      lf_set_end(struct lockf *, struct lockf_entry *, off_t,
135         struct lockf_entry_list*);
136 static int       lf_setlock(struct lockf *, struct lockf_entry *,
137     struct vnode *, void **cookiep);
138 static int       lf_cancel(struct lockf *, struct lockf_entry *, void *);
139 static void      lf_split(struct lockf *, struct lockf_entry *,
140     struct lockf_entry *, struct lockf_entry_list *);
141 #ifdef LOCKF_DEBUG
142 static int       graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
143     struct owner_vertex_list *path);
144 static void      graph_check(struct owner_graph *g, int checkorder);
145 static void      graph_print_vertices(struct owner_vertex_list *set);
146 #endif
147 static int       graph_delta_forward(struct owner_graph *g,
148     struct owner_vertex *x, struct owner_vertex *y,
149     struct owner_vertex_list *delta);
150 static int       graph_delta_backward(struct owner_graph *g,
151     struct owner_vertex *x, struct owner_vertex *y,
152     struct owner_vertex_list *delta);
153 static int       graph_add_indices(int *indices, int n,
154     struct owner_vertex_list *set);
155 static int       graph_assign_indices(struct owner_graph *g, int *indices,
156     int nextunused, struct owner_vertex_list *set);
157 static int       graph_add_edge(struct owner_graph *g,
158     struct owner_vertex *x, struct owner_vertex *y);
159 static void      graph_remove_edge(struct owner_graph *g,
160     struct owner_vertex *x, struct owner_vertex *y);
161 static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g,
162     struct lock_owner *lo);
163 static void      graph_free_vertex(struct owner_graph *g,
164     struct owner_vertex *v);
165 static struct owner_graph * graph_init(struct owner_graph *g);
166 #ifdef LOCKF_DEBUG
167 static void      lf_print(char *, struct lockf_entry *);
168 static void      lf_printlist(char *, struct lockf_entry *);
169 static void      lf_print_owner(struct lock_owner *);
170 #endif
171
172 /*
173  * This structure is used to keep track of both local and remote lock
174  * owners. The lf_owner field of the struct lockf_entry points back at
175  * the lock owner structure. Each possible lock owner (local proc for
176  * POSIX fcntl locks, local file for BSD flock locks or <pid,sysid>
177  * pair for remote locks) is represented by a unique instance of
178  * struct lock_owner.
179  *
180  * If a lock owner has a lock that blocks some other lock or a lock
181  * that is waiting for some other lock, it also has a vertex in the
182  * owner_graph below.
183  *
184  * Locks:
185  * (s)          locked by state->ls_lock
186  * (S)          locked by lf_lock_states_lock
187  * (l)          locked by lf_lock_owners_lock
188  * (g)          locked by lf_owner_graph_lock
189  * (c)          const until freeing
190  */
191 #define LOCK_OWNER_HASH_SIZE    256
192
193 struct lock_owner {
194         LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */
195         int     lo_refs;            /* (l) Number of locks referring to this */
196         int     lo_flags;           /* (c) Flags passed to lf_advlock */
197         caddr_t lo_id;              /* (c) Id value passed to lf_advlock */
198         pid_t   lo_pid;             /* (c) Process Id of the lock owner */
199         int     lo_sysid;           /* (c) System Id of the lock owner */
200         struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */
201 };
202
203 LIST_HEAD(lock_owner_list, lock_owner);
204
205 static struct sx                lf_lock_states_lock;
206 static struct lockf_list        lf_lock_states; /* (S) */
207 static struct sx                lf_lock_owners_lock;
208 static struct lock_owner_list   lf_lock_owners[LOCK_OWNER_HASH_SIZE]; /* (l) */
209
210 /*
211  * Structures for deadlock detection.
212  *
213  * We have two types of directed graph; the first is the set of locks,
214  * both active and pending on a vnode. Within this graph, active locks
215  * are terminal nodes in the graph (i.e. have no out-going
216  * edges). Pending locks have out-going edges to each blocking active
217  * lock that prevents the lock from being granted and also to each
218  * older pending lock that would block them if it was active. The
219  * graph for each vnode is naturally acyclic; new edges are only ever
220  * added to or from new nodes (either new pending locks which only add
221  * out-going edges or new active locks which only add in-coming edges)
222  * therefore they cannot create loops in the lock graph.
223  *
224  * The second graph is a global graph of lock owners. Each lock owner
225  * is a vertex in that graph and an edge is added to the graph
226  * whenever an edge is added to a vnode graph, with end points
227  * corresponding to owner of the new pending lock and the owner of the
228  * lock upon which it waits. In order to prevent deadlock, we only add
229  * an edge to this graph if the new edge would not create a cycle.
230  * 
231  * The lock owner graph is topologically sorted, i.e. if a node has
232  * any outgoing edges, then it has an order strictly less than any
233  * node to which it has an outgoing edge. We preserve this ordering
234  * (and detect cycles) on edge insertion using Algorithm PK from the
235  * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic
236  * Graphs" (ACM Journal of Experimental Algorithms, Vol 11, Article
237  * No. 1.7)
238  */
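/*
 * Illustrative example of the ordering invariant: if owner A waits on
 * a lock held by owner B, and B in turn waits on a lock held by owner
 * C, the owner graph contains the edges A->B and B->C and a valid
 * ordering is order(A) < order(B) < order(C).  A request by C that
 * would add the edge C->A is refused with EDEADLK because it would
 * close the cycle C->A->B->C.  A new edge that merely runs from a
 * higher-ordered vertex to a lower-ordered one without creating a
 * cycle only forces the affected region of the ordering to be
 * recomputed.
 */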
239 struct owner_vertex;
240
241 struct owner_edge {
242         LIST_ENTRY(owner_edge) e_outlink; /* (g) link in e_from's out-edge list */
243         LIST_ENTRY(owner_edge) e_inlink;  /* (g) link in e_to's in-edge list */
244         int             e_refs;           /* (g) number of times added */
245         struct owner_vertex *e_from;      /* (c) out-going from here */
246         struct owner_vertex *e_to;        /* (c) in-coming to here */
247 };
248 LIST_HEAD(owner_edge_list, owner_edge);
249
250 struct owner_vertex {
251         TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */
252         uint32_t        v_gen;            /* (g) workspace for edge insertion */
253         int             v_order;          /* (g) order of vertex in graph */
254         struct owner_edge_list v_outedges;/* (g) list of out-edges */
255         struct owner_edge_list v_inedges; /* (g) list of in-edges */
256         struct lock_owner *v_owner;       /* (c) corresponding lock owner */
257 };
258 TAILQ_HEAD(owner_vertex_list, owner_vertex);
259
260 struct owner_graph {
261         struct owner_vertex** g_vertices; /* (g) pointers to vertices */
262         int             g_size;           /* (g) number of vertices */
263         int             g_space;          /* (g) space allocated for vertices */
264         int             *g_indexbuf;      /* (g) workspace for loop detection */
265         uint32_t        g_gen;            /* (g) increment when re-ordering */
266 };
267
268 static struct sx                lf_owner_graph_lock;
269 static struct owner_graph       lf_owner_graph;
270
271 /*
272  * Initialise various structures and locks.
273  */
274 static void
275 lf_init(void *dummy)
276 {
277         int i;
278
279         sx_init(&lf_lock_states_lock, "lock states lock");
280         LIST_INIT(&lf_lock_states);
281
282         sx_init(&lf_lock_owners_lock, "lock owners lock");
283         for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++)
284                 LIST_INIT(&lf_lock_owners[i]);
285
286         sx_init(&lf_owner_graph_lock, "owner graph lock");
287         graph_init(&lf_owner_graph);
288 }
289 SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL);
290
291 /*
292  * Generate a hash value for a lock owner.
293  */
294 static int
295 lf_hash_owner(caddr_t id, struct flock *fl, int flags)
296 {
297         uint32_t h;
298
299         if (flags & F_REMOTE) {
300                 h = HASHSTEP(0, fl->l_pid);
301                 h = HASHSTEP(h, fl->l_sysid);
302         } else if (flags & F_FLOCK) {
303                 h = ((uintptr_t) id) >> 7;
304         } else {
305                 struct proc *p = (struct proc *) id;
306                 h = HASHSTEP(0, p->p_pid);
307                 h = HASHSTEP(h, 0);
308         }
309
310         return (h % LOCK_OWNER_HASH_SIZE);
311 }
312
313 /*
314  * Return true if a lock owner matches the details passed to
315  * lf_advlock.
316  */
317 static int
318 lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl,
319     int flags)
320 {
321         if (flags & F_REMOTE) {
322                 return lo->lo_pid == fl->l_pid
323                         && lo->lo_sysid == fl->l_sysid;
324         } else {
325                 return lo->lo_id == id;
326         }
327 }
328
329 static struct lockf_entry *
330 lf_alloc_lock(struct lock_owner *lo)
331 {
332         struct lockf_entry *lf;
333
334         lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO);
335
336 #ifdef LOCKF_DEBUG
337         if (lockf_debug & 4)
338                 printf("Allocated lock %p\n", lf);
339 #endif
340         if (lo) {
341                 sx_xlock(&lf_lock_owners_lock);
342                 lo->lo_refs++;
343                 sx_xunlock(&lf_lock_owners_lock);
344                 lf->lf_owner = lo;
345         }
346
347         return (lf);
348 }
349
350 static int
351 lf_free_lock(struct lockf_entry *lock)
352 {
353
354         KASSERT(lock->lf_refs > 0, ("lockf_entry negative ref count %p", lock));
355         if (--lock->lf_refs > 0)
356                 return (0);
357         /*
358          * Adjust the lock_owner reference count and
359          * reclaim the entry if this is the last lock
360          * for that owner.
361          */
362         struct lock_owner *lo = lock->lf_owner;
363         if (lo) {
364                 KASSERT(LIST_EMPTY(&lock->lf_outedges),
365                     ("freeing lock with dependancies"));
366                 KASSERT(LIST_EMPTY(&lock->lf_inedges),
367                     ("freeing lock with dependants"));
368                 sx_xlock(&lf_lock_owners_lock);
369                 KASSERT(lo->lo_refs > 0, ("lock owner refcount"));
370                 lo->lo_refs--;
371                 if (lo->lo_refs == 0) {
372 #ifdef LOCKF_DEBUG
373                         if (lockf_debug & 1)
374                                 printf("lf_free_lock: freeing lock owner %p\n",
375                                     lo);
376 #endif
377                         if (lo->lo_vertex) {
378                                 sx_xlock(&lf_owner_graph_lock);
379                                 graph_free_vertex(&lf_owner_graph,
380                                     lo->lo_vertex);
381                                 sx_xunlock(&lf_owner_graph_lock);
382                         }
383                         LIST_REMOVE(lo, lo_link);
384                         free(lo, M_LOCKF);
385 #ifdef LOCKF_DEBUG
386                         if (lockf_debug & 4)
387                                 printf("Freed lock owner %p\n", lo);
388 #endif
389                 }
390                 sx_unlock(&lf_lock_owners_lock);
391         }
392         if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) {
393                 vrele(lock->lf_vnode);
394                 lock->lf_vnode = NULL;
395         }
396 #ifdef LOCKF_DEBUG
397         if (lockf_debug & 4)
398                 printf("Freed lock %p\n", lock);
399 #endif
400         free(lock, M_LOCKF);
401         return (1);
402 }
403
404 /*
405  * Advisory record locking support
406  */
407 int
408 lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
409     u_quad_t size)
410 {
411         struct lockf *state, *freestate = NULL;
412         struct flock *fl = ap->a_fl;
413         struct lockf_entry *lock;
414         struct vnode *vp = ap->a_vp;
415         caddr_t id = ap->a_id;
416         int flags = ap->a_flags;
417         int hash;
418         struct lock_owner *lo;
419         off_t start, end, oadd;
420         int error;
421
422         /*
423          * Handle the F_UNLCKSYS case first - no need to mess about
424          * creating a lock owner for this one.
425          */
426         if (ap->a_op == F_UNLCKSYS) {
427                 lf_clearremotesys(fl->l_sysid);
428                 return (0);
429         }
430
431         /*
432          * Convert the flock structure into a start and end.
433          */
434         switch (fl->l_whence) {
435
436         case SEEK_SET:
437         case SEEK_CUR:
438                 /*
439                  * Caller is responsible for adding any necessary offset
440                  * when SEEK_CUR is used.
441                  */
442                 start = fl->l_start;
443                 break;
444
445         case SEEK_END:
446                 if (size > OFF_MAX ||
447                     (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
448                         return (EOVERFLOW);
449                 start = size + fl->l_start;
450                 break;
451
452         default:
453                 return (EINVAL);
454         }
455         if (start < 0)
456                 return (EINVAL);
457         if (fl->l_len < 0) {
458                 if (start == 0)
459                         return (EINVAL);
460                 end = start - 1;
461                 start += fl->l_len;
462                 if (start < 0)
463                         return (EINVAL);
464         } else if (fl->l_len == 0) {
465                 end = OFF_MAX;
466         } else {
467                 oadd = fl->l_len - 1;
468                 if (oadd > OFF_MAX - start)
469                         return (EOVERFLOW);
470                 end = start + oadd;
471         }
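        /*
         * Worked examples of the conversion above (illustrative values
         * only): with l_whence == SEEK_SET, l_start == 100 and
         * l_len == 10 the lock covers bytes [100..109]; with
         * l_len == -10 it covers the ten bytes before l_start,
         * i.e. [90..99]; and with l_len == 0 it extends from l_start
         * to OFF_MAX, i.e. to the end of the file however large it
         * grows.
         */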
472
473 retry_setlock:
474
475         /*
476          * Avoid the common case of unlocking when inode has no locks.
477          */
478         VI_LOCK(vp);
479         if ((*statep) == NULL) {
480                 if (ap->a_op != F_SETLK) {
481                         fl->l_type = F_UNLCK;
482                         VI_UNLOCK(vp);
483                         return (0);
484                 }
485         }
486         VI_UNLOCK(vp);
487
488         /*
489          * Map our arguments to an existing lock owner or create one
490          * if this is the first time we have seen this owner.
491          */
492         hash = lf_hash_owner(id, fl, flags);
493         sx_xlock(&lf_lock_owners_lock);
494         LIST_FOREACH(lo, &lf_lock_owners[hash], lo_link)
495                 if (lf_owner_matches(lo, id, fl, flags))
496                         break;
497         if (!lo) {
498                 /*
499                  * We initialise the lock owner with a reference
500                  * count which matches the new lockf_entry
501                  * structure created below.
502                  */
503                 lo = malloc(sizeof(struct lock_owner), M_LOCKF,
504                     M_WAITOK|M_ZERO);
505 #ifdef LOCKF_DEBUG
506                 if (lockf_debug & 4)
507                         printf("Allocated lock owner %p\n", lo);
508 #endif
509
510                 lo->lo_refs = 1;
511                 lo->lo_flags = flags;
512                 lo->lo_id = id;
513                 if (flags & F_REMOTE) {
514                         lo->lo_pid = fl->l_pid;
515                         lo->lo_sysid = fl->l_sysid;
516                 } else if (flags & F_FLOCK) {
517                         lo->lo_pid = -1;
518                         lo->lo_sysid = 0;
519                 } else {
520                         struct proc *p = (struct proc *) id;
521                         lo->lo_pid = p->p_pid;
522                         lo->lo_sysid = 0;
523                 }
524                 lo->lo_vertex = NULL;
525
526 #ifdef LOCKF_DEBUG
527                 if (lockf_debug & 1) {
528                         printf("lf_advlockasync: new lock owner %p ", lo);
529                         lf_print_owner(lo);
530                         printf("\n");
531                 }
532 #endif
533
534                 LIST_INSERT_HEAD(&lf_lock_owners[hash], lo, lo_link);
535         } else {
536                 /*
537                  * We have seen this lock owner before, increase its
538                  * reference count to account for the new lockf_entry
539                  * structure we create below.
540                  */
541                 lo->lo_refs++;
542         }
543         sx_xunlock(&lf_lock_owners_lock);
544
545         /*
546          * Create the lockf structure. We initialise the lf_owner
547          * field here instead of in lf_alloc_lock() to avoid paying
548          * the lf_lock_owners_lock tax twice.
549          */
550         lock = lf_alloc_lock(NULL);
551         lock->lf_refs = 1;
552         lock->lf_start = start;
553         lock->lf_end = end;
554         lock->lf_owner = lo;
555         lock->lf_vnode = vp;
556         if (flags & F_REMOTE) {
557                 /*
558                  * For remote locks, the caller may release its ref to
559                  * the vnode at any time - we have to ref it here to
560                  * prevent it from being recycled unexpectedly.
561                  */
562                 vref(vp);
563         }
564
565         /*
566          * XXX The problem is that VTOI is ufs specific, so it will
567          * break LOCKF_DEBUG for all FS's other than UFS because
568          * it casts the vnode->data ptr to struct inode *.
569          */
570 /*      lock->lf_inode = VTOI(ap->a_vp); */
571         lock->lf_inode = (struct inode *)0;
572         lock->lf_type = fl->l_type;
573         LIST_INIT(&lock->lf_outedges);
574         LIST_INIT(&lock->lf_inedges);
575         lock->lf_async_task = ap->a_task;
576         lock->lf_flags = ap->a_flags;
577
578         /*
579          * Do the requested operation. First find our state structure
580          * and create a new one if necessary - the caller's *statep
581          * variable and the state's ls_threads count are protected by
582          * the vnode interlock.
583          */
584         VI_LOCK(vp);
585         if (vp->v_iflag & VI_DOOMED) {
586                 VI_UNLOCK(vp);
587                 lf_free_lock(lock);
588                 return (ENOENT);
589         }
590
591         /*
592          * Allocate a state structure if necessary.
593          */
594         state = *statep;
595         if (state == NULL) {
596                 struct lockf *ls;
597
598                 VI_UNLOCK(vp);
599
600                 ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO);
601                 sx_init(&ls->ls_lock, "ls_lock");
602                 LIST_INIT(&ls->ls_active);
603                 LIST_INIT(&ls->ls_pending);
604                 ls->ls_threads = 1;
605
606                 sx_xlock(&lf_lock_states_lock);
607                 LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link);
608                 sx_xunlock(&lf_lock_states_lock);
609
610                 /*
611                  * Cope if we lost a race with some other thread while
612                  * trying to allocate memory.
613                  */
614                 VI_LOCK(vp);
615                 if (vp->v_iflag & VI_DOOMED) {
616                         VI_UNLOCK(vp);
617                         sx_xlock(&lf_lock_states_lock);
618                         LIST_REMOVE(ls, ls_link);
619                         sx_xunlock(&lf_lock_states_lock);
620                         sx_destroy(&ls->ls_lock);
621                         free(ls, M_LOCKF);
622                         lf_free_lock(lock);
623                         return (ENOENT);
624                 }
625                 if ((*statep) == NULL) {
626                         state = *statep = ls;
627                         VI_UNLOCK(vp);
628                 } else {
629                         state = *statep;
630                         state->ls_threads++;
631                         VI_UNLOCK(vp);
632
633                         sx_xlock(&lf_lock_states_lock);
634                         LIST_REMOVE(ls, ls_link);
635                         sx_xunlock(&lf_lock_states_lock);
636                         sx_destroy(&ls->ls_lock);
637                         free(ls, M_LOCKF);
638                 }
639         } else {
640                 state->ls_threads++;
641                 VI_UNLOCK(vp);
642         }
643
644         sx_xlock(&state->ls_lock);
645         /*
646          * Recheck the doomed vnode after state->ls_lock is
647          * locked. lf_purgelocks() requires that no new threads add
648          * pending locks when vnode is marked by VI_DOOMED flag.
649          */
650         VI_LOCK(vp);
651         if (vp->v_iflag & VI_DOOMED) {
652                 state->ls_threads--;
653                 wakeup(state);
654                 VI_UNLOCK(vp);
655                 sx_xunlock(&state->ls_lock);
656                 lf_free_lock(lock);
657                 return (ENOENT);
658         }
659         VI_UNLOCK(vp);
660
661         switch (ap->a_op) {
662         case F_SETLK:
663                 error = lf_setlock(state, lock, vp, ap->a_cookiep);
664                 break;
665
666         case F_UNLCK:
667                 error = lf_clearlock(state, lock);
668                 lf_free_lock(lock);
669                 break;
670
671         case F_GETLK:
672                 error = lf_getlock(state, lock, fl);
673                 lf_free_lock(lock);
674                 break;
675
676         case F_CANCEL:
677                 if (ap->a_cookiep)
678                         error = lf_cancel(state, lock, *ap->a_cookiep);
679                 else
680                         error = EINVAL;
681                 lf_free_lock(lock);
682                 break;
683
684         default:
685                 lf_free_lock(lock);
686                 error = EINVAL;
687                 break;
688         }
689
690 #ifdef INVARIANTS
691         /*
692          * Check for some can't happen stuff. In this case, the active
693          * lock list becoming disordered or containing mutually
694          * blocking locks. We also check the pending list for locks
695          * which should be active (i.e. have no out-going edges).
696          */
697         LIST_FOREACH(lock, &state->ls_active, lf_link) {
698                 struct lockf_entry *lf;
699                 if (LIST_NEXT(lock, lf_link))
700                         KASSERT((lock->lf_start
701                                 <= LIST_NEXT(lock, lf_link)->lf_start),
702                             ("locks disordered"));
703                 LIST_FOREACH(lf, &state->ls_active, lf_link) {
704                         if (lock == lf)
705                                 break;
706                         KASSERT(!lf_blocks(lock, lf),
707                             ("two conflicting active locks"));
708                         if (lock->lf_owner == lf->lf_owner)
709                                 KASSERT(!lf_overlaps(lock, lf),
710                                     ("two overlapping locks from same owner"));
711                 }
712         }
713         LIST_FOREACH(lock, &state->ls_pending, lf_link) {
714                 KASSERT(!LIST_EMPTY(&lock->lf_outedges),
715                     ("pending lock which should be active"));
716         }
717 #endif
718         sx_xunlock(&state->ls_lock);
719
720         /*
721          * If we have removed the last active lock on the vnode and
722          * this is the last thread that was in-progress, we can free
723          * the state structure. We update the caller's pointer inside
724          * the vnode interlock but call free outside.
725          *
726          * XXX alternatively, keep the state structure around until
727          * the filesystem recycles - requires a callback from the
728          * filesystem.
729          */
730         VI_LOCK(vp);
731
732         state->ls_threads--;
733         wakeup(state);
734         if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) {
735                 KASSERT(LIST_EMPTY(&state->ls_pending),
736                     ("freeing state with pending locks"));
737                 freestate = state;
738                 *statep = NULL;
739         }
740
741         VI_UNLOCK(vp);
742
743         if (freestate != NULL) {
744                 sx_xlock(&lf_lock_states_lock);
745                 LIST_REMOVE(freestate, ls_link);
746                 sx_xunlock(&lf_lock_states_lock);
747                 sx_destroy(&freestate->ls_lock);
748                 free(freestate, M_LOCKF);
749                 freestate = NULL;
750         }
751
752         if (error == EDOOFUS) {
753                 KASSERT(ap->a_op == F_SETLK, ("EDOOFUS"));
754                 goto retry_setlock;
755         }
756         return (error);
757 }
758
759 int
760 lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size)
761 {
762         struct vop_advlockasync_args a;
763
764         a.a_vp = ap->a_vp;
765         a.a_id = ap->a_id;
766         a.a_op = ap->a_op;
767         a.a_fl = ap->a_fl;
768         a.a_flags = ap->a_flags;
769         a.a_task = NULL;
770         a.a_cookiep = NULL;
771
772         return (lf_advlockasync(&a, statep, size));
773 }
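/*
 * Callers normally reach lf_advlock() from a filesystem's VOP_ADVLOCK
 * implementation, passing a pointer to the per-vnode lock state and
 * the current file size (used to resolve SEEK_END requests).  As a
 * rough sketch of such a caller (UFS-style; the names and details are
 * illustrative and vary by filesystem):
 *
 *      static int
 *      xxx_advlock(struct vop_advlock_args *ap)
 *      {
 *              struct inode *ip = VTOI(ap->a_vp);
 *
 *              return (lf_advlock(ap, &ip->i_lockf, ip->i_size));
 *      }
 */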
774
775 void
776 lf_purgelocks(struct vnode *vp, struct lockf **statep)
777 {
778         struct lockf *state;
779         struct lockf_entry *lock, *nlock;
780
781         /*
782          * For this to work correctly, the caller must ensure that no
783          * other threads enter the locking system for this vnode,
784          * e.g. by checking VI_DOOMED. We wake up any threads that are
785          * sleeping waiting for locks on this vnode and then free all
786          * the remaining locks.
787          */
788         VI_LOCK(vp);
789         KASSERT(vp->v_iflag & VI_DOOMED,
790             ("lf_purgelocks: vp %p has not vgone yet", vp));
791         state = *statep;
792         if (state) {
793                 *statep = NULL;
794                 state->ls_threads++;
795                 VI_UNLOCK(vp);
796
797                 sx_xlock(&state->ls_lock);
798                 sx_xlock(&lf_owner_graph_lock);
799                 LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) {
800                         LIST_REMOVE(lock, lf_link);
801                         lf_remove_outgoing(lock);
802                         lf_remove_incoming(lock);
803
804                         /*
805                          * If it's an async lock, we can just free it
806                          * here, otherwise we let the sleeping thread
807                          * free it.
808                          */
809                         if (lock->lf_async_task) {
810                                 lf_free_lock(lock);
811                         } else {
812                                 lock->lf_flags |= F_INTR;
813                                 wakeup(lock);
814                         }
815                 }
816                 sx_xunlock(&lf_owner_graph_lock);
817                 sx_xunlock(&state->ls_lock);
818
819                 /*
820                  * Wait for all other threads, sleeping and otherwise
821                  * to leave.
822                  */
823                 VI_LOCK(vp);
824                 while (state->ls_threads > 1)
825                         msleep(state, VI_MTX(vp), 0, "purgelocks", 0);
826                 VI_UNLOCK(vp);
827
828                 /*
829                  * We can just free all the active locks since they
830                  * will have no dependencies (we removed them all
831                  * above). We don't need to bother locking since we
832                  * are the last thread using this state structure.
833                  */
834                 KASSERT(LIST_EMPTY(&state->ls_pending),
835                     ("lock pending for %p", state));
836                 LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) {
837                         LIST_REMOVE(lock, lf_link);
838                         lf_free_lock(lock);
839                 }
840                 sx_xlock(&lf_lock_states_lock);
841                 LIST_REMOVE(state, ls_link);
842                 sx_xunlock(&lf_lock_states_lock);
843                 sx_destroy(&state->ls_lock);
844                 free(state, M_LOCKF);
845         } else {
846                 VI_UNLOCK(vp);
847         }
848 }
849
850 /*
851  * Return non-zero if locks 'x' and 'y' overlap.
852  */
853 static int
854 lf_overlaps(struct lockf_entry *x, struct lockf_entry *y)
855 {
856
857         return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start);
858 }
859
860 /*
861  * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa).
862  */
863 static int
864 lf_blocks(struct lockf_entry *x, struct lockf_entry *y)
865 {
866
867         return x->lf_owner != y->lf_owner
868                 && (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK)
869                 && lf_overlaps(x, y);
870 }
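/*
 * For example, a write lock on [0..9] held by one owner blocks a read
 * or write lock on [5..14] requested by a different owner, since the
 * ranges overlap and at least one of the locks is exclusive.  Two read
 * locks never block each other, and locks belonging to the same owner
 * never block each other regardless of type.
 */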
871
872 /*
873  * Allocate a lock edge.
874  */
875 static struct lockf_edge *
876 lf_alloc_edge(void)
877 {
878
879         return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO));
880 }
881
882 /*
883  * Free a lock edge.
884  */
885 static void
886 lf_free_edge(struct lockf_edge *e)
887 {
888
889         free(e, M_LOCKF);
890 }
891
892
893 /*
894  * Ensure that the lock's owner has a corresponding vertex in the
895  * owner graph.
896  */
897 static void
898 lf_alloc_vertex(struct lockf_entry *lock)
899 {
900         struct owner_graph *g = &lf_owner_graph;
901
902         if (!lock->lf_owner->lo_vertex)
903                 lock->lf_owner->lo_vertex =
904                         graph_alloc_vertex(g, lock->lf_owner);
905 }
906
907 /*
908  * Attempt to record an edge from lock x to lock y. Return EDEADLK if
909  * the new edge would cause a cycle in the owner graph.
910  */
911 static int
912 lf_add_edge(struct lockf_entry *x, struct lockf_entry *y)
913 {
914         struct owner_graph *g = &lf_owner_graph;
915         struct lockf_edge *e;
916         int error;
917
918 #ifdef INVARIANTS
919         LIST_FOREACH(e, &x->lf_outedges, le_outlink)
920                 KASSERT(e->le_to != y, ("adding lock edge twice"));
921 #endif
922
923         /*
924          * Make sure the two owners have entries in the owner graph.
925          */
926         lf_alloc_vertex(x);
927         lf_alloc_vertex(y);
928
929         error = graph_add_edge(g, x->lf_owner->lo_vertex,
930             y->lf_owner->lo_vertex);
931         if (error)
932                 return (error);
933
934         e = lf_alloc_edge();
935         LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink);
936         LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink);
937         e->le_from = x;
938         e->le_to = y;
939
940         return (0);
941 }
942
943 /*
944  * Remove an edge from the lock graph.
945  */
946 static void
947 lf_remove_edge(struct lockf_edge *e)
948 {
949         struct owner_graph *g = &lf_owner_graph;
950         struct lockf_entry *x = e->le_from;
951         struct lockf_entry *y = e->le_to;
952
953         graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex);
954         LIST_REMOVE(e, le_outlink);
955         LIST_REMOVE(e, le_inlink);
956         e->le_from = NULL;
957         e->le_to = NULL;
958         lf_free_edge(e);
959 }
960
961 /*
962  * Remove all out-going edges from lock x.
963  */
964 static void
965 lf_remove_outgoing(struct lockf_entry *x)
966 {
967         struct lockf_edge *e;
968
969         while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) {
970                 lf_remove_edge(e);
971         }
972 }
973
974 /*
975  * Remove all in-coming edges from lock x.
976  */
977 static void
978 lf_remove_incoming(struct lockf_entry *x)
979 {
980         struct lockf_edge *e;
981
982         while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) {
983                 lf_remove_edge(e);
984         }
985 }
986
987 /*
988  * Walk the list of locks for the file and create an out-going edge
989  * from lock to each blocking lock.
990  */
991 static int
992 lf_add_outgoing(struct lockf *state, struct lockf_entry *lock)
993 {
994         struct lockf_entry *overlap;
995         int error;
996
997         LIST_FOREACH(overlap, &state->ls_active, lf_link) {
998                 /*
999                  * We may assume that the active list is sorted by
1000                  * lf_start.
1001                  */
1002                 if (overlap->lf_start > lock->lf_end)
1003                         break;
1004                 if (!lf_blocks(lock, overlap))
1005                         continue;
1006
1007                 /*
1008                  * We've found a blocking lock. Add the corresponding
1009                  * edge to the graphs and see if it would cause a
1010                  * deadlock.
1011                  */
1012                 error = lf_add_edge(lock, overlap);
1013
1014                 /*
1015                  * The only error that lf_add_edge returns is EDEADLK.
1016                  * Remove any edges we added and return the error.
1017                  */
1018                 if (error) {
1019                         lf_remove_outgoing(lock);
1020                         return (error);
1021                 }
1022         }
1023
1024         /*
1025          * We also need to add edges to sleeping locks that block
1026          * us. This ensures that lf_wakeup_lock cannot grant two
1027          * mutually blocking locks simultaneously and also enforces a
1028          * 'first come, first served' fairness model. Note that this
1029          * only happens if we are blocked by at least one active lock
1030          * due to the call to lf_getblock in lf_setlock below.
1031          */
1032         LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
1033                 if (!lf_blocks(lock, overlap))
1034                         continue;
1035                 /*
1036                  * We've found a blocking lock. Add the corresponding
1037                  * edge to the graphs and see if it would cause a
1038                  * deadlock.
1039                  */
1040                 error = lf_add_edge(lock, overlap);
1041
1042                 /*
1043                  * The only error that lf_add_edge returns is EDEADLK.
1044                  * Remove any edges we added and return the error.
1045                  */
1046                 if (error) {
1047                         lf_remove_outgoing(lock);
1048                         return (error);
1049                 }
1050         }
1051
1052         return (0);
1053 }
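/*
 * For example, if owner A is already sleeping on a write-lock request
 * for [0..10] and owner B now makes a conflicting request for [5..15]
 * that is also blocked by an active lock, B gains an out-going edge to
 * A's pending entry in addition to its edges to the active blockers,
 * so B cannot be granted ahead of A when the active locks go away.
 */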
1054
1055 /*
1056  * Walk the list of pending locks for the file and create an in-coming
1057  * edge to lock from each pending lock that it would block.
1058  */
1059 static int
1060 lf_add_incoming(struct lockf *state, struct lockf_entry *lock)
1061 {
1062         struct lockf_entry *overlap;
1063         int error;
1064
1065         LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
1066                 if (!lf_blocks(lock, overlap))
1067                         continue;
1068
1069                 /*
1070                  * We've found a blocking lock. Add the corresponding
1071                  * edge to the graphs and see if it would cause a
1072                  * deadlock.
1073                  */
1074                 error = lf_add_edge(overlap, lock);
1075
1076                 /*
1077                  * The only error that lf_add_edge returns is EDEADLK.
1078                  * Remove any edges we added and return the error.
1079                  */
1080                 if (error) {
1081                         lf_remove_incoming(lock);
1082                         return (error);
1083                 }
1084         }
1085         return (0);
1086 }
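/*
 * For example, when a write lock on [0..10] owned by A is about to
 * become active while owner B already has a pending request for
 * [5..15], an edge is added from B's pending entry to A's lock so that
 * B stays blocked until A's lock is released or shrunk out of the way.
 */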
1087
1088 /*
1089  * Insert lock into the active list, keeping list entries ordered by
1090  * increasing values of lf_start.
1091  */
1092 static void
1093 lf_insert_lock(struct lockf *state, struct lockf_entry *lock)
1094 {
1095         struct lockf_entry *lf, *lfprev;
1096
1097         if (LIST_EMPTY(&state->ls_active)) {
1098                 LIST_INSERT_HEAD(&state->ls_active, lock, lf_link);
1099                 return;
1100         }
1101
1102         lfprev = NULL;
1103         LIST_FOREACH(lf, &state->ls_active, lf_link) {
1104                 if (lf->lf_start > lock->lf_start) {
1105                         LIST_INSERT_BEFORE(lf, lock, lf_link);
1106                         return;
1107                 }
1108                 lfprev = lf;
1109         }
1110         LIST_INSERT_AFTER(lfprev, lock, lf_link);
1111 }
1112
1113 /*
1114  * Wake up a sleeping lock and remove it from the pending list now
1115  * that all its dependencies have been resolved. The caller should
1116  * arrange for the lock to be added to the active list, adjusting any
1117  * existing locks for the same owner as needed.
1118  */
1119 static void
1120 lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock)
1121 {
1122
1123         /*
1124          * Remove from ls_pending list and wake up the caller
1125          * or start the async notification, as appropriate.
1126          */
1127         LIST_REMOVE(wakelock, lf_link);
1128 #ifdef LOCKF_DEBUG
1129         if (lockf_debug & 1)
1130                 lf_print("lf_wakeup_lock: awakening", wakelock);
1131 #endif /* LOCKF_DEBUG */
1132         if (wakelock->lf_async_task) {
1133                 taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task);
1134         } else {
1135                 wakeup(wakelock);
1136         }
1137 }
1138
1139 /*
1140  * Re-check all dependent locks and remove edges to locks that we no
1141  * longer block. If 'all' is non-zero, the lock has been removed and
1142  * we must remove all the dependencies, otherwise it has simply been
1143  * reduced but remains active. Any pending locks which have been
1144  * unblocked are added to 'granted'.
1145  */
1146 static void
1147 lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all,
1148         struct lockf_entry_list *granted)
1149 {
1150         struct lockf_edge *e, *ne;
1151         struct lockf_entry *deplock;
1152
1153         LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) {
1154                 deplock = e->le_from;
1155                 if (all || !lf_blocks(lock, deplock)) {
1156                         sx_xlock(&lf_owner_graph_lock);
1157                         lf_remove_edge(e);
1158                         sx_xunlock(&lf_owner_graph_lock);
1159                         if (LIST_EMPTY(&deplock->lf_outedges)) {
1160                                 lf_wakeup_lock(state, deplock);
1161                                 LIST_INSERT_HEAD(granted, deplock, lf_link);
1162                         }
1163                 }
1164         }
1165 }
1166
1167 /*
1168  * Set the start of an existing active lock, updating dependencies and
1169  * adding any newly woken locks to 'granted'.
1170  */
1171 static void
1172 lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start,
1173         struct lockf_entry_list *granted)
1174 {
1175
1176         KASSERT(new_start >= lock->lf_start, ("can't increase lock"));
1177         lock->lf_start = new_start;
1178         LIST_REMOVE(lock, lf_link);
1179         lf_insert_lock(state, lock);
1180         lf_update_dependancies(state, lock, FALSE, granted);
1181 }
1182
1183 /*
1184  * Set the end of an existing active lock, updating dependencies and
1185  * adding any newly woken locks to 'granted'.
1186  */
1187 static void
1188 lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end,
1189         struct lockf_entry_list *granted)
1190 {
1191
1192         KASSERT(new_end <= lock->lf_end, ("can't increase lock"));
1193         lock->lf_end = new_end;
1194         lf_update_dependancies(state, lock, FALSE, granted);
1195 }
1196
1197 /*
1198  * Add a lock to the active list, updating or removing any current
1199  * locks owned by the same owner and processing any pending locks that
1200  * become unblocked as a result. This code is also used for unlock
1201  * since the logic for updating existing locks is identical.
1202  *
1203  * When processing the new lock, we may unblock existing
1204  * pending locks as a result of downgrading/unlocking. We simply
1205  * activate the newly granted locks by looping.
1206  *
1207  * Since the new lock already has its dependencies set up, we always
1208  * add it to the list (unless it's an unlock request). This may
1209  * fragment the lock list in some pathological cases, but it's probably
1210  * not a real problem.
1211  */
1212 static void
1213 lf_activate_lock(struct lockf *state, struct lockf_entry *lock)
1214 {
1215         struct lockf_entry *overlap, *lf;
1216         struct lockf_entry_list granted;
1217         int ovcase;
1218
1219         LIST_INIT(&granted);
1220         LIST_INSERT_HEAD(&granted, lock, lf_link);
1221
1222         while (!LIST_EMPTY(&granted)) {
1223                 lock = LIST_FIRST(&granted);
1224                 LIST_REMOVE(lock, lf_link);
1225
1226                 /*
1227                  * Skip over locks owned by other processes.  Handle
1228                  * any locks that overlap and are owned by ourselves.
1229                  */
1230                 overlap = LIST_FIRST(&state->ls_active);
1231                 for (;;) {
1232                         ovcase = lf_findoverlap(&overlap, lock, SELF);
1233
1234 #ifdef LOCKF_DEBUG
1235                         if (ovcase && (lockf_debug & 2)) {
1236                                 printf("lf_setlock: overlap %d", ovcase);
1237                                 lf_print("", overlap);
1238                         }
1239 #endif
1240                         /*
1241                          * Six cases:
1242                          *      0) no overlap
1243                          *      1) overlap == lock
1244                          *      2) overlap contains lock
1245                          *      3) lock contains overlap
1246                          *      4) overlap starts before lock
1247                          *      5) overlap ends after lock
1248                          */
1249                         switch (ovcase) {
1250                         case 0: /* no overlap */
1251                                 break;
1252
1253                         case 1: /* overlap == lock */
1254                                 /*
1255                                  * We have already set up the
1256                                  * dependents for the new lock, taking
1257                                  * into account a possible downgrade
1258                                  * or unlock. Remove the old lock.
1259                                  */
1260                                 LIST_REMOVE(overlap, lf_link);
1261                                 lf_update_dependancies(state, overlap, TRUE,
1262                                         &granted);
1263                                 lf_free_lock(overlap);
1264                                 break;
1265
1266                         case 2: /* overlap contains lock */
1267                                 /*
1268                                  * Just split the existing lock.
1269                                  */
1270                                 lf_split(state, overlap, lock, &granted);
1271                                 break;
1272
1273                         case 3: /* lock contains overlap */
1274                                 /*
1275                                  * Delete the overlap and advance to
1276                                  * the next entry in the list.
1277                                  */
1278                                 lf = LIST_NEXT(overlap, lf_link);
1279                                 LIST_REMOVE(overlap, lf_link);
1280                                 lf_update_dependancies(state, overlap, TRUE,
1281                                         &granted);
1282                                 lf_free_lock(overlap);
1283                                 overlap = lf;
1284                                 continue;
1285
1286                         case 4: /* overlap starts before lock */
1287                                 /*
1288                                  * Just update the overlap end and
1289                                  * move on.
1290                                  */
1291                                 lf_set_end(state, overlap, lock->lf_start - 1,
1292                                     &granted);
1293                                 overlap = LIST_NEXT(overlap, lf_link);
1294                                 continue;
1295
1296                         case 5: /* overlap ends after lock */
1297                                 /*
1298                                  * Change the start of overlap and
1299                                  * re-insert.
1300                                  */
1301                                 lf_set_start(state, overlap, lock->lf_end + 1,
1302                                     &granted);
1303                                 break;
1304                         }
1305                         break;
1306                 }
1307 #ifdef LOCKF_DEBUG
1308                 if (lockf_debug & 1) {
1309                         if (lock->lf_type != F_UNLCK)
1310                                 lf_print("lf_activate_lock: activated", lock);
1311                         else
1312                                 lf_print("lf_activate_lock: unlocked", lock);
1313                         lf_printlist("lf_activate_lock", lock);
1314                 }
1315 #endif /* LOCKF_DEBUG */
1316                 if (lock->lf_type != F_UNLCK)
1317                         lf_insert_lock(state, lock);
1318         }
1319 }
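/*
 * As a concrete example of the overlap handling above: if an owner
 * holds a read lock on [0..100] and then sets a write lock on
 * [40..60], lf_findoverlap reports case 2 (the existing lock contains
 * the new one) and lf_split carves the old lock into [0..39] and
 * [61..100], leaving the new write lock covering [40..60].  An unlock
 * of the same range takes the same path, except that the F_UNLCK
 * entry is not re-inserted into the active list.
 */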
1320
1321 /*
1322  * Cancel a pending lock request, either as a result of a signal or a
1323  * cancel request for an async lock.
1324  */
1325 static void
1326 lf_cancel_lock(struct lockf *state, struct lockf_entry *lock)
1327 {
1328         struct lockf_entry_list granted;
1329
1330         /*
1331          * Note it is theoretically possible that cancelling this lock
1332          * may allow some other pending lock to become
1333          * active. Consider this case:
1334          *
1335          * Owner        Action          Result          Dependencies
1336          * 
1337          * A:           lock [0..0]     succeeds        
1338          * B:           lock [2..2]     succeeds        
1339          * C:           lock [1..2]     blocked         C->B
1340          * D:           lock [0..1]     blocked         C->B,D->A,D->C
1341          * A:           unlock [0..0]                   C->B,D->C
1342          * C:           cancel [1..2]   
1343          */
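        /*
         * In the scenario above, cancelling C's request removes its
         * out-going edge C->B and, via lf_update_dependancies, the
         * in-coming edge D->C; D is then left with no out-going edges
         * and its lock on [0..1] can be granted.
         */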
1344
1345         LIST_REMOVE(lock, lf_link);
1346
1347         /*
1348          * Removing out-going edges is simple.
1349          */
1350         sx_xlock(&lf_owner_graph_lock);
1351         lf_remove_outgoing(lock);
1352         sx_xunlock(&lf_owner_graph_lock);
1353
1354         /*
1355          * Removing in-coming edges may allow some other lock to
1356          * become active - we use lf_update_dependancies to figure
1357          * this out.
1358          */
1359         LIST_INIT(&granted);
1360         lf_update_dependancies(state, lock, TRUE, &granted);
1361         lf_free_lock(lock);
1362
1363         /*
1364          * Feed any newly active locks to lf_activate_lock.
1365          */
1366         while (!LIST_EMPTY(&granted)) {
1367                 lock = LIST_FIRST(&granted);
1368                 LIST_REMOVE(lock, lf_link);
1369                 lf_activate_lock(state, lock);
1370         }
1371 }
1372
1373 /*
1374  * Set a byte-range lock.
1375  */
1376 static int
1377 lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
1378     void **cookiep)
1379 {
1380         static char lockstr[] = "lockf";
1381         int priority, error;
1382
1383 #ifdef LOCKF_DEBUG
1384         if (lockf_debug & 1)
1385                 lf_print("lf_setlock", lock);
1386 #endif /* LOCKF_DEBUG */
1387
1388         /*
1389          * Set the priority
1390          */
1391         priority = PLOCK;
1392         if (lock->lf_type == F_WRLCK)
1393                 priority += 4;
1394         if (!(lock->lf_flags & F_NOINTR))
1395                 priority |= PCATCH;
1396         /*
1397          * Scan lock list for this file looking for locks that would block us.
1398          */
1399         if (lf_getblock(state, lock)) {
1400                 /*
1401                  * Free the structure and return if nonblocking.
1402                  */
1403                 if ((lock->lf_flags & F_WAIT) == 0
1404                     && lock->lf_async_task == NULL) {
1405                         lf_free_lock(lock);
1406                         error = EAGAIN;
1407                         goto out;
1408                 }
1409
1410                 /*
1411                  * For flock type locks, we must first remove
1412                  * any shared locks that we hold before we sleep
1413                  * waiting for an exclusive lock.
1414                  */
1415                 if ((lock->lf_flags & F_FLOCK) &&
1416                     lock->lf_type == F_WRLCK) {
1417                         lock->lf_type = F_UNLCK;
1418                         lf_activate_lock(state, lock);
1419                         lock->lf_type = F_WRLCK;
1420                 }
1421
1422                 /*
1423                  * We are blocked. Create edges to each blocking lock,
1424                  * checking for deadlock using the owner graph. For
1425                  * simplicity, we run deadlock detection for all
1426                  * locks, posix and otherwise.
1427                  */
1428                 sx_xlock(&lf_owner_graph_lock);
1429                 error = lf_add_outgoing(state, lock);
1430                 sx_xunlock(&lf_owner_graph_lock);
1431
1432                 if (error) {
1433 #ifdef LOCKF_DEBUG
1434                         if (lockf_debug & 1)
1435                                 lf_print("lf_setlock: deadlock", lock);
1436 #endif
1437                         lf_free_lock(lock);
1438                         goto out;
1439                 }
1440
1441                 /*
1442                  * We have added edges to everything that blocks
1443                  * us. Sleep until they all go away.
1444                  */
1445                 LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link);
1446 #ifdef LOCKF_DEBUG
1447                 if (lockf_debug & 1) {
1448                         struct lockf_edge *e;
1449                         LIST_FOREACH(e, &lock->lf_outedges, le_outlink) {
1450                                 lf_print("lf_setlock: blocking on", e->le_to);
1451                                 lf_printlist("lf_setlock", e->le_to);
1452                         }
1453                 }
1454 #endif /* LOCKF_DEBUG */
1455
1456                 if ((lock->lf_flags & F_WAIT) == 0) {
1457                         /*
1458                          * The caller requested async notification -
1459                          * this callback happens when the blocking
1460                          * lock is released, allowing the caller to
1461                          * make another attempt to take the lock.
1462                          */
1463                         *cookiep = (void *) lock;
1464                         error = EINPROGRESS;
1465                         goto out;
1466                 }
1467
1468                 lock->lf_refs++;
1469                 error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0);
1470                 if (lf_free_lock(lock)) {
1471                         error = EDOOFUS;
1472                         goto out;
1473                 }
1474
1475                 /*
1476                  * We may have been awakened by a signal and/or by a
1477                  * debugger continuing us (in which cases we must
1478                  * remove our lock graph edges) and/or by another
1479                  * process releasing a lock (in which case our edges
1480                  * have already been removed and we have been moved to
1481                  * the active list). We may also have been woken by
1482                  * lf_purgelocks which we report to the caller as
1483                  * EINTR. In that case, lf_purgelocks will have
1484                  * removed our lock graph edges.
1485                  *
1486                  * Note that it is possible to receive a signal after
1487                  * we were successfully woken (and moved to the active
1488                  * list) but before we resumed execution. In this
1489                  * case, our lf_outedges list will be clear. We
1490                  * pretend there was no error.
1491                  *
1492                  * Note also, if we have been sleeping long enough, we
1493                  * may now have incoming edges from some newer lock
1494                  * which is waiting behind us in the queue.
1495                  */
1496                 if (lock->lf_flags & F_INTR) {
1497                         error = EINTR;
1498                         lf_free_lock(lock);
1499                         goto out;
1500                 }
1501                 if (LIST_EMPTY(&lock->lf_outedges)) {
1502                         error = 0;
1503                 } else {
1504                         lf_cancel_lock(state, lock);
1505                         goto out;
1506                 }
1507 #ifdef LOCKF_DEBUG
1508                 if (lockf_debug & 1) {
1509                         lf_print("lf_setlock: granted", lock);
1510                 }
1511 #endif
1512                 goto out;
1513         }
1514         /*
1515          * It looks like we are going to grant the lock. First add
1516          * edges from any currently pending lock that the new lock
1517          * would block.
1518          */
1519         sx_xlock(&lf_owner_graph_lock);
1520         error = lf_add_incoming(state, lock);
1521         sx_xunlock(&lf_owner_graph_lock);
1522         if (error) {
1523 #ifdef LOCKF_DEBUG
1524                 if (lockf_debug & 1)
1525                         lf_print("lf_setlock: deadlock", lock);
1526 #endif
1527                 lf_free_lock(lock);
1528                 goto out;
1529         }
1530
1531         /*
1532          * No blocks!!  Add the lock.  Note that we will
1533          * downgrade or upgrade any overlapping locks this
1534          * process already owns.
1535          */
1536         lf_activate_lock(state, lock);
1537         error = 0;
1538 out:
1539         return (error);
1540 }
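/*
 * A minimal userland sketch of the kind of request that ends up in
 * lf_setlock (illustrative only; it assumes an already open descriptor
 * "fd" and is not part of this file's interface):
 *
 *	struct flock fl;
 *
 *	fl.l_type = F_WRLCK;
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 0;
 *	fl.l_len = 100;
 *	if (fcntl(fd, F_SETLKW, &fl) == -1)
 *		err(1, "fcntl");
 *
 * This requests an exclusive lock on bytes [0..99]. F_SETLKW is the
 * blocking form, roughly corresponding to F_WAIT being set above, while
 * plain F_SETLK corresponds to the non-blocking path that returns
 * EAGAIN.
 */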
1541
1542 /*
1543  * Remove a byte-range lock on an inode.
1544  *
1545  * Generally, find the lock (or an overlap to that lock)
1546  * and remove it (or shrink it), then wakeup anyone we can.
1547  */
1548 static int
1549 lf_clearlock(struct lockf *state, struct lockf_entry *unlock)
1550 {
1551         struct lockf_entry *overlap;
1552
1553         overlap = LIST_FIRST(&state->ls_active);
1554
1555         if (overlap == NOLOCKF)
1556                 return (0);
1557 #ifdef LOCKF_DEBUG
1558         if (unlock->lf_type != F_UNLCK)
1559                 panic("lf_clearlock: bad type");
1560         if (lockf_debug & 1)
1561                 lf_print("lf_clearlock", unlock);
1562 #endif /* LOCKF_DEBUG */
1563
1564         lf_activate_lock(state, unlock);
1565
1566         return (0);
1567 }
1568
1569 /*
1570  * Check whether there is a blocking lock, and if so return its
1571  * details in '*fl'.
1572  */
1573 static int
1574 lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl)
1575 {
1576         struct lockf_entry *block;
1577
1578 #ifdef LOCKF_DEBUG
1579         if (lockf_debug & 1)
1580                 lf_print("lf_getlock", lock);
1581 #endif /* LOCKF_DEBUG */
1582
1583         if ((block = lf_getblock(state, lock))) {
1584                 fl->l_type = block->lf_type;
1585                 fl->l_whence = SEEK_SET;
1586                 fl->l_start = block->lf_start;
1587                 if (block->lf_end == OFF_MAX)
1588                         fl->l_len = 0;
1589                 else
1590                         fl->l_len = block->lf_end - block->lf_start + 1;
1591                 fl->l_pid = block->lf_owner->lo_pid;
1592                 fl->l_sysid = block->lf_owner->lo_sysid;
1593         } else {
1594                 fl->l_type = F_UNLCK;
1595         }
1596         return (0);
1597 }
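/*
 * From userland this is the path that typically services F_GETLK; a
 * short sketch (assuming an open descriptor "fd"):
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *	    .l_start = 0, .l_len = 0 };
 *
 *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
 *		printf("blocked by pid %d\n", (int)fl.l_pid);
 *
 * On return, either l_type is F_UNLCK (the range could be locked) or
 * the fields describe the first blocking lock found by lf_getblock.
 */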
1598
1599 /*
1600  * Cancel an async lock request.
1601  */
1602 static int
1603 lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie)
1604 {
1605         struct lockf_entry *reallock;
1606
1607         /*
1608          * We need to match this request with an existing lock
1609          * request.
1610          */
1611         LIST_FOREACH(reallock, &state->ls_pending, lf_link) {
1612                 if ((void *) reallock == cookie) {
1613                         /*
1614                          * Double-check that this lock looks right
1615                          * (maybe use a rolling ID for the cancel
1616                          * cookie instead?)
1617                          */
1618                         if (!(reallock->lf_vnode == lock->lf_vnode
1619                                 && reallock->lf_start == lock->lf_start
1620                                 && reallock->lf_end == lock->lf_end)) {
1621                                 return (ENOENT);
1622                         }
1623
1624                         /*
1625                          * Make sure this lock was async and then just
1626                          * remove it from its wait lists.
1627                          */
1628                         if (!reallock->lf_async_task) {
1629                                 return (ENOENT);
1630                         }
1631
1632                         /*
1633                          * Note that since any other thread must take
1634                          * state->ls_lock before it can possibly
1635                          * trigger the async callback, we are safe
1636                          * from a race with lf_wakeup_lock, i.e. we
1637                          * can free the lock (actually our caller does
1638                          * this).
1639                          */
1640                         lf_cancel_lock(state, reallock);
1641                         return (0);
1642                 }
1643         }
1644
1645         /*
1646          * We didn't find a matching lock - not much we can do here.
1647          */
1648         return (ENOENT);
1649 }
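/*
 * Note that the cookie matched above is simply the lockf_entry pointer
 * that lf_setlock returned through '*cookiep' for an asynchronous
 * (EINPROGRESS) request, which is why the pending list is searched for
 * an entry whose address equals the cookie before the vnode and byte
 * range are double-checked.
 */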
1650
1651 /*
1652  * Walk the list of locks for an inode and
1653  * return the first blocking lock.
1654  */
1655 static struct lockf_entry *
1656 lf_getblock(struct lockf *state, struct lockf_entry *lock)
1657 {
1658         struct lockf_entry *overlap;
1659
1660         LIST_FOREACH(overlap, &state->ls_active, lf_link) {
1661                 /*
1662                  * We may assume that the active list is sorted by
1663                  * lf_start.
1664                  */
1665                 if (overlap->lf_start > lock->lf_end)
1666                         break;
1667                 if (!lf_blocks(lock, overlap))
1668                         continue;
1669                 return (overlap);
1670         }
1671         return (NOLOCKF);
1672 }
1673
1674 /*
1675  * Walk the list of locks for an inode to find an overlapping lock (if
1676  * any) and return a classification of that overlap.
1677  *
1678  * Arguments:
1679  *      *overlap        The place in the lock list to start looking
1680  *      lock            The lock which is being tested
1681  *      type            Pass 'SELF' to test only locks with the same
1682  *                      owner as lock, or 'OTHER' to test only locks
1683  *                      with a different owner
1684  *
1685  * Returns one of six values:
1686  *      0) no overlap
1687  *      1) overlap == lock
1688  *      2) overlap contains lock
1689  *      3) lock contains overlap
1690  *      4) overlap starts before lock
1691  *      5) overlap ends after lock
1692  *
1693  * If there is an overlapping lock, '*overlap' is set to point at the
1694  * overlapping lock.
1695  *
1696  * NOTE: this returns only the FIRST overlapping lock.  There
1697  *       may be more than one.
1698  */
1699 static int
1700 lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type)
1701 {
1702         struct lockf_entry *lf;
1703         off_t start, end;
1704         int res;
1705
1706         if ((*overlap) == NOLOCKF) {
1707                 return (0);
1708         }
1709 #ifdef LOCKF_DEBUG
1710         if (lockf_debug & 2)
1711                 lf_print("lf_findoverlap: looking for overlap in", lock);
1712 #endif /* LOCKF_DEBUG */
1713         start = lock->lf_start;
1714         end = lock->lf_end;
1715         res = 0;
1716         while (*overlap) {
1717                 lf = *overlap;
1718                 if (lf->lf_start > end)
1719                         break;
1720                 if (((type & SELF) && lf->lf_owner != lock->lf_owner) ||
1721                     ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) {
1722                         *overlap = LIST_NEXT(lf, lf_link);
1723                         continue;
1724                 }
1725 #ifdef LOCKF_DEBUG
1726                 if (lockf_debug & 2)
1727                         lf_print("\tchecking", lf);
1728 #endif /* LOCKF_DEBUG */
1729                 /*
1730                  * OK, check for overlap
1731                  *
1732                  * Six cases:
1733                  *      0) no overlap
1734                  *      1) overlap == lock
1735                  *      2) overlap contains lock
1736                  *      3) lock contains overlap
1737                  *      4) overlap starts before lock
1738                  *      5) overlap ends after lock
1739                  */
1740                 if (start > lf->lf_end) {
1741                         /* Case 0 */
1742 #ifdef LOCKF_DEBUG
1743                         if (lockf_debug & 2)
1744                                 printf("no overlap\n");
1745 #endif /* LOCKF_DEBUG */
1746                         *overlap = LIST_NEXT(lf, lf_link);
1747                         continue;
1748                 }
1749                 if (lf->lf_start == start && lf->lf_end == end) {
1750                         /* Case 1 */
1751 #ifdef LOCKF_DEBUG
1752                         if (lockf_debug & 2)
1753                                 printf("overlap == lock\n");
1754 #endif /* LOCKF_DEBUG */
1755                         res = 1;
1756                         break;
1757                 }
1758                 if (lf->lf_start <= start && lf->lf_end >= end) {
1759                         /* Case 2 */
1760 #ifdef LOCKF_DEBUG
1761                         if (lockf_debug & 2)
1762                                 printf("overlap contains lock\n");
1763 #endif /* LOCKF_DEBUG */
1764                         res = 2;
1765                         break;
1766                 }
1767                 if (start <= lf->lf_start && end >= lf->lf_end) {
1768                         /* Case 3 */
1769 #ifdef LOCKF_DEBUG
1770                         if (lockf_debug & 2)
1771                                 printf("lock contains overlap\n");
1772 #endif /* LOCKF_DEBUG */
1773                         res = 3;
1774                         break;
1775                 }
1776                 if (lf->lf_start < start && lf->lf_end >= start) {
1777                         /* Case 4 */
1778 #ifdef LOCKF_DEBUG
1779                         if (lockf_debug & 2)
1780                                 printf("overlap starts before lock\n");
1781 #endif /* LOCKF_DEBUG */
1782                         res = 4;
1783                         break;
1784                 }
1785                 if (lf->lf_start > start && lf->lf_end > end) {
1786                         /* Case 5 */
1787 #ifdef LOCKF_DEBUG
1788                         if (lockf_debug & 2)
1789                                 printf("overlap ends after lock\n");
1790 #endif /* LOCKF_DEBUG */
1791                         res = 5;
1792                         break;
1793                 }
1794                 panic("lf_findoverlap: default");
1795         }
1796         return (res);
1797 }
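/*
 * A worked illustration of the classification above, testing a lock
 * covering bytes [20..29] against an existing lock (example values
 * only):
 *
 *	existing [10..15]  ->  case 0, no overlap
 *	existing [20..29]  ->  case 1, overlap == lock
 *	existing [10..40]  ->  case 2, overlap contains lock
 *	existing [22..27]  ->  case 3, lock contains overlap
 *	existing [10..25]  ->  case 4, overlap starts before lock
 *	existing [25..40]  ->  case 5, overlap ends after lock
 */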
1798
1799 /*
1800  * Split the existing 'lock1', based on the extent of the lock
1801  * described by 'lock2'. The existing lock should cover 'lock2'
1802  * entirely.
1803  *
1804  * Any pending locks which have been unblocked are added to
1805  * 'granted'.
1806  */
1807 static void
1808 lf_split(struct lockf *state, struct lockf_entry *lock1,
1809     struct lockf_entry *lock2, struct lockf_entry_list *granted)
1810 {
1811         struct lockf_entry *splitlock;
1812
1813 #ifdef LOCKF_DEBUG
1814         if (lockf_debug & 2) {
1815                 lf_print("lf_split", lock1);
1816                 lf_print("splitting from", lock2);
1817         }
1818 #endif /* LOCKF_DEBUG */
1819         /*
1820          * Check whether we need to split at all.
1821          */
1822         if (lock1->lf_start == lock2->lf_start) {
1823                 lf_set_start(state, lock1, lock2->lf_end + 1, granted);
1824                 return;
1825         }
1826         if (lock1->lf_end == lock2->lf_end) {
1827                 lf_set_end(state, lock1, lock2->lf_start - 1, granted);
1828                 return;
1829         }
1830         /*
1831          * Make a new lock consisting of the last part of
1832          * the encompassing lock.
1833          */
1834         splitlock = lf_alloc_lock(lock1->lf_owner);
1835         memcpy(splitlock, lock1, sizeof *splitlock);
1836         splitlock->lf_refs = 1;
1837         if (splitlock->lf_flags & F_REMOTE)
1838                 vref(splitlock->lf_vnode);
1839
1840         /*
1841          * This cannot cause a deadlock since any edges we would add
1842          * to splitlock already exist in lock1. We must be sure to add
1843          * necessary dependencies to splitlock before we reduce lock1;
1844          * otherwise we may accidentally grant a pending lock that
1845          * was blocked by the tail end of lock1.
1846          */
1847         splitlock->lf_start = lock2->lf_end + 1;
1848         LIST_INIT(&splitlock->lf_outedges);
1849         LIST_INIT(&splitlock->lf_inedges);
1850         sx_xlock(&lf_owner_graph_lock);
1851         lf_add_incoming(state, splitlock);
1852         sx_xunlock(&lf_owner_graph_lock);
1853
1854         lf_set_end(state, lock1, lock2->lf_start - 1, granted);
1855
1856         /*
1857          * OK, now link it in
1858          */
1859         lf_insert_lock(state, splitlock);
1860 }
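/*
 * A concrete example of the split (values are illustrative): if lock1
 * covers bytes [0..99] and lock2 describes [40..59], neither end
 * coincides, so lock1 is trimmed to [0..39] via lf_set_end and a copy,
 * splitlock, is inserted covering [60..99]. The incoming edges for
 * splitlock are added before lock1 is shrunk so that a request blocked
 * only by the tail [60..99] cannot be granted prematurely.
 */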
1861
1862 struct lockdesc {
1863         STAILQ_ENTRY(lockdesc) link;
1864         struct vnode *vp;
1865         struct flock fl;
1866 };
1867 STAILQ_HEAD(lockdesclist, lockdesc);
1868
1869 int
1870 lf_iteratelocks_sysid(int sysid, lf_iterator *fn, void *arg)
1871 {
1872         struct lockf *ls;
1873         struct lockf_entry *lf;
1874         struct lockdesc *ldesc;
1875         struct lockdesclist locks;
1876         int error;
1877
1878         /*
1879          * In order to keep the locking simple, we iterate over the
1880          * active lock lists to build a list of locks that need
1881          * releasing. We then call the iterator for each one in turn.
1882          *
1883          * We take an extra reference to the vnode for the duration to
1884          * make sure it doesn't go away before we are finished.
1885          */
1886         STAILQ_INIT(&locks);
1887         sx_xlock(&lf_lock_states_lock);
1888         LIST_FOREACH(ls, &lf_lock_states, ls_link) {
1889                 sx_xlock(&ls->ls_lock);
1890                 LIST_FOREACH(lf, &ls->ls_active, lf_link) {
1891                         if (lf->lf_owner->lo_sysid != sysid)
1892                                 continue;
1893
1894                         ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
1895                             M_WAITOK);
1896                         ldesc->vp = lf->lf_vnode;
1897                         vref(ldesc->vp);
1898                         ldesc->fl.l_start = lf->lf_start;
1899                         if (lf->lf_end == OFF_MAX)
1900                                 ldesc->fl.l_len = 0;
1901                         else
1902                                 ldesc->fl.l_len =
1903                                         lf->lf_end - lf->lf_start + 1;
1904                         ldesc->fl.l_whence = SEEK_SET;
1905                         ldesc->fl.l_type = F_UNLCK;
1906                         ldesc->fl.l_pid = lf->lf_owner->lo_pid;
1907                         ldesc->fl.l_sysid = sysid;
1908                         STAILQ_INSERT_TAIL(&locks, ldesc, link);
1909                 }
1910                 sx_xunlock(&ls->ls_lock);
1911         }
1912         sx_xunlock(&lf_lock_states_lock);
1913
1914         /*
1915          * Call the iterator function for each lock in turn. If the
1916          * iterator returns an error code, just free the rest of the
1917          * lockdesc structures.
1918          */
1919         error = 0;
1920         while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
1921                 STAILQ_REMOVE_HEAD(&locks, link);
1922                 if (!error)
1923                         error = fn(ldesc->vp, &ldesc->fl, arg);
1924                 vrele(ldesc->vp);
1925                 free(ldesc, M_LOCKF);
1926         }
1927
1928         return (error);
1929 }
1930
1931 int
1932 lf_iteratelocks_vnode(struct vnode *vp, lf_iterator *fn, void *arg)
1933 {
1934         struct lockf *ls;
1935         struct lockf_entry *lf;
1936         struct lockdesc *ldesc;
1937         struct lockdesclist locks;
1938         int error;
1939
1940         /*
1941          * In order to keep the locking simple, we iterate over the
1942          * active lock lists to build a list of locks that need
1943          * releasing. We then call the iterator for each one in turn.
1944          *
1945          * We take an extra reference to the vnode for the duration to
1946          * make sure it doesn't go away before we are finished.
1947          */
1948         STAILQ_INIT(&locks);
1949         VI_LOCK(vp);
1950         ls = vp->v_lockf;
1951         if (!ls) {
1952                 VI_UNLOCK(vp);
1953                 return (0);
1954         }
1955         ls->ls_threads++;
1956         VI_UNLOCK(vp);
1957
1958         sx_xlock(&ls->ls_lock);
1959         LIST_FOREACH(lf, &ls->ls_active, lf_link) {
1960                 ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
1961                     M_WAITOK);
1962                 ldesc->vp = lf->lf_vnode;
1963                 vref(ldesc->vp);
1964                 ldesc->fl.l_start = lf->lf_start;
1965                 if (lf->lf_end == OFF_MAX)
1966                         ldesc->fl.l_len = 0;
1967                 else
1968                         ldesc->fl.l_len =
1969                                 lf->lf_end - lf->lf_start + 1;
1970                 ldesc->fl.l_whence = SEEK_SET;
1971                 ldesc->fl.l_type = F_UNLCK;
1972                 ldesc->fl.l_pid = lf->lf_owner->lo_pid;
1973                 ldesc->fl.l_sysid = lf->lf_owner->lo_sysid;
1974                 STAILQ_INSERT_TAIL(&locks, ldesc, link);
1975         }
1976         sx_xunlock(&ls->ls_lock);
1977         VI_LOCK(vp);
1978         ls->ls_threads--;
1979         wakeup(ls);
1980         VI_UNLOCK(vp);
1981
1982         /*
1983          * Call the iterator function for each lock in turn. If the
1984          * iterator returns an error code, just free the rest of the
1985          * lockdesc structures.
1986          */
1987         error = 0;
1988         while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
1989                 STAILQ_REMOVE_HEAD(&locks, link);
1990                 if (!error)
1991                         error = fn(ldesc->vp, &ldesc->fl, arg);
1992                 vrele(ldesc->vp);
1993                 free(ldesc, M_LOCKF);
1994         }
1995
1996         return (error);
1997 }
1998
1999 static int
2000 lf_clearremotesys_iterator(struct vnode *vp, struct flock *fl, void *arg)
2001 {
2002
2003         VOP_ADVLOCK(vp, 0, F_UNLCK, fl, F_REMOTE);
2004         return (0);
2005 }
2006
2007 void
2008 lf_clearremotesys(int sysid)
2009 {
2010
2011         KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS"));
2012         lf_iteratelocks_sysid(sysid, lf_clearremotesys_iterator, NULL);
2013 }
2014
2015 int
2016 lf_countlocks(int sysid)
2017 {
2018         int i;
2019         struct lock_owner *lo;
2020         int count;
2021
2022         count = 0;
2023         sx_xlock(&lf_lock_owners_lock);
2024         for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++)
2025                 LIST_FOREACH(lo, &lf_lock_owners[i], lo_link)
2026                         if (lo->lo_sysid == sysid)
2027                                 count += lo->lo_refs;
2028         sx_xunlock(&lf_lock_owners_lock);
2029
2030         return (count);
2031 }
2032
2033 #ifdef LOCKF_DEBUG
2034
2035 /*
2036  * Return non-zero if y is reachable from x using a brute force
2037  * search. If reachable and path is non-null, return the route taken
2038  * in path.
2039  */
2040 static int
2041 graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
2042     struct owner_vertex_list *path)
2043 {
2044         struct owner_edge *e;
2045
2046         if (x == y) {
2047                 if (path)
2048                         TAILQ_INSERT_HEAD(path, x, v_link);
2049                 return 1;
2050         }
2051
2052         LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2053                 if (graph_reaches(e->e_to, y, path)) {
2054                         if (path)
2055                                 TAILQ_INSERT_HEAD(path, x, v_link);
2056                         return 1;
2057                 }
2058         }
2059         return 0;
2060 }
2061
2062 /*
2063  * Perform consistency checks on the graph. Make sure the values of
2064  * v_order are correct. If checkorder is non-zero, check no vertex can
2065  * reach any other vertex with a smaller order.
2066  */
2067 static void
2068 graph_check(struct owner_graph *g, int checkorder)
2069 {
2070         int i, j;
2071
2072         for (i = 0; i < g->g_size; i++) {
2073                 if (!g->g_vertices[i]->v_owner)
2074                         continue;
2075                 KASSERT(g->g_vertices[i]->v_order == i,
2076                     ("lock graph vertices disordered"));
2077                 if (checkorder) {
2078                         for (j = 0; j < i; j++) {
2079                                 if (!g->g_vertices[j]->v_owner)
2080                                         continue;
2081                                 KASSERT(!graph_reaches(g->g_vertices[i],
2082                                         g->g_vertices[j], NULL),
2083                                     ("lock graph vertices disordered"));
2084                         }
2085                 }
2086         }
2087 }
2088
2089 static void
2090 graph_print_vertices(struct owner_vertex_list *set)
2091 {
2092         struct owner_vertex *v;
2093
2094         printf("{ ");
2095         TAILQ_FOREACH(v, set, v_link) {
2096                 printf("%d:", v->v_order);
2097                 lf_print_owner(v->v_owner);
2098                 if (TAILQ_NEXT(v, v_link))
2099                         printf(", ");
2100         }
2101         printf(" }\n");
2102 }
2103
2104 #endif
2105
2106 /*
2107  * Calculate the sub-set of vertices v from the affected region [y..x]
2108  * where v is reachable from y. Return -1 if a loop was detected
2109  * (i.e. x is reachable from y); otherwise return the number of
2110  * vertices in this subset.
2111  */
2112 static int
2113 graph_delta_forward(struct owner_graph *g, struct owner_vertex *x,
2114     struct owner_vertex *y, struct owner_vertex_list *delta)
2115 {
2116         uint32_t gen;
2117         struct owner_vertex *v;
2118         struct owner_edge *e;
2119         int n;
2120
2121         /*
2122          * We start with a set containing just y. Then for each vertex
2123          * v in the set so far unprocessed, we add each vertex that v
2124          * has an out-edge to and that is within the affected region
2125          * [y..x]. If we see the vertex x on our travels, stop
2126          * immediately.
2127          */
2128         TAILQ_INIT(delta);
2129         TAILQ_INSERT_TAIL(delta, y, v_link);
2130         v = y;
2131         n = 1;
2132         gen = g->g_gen;
2133         while (v) {
2134                 LIST_FOREACH(e, &v->v_outedges, e_outlink) {
2135                         if (e->e_to == x)
2136                                 return -1;
2137                         if (e->e_to->v_order < x->v_order
2138                             && e->e_to->v_gen != gen) {
2139                                 e->e_to->v_gen = gen;
2140                                 TAILQ_INSERT_TAIL(delta, e->e_to, v_link);
2141                                 n++;
2142                         }
2143                 }
2144                 v = TAILQ_NEXT(v, v_link);
2145         }
2146
2147         return (n);
2148 }
2149
2150 /*
2151  * Calculate the sub-set of vertices v from the affected region [y..x]
2152  * where v reaches x. Return the number of vertices in this subset.
2153  */
2154 static int
2155 graph_delta_backward(struct owner_graph *g, struct owner_vertex *x,
2156     struct owner_vertex *y, struct owner_vertex_list *delta)
2157 {
2158         uint32_t gen;
2159         struct owner_vertex *v;
2160         struct owner_edge *e;
2161         int n;
2162
2163         /*
2164          * We start with a set containing just x. Then for each vertex
2165          * v in the set so far unprocessed, we add each vertex that v
2166          * has an in-edge from and that is within the affected region
2167          * [y..x].
2168          */
2169         TAILQ_INIT(delta);
2170         TAILQ_INSERT_TAIL(delta, x, v_link);
2171         v = x;
2172         n = 1;
2173         gen = g->g_gen;
2174         while (v) {
2175                 LIST_FOREACH(e, &v->v_inedges, e_inlink) {
2176                         if (e->e_from->v_order > y->v_order
2177                             && e->e_from->v_gen != gen) {
2178                                 e->e_from->v_gen = gen;
2179                                 TAILQ_INSERT_HEAD(delta, e->e_from, v_link);
2180                                 n++;
2181                         }
2182                 }
2183                 v = TAILQ_PREV(v, owner_vertex_list, v_link);
2184         }
2185
2186         return (n);
2187 }
2188
2189 static int
2190 graph_add_indices(int *indices, int n, struct owner_vertex_list *set)
2191 {
2192         struct owner_vertex *v;
2193         int i, j;
2194
2195         TAILQ_FOREACH(v, set, v_link) {
2196                 for (i = n;
2197                      i > 0 && indices[i - 1] > v->v_order; i--)
2198                         ;
2199                 for (j = n - 1; j >= i; j--)
2200                         indices[j + 1] = indices[j];
2201                 indices[i] = v->v_order;
2202                 n++;
2203         }
2204
2205         return (n);
2206 }
2207
2208 static int
2209 graph_assign_indices(struct owner_graph *g, int *indices, int nextunused,
2210     struct owner_vertex_list *set)
2211 {
2212         struct owner_vertex *v, *vlowest;
2213
2214         while (!TAILQ_EMPTY(set)) {
2215                 vlowest = NULL;
2216                 TAILQ_FOREACH(v, set, v_link) {
2217                         if (!vlowest || v->v_order < vlowest->v_order)
2218                                 vlowest = v;
2219                 }
2220                 TAILQ_REMOVE(set, vlowest, v_link);
2221                 vlowest->v_order = indices[nextunused];
2222                 g->g_vertices[vlowest->v_order] = vlowest;
2223                 nextunused++;
2224         }
2225
2226         return (nextunused);
2227 }
2228
2229 static int
2230 graph_add_edge(struct owner_graph *g, struct owner_vertex *x,
2231     struct owner_vertex *y)
2232 {
2233         struct owner_edge *e;
2234         struct owner_vertex_list deltaF, deltaB;
2235         int nF, nB, n, vi, i;
2236         int *indices;
2237
2238         sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2239
2240         LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2241                 if (e->e_to == y) {
2242                         e->e_refs++;
2243                         return (0);
2244                 }
2245         }
2246
2247 #ifdef LOCKF_DEBUG
2248         if (lockf_debug & 8) {
2249                 printf("adding edge %d:", x->v_order);
2250                 lf_print_owner(x->v_owner);
2251                 printf(" -> %d:", y->v_order);
2252                 lf_print_owner(y->v_owner);
2253                 printf("\n");
2254         }
2255 #endif
2256         if (y->v_order < x->v_order) {
2257                 /*
2258                  * The new edge violates the order. First find the set
2259                  * of affected vertices reachable from y (deltaF) and
2260                  * the set of affected vertices that reach x
2261                  * (deltaB), using the graph generation number to
2262                  * detect whether we have visited a given vertex
2263                  * already. We re-order the graph so that each vertex
2264                  * in deltaB appears before each vertex in deltaF.
2265                  *
2266                  * If x is a member of deltaF, then the new edge would
2267                  * create a cycle. Otherwise, we may assume that
2268                  * deltaF and deltaB are disjoint.
2269                  */
2270                 g->g_gen++;
2271                 if (g->g_gen == 0) {
2272                         /*
2273                          * Generation wrap.
2274                          */
2275                         for (vi = 0; vi < g->g_size; vi++) {
2276                                 g->g_vertices[vi]->v_gen = 0;
2277                         }
2278                         g->g_gen++;
2279                 }
2280                 nF = graph_delta_forward(g, x, y, &deltaF);
2281                 if (nF < 0) {
2282 #ifdef LOCKF_DEBUG
2283                         if (lockf_debug & 8) {
2284                                 struct owner_vertex_list path;
2285                                 printf("deadlock: ");
2286                                 TAILQ_INIT(&path);
2287                                 graph_reaches(y, x, &path);
2288                                 graph_print_vertices(&path);
2289                         }
2290 #endif
2291                         return (EDEADLK);
2292                 }
2293
2294 #ifdef LOCKF_DEBUG
2295                 if (lockf_debug & 8) {
2296                         printf("re-ordering graph vertices\n");
2297                         printf("deltaF = ");
2298                         graph_print_vertices(&deltaF);
2299                 }
2300 #endif
2301
2302                 nB = graph_delta_backward(g, x, y, &deltaB);
2303
2304 #ifdef LOCKF_DEBUG
2305                 if (lockf_debug & 8) {
2306                         printf("deltaB = ");
2307                         graph_print_vertices(&deltaB);
2308                 }
2309 #endif
2310
2311                 /*
2312                  * We first build a set of vertex indices (vertex
2313                  * order values) that we may use, then we re-assign
2314                  * orders first to those vertices in deltaB, then to
2315                  * deltaF. Note that the contents of deltaF and deltaB
2316                  * may be partially disordered - we perform an
2317                  * insertion sort while building our index set.
2318                  */
2319                 indices = g->g_indexbuf;
2320                 n = graph_add_indices(indices, 0, &deltaF);
2321                 graph_add_indices(indices, n, &deltaB);
2322
2323                 /*
2324                  * We must also be sure to maintain the relative
2325                  * ordering of deltaF and deltaB when re-assigning
2326                  * vertices. We do this by iteratively removing the
2327                  * lowest ordered element from the set and assigning
2328                  * it the next value from our new ordering.
2329                  */
2330                 i = graph_assign_indices(g, indices, 0, &deltaB);
2331                 graph_assign_indices(g, indices, i, &deltaF);
2332
2333 #ifdef LOCKF_DEBUG
2334                 if (lockf_debug & 8) {
2335                         struct owner_vertex_list set;
2336                         TAILQ_INIT(&set);
2337                         for (i = 0; i < nB + nF; i++)
2338                                 TAILQ_INSERT_TAIL(&set,
2339                                     g->g_vertices[indices[i]], v_link);
2340                         printf("new ordering = ");
2341                         graph_print_vertices(&set);
2342                 }
2343 #endif
2344         }
2345
2346         KASSERT(x->v_order < y->v_order, ("Failed to re-order graph"));
2347
2348 #ifdef LOCKF_DEBUG
2349         if (lockf_debug & 8) {
2350                 graph_check(g, TRUE);
2351         }
2352 #endif
2353
2354         e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK);
2355
2356         LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink);
2357         LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink);
2358         e->e_refs = 1;
2359         e->e_from = x;
2360         e->e_to = y;
2361
2362         return (0);
2363 }
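/*
 * A small worked example of the re-ordering above (owners A-D are
 * hypothetical): with the topological order A=0, B=1, C=2, D=3 and a
 * single existing edge A->B, adding D->A violates the order since A's
 * order is below D's. Here deltaF = {A, B} (reachable from A within
 * [A..D]) and deltaB = {D} (reaches D within [A..D]); the free indices
 * {0, 1, 3} are handed out to deltaB first and deltaF second, giving
 * D=0, A=1, C=2, B=3, after which the new edge runs from a lower order
 * to a higher one as the KASSERT requires.
 */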
2364
2365 /*
2366  * Remove an edge x->y from the graph.
2367  */
2368 static void
2369 graph_remove_edge(struct owner_graph *g, struct owner_vertex *x,
2370     struct owner_vertex *y)
2371 {
2372         struct owner_edge *e;
2373
2374         sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2375
2376         LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2377                 if (e->e_to == y)
2378                         break;
2379         }
2380         KASSERT(e, ("Removing non-existent edge from deadlock graph"));
2381
2382         e->e_refs--;
2383         if (e->e_refs == 0) {
2384 #ifdef LOCKF_DEBUG
2385                 if (lockf_debug & 8) {
2386                         printf("removing edge %d:", x->v_order);
2387                         lf_print_owner(x->v_owner);
2388                         printf(" -> %d:", y->v_order);
2389                         lf_print_owner(y->v_owner);
2390                         printf("\n");
2391                 }
2392 #endif
2393                 LIST_REMOVE(e, e_outlink);
2394                 LIST_REMOVE(e, e_inlink);
2395                 free(e, M_LOCKF);
2396         }
2397 }
2398
2399 /*
2400  * Allocate a new vertex for a lock owner, growing the graph's vertex
2401  * array and index buffer as necessary.
2402  */
2403 static struct owner_vertex *
2404 graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo)
2405 {
2406         struct owner_vertex *v;
2407
2408         sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2409
2410         v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK);
2411         if (g->g_size == g->g_space) {
2412                 g->g_vertices = realloc(g->g_vertices,
2413                     2 * g->g_space * sizeof(struct owner_vertex *),
2414                     M_LOCKF, M_WAITOK);
2415                 free(g->g_indexbuf, M_LOCKF);
2416                 g->g_indexbuf = malloc(2 * g->g_space * sizeof(int),
2417                     M_LOCKF, M_WAITOK);
2418                 g->g_space = 2 * g->g_space;
2419         }
2420         v->v_order = g->g_size;
2421         v->v_gen = g->g_gen;
2422         g->g_vertices[g->g_size] = v;
2423         g->g_size++;
2424
2425         LIST_INIT(&v->v_outedges);
2426         LIST_INIT(&v->v_inedges);
2427         v->v_owner = lo;
2428
2429         return (v);
2430 }
2431
2432 static void
2433 graph_free_vertex(struct owner_graph *g, struct owner_vertex *v)
2434 {
2435         struct owner_vertex *w;
2436         int i;
2437
2438         sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2439         
2440         KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges"));
2441         KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges"));
2442
2443         /*
2444          * Remove from the graph's array and close up the gap,
2445          * renumbering the other vertices.
2446          */
2447         for (i = v->v_order + 1; i < g->g_size; i++) {
2448                 w = g->g_vertices[i];
2449                 w->v_order--;
2450                 g->g_vertices[i - 1] = w;
2451         }
2452         g->g_size--;
2453
2454         free(v, M_LOCKF);
2455 }
2456
2457 static struct owner_graph *
2458 graph_init(struct owner_graph *g)
2459 {
2460
2461         g->g_vertices = malloc(10 * sizeof(struct owner_vertex *),
2462             M_LOCKF, M_WAITOK);
2463         g->g_size = 0;
2464         g->g_space = 10;
2465         g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK);
2466         g->g_gen = 0;
2467
2468         return (g);
2469 }
2470
2471 #ifdef LOCKF_DEBUG
2472 /*
2473  * Print description of a lock owner
2474  */
2475 static void
2476 lf_print_owner(struct lock_owner *lo)
2477 {
2478
2479         if (lo->lo_flags & F_REMOTE) {
2480                 printf("remote pid %d, system %d",
2481                     lo->lo_pid, lo->lo_sysid);
2482         } else if (lo->lo_flags & F_FLOCK) {
2483                 printf("file %p", lo->lo_id);
2484         } else {
2485                 printf("local pid %d", lo->lo_pid);
2486         }
2487 }
2488
2489 /*
2490  * Print out a lock.
2491  */
2492 static void
2493 lf_print(char *tag, struct lockf_entry *lock)
2494 {
2495
2496         printf("%s: lock %p for ", tag, (void *)lock);
2497         lf_print_owner(lock->lf_owner);
2498         if (lock->lf_inode != (struct inode *)0)
2499                 printf(" in ino %ju on dev <%s>,",
2500                     (uintmax_t)lock->lf_inode->i_number,
2501                     devtoname(lock->lf_inode->i_dev));
2502         printf(" %s, start %jd, end ",
2503             lock->lf_type == F_RDLCK ? "shared" :
2504             lock->lf_type == F_WRLCK ? "exclusive" :
2505             lock->lf_type == F_UNLCK ? "unlock" : "unknown",
2506             (intmax_t)lock->lf_start);
2507         if (lock->lf_end == OFF_MAX)
2508                 printf("EOF");
2509         else
2510                 printf("%jd", (intmax_t)lock->lf_end);
2511         if (!LIST_EMPTY(&lock->lf_outedges))
2512                 printf(" block %p\n",
2513                     (void *)LIST_FIRST(&lock->lf_outedges)->le_to);
2514         else
2515                 printf("\n");
2516 }
2517
2518 static void
2519 lf_printlist(char *tag, struct lockf_entry *lock)
2520 {
2521         struct lockf_entry *lf, *blk;
2522         struct lockf_edge *e;
2523
2524         if (lock->lf_inode == (struct inode *)0)
2525                 return;
2526
2527         printf("%s: Lock list for ino %ju on dev <%s>:\n",
2528             tag, (uintmax_t)lock->lf_inode->i_number,
2529             devtoname(lock->lf_inode->i_dev));
2530         LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) {
2531                 printf("\tlock %p for ",(void *)lf);
2532                 lf_print_owner(lock->lf_owner);
2533                 printf(", %s, start %jd, end %jd",
2534                     lf->lf_type == F_RDLCK ? "shared" :
2535                     lf->lf_type == F_WRLCK ? "exclusive" :
2536                     lf->lf_type == F_UNLCK ? "unlock" :
2537                     "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
2538                 LIST_FOREACH(e, &lf->lf_outedges, le_outlink) {
2539                         blk = e->le_to;
2540                         printf("\n\t\tlock request %p for ", (void *)blk);
2541                         lf_print_owner(blk->lf_owner);
2542                         printf(", %s, start %jd, end %jd",
2543                             blk->lf_type == F_RDLCK ? "shared" :
2544                             blk->lf_type == F_WRLCK ? "exclusive" :
2545                             blk->lf_type == F_UNLCK ? "unlock" :
2546                             "unknown", (intmax_t)blk->lf_start,
2547                             (intmax_t)blk->lf_end);
2548                         if (!LIST_EMPTY(&blk->lf_inedges))
2549                                 panic("lf_printlist: bad list");
2550                 }
2551                 printf("\n");
2552         }
2553 }
2554 #endif /* LOCKF_DEBUG */