/*-
 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
 * Authors: Doug Rabson <dfr@rabson.org>
 * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_debug_lockf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/hash.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sx.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/taskqueue.h>

#ifdef LOCKF_DEBUG
#include <sys/sysctl.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>

static int      lockf_debug = 0; /* control debug output */
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
#endif

static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

struct owner_edge;
struct owner_vertex;
struct owner_vertex_list;
struct owner_graph;

#define NOLOCKF (struct lockf_entry *)0
#define SELF    0x1
#define OTHERS  0x2
static void      lf_init(void *);
static int       lf_hash_owner(caddr_t, struct flock *, int);
static int       lf_owner_matches(struct lock_owner *, caddr_t, struct flock *,
    int);
static struct lockf_entry *
                 lf_alloc_lock(struct lock_owner *);
static int       lf_free_lock(struct lockf_entry *);
static int       lf_clearlock(struct lockf *, struct lockf_entry *);
static int       lf_overlaps(struct lockf_entry *, struct lockf_entry *);
static int       lf_blocks(struct lockf_entry *, struct lockf_entry *);
static void      lf_free_edge(struct lockf_edge *);
static struct lockf_edge *
                 lf_alloc_edge(void);
static void      lf_alloc_vertex(struct lockf_entry *);
static int       lf_add_edge(struct lockf_entry *, struct lockf_entry *);
static void      lf_remove_edge(struct lockf_edge *);
static void      lf_remove_outgoing(struct lockf_entry *);
static void      lf_remove_incoming(struct lockf_entry *);
static int       lf_add_outgoing(struct lockf *, struct lockf_entry *);
static int       lf_add_incoming(struct lockf *, struct lockf_entry *);
static int       lf_findoverlap(struct lockf_entry **, struct lockf_entry *,
    int);
static struct lockf_entry *
                 lf_getblock(struct lockf *, struct lockf_entry *);
static int       lf_getlock(struct lockf *, struct lockf_entry *, struct flock *);
static void      lf_insert_lock(struct lockf *, struct lockf_entry *);
static void      lf_wakeup_lock(struct lockf *, struct lockf_entry *);
static void      lf_update_dependancies(struct lockf *, struct lockf_entry *,
    int all, struct lockf_entry_list *);
static void      lf_set_start(struct lockf *, struct lockf_entry *, off_t,
        struct lockf_entry_list*);
static void      lf_set_end(struct lockf *, struct lockf_entry *, off_t,
        struct lockf_entry_list*);
static int       lf_setlock(struct lockf *, struct lockf_entry *,
    struct vnode *, void **cookiep);
static int       lf_cancel(struct lockf *, struct lockf_entry *, void *);
static void      lf_split(struct lockf *, struct lockf_entry *,
    struct lockf_entry *, struct lockf_entry_list *);
#ifdef LOCKF_DEBUG
static int       graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
    struct owner_vertex_list *path);
static void      graph_check(struct owner_graph *g, int checkorder);
static void      graph_print_vertices(struct owner_vertex_list *set);
#endif
static int       graph_delta_forward(struct owner_graph *g,
    struct owner_vertex *x, struct owner_vertex *y,
    struct owner_vertex_list *delta);
static int       graph_delta_backward(struct owner_graph *g,
    struct owner_vertex *x, struct owner_vertex *y,
    struct owner_vertex_list *delta);
static int       graph_add_indices(int *indices, int n,
    struct owner_vertex_list *set);
static int       graph_assign_indices(struct owner_graph *g, int *indices,
    int nextunused, struct owner_vertex_list *set);
static int       graph_add_edge(struct owner_graph *g,
    struct owner_vertex *x, struct owner_vertex *y);
static void      graph_remove_edge(struct owner_graph *g,
    struct owner_vertex *x, struct owner_vertex *y);
static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g,
    struct lock_owner *lo);
static void      graph_free_vertex(struct owner_graph *g,
    struct owner_vertex *v);
static struct owner_graph * graph_init(struct owner_graph *g);
#ifdef LOCKF_DEBUG
static void      lf_print(char *, struct lockf_entry *);
static void      lf_printlist(char *, struct lockf_entry *);
static void      lf_print_owner(struct lock_owner *);
#endif

/*
 * This structure is used to keep track of both local and remote lock
 * owners. The lf_owner field of the struct lockf_entry points back at
 * the lock owner structure. Each possible lock owner (local proc for
 * POSIX fcntl locks, local file for BSD flock locks or <pid,sysid>
 * pair for remote locks) is represented by a unique instance of
 * struct lock_owner.
 *
 * If a lock owner has a lock that blocks some other lock or a lock
 * that is waiting for some other lock, it also has a vertex in the
 * owner_graph below.
 *
 * Locks:
 * (s)          locked by state->ls_lock
 * (S)          locked by lf_lock_states_lock
 * (l)          locked by lf_lock_owners_lock
 * (g)          locked by lf_owner_graph_lock
 * (c)          const until freeing
 */
#define LOCK_OWNER_HASH_SIZE    256

struct lock_owner {
        LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */
        int     lo_refs;            /* (l) Number of locks referring to this */
        int     lo_flags;           /* (c) Flags passed to lf_advlock */
        caddr_t lo_id;              /* (c) Id value passed to lf_advlock */
        pid_t   lo_pid;             /* (c) Process Id of the lock owner */
        int     lo_sysid;           /* (c) System Id of the lock owner */
        struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */
};

LIST_HEAD(lock_owner_list, lock_owner);

static struct sx                lf_lock_states_lock;
static struct lockf_list        lf_lock_states; /* (S) */
static struct sx                lf_lock_owners_lock;
static struct lock_owner_list   lf_lock_owners[LOCK_OWNER_HASH_SIZE]; /* (l) */

/*
 * Structures for deadlock detection.
 *
 * We have two types of directed graph. The first is the set of locks,
 * both active and pending, on a vnode. Within this graph, active locks
 * are terminal nodes in the graph (i.e. have no out-going
 * edges). Pending locks have out-going edges to each blocking active
 * lock that prevents the lock from being granted and also to each
 * older pending lock that would block them if it were active. The
 * graph for each vnode is naturally acyclic; new edges are only ever
 * added to or from new nodes (either new pending locks which only add
 * out-going edges or new active locks which only add in-coming edges)
 * therefore they cannot create loops in the lock graph.
 *
 * The second graph is a global graph of lock owners. Each lock owner
 * is a vertex in that graph and an edge is added to the graph
 * whenever an edge is added to a vnode graph, with end points
 * corresponding to the owner of the new pending lock and the owner of
 * the lock upon which it waits. In order to prevent deadlock, we only
 * add an edge to this graph if the new edge would not create a cycle.
 *
 * The lock owner graph is topologically sorted, i.e. if a node has
 * any outgoing edges, then it has an order strictly less than any
 * node to which it has an outgoing edge. We preserve this ordering
 * (and detect cycles) on edge insertion using Algorithm PK from the
 * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic
 * Graphs" (ACM Journal of Experimental Algorithms, Vol 11, Article
 * No. 1.7).
 */
struct owner_vertex;

struct owner_edge {
        LIST_ENTRY(owner_edge) e_outlink; /* (g) link from's out-edge list */
        LIST_ENTRY(owner_edge) e_inlink;  /* (g) link to's in-edge list */
        int             e_refs;           /* (g) number of times added */
        struct owner_vertex *e_from;      /* (c) out-going from here */
        struct owner_vertex *e_to;        /* (c) in-coming to here */
};
LIST_HEAD(owner_edge_list, owner_edge);

struct owner_vertex {
        TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */
        uint32_t        v_gen;            /* (g) workspace for edge insertion */
        int             v_order;          /* (g) order of vertex in graph */
        struct owner_edge_list v_outedges;/* (g) list of out-edges */
        struct owner_edge_list v_inedges; /* (g) list of in-edges */
        struct lock_owner *v_owner;       /* (c) corresponding lock owner */
};
TAILQ_HEAD(owner_vertex_list, owner_vertex);

struct owner_graph {
        struct owner_vertex** g_vertices; /* (g) pointers to vertices */
        int             g_size;           /* (g) number of vertices */
        int             g_space;          /* (g) space allocated for vertices */
        int             *g_indexbuf;      /* (g) workspace for loop detection */
        uint32_t        g_gen;            /* (g) increment when re-ordering */
};

static struct sx                lf_owner_graph_lock;
static struct owner_graph       lf_owner_graph;
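
/*
 * Editorial sketch, not part of the original source: the topological
 * ordering invariant described above can be spot-checked by walking
 * every out-edge and verifying that the source vertex sorts strictly
 * before its destination.  The in-tree graph_check() performs a more
 * thorough validation when LOCKF_DEBUG is defined.
 */
#if 0
static void
graph_check_order(struct owner_graph *g)
{
        struct owner_vertex *v;
        struct owner_edge *e;
        int i;

        for (i = 0; i < g->g_size; i++) {
                v = g->g_vertices[i];
                KASSERT(v->v_order == i,
                    ("vertex order disagrees with array index"));
                LIST_FOREACH(e, &v->v_outedges, e_outlink)
                        KASSERT(v->v_order < e->e_to->v_order,
                            ("topological order violated"));
        }
}
#endif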

/*
 * Initialise various structures and locks.
 */
static void
lf_init(void *dummy)
{
        int i;

        sx_init(&lf_lock_states_lock, "lock states lock");
        LIST_INIT(&lf_lock_states);

        sx_init(&lf_lock_owners_lock, "lock owners lock");
        for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++)
                LIST_INIT(&lf_lock_owners[i]);

        sx_init(&lf_owner_graph_lock, "owner graph lock");
        graph_init(&lf_owner_graph);
}
SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL);

/*
 * Generate a hash value for a lock owner.
 */
static int
lf_hash_owner(caddr_t id, struct flock *fl, int flags)
{
        uint32_t h;

        if (flags & F_REMOTE) {
                h = HASHSTEP(0, fl->l_pid);
                h = HASHSTEP(h, fl->l_sysid);
        } else if (flags & F_FLOCK) {
                h = ((uintptr_t) id) >> 7;
        } else {
                struct proc *p = (struct proc *) id;
                h = HASHSTEP(0, p->p_pid);
                h = HASHSTEP(h, 0);
        }

        return (h % LOCK_OWNER_HASH_SIZE);
}
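
/*
 * Editorial note: the three arms above correspond to the three owner
 * classes described at the top of the file.  A local POSIX (fcntl)
 * caller passes its struct proc pointer as 'id', so requests from the
 * same process always hash to the same lf_lock_owners[] chain; a BSD
 * flock caller is identified by its struct file pointer (shifted right
 * to discard alignment bits); and a remote (F_REMOTE) owner hashes on
 * the <pid, sysid> pair carried in the flock structure.
 */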

/*
 * Return true if a lock owner matches the details passed to
 * lf_advlock.
 */
static int
lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl,
    int flags)
{
        if (flags & F_REMOTE) {
                return lo->lo_pid == fl->l_pid
                        && lo->lo_sysid == fl->l_sysid;
        } else {
                return lo->lo_id == id;
        }
}

static struct lockf_entry *
lf_alloc_lock(struct lock_owner *lo)
{
        struct lockf_entry *lf;

        lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO);

#ifdef LOCKF_DEBUG
        if (lockf_debug & 4)
                printf("Allocated lock %p\n", lf);
#endif
        if (lo) {
                sx_xlock(&lf_lock_owners_lock);
                lo->lo_refs++;
                sx_xunlock(&lf_lock_owners_lock);
                lf->lf_owner = lo;
        }

        return (lf);
}

static int
lf_free_lock(struct lockf_entry *lock)
{

        KASSERT(lock->lf_refs > 0, ("lockf_entry negative ref count %p", lock));
        if (--lock->lf_refs > 0)
                return (0);
        /*
         * Adjust the lock_owner reference count and
         * reclaim the entry if this is the last lock
         * for that owner.
         */
        struct lock_owner *lo = lock->lf_owner;
        if (lo) {
                KASSERT(LIST_EMPTY(&lock->lf_outedges),
                    ("freeing lock with dependencies"));
                KASSERT(LIST_EMPTY(&lock->lf_inedges),
                    ("freeing lock with dependents"));
                sx_xlock(&lf_lock_owners_lock);
                KASSERT(lo->lo_refs > 0, ("lock owner refcount"));
                lo->lo_refs--;
                if (lo->lo_refs == 0) {
#ifdef LOCKF_DEBUG
                        if (lockf_debug & 1)
                                printf("lf_free_lock: freeing lock owner %p\n",
                                    lo);
#endif
                        if (lo->lo_vertex) {
                                sx_xlock(&lf_owner_graph_lock);
                                graph_free_vertex(&lf_owner_graph,
                                    lo->lo_vertex);
                                sx_xunlock(&lf_owner_graph_lock);
                        }
                        LIST_REMOVE(lo, lo_link);
                        free(lo, M_LOCKF);
#ifdef LOCKF_DEBUG
                        if (lockf_debug & 4)
                                printf("Freed lock owner %p\n", lo);
#endif
                }
                sx_unlock(&lf_lock_owners_lock);
        }
        if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) {
                vrele(lock->lf_vnode);
                lock->lf_vnode = NULL;
        }
#ifdef LOCKF_DEBUG
        if (lockf_debug & 4)
                printf("Freed lock %p\n", lock);
#endif
        free(lock, M_LOCKF);
        return (1);
}

/*
 * Advisory record locking support
 */
int
lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
    u_quad_t size)
{
        struct lockf *state, *freestate = NULL;
        struct flock *fl = ap->a_fl;
        struct lockf_entry *lock;
        struct vnode *vp = ap->a_vp;
        caddr_t id = ap->a_id;
        int flags = ap->a_flags;
        int hash;
        struct lock_owner *lo;
        off_t start, end, oadd;
        int error;

        /*
         * Handle the F_UNLCKSYS case first - no need to mess about
         * creating a lock owner for this one.
         */
        if (ap->a_op == F_UNLCKSYS) {
                lf_clearremotesys(fl->l_sysid);
                return (0);
        }

        /*
         * Convert the flock structure into a start and end.
         */
        switch (fl->l_whence) {

        case SEEK_SET:
        case SEEK_CUR:
                /*
                 * Caller is responsible for adding any necessary offset
                 * when SEEK_CUR is used.
                 */
                start = fl->l_start;
                break;

        case SEEK_END:
                if (size > OFF_MAX ||
                    (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
                        return (EOVERFLOW);
                start = size + fl->l_start;
                break;

        default:
                return (EINVAL);
        }
        if (start < 0)
                return (EINVAL);
        if (fl->l_len < 0) {
                if (start == 0)
                        return (EINVAL);
                end = start - 1;
                start += fl->l_len;
                if (start < 0)
                        return (EINVAL);
        } else if (fl->l_len == 0) {
                end = OFF_MAX;
        } else {
                oadd = fl->l_len - 1;
                if (oadd > OFF_MAX - start)
                        return (EOVERFLOW);
                end = start + oadd;
        }
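
        /*
         * Worked example (editorial): with l_whence = SEEK_SET,
         * l_start = 100 and l_len = -10, the code above yields
         * start = 90 and end = 99, i.e. a negative l_len locks the
         * |l_len| bytes immediately preceding l_start; with l_len = 0
         * the range extends to OFF_MAX, covering everything from
         * start to the end of the file.
         */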
        /*
         * Avoid the common case of unlocking when the inode has no locks.
         */
        VI_LOCK(vp);
        if ((*statep) == NULL) {
                if (ap->a_op != F_SETLK) {
                        fl->l_type = F_UNLCK;
                        VI_UNLOCK(vp);
                        return (0);
                }
        }
        VI_UNLOCK(vp);

        /*
         * Map our arguments to an existing lock owner or create one
         * if this is the first time we have seen this owner.
         */
        hash = lf_hash_owner(id, fl, flags);
        sx_xlock(&lf_lock_owners_lock);
        LIST_FOREACH(lo, &lf_lock_owners[hash], lo_link)
                if (lf_owner_matches(lo, id, fl, flags))
                        break;
        if (!lo) {
                /*
                 * We initialise the lock with a reference
                 * count which matches the new lockf_entry
                 * structure created below.
                 */
                lo = malloc(sizeof(struct lock_owner), M_LOCKF,
                    M_WAITOK|M_ZERO);
#ifdef LOCKF_DEBUG
                if (lockf_debug & 4)
                        printf("Allocated lock owner %p\n", lo);
#endif

                lo->lo_refs = 1;
                lo->lo_flags = flags;
                lo->lo_id = id;
                if (flags & F_REMOTE) {
                        lo->lo_pid = fl->l_pid;
                        lo->lo_sysid = fl->l_sysid;
                } else if (flags & F_FLOCK) {
                        lo->lo_pid = -1;
                        lo->lo_sysid = 0;
                } else {
                        struct proc *p = (struct proc *) id;
                        lo->lo_pid = p->p_pid;
                        lo->lo_sysid = 0;
                }
                lo->lo_vertex = NULL;

#ifdef LOCKF_DEBUG
                if (lockf_debug & 1) {
                        printf("lf_advlockasync: new lock owner %p ", lo);
                        lf_print_owner(lo);
                        printf("\n");
                }
#endif

                LIST_INSERT_HEAD(&lf_lock_owners[hash], lo, lo_link);
        } else {
                /*
                 * We have seen this lock owner before, increase its
                 * reference count to account for the new lockf_entry
                 * structure we create below.
                 */
                lo->lo_refs++;
        }
        sx_xunlock(&lf_lock_owners_lock);

        /*
         * Create the lockf structure. We initialise the lf_owner
         * field here instead of in lf_alloc_lock() to avoid paying
         * the lf_lock_owners_lock tax twice.
         */
        lock = lf_alloc_lock(NULL);
        lock->lf_refs = 1;
        lock->lf_start = start;
        lock->lf_end = end;
        lock->lf_owner = lo;
        lock->lf_vnode = vp;
        if (flags & F_REMOTE) {
                /*
                 * For remote locks, the caller may release its ref to
                 * the vnode at any time - we have to ref it here to
                 * prevent it from being recycled unexpectedly.
                 */
                vref(vp);
        }

        /*
         * XXX The problem is that VTOI is UFS-specific, so it will
         * break LOCKF_DEBUG for all filesystems other than UFS because
         * it casts the vnode->data ptr to struct inode *.
         */
/*      lock->lf_inode = VTOI(ap->a_vp); */
        lock->lf_inode = (struct inode *)0;
        lock->lf_type = fl->l_type;
        LIST_INIT(&lock->lf_outedges);
        LIST_INIT(&lock->lf_inedges);
        lock->lf_async_task = ap->a_task;
        lock->lf_flags = ap->a_flags;

        /*
         * Do the requested operation. First find our state structure
         * and create a new one if necessary - the caller's *statep
         * variable and the state's ls_threads count is protected by
         * the vnode interlock.
         */
        VI_LOCK(vp);
        if (vp->v_iflag & VI_DOOMED) {
                VI_UNLOCK(vp);
                lf_free_lock(lock);
                return (ENOENT);
        }

        /*
         * Allocate a state structure if necessary.
         */
        state = *statep;
        if (state == NULL) {
                struct lockf *ls;

                VI_UNLOCK(vp);

                ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO);
                sx_init(&ls->ls_lock, "ls_lock");
                LIST_INIT(&ls->ls_active);
                LIST_INIT(&ls->ls_pending);
                ls->ls_threads = 1;

                sx_xlock(&lf_lock_states_lock);
                LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link);
                sx_xunlock(&lf_lock_states_lock);

                /*
                 * Cope if we lost a race with some other thread while
                 * trying to allocate memory.
                 */
                VI_LOCK(vp);
                if (vp->v_iflag & VI_DOOMED) {
                        VI_UNLOCK(vp);
                        sx_xlock(&lf_lock_states_lock);
                        LIST_REMOVE(ls, ls_link);
                        sx_xunlock(&lf_lock_states_lock);
                        sx_destroy(&ls->ls_lock);
                        free(ls, M_LOCKF);
                        lf_free_lock(lock);
                        return (ENOENT);
                }
                if ((*statep) == NULL) {
                        state = *statep = ls;
                        VI_UNLOCK(vp);
                } else {
                        state = *statep;
                        state->ls_threads++;
                        VI_UNLOCK(vp);

                        sx_xlock(&lf_lock_states_lock);
                        LIST_REMOVE(ls, ls_link);
                        sx_xunlock(&lf_lock_states_lock);
                        sx_destroy(&ls->ls_lock);
                        free(ls, M_LOCKF);
                }
        } else {
                state->ls_threads++;
                VI_UNLOCK(vp);
        }

        sx_xlock(&state->ls_lock);
        /*
         * Recheck the doomed vnode after state->ls_lock is
         * locked. lf_purgelocks() requires that no new threads add
         * pending locks when vnode is marked by VI_DOOMED flag.
         */
        VI_LOCK(vp);
        if (vp->v_iflag & VI_DOOMED) {
                state->ls_threads--;
                wakeup(state);
                VI_UNLOCK(vp);
                sx_xunlock(&state->ls_lock);
                lf_free_lock(lock);
                return (ENOENT);
        }
        VI_UNLOCK(vp);

        switch (ap->a_op) {
        case F_SETLK:
                error = lf_setlock(state, lock, vp, ap->a_cookiep);
                break;

        case F_UNLCK:
                error = lf_clearlock(state, lock);
                lf_free_lock(lock);
                break;

        case F_GETLK:
                error = lf_getlock(state, lock, fl);
                lf_free_lock(lock);
                break;

        case F_CANCEL:
                if (ap->a_cookiep)
                        error = lf_cancel(state, lock, *ap->a_cookiep);
                else
                        error = EINVAL;
                lf_free_lock(lock);
                break;

        default:
                lf_free_lock(lock);
                error = EINVAL;
                break;
        }

#ifdef INVARIANTS
        /*
         * Sanity-check conditions that should be impossible: the
         * active lock list becoming disordered or containing mutually
         * blocking locks. We also check the pending list for locks
         * which should be active (i.e. have no out-going edges).
         */
        LIST_FOREACH(lock, &state->ls_active, lf_link) {
                struct lockf_entry *lf;
                if (LIST_NEXT(lock, lf_link))
                        KASSERT((lock->lf_start
                                <= LIST_NEXT(lock, lf_link)->lf_start),
                            ("locks disordered"));
                LIST_FOREACH(lf, &state->ls_active, lf_link) {
                        if (lock == lf)
                                break;
                        KASSERT(!lf_blocks(lock, lf),
                            ("two conflicting active locks"));
                        if (lock->lf_owner == lf->lf_owner)
                                KASSERT(!lf_overlaps(lock, lf),
                                    ("two overlapping locks from same owner"));
                }
        }
        LIST_FOREACH(lock, &state->ls_pending, lf_link) {
                KASSERT(!LIST_EMPTY(&lock->lf_outedges),
                    ("pending lock which should be active"));
        }
#endif
        sx_xunlock(&state->ls_lock);

        /*
         * If we have removed the last active lock on the vnode and
         * this is the last thread that was in-progress, we can free
         * the state structure. We update the caller's pointer inside
         * the vnode interlock but call free outside.
         *
         * XXX alternatively, keep the state structure around until
         * the filesystem recycles - requires a callback from the
         * filesystem.
         */
        VI_LOCK(vp);

        state->ls_threads--;
        wakeup(state);
        if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) {
                KASSERT(LIST_EMPTY(&state->ls_pending),
                    ("freeing state with pending locks"));
                freestate = state;
                *statep = NULL;
        }

        VI_UNLOCK(vp);

        if (freestate) {
                sx_xlock(&lf_lock_states_lock);
                LIST_REMOVE(freestate, ls_link);
                sx_xunlock(&lf_lock_states_lock);
                sx_destroy(&freestate->ls_lock);
                free(freestate, M_LOCKF);
        }
        return (error);
}
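
/*
 * Editorial sketch of the asynchronous protocol; names marked
 * 'hypothetical' are illustrative and not part of this file.  A
 * caller that must not sleep clears F_WAIT, supplies a task and a
 * cookie pointer, and treats EINPROGRESS as "queued": the task is
 * enqueued on taskqueue_thread once the request unblocks, and the
 * cookie can be passed back with a_op = F_CANCEL to give up early.
 */
#if 0
static void my_retry_fn(void *ctx, int pending);  /* hypothetical */

struct vop_advlockasync_args a;         /* other fields filled as usual */
struct task my_task;
void *my_cookie;

TASK_INIT(&my_task, 0, my_retry_fn, my_ctx);    /* my_ctx: hypothetical */
a.a_op = F_SETLK;
a.a_flags &= ~F_WAIT;           /* ask lf_setlock() not to sleep */
a.a_task = &my_task;
a.a_cookiep = &my_cookie;
error = lf_advlockasync(&a, &vp->v_lockf, size);
if (error == EINPROGRESS) {
        /* Queued; my_retry_fn runs when the lock is granted. */
}
#endif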

int
lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size)
{
        struct vop_advlockasync_args a;

        a.a_vp = ap->a_vp;
        a.a_id = ap->a_id;
        a.a_op = ap->a_op;
        a.a_fl = ap->a_fl;
        a.a_flags = ap->a_flags;
        a.a_task = NULL;
        a.a_cookiep = NULL;

        return (lf_advlockasync(&a, statep, size));
}
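
/*
 * Editorial sketch: how a filesystem might route VOP_ADVLOCK through
 * the wrapper above.  This mirrors vop_stdadvlock(), which keeps the
 * state pointer in the vnode (v_lockf) and obtains the file size via
 * VOP_GETATTR; 'myfs_advlock' is a hypothetical name and the vnode
 * locking around VOP_GETATTR is elided for brevity.
 */
#if 0
static int
myfs_advlock(struct vop_advlock_args *ap)
{
        struct vnode *vp = ap->a_vp;
        struct vattr va;
        int error;

        error = VOP_GETATTR(vp, &va, curthread->td_ucred);
        if (error)
                return (error);
        return (lf_advlock(ap, &vp->v_lockf, va.va_size));
}
#endif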

void
lf_purgelocks(struct vnode *vp, struct lockf **statep)
{
        struct lockf *state;
        struct lockf_entry *lock, *nlock;

        /*
         * For this to work correctly, the caller must ensure that no
         * other threads enter the locking system for this vnode,
         * e.g. by checking VI_DOOMED. We wake up any threads that are
         * sleeping waiting for locks on this vnode and then free all
         * the remaining locks.
         */
        VI_LOCK(vp);
        KASSERT(vp->v_iflag & VI_DOOMED,
            ("lf_purgelocks: vp %p has not vgone yet", vp));
        state = *statep;
        if (state) {
                *statep = NULL;
                state->ls_threads++;
                VI_UNLOCK(vp);

                sx_xlock(&state->ls_lock);
                sx_xlock(&lf_owner_graph_lock);
                LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) {
                        LIST_REMOVE(lock, lf_link);
                        lf_remove_outgoing(lock);
                        lf_remove_incoming(lock);

                        /*
                         * If it's an async lock, we can just free it
                         * here, otherwise we let the sleeping thread
                         * free it.
                         */
                        if (lock->lf_async_task) {
                                lf_free_lock(lock);
                        } else {
                                lock->lf_flags |= F_INTR;
                                wakeup(lock);
                        }
                }
                sx_xunlock(&lf_owner_graph_lock);
                sx_xunlock(&state->ls_lock);

                /*
                 * Wait for all other threads, sleeping and otherwise
                 * to leave.
                 */
                VI_LOCK(vp);
                while (state->ls_threads > 1)
                        msleep(state, VI_MTX(vp), 0, "purgelocks", 0);
                VI_UNLOCK(vp);

                /*
                 * We can just free all the active locks since they
                 * will have no dependencies (we removed them all
                 * above). We don't need to bother locking since we
                 * are the last thread using this state structure.
                 */
                KASSERT(LIST_EMPTY(&state->ls_pending),
                    ("lock pending for %p", state));
                LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) {
                        LIST_REMOVE(lock, lf_link);
                        lf_free_lock(lock);
                }
                sx_xlock(&lf_lock_states_lock);
                LIST_REMOVE(state, ls_link);
                sx_xunlock(&lf_lock_states_lock);
                sx_destroy(&state->ls_lock);
                free(state, M_LOCKF);
        } else {
                VI_UNLOCK(vp);
        }
}

/*
 * Return non-zero if locks 'x' and 'y' overlap.
 */
static int
lf_overlaps(struct lockf_entry *x, struct lockf_entry *y)
{

        return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start);
}
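
/*
 * Editorial note: lf_start and lf_end describe a closed interval, so
 * e.g. [0..5] and [5..9] share exactly one byte and count as
 * overlapping, while [0..4] and [5..9] do not.
 */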

/*
 * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa).
 */
static int
lf_blocks(struct lockf_entry *x, struct lockf_entry *y)
{

        return x->lf_owner != y->lf_owner
                && (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK)
                && lf_overlaps(x, y);
}

/*
 * Allocate a lock edge from the free list
 */
static struct lockf_edge *
lf_alloc_edge(void)
{

        return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO));
}

/*
 * Free a lock edge.
 */
static void
lf_free_edge(struct lockf_edge *e)
{

        free(e, M_LOCKF);
}


/*
 * Ensure that the lock's owner has a corresponding vertex in the
 * owner graph.
 */
static void
lf_alloc_vertex(struct lockf_entry *lock)
{
        struct owner_graph *g = &lf_owner_graph;

        if (!lock->lf_owner->lo_vertex)
                lock->lf_owner->lo_vertex =
                        graph_alloc_vertex(g, lock->lf_owner);
}

/*
 * Attempt to record an edge from lock x to lock y. Return EDEADLK if
 * the new edge would cause a cycle in the owner graph.
 */
static int
lf_add_edge(struct lockf_entry *x, struct lockf_entry *y)
{
        struct owner_graph *g = &lf_owner_graph;
        struct lockf_edge *e;
        int error;

#ifdef INVARIANTS
        LIST_FOREACH(e, &x->lf_outedges, le_outlink)
                KASSERT(e->le_to != y, ("adding lock edge twice"));
#endif

        /*
         * Make sure the two owners have entries in the owner graph.
         */
        lf_alloc_vertex(x);
        lf_alloc_vertex(y);

        error = graph_add_edge(g, x->lf_owner->lo_vertex,
            y->lf_owner->lo_vertex);
        if (error)
                return (error);

        e = lf_alloc_edge();
        LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink);
        LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink);
        e->le_from = x;
        e->le_to = y;

        return (0);
}

/*
 * Remove an edge from the lock graph.
 */
static void
lf_remove_edge(struct lockf_edge *e)
{
        struct owner_graph *g = &lf_owner_graph;
        struct lockf_entry *x = e->le_from;
        struct lockf_entry *y = e->le_to;

        graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex);
        LIST_REMOVE(e, le_outlink);
        LIST_REMOVE(e, le_inlink);
        e->le_from = NULL;
        e->le_to = NULL;
        lf_free_edge(e);
}

/*
 * Remove all out-going edges from lock x.
 */
static void
lf_remove_outgoing(struct lockf_entry *x)
{
        struct lockf_edge *e;

        while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) {
                lf_remove_edge(e);
        }
}

/*
 * Remove all in-coming edges from lock x.
 */
static void
lf_remove_incoming(struct lockf_entry *x)
{
        struct lockf_edge *e;

        while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) {
                lf_remove_edge(e);
        }
}

/*
 * Walk the list of locks for the file and create an out-going edge
 * from lock to each blocking lock.
 */
static int
lf_add_outgoing(struct lockf *state, struct lockf_entry *lock)
{
        struct lockf_entry *overlap;
        int error;

        LIST_FOREACH(overlap, &state->ls_active, lf_link) {
                /*
                 * We may assume that the active list is sorted by
                 * lf_start.
                 */
                if (overlap->lf_start > lock->lf_end)
                        break;
                if (!lf_blocks(lock, overlap))
                        continue;

                /*
                 * We've found a blocking lock. Add the corresponding
                 * edge to the graphs and see if it would cause a
                 * deadlock.
                 */
                error = lf_add_edge(lock, overlap);

                /*
                 * The only error that lf_add_edge returns is EDEADLK.
                 * Remove any edges we added and return the error.
                 */
                if (error) {
                        lf_remove_outgoing(lock);
                        return (error);
                }
        }

        /*
         * We also need to add edges to sleeping locks that block
         * us. This ensures that lf_wakeup_lock cannot grant two
         * mutually blocking locks simultaneously and also enforces a
         * 'first come, first served' fairness model. Note that this
         * only happens if we are blocked by at least one active lock
         * due to the call to lf_getblock in lf_setlock below.
         */
        LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
                if (!lf_blocks(lock, overlap))
                        continue;
                /*
                 * We've found a blocking lock. Add the corresponding
                 * edge to the graphs and see if it would cause a
                 * deadlock.
                 */
                error = lf_add_edge(lock, overlap);

                /*
                 * The only error that lf_add_edge returns is EDEADLK.
                 * Remove any edges we added and return the error.
                 */
                if (error) {
                        lf_remove_outgoing(lock);
                        return (error);
                }
        }

        return (0);
}

/*
 * Walk the list of pending locks for the file and create an in-coming
 * edge to 'lock' from each pending lock that the new lock would block.
 */
static int
lf_add_incoming(struct lockf *state, struct lockf_entry *lock)
{
        struct lockf_entry *overlap;
        int error;

        LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
                if (!lf_blocks(lock, overlap))
                        continue;

                /*
                 * We've found a blocking lock. Add the corresponding
                 * edge to the graphs and see if it would cause a
                 * deadlock.
                 */
                error = lf_add_edge(overlap, lock);

                /*
                 * The only error that lf_add_edge returns is EDEADLK.
                 * Remove any edges we added and return the error.
                 */
                if (error) {
                        lf_remove_incoming(lock);
                        return (error);
                }
        }
        return (0);
}

/*
 * Insert lock into the active list, keeping list entries ordered by
 * increasing values of lf_start.
 */
static void
lf_insert_lock(struct lockf *state, struct lockf_entry *lock)
{
        struct lockf_entry *lf, *lfprev;

        if (LIST_EMPTY(&state->ls_active)) {
                LIST_INSERT_HEAD(&state->ls_active, lock, lf_link);
                return;
        }

        lfprev = NULL;
        LIST_FOREACH(lf, &state->ls_active, lf_link) {
                if (lf->lf_start > lock->lf_start) {
                        LIST_INSERT_BEFORE(lf, lock, lf_link);
                        return;
                }
                lfprev = lf;
        }
        LIST_INSERT_AFTER(lfprev, lock, lf_link);
}

/*
 * Wake up a sleeping lock and remove it from the pending list now
 * that all its dependencies have been resolved. The caller should
 * arrange for the lock to be added to the active list, adjusting any
 * existing locks for the same owner as needed.
 */
static void
lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock)
{

        /*
         * Remove from ls_pending list and wake up the caller
         * or start the async notification, as appropriate.
         */
        LIST_REMOVE(wakelock, lf_link);
#ifdef LOCKF_DEBUG
        if (lockf_debug & 1)
                lf_print("lf_wakeup_lock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
        if (wakelock->lf_async_task) {
                taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task);
        } else {
                wakeup(wakelock);
        }
}

/*
 * Re-check all dependent locks and remove edges to locks that we no
 * longer block. If 'all' is non-zero, the lock has been removed and
 * we must remove all the dependencies; otherwise it has simply been
 * reduced but remains active. Any pending locks which have been
 * unblocked are added to 'granted'.
 */
static void
lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all,
        struct lockf_entry_list *granted)
{
        struct lockf_edge *e, *ne;
        struct lockf_entry *deplock;

        LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) {
                deplock = e->le_from;
                if (all || !lf_blocks(lock, deplock)) {
                        sx_xlock(&lf_owner_graph_lock);
                        lf_remove_edge(e);
                        sx_xunlock(&lf_owner_graph_lock);
                        if (LIST_EMPTY(&deplock->lf_outedges)) {
                                lf_wakeup_lock(state, deplock);
                                LIST_INSERT_HEAD(granted, deplock, lf_link);
                        }
                }
        }
}

/*
 * Set the start of an existing active lock, updating dependencies and
 * adding any newly woken locks to 'granted'.
 */
static void
lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start,
        struct lockf_entry_list *granted)
{

        KASSERT(new_start >= lock->lf_start, ("can't increase lock"));
        lock->lf_start = new_start;
        LIST_REMOVE(lock, lf_link);
        lf_insert_lock(state, lock);
        lf_update_dependancies(state, lock, FALSE, granted);
}

/*
 * Set the end of an existing active lock, updating dependencies and
 * adding any newly woken locks to 'granted'.
 */
static void
lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end,
        struct lockf_entry_list *granted)
{

        KASSERT(new_end <= lock->lf_end, ("can't increase lock"));
        lock->lf_end = new_end;
        lf_update_dependancies(state, lock, FALSE, granted);
}

/*
 * Add a lock to the active list, updating or removing any current
 * locks owned by the same owner and processing any pending locks that
 * become unblocked as a result. This code is also used for unlock
 * since the logic for updating existing locks is identical.
 *
 * As a result of processing the new lock, we may unblock existing
 * pending locks as a result of downgrading/unlocking. We simply
 * activate the newly granted locks by looping.
 *
 * Since the new lock already has its dependencies set up, we always
 * add it to the list (unless it's an unlock request). This may
 * fragment the lock list in some pathological cases but it's probably
 * not a real problem.
 */
static void
lf_activate_lock(struct lockf *state, struct lockf_entry *lock)
{
        struct lockf_entry *overlap, *lf;
        struct lockf_entry_list granted;
        int ovcase;

        LIST_INIT(&granted);
        LIST_INSERT_HEAD(&granted, lock, lf_link);

        while (!LIST_EMPTY(&granted)) {
                lock = LIST_FIRST(&granted);
                LIST_REMOVE(lock, lf_link);

                /*
                 * Skip over locks owned by other processes.  Handle
                 * any locks that overlap and are owned by ourselves.
                 */
                overlap = LIST_FIRST(&state->ls_active);
                for (;;) {
                        ovcase = lf_findoverlap(&overlap, lock, SELF);

#ifdef LOCKF_DEBUG
                        if (ovcase && (lockf_debug & 2)) {
                                printf("lf_setlock: overlap %d", ovcase);
                                lf_print("", overlap);
                        }
#endif
                        /*
                         * Six cases:
                         *      0) no overlap
                         *      1) overlap == lock
                         *      2) overlap contains lock
                         *      3) lock contains overlap
                         *      4) overlap starts before lock
                         *      5) overlap ends after lock
                         */
                        switch (ovcase) {
                        case 0: /* no overlap */
                                break;

                        case 1: /* overlap == lock */
                                /*
                                 * We have already set up the
                                 * dependents for the new lock, taking
                                 * into account a possible downgrade
                                 * or unlock. Remove the old lock.
                                 */
                                LIST_REMOVE(overlap, lf_link);
                                lf_update_dependancies(state, overlap, TRUE,
                                        &granted);
                                lf_free_lock(overlap);
                                break;

                        case 2: /* overlap contains lock */
                                /*
                                 * Just split the existing lock.
                                 */
                                lf_split(state, overlap, lock, &granted);
                                break;

                        case 3: /* lock contains overlap */
                                /*
                                 * Delete the overlap and advance to
                                 * the next entry in the list.
                                 */
                                lf = LIST_NEXT(overlap, lf_link);
                                LIST_REMOVE(overlap, lf_link);
                                lf_update_dependancies(state, overlap, TRUE,
                                        &granted);
                                lf_free_lock(overlap);
                                overlap = lf;
                                continue;

                        case 4: /* overlap starts before lock */
                                /*
                                 * Just update the overlap end and
                                 * move on.
                                 */
                                lf_set_end(state, overlap, lock->lf_start - 1,
                                    &granted);
                                overlap = LIST_NEXT(overlap, lf_link);
                                continue;

                        case 5: /* overlap ends after lock */
                                /*
                                 * Change the start of overlap and
                                 * re-insert.
                                 */
                                lf_set_start(state, overlap, lock->lf_end + 1,
                                    &granted);
                                break;
                        }
                        break;
                }
#ifdef LOCKF_DEBUG
                if (lockf_debug & 1) {
                        if (lock->lf_type != F_UNLCK)
                                lf_print("lf_activate_lock: activated", lock);
                        else
                                lf_print("lf_activate_lock: unlocked", lock);
                        lf_printlist("lf_activate_lock", lock);
                }
#endif /* LOCKF_DEBUG */
                if (lock->lf_type != F_UNLCK)
                        lf_insert_lock(state, lock);
        }
}
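
/*
 * Editorial illustration of the six lf_findoverlap() cases handled
 * above, with 'lock' the new range and 'overlap' an existing lock
 * from the same owner:
 *
 *      0: no overlap               lock:             |---|
 *                                  overlap:  |---|
 *
 *      1: overlap == lock          2: overlap contains lock
 *         lock:    |-----|            lock:      |---|
 *         overlap: |-----|            overlap: |-------|
 *
 *      3: lock contains overlap    4: overlap starts before lock
 *         lock:    |-------|          lock:       |-----|
 *         overlap:   |---|            overlap: |-----|
 *
 *      5: overlap ends after lock
 *         lock:    |-----|
 *         overlap:    |-----|
 */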

/*
 * Cancel a pending lock request, either as a result of a signal or a
 * cancel request for an async lock.
 */
static void
lf_cancel_lock(struct lockf *state, struct lockf_entry *lock)
{
        struct lockf_entry_list granted;

        /*
         * Note it is theoretically possible that cancelling this lock
         * may allow some other pending lock to become
         * active. Consider this case:
         *
         * Owner        Action          Result          Dependencies
         *
         * A:           lock [0..0]     succeeds
         * B:           lock [2..2]     succeeds
         * C:           lock [1..2]     blocked         C->B
         * D:           lock [0..1]     blocked         C->B,D->A,D->C
         * A:           unlock [0..0]                   C->B,D->C
         * C:           cancel [1..2]
         *
         * Cancelling C removes both its out-going edge C->B and D's
         * edge D->C; D is then left with no out-going edges, so its
         * lock [0..1] can now be granted.
         */
1335
1336         LIST_REMOVE(lock, lf_link);
1337
1338         /*
1339          * Removing out-going edges is simple.
1340          */
1341         sx_xlock(&lf_owner_graph_lock);
1342         lf_remove_outgoing(lock);
1343         sx_xunlock(&lf_owner_graph_lock);
1344
1345         /*
1346          * Removing in-coming edges may allow some other lock to
1347          * become active - we use lf_update_dependancies to figure
1348          * this out.
1349          */
1350         LIST_INIT(&granted);
1351         lf_update_dependancies(state, lock, TRUE, &granted);
1352         lf_free_lock(lock);
1353
1354         /*
1355          * Feed any newly active locks to lf_activate_lock.
1356          */
1357         while (!LIST_EMPTY(&granted)) {
1358                 lock = LIST_FIRST(&granted);
1359                 LIST_REMOVE(lock, lf_link);
1360                 lf_activate_lock(state, lock);
1361         }
1362 }
1363
1364 /*
1365  * Set a byte-range lock.
1366  */
1367 static int
1368 lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
1369     void **cookiep)
1370 {
1371         static char lockstr[] = "lockf";
1372         int priority, error;
1373
1374 #ifdef LOCKF_DEBUG
1375         if (lockf_debug & 1)
1376                 lf_print("lf_setlock", lock);
1377 #endif /* LOCKF_DEBUG */
1378
1379         /*
1380          * Set the priority
1381          */
1382         priority = PLOCK;
1383         if (lock->lf_type == F_WRLCK)
1384                 priority += 4;
1385         if (!(lock->lf_flags & F_NOINTR))
1386                 priority |= PCATCH;
1387         /*
1388          * Scan lock list for this file looking for locks that would block us.
1389          */
1390         if (lf_getblock(state, lock)) {
1391                 /*
1392                  * Free the structure and return if nonblocking.
1393                  */
1394                 if ((lock->lf_flags & F_WAIT) == 0
1395                     && lock->lf_async_task == NULL) {
1396                         lf_free_lock(lock);
1397                         error = EAGAIN;
1398                         goto out;
1399                 }
1400
1401                 /*
1402                  * For flock type locks, we must first remove
1403                  * any shared locks that we hold before we sleep
1404                  * waiting for an exclusive lock.
1405                  */
1406                 if ((lock->lf_flags & F_FLOCK) &&
1407                     lock->lf_type == F_WRLCK) {
1408                         lock->lf_type = F_UNLCK;
1409                         lf_activate_lock(state, lock);
1410                         lock->lf_type = F_WRLCK;
1411                 }
1412
1413                 /*
1414                  * We are blocked. Create edges to each blocking lock,
1415                  * checking for deadlock using the owner graph. For
1416                  * simplicity, we run deadlock detection for all
1417                  * locks, posix and otherwise.
1418                  */
1419                 sx_xlock(&lf_owner_graph_lock);
1420                 error = lf_add_outgoing(state, lock);
1421                 sx_xunlock(&lf_owner_graph_lock);
1422
1423                 if (error) {
1424 #ifdef LOCKF_DEBUG
1425                         if (lockf_debug & 1)
1426                                 lf_print("lf_setlock: deadlock", lock);
1427 #endif
1428                         lf_free_lock(lock);
1429                         goto out;
1430                 }
1431
1432                 /*
1433                  * We have added edges to everything that blocks
1434                  * us. Sleep until they all go away.
1435                  */
1436                 LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link);
1437 #ifdef LOCKF_DEBUG
1438                 if (lockf_debug & 1) {
1439                         struct lockf_edge *e;
1440                         LIST_FOREACH(e, &lock->lf_outedges, le_outlink) {
1441                                 lf_print("lf_setlock: blocking on", e->le_to);
1442                                 lf_printlist("lf_setlock", e->le_to);
1443                         }
1444                 }
1445 #endif /* LOCKF_DEBUG */
1446
1447                 if ((lock->lf_flags & F_WAIT) == 0) {
1448                         /*
1449                          * The caller requested async notification -
1450                          * this callback happens when the blocking
1451                          * lock is released, allowing the caller to
1452                          * make another attempt to take the lock.
1453                          */
1454                         *cookiep = (void *) lock;
1455                         error = EINPROGRESS;
1456                         goto out;
1457                 }
1458
1459                 lock->lf_refs++;
1460                 error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0);
1461                 if (lf_free_lock(lock)) {
1462                         error = EINTR;
1463                         goto out;
1464                 }
1465
1466                 /*
1467                  * We may have been awakened by a signal and/or by a
1468                  * debugger continuing us (in which cases we must
1469                  * remove our lock graph edges) and/or by another
1470                  * process releasing a lock (in which case our edges
1471                  * have already been removed and we have been moved to
1472                  * the active list). We may also have been woken by
1473                  * lf_purgelocks which we report to the caller as
1474                  * EINTR. In that case, lf_purgelocks will have
1475                  * removed our lock graph edges.
1476                  *
1477                  * Note that it is possible to receive a signal after
1478                  * we were successfully woken (and moved to the active
1479                  * list) but before we resumed execution. In this
1480                  * case, our lf_outedges list will be clear. We
1481          * case, our lf_outedges list will be empty. We
1482                  *
1483                  * Note also, if we have been sleeping long enough, we
1484                  * may now have incoming edges from some newer lock
1485                  * which is waiting behind us in the queue.
1486                  */
1487                 if (lock->lf_flags & F_INTR) {
1488                         error = EINTR;
1489                         lf_free_lock(lock);
1490                         goto out;
1491                 }
1492                 if (LIST_EMPTY(&lock->lf_outedges)) {
1493                         error = 0;
1494                 } else {
1495                         lf_cancel_lock(state, lock);
1496                         goto out;
1497                 }
1498 #ifdef LOCKF_DEBUG
1499                 if (lockf_debug & 1) {
1500                         lf_print("lf_setlock: granted", lock);
1501                 }
1502 #endif
1503                 goto out;
1504         }
1505         /*
1506          * It looks like we are going to grant the lock. First add
1507          * edges from any currently pending lock that the new lock
1508          * would block.
1509          */
1510         sx_xlock(&lf_owner_graph_lock);
1511         error = lf_add_incoming(state, lock);
1512         sx_xunlock(&lf_owner_graph_lock);
1513         if (error) {
1514 #ifdef LOCKF_DEBUG
1515                 if (lockf_debug & 1)
1516                         lf_print("lf_setlock: deadlock", lock);
1517 #endif
1518                 lf_free_lock(lock);
1519                 goto out;
1520         }
1521
1522         /*
1523          * No blocks!!  Add the lock.  Note that we will
1524          * downgrade or upgrade any overlapping locks this
1525          * process already owns.
1526          */
1527         lf_activate_lock(state, lock);
1528         error = 0;
1529 out:
1530         return (error);
1531 }
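
/*
 * An illustrative userland sketch (hypothetical scenario; only
 * standard POSIX names) of how the paths above surface through
 * fcntl(2).  F_SETLK takes the non-blocking branch and fails with
 * EAGAIN when lf_getblock() finds a conflict; F_SETLKW sleeps and
 * fails with EDEADLK when lf_add_outgoing() detects a cycle in the
 * owner graph, or with EINTR when the sleep is interrupted:
 *
 *      struct flock fl = {
 *              .l_type   = F_WRLCK,
 *              .l_whence = SEEK_SET,
 *              .l_start  = 0,
 *              .l_len    = 100,
 *      };
 *      if (fcntl(fd, F_SETLK, &fl) == -1 && errno == EAGAIN)
 *              warn("conflicting lock held");
 *      if (fcntl(fd, F_SETLKW, &fl) == -1 && errno == EDEADLK)
 *              warn("lock would deadlock");
 */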
1532
1533 /*
1534  * Remove a byte-range lock on an inode.
1535  *
1536  * Generally, find the lock (or an overlap to that lock)
1537  * and remove it (or shrink it), then wake up anyone we can.
1538  */
1539 static int
1540 lf_clearlock(struct lockf *state, struct lockf_entry *unlock)
1541 {
1542         struct lockf_entry *overlap;
1543
1544         overlap = LIST_FIRST(&state->ls_active);
1545
1546         if (overlap == NOLOCKF)
1547                 return (0);
1548 #ifdef LOCKF_DEBUG
1549         if (unlock->lf_type != F_UNLCK)
1550                 panic("lf_clearlock: bad type");
1551         if (lockf_debug & 1)
1552                 lf_print("lf_clearlock", unlock);
1553 #endif /* LOCKF_DEBUG */
1554
1555         lf_activate_lock(state, unlock);
1556
1557         return (0);
1558 }
1559
1560 /*
1561  * Check whether there is a blocking lock, and if so return its
1562  * details in '*fl'.
1563  */
1564 static int
1565 lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl)
1566 {
1567         struct lockf_entry *block;
1568
1569 #ifdef LOCKF_DEBUG
1570         if (lockf_debug & 1)
1571                 lf_print("lf_getlock", lock);
1572 #endif /* LOCKF_DEBUG */
1573
1574         if ((block = lf_getblock(state, lock))) {
1575                 fl->l_type = block->lf_type;
1576                 fl->l_whence = SEEK_SET;
1577                 fl->l_start = block->lf_start;
1578                 if (block->lf_end == OFF_MAX)
1579                         fl->l_len = 0;
1580                 else
1581                         fl->l_len = block->lf_end - block->lf_start + 1;
1582                 fl->l_pid = block->lf_owner->lo_pid;
1583                 fl->l_sysid = block->lf_owner->lo_sysid;
1584         } else {
1585                 fl->l_type = F_UNLCK;
1586         }
1587         return (0);
1588 }
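
/*
 * A minimal userland sketch of the F_GETLK contract implemented
 * above (hypothetical scenario).  On return, l_type is F_UNLCK when
 * nothing blocks the query; otherwise it describes the first
 * blocking lock, with l_len == 0 standing for a lock that runs to
 * EOF (lf_end == OFF_MAX):
 *
 *      struct flock fl = {
 *              .l_type   = F_WRLCK,
 *              .l_whence = SEEK_SET,
 *              .l_start  = 0,
 *              .l_len    = 0,
 *      };
 *      if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
 *              printf("blocked by pid %d, start %jd, len %jd\n",
 *                  (int)fl.l_pid, (intmax_t)fl.l_start,
 *                  (intmax_t)fl.l_len);
 */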
1589
1590 /*
1591  * Cancel an async lock request.
1592  */
1593 static int
1594 lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie)
1595 {
1596         struct lockf_entry *reallock;
1597
1598         /*
1599          * We need to match this request with an existing lock
1600          * request.
1601          */
1602         LIST_FOREACH(reallock, &state->ls_pending, lf_link) {
1603                 if ((void *) reallock == cookie) {
1604                         /*
1605                          * Double-check that this lock looks right
1606                          * (maybe use a rolling ID for the cancel
1607                          * cookie instead?)
1608                          */
1609                         if (!(reallock->lf_vnode == lock->lf_vnode
1610                                 && reallock->lf_start == lock->lf_start
1611                                 && reallock->lf_end == lock->lf_end)) {
1612                                 return (ENOENT);
1613                         }
1614
1615                         /*
1616                          * Make sure this lock was async and then just
1617                          * remove it from its wait lists.
1618                          */
1619                         if (!reallock->lf_async_task) {
1620                                 return (ENOENT);
1621                         }
1622
1623                         /*
1624                          * Note that since any other thread must take
1625                          * state->ls_lock before it can possibly
1626                          * trigger the async callback, we are safe
1627                          * from a race with lf_wakeup_lock, i.e. we
1628                          * can free the lock (actually our caller does
1629                          * this).
1630                          */
1631                         lf_cancel_lock(state, reallock);
1632                         return (0);
1633                 }
1634         }
1635
1636         /*
1637          * We didn't find a matching lock - not much we can do here.
1638          */
1639         return (ENOENT);
1640 }
1641
1642 /*
1643  * Walk the list of locks for an inode and
1644  * return the first blocking lock.
1645  */
1646 static struct lockf_entry *
1647 lf_getblock(struct lockf *state, struct lockf_entry *lock)
1648 {
1649         struct lockf_entry *overlap;
1650
1651         LIST_FOREACH(overlap, &state->ls_active, lf_link) {
1652                 /*
1653                  * We may assume that the active list is sorted by
1654                  * lf_start.
1655                  */
1656                 if (overlap->lf_start > lock->lf_end)
1657                         break;
1658                 if (!lf_blocks(lock, overlap))
1659                         continue;
1660                 return (overlap);
1661         }
1662         return (NOLOCKF);
1663 }
1664
1665 /*
1666  * Walk the list of locks for an inode to find an overlapping lock (if
1667  * any) and return a classification of that overlap.
1668  *
1669  * Arguments:
1670  *      *overlap        The place in the lock list to start looking
1671  *      lock            The lock which is being tested
1672  *      type            Pass 'SELF' to test only locks with the same
1673  *                      owner as lock, or 'OTHERS' to test only locks
1674  *                      with a different owner
1675  *
1676  * Returns one of six values:
1677  *      0) no overlap
1678  *      1) overlap == lock
1679  *      2) overlap contains lock
1680  *      3) lock contains overlap
1681  *      4) overlap starts before lock
1682  *      5) overlap ends after lock
1683  *
1684  * If there is an overlapping lock, '*overlap' is set to point at the
1685  * overlapping lock.
1686  *
1687  * NOTE: this returns only the FIRST overlapping lock.  There
1688  *       may be more than one.
1689  */
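/*
 * The six classifications, pictured with 'lock' fixed and the
 * candidate 'overlap' varying (all ranges inclusive, as elsewhere
 * in this file):
 *
 *      lock:                   |===========|
 *      0) no overlap:                            |-----|
 *      1) overlap == lock:     |-----------|
 *      2) overlap contains:  |---------------|
 *      3) lock contains:           |-----|
 *      4) starts before:     |-------|
 *      5) ends after:                  |---------|
 */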
1690 static int
1691 lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type)
1692 {
1693         struct lockf_entry *lf;
1694         off_t start, end;
1695         int res;
1696
1697         if ((*overlap) == NOLOCKF) {
1698                 return (0);
1699         }
1700 #ifdef LOCKF_DEBUG
1701         if (lockf_debug & 2)
1702                 lf_print("lf_findoverlap: looking for overlap in", lock);
1703 #endif /* LOCKF_DEBUG */
1704         start = lock->lf_start;
1705         end = lock->lf_end;
1706         res = 0;
1707         while (*overlap) {
1708                 lf = *overlap;
1709                 if (lf->lf_start > end)
1710                         break;
1711                 if (((type & SELF) && lf->lf_owner != lock->lf_owner) ||
1712                     ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) {
1713                         *overlap = LIST_NEXT(lf, lf_link);
1714                         continue;
1715                 }
1716 #ifdef LOCKF_DEBUG
1717                 if (lockf_debug & 2)
1718                         lf_print("\tchecking", lf);
1719 #endif /* LOCKF_DEBUG */
1720                 /*
1721                  * OK, check for overlap
1722                  *
1723                  * Six cases:
1724                  *      0) no overlap
1725                  *      1) overlap == lock
1726                  *      2) overlap contains lock
1727                  *      3) lock contains overlap
1728                  *      4) overlap starts before lock
1729                  *      5) overlap ends after lock
1730                  */
1731                 if (start > lf->lf_end) {
1732                         /* Case 0 */
1733 #ifdef LOCKF_DEBUG
1734                         if (lockf_debug & 2)
1735                                 printf("no overlap\n");
1736 #endif /* LOCKF_DEBUG */
1737                         *overlap = LIST_NEXT(lf, lf_link);
1738                         continue;
1739                 }
1740                 if (lf->lf_start == start && lf->lf_end == end) {
1741                         /* Case 1 */
1742 #ifdef LOCKF_DEBUG
1743                         if (lockf_debug & 2)
1744                                 printf("overlap == lock\n");
1745 #endif /* LOCKF_DEBUG */
1746                         res = 1;
1747                         break;
1748                 }
1749                 if (lf->lf_start <= start && lf->lf_end >= end) {
1750                         /* Case 2 */
1751 #ifdef LOCKF_DEBUG
1752                         if (lockf_debug & 2)
1753                                 printf("overlap contains lock\n");
1754 #endif /* LOCKF_DEBUG */
1755                         res = 2;
1756                         break;
1757                 }
1758                 if (start <= lf->lf_start && end >= lf->lf_end) {
1759                         /* Case 3 */
1760 #ifdef LOCKF_DEBUG
1761                         if (lockf_debug & 2)
1762                                 printf("lock contains overlap\n");
1763 #endif /* LOCKF_DEBUG */
1764                         res = 3;
1765                         break;
1766                 }
1767                 if (lf->lf_start < start && lf->lf_end >= start) {
1768                         /* Case 4 */
1769 #ifdef LOCKF_DEBUG
1770                         if (lockf_debug & 2)
1771                                 printf("overlap starts before lock\n");
1772 #endif /* LOCKF_DEBUG */
1773                         res = 4;
1774                         break;
1775                 }
1776                 if (lf->lf_start > start && lf->lf_end > end) {
1777                         /* Case 5 */
1778 #ifdef LOCKF_DEBUG
1779                         if (lockf_debug & 2)
1780                                 printf("overlap ends after lock\n");
1781 #endif /* LOCKF_DEBUG */
1782                         res = 5;
1783                         break;
1784                 }
1785                 panic("lf_findoverlap: default");
1786         }
1787         return (res);
1788 }
1789
1790 /*
1791  * Split the existing 'lock1', based on the extent of the lock
1792  * described by 'lock2'. The existing lock should cover 'lock2'
1793  * entirely.
1794  *
1795  * Any pending locks which have been unblocked are added to
1796  * 'granted'.
1797  */
1798 static void
1799 lf_split(struct lockf *state, struct lockf_entry *lock1,
1800     struct lockf_entry *lock2, struct lockf_entry_list *granted)
1801 {
1802         struct lockf_entry *splitlock;
1803
1804 #ifdef LOCKF_DEBUG
1805         if (lockf_debug & 2) {
1806                 lf_print("lf_split", lock1);
1807                 lf_print("splitting from", lock2);
1808         }
1809 #endif /* LOCKF_DEBUG */
1810         /*
1811          * Check whether we need to split at all.
1812          */
1813         if (lock1->lf_start == lock2->lf_start) {
1814                 lf_set_start(state, lock1, lock2->lf_end + 1, granted);
1815                 return;
1816         }
1817         if (lock1->lf_end == lock2->lf_end) {
1818                 lf_set_end(state, lock1, lock2->lf_start - 1, granted);
1819                 return;
1820         }
1821         /*
1822          * Make a new lock consisting of the last part of
1823          * the encompassing lock.
1824          */
1825         splitlock = lf_alloc_lock(lock1->lf_owner);
1826         memcpy(splitlock, lock1, sizeof *splitlock);
1827         splitlock->lf_refs = 1;
1828         if (splitlock->lf_flags & F_REMOTE)
1829                 vref(splitlock->lf_vnode);
1830
1831         /*
1832          * This cannot cause a deadlock since any edges we would add
1833          * to splitlock already exist in lock1. We must be sure to add
1834          * the necessary dependencies to splitlock before we reduce lock1;
1835          * otherwise we may accidentally grant a pending lock that
1836          * was blocked by the tail end of lock1.
1837          */
1838         splitlock->lf_start = lock2->lf_end + 1;
1839         LIST_INIT(&splitlock->lf_outedges);
1840         LIST_INIT(&splitlock->lf_inedges);
1841         sx_xlock(&lf_owner_graph_lock);
1842         lf_add_incoming(state, splitlock);
1843         sx_xunlock(&lf_owner_graph_lock);
1844
1845         lf_set_end(state, lock1, lock2->lf_start - 1, granted);
1846
1847         /*
1848          * OK, now link it in
1849          */
1850         lf_insert_lock(state, splitlock);
1851 }
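
/*
 * A worked example of the general case above (hypothetical extents):
 * if lock1 covers [0..99] and lock2 describes [40..59], lf_set_end()
 * trims lock1 to [0..39] while splitlock takes [60..99], leaving a
 * hole that exactly matches lock2.  When the two extents share a
 * start or an end, no new entry is needed and lock1 is simply
 * trimmed with lf_set_start() or lf_set_end().
 */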
1852
1853 struct lockdesc {
1854         STAILQ_ENTRY(lockdesc) link;
1855         struct vnode *vp;
1856         struct flock fl;
1857 };
1858 STAILQ_HEAD(lockdesclist, lockdesc);
1859
1860 int
1861 lf_iteratelocks_sysid(int sysid, lf_iterator *fn, void *arg)
1862 {
1863         struct lockf *ls;
1864         struct lockf_entry *lf;
1865         struct lockdesc *ldesc;
1866         struct lockdesclist locks;
1867         int error;
1868
1869         /*
1870          * In order to keep the locking simple, we iterate over the
1871          * active lock lists to build a list of locks that need
1872          * releasing. We then call the iterator for each one in turn.
1873          *
1874          * We take an extra reference to the vnode for the duration to
1875          * make sure it doesn't go away before we are finished.
1876          */
1877         STAILQ_INIT(&locks);
1878         sx_xlock(&lf_lock_states_lock);
1879         LIST_FOREACH(ls, &lf_lock_states, ls_link) {
1880                 sx_xlock(&ls->ls_lock);
1881                 LIST_FOREACH(lf, &ls->ls_active, lf_link) {
1882                         if (lf->lf_owner->lo_sysid != sysid)
1883                                 continue;
1884
1885                         ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
1886                             M_WAITOK);
1887                         ldesc->vp = lf->lf_vnode;
1888                         vref(ldesc->vp);
1889                         ldesc->fl.l_start = lf->lf_start;
1890                         if (lf->lf_end == OFF_MAX)
1891                                 ldesc->fl.l_len = 0;
1892                         else
1893                                 ldesc->fl.l_len =
1894                                         lf->lf_end - lf->lf_start + 1;
1895                         ldesc->fl.l_whence = SEEK_SET;
1896                         ldesc->fl.l_type = F_UNLCK;
1897                         ldesc->fl.l_pid = lf->lf_owner->lo_pid;
1898                         ldesc->fl.l_sysid = sysid;
1899                         STAILQ_INSERT_TAIL(&locks, ldesc, link);
1900                 }
1901                 sx_xunlock(&ls->ls_lock);
1902         }
1903         sx_xunlock(&lf_lock_states_lock);
1904
1905         /*
1906          * Call the iterator function for each lock in turn. If the
1907          * iterator returns an error code, just free the rest of the
1908          * lockdesc structures.
1909          */
1910         error = 0;
1911         while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
1912                 STAILQ_REMOVE_HEAD(&locks, link);
1913                 if (!error)
1914                         error = fn(ldesc->vp, &ldesc->fl, arg);
1915                 vrele(ldesc->vp);
1916                 free(ldesc, M_LOCKF);
1917         }
1918
1919         return (error);
1920 }
1921
1922 int
1923 lf_iteratelocks_vnode(struct vnode *vp, lf_iterator *fn, void *arg)
1924 {
1925         struct lockf *ls;
1926         struct lockf_entry *lf;
1927         struct lockdesc *ldesc;
1928         struct lockdesclist locks;
1929         int error;
1930
1931         /*
1932          * In order to keep the locking simple, we iterate over the
1933          * active lock lists to build a list of locks that need
1934          * releasing. We then call the iterator for each one in turn.
1935          *
1936          * We take an extra reference to the vnode for the duration to
1937          * make sure it doesn't go away before we are finished.
1938          */
1939         STAILQ_INIT(&locks);
1940         VI_LOCK(vp);
1941         ls = vp->v_lockf;
1942         if (!ls) {
1943                 VI_UNLOCK(vp);
1944                 return (0);
1945         }
1946         ls->ls_threads++;
1947         VI_UNLOCK(vp);
1948
1949         sx_xlock(&ls->ls_lock);
1950         LIST_FOREACH(lf, &ls->ls_active, lf_link) {
1951                 ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
1952                     M_WAITOK);
1953                 ldesc->vp = lf->lf_vnode;
1954                 vref(ldesc->vp);
1955                 ldesc->fl.l_start = lf->lf_start;
1956                 if (lf->lf_end == OFF_MAX)
1957                         ldesc->fl.l_len = 0;
1958                 else
1959                         ldesc->fl.l_len =
1960                                 lf->lf_end - lf->lf_start + 1;
1961                 ldesc->fl.l_whence = SEEK_SET;
1962                 ldesc->fl.l_type = F_UNLCK;
1963                 ldesc->fl.l_pid = lf->lf_owner->lo_pid;
1964                 ldesc->fl.l_sysid = lf->lf_owner->lo_sysid;
1965                 STAILQ_INSERT_TAIL(&locks, ldesc, link);
1966         }
1967         sx_xunlock(&ls->ls_lock);
1968         VI_LOCK(vp);
1969         ls->ls_threads--;
1970         wakeup(ls);
1971         VI_UNLOCK(vp);
1972
1973         /*
1974          * Call the iterator function for each lock in turn. If the
1975          * iterator returns an error code, just free the rest of the
1976          * lockdesc structures.
1977          */
1978         error = 0;
1979         while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
1980                 STAILQ_REMOVE_HEAD(&locks, link);
1981                 if (!error)
1982                         error = fn(ldesc->vp, &ldesc->fl, arg);
1983                 vrele(ldesc->vp);
1984                 free(ldesc, M_LOCKF);
1985         }
1986
1987         return (error);
1988 }
1989
1990 static int
1991 lf_clearremotesys_iterator(struct vnode *vp, struct flock *fl, void *arg)
1992 {
1993
1994         VOP_ADVLOCK(vp, 0, F_UNLCK, fl, F_REMOTE);
1995         return (0);
1996 }
1997
1998 void
1999 lf_clearremotesys(int sysid)
2000 {
2001
2002         KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS"));
2003         lf_iteratelocks_sysid(sysid, lf_clearremotesys_iterator, NULL);
2004 }
2005
2006 int
2007 lf_countlocks(int sysid)
2008 {
2009         int i;
2010         struct lock_owner *lo;
2011         int count;
2012
2013         count = 0;
2014         sx_xlock(&lf_lock_owners_lock);
2015         for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++)
2016                 LIST_FOREACH(lo, &lf_lock_owners[i], lo_link)
2017                         if (lo->lo_sysid == sysid)
2018                                 count += lo->lo_refs;
2019         sx_xunlock(&lf_lock_owners_lock);
2020
2021         return (count);
2022 }
2023
2024 #ifdef LOCKF_DEBUG
2025
2026 /*
2027  * Return non-zero if y is reachable from x using a brute force
2028  * search. If reachable and path is non-null, return the route taken
2029  * in path.
2030  */
2031 static int
2032 graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
2033     struct owner_vertex_list *path)
2034 {
2035         struct owner_edge *e;
2036
2037         if (x == y) {
2038                 if (path)
2039                         TAILQ_INSERT_HEAD(path, x, v_link);
2040                 return 1;
2041         }
2042
2043         LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2044                 if (graph_reaches(e->e_to, y, path)) {
2045                         if (path)
2046                                 TAILQ_INSERT_HEAD(path, x, v_link);
2047                         return 1;
2048                 }
2049         }
2050         return 0;
2051 }
2052
2053 /*
2054  * Perform consistency checks on the graph. Make sure the values of
2055  * v_order are correct. If checkorder is non-zero, check that no vertex
2056  * can reach any other vertex with a smaller order.
2057  */
2058 static void
2059 graph_check(struct owner_graph *g, int checkorder)
2060 {
2061         int i, j;
2062
2063         for (i = 0; i < g->g_size; i++) {
2064                 if (!g->g_vertices[i]->v_owner)
2065                         continue;
2066                 KASSERT(g->g_vertices[i]->v_order == i,
2067                     ("lock graph vertices disordered"));
2068                 if (checkorder) {
2069                         for (j = 0; j < i; j++) {
2070                                 if (!g->g_vertices[j]->v_owner)
2071                                         continue;
2072                                 KASSERT(!graph_reaches(g->g_vertices[i],
2073                                         g->g_vertices[j], NULL),
2074                                     ("lock graph vertices disordered"));
2075                         }
2076                 }
2077         }
2078 }
2079
2080 static void
2081 graph_print_vertices(struct owner_vertex_list *set)
2082 {
2083         struct owner_vertex *v;
2084
2085         printf("{ ");
2086         TAILQ_FOREACH(v, set, v_link) {
2087                 printf("%d:", v->v_order);
2088                 lf_print_owner(v->v_owner);
2089                 if (TAILQ_NEXT(v, v_link))
2090                         printf(", ");
2091         }
2092         printf(" }\n");
2093 }
2094
2095 #endif
2096
2097 /*
2098  * Calculate the subset of vertices v from the affected region [y..x]
2099  * where v is reachable from y. Return -1 if a loop was detected
2100  * (i.e. x is reachable from y); otherwise return the number of
2101  * vertices in this subset.
2102  */
2103 static int
2104 graph_delta_forward(struct owner_graph *g, struct owner_vertex *x,
2105     struct owner_vertex *y, struct owner_vertex_list *delta)
2106 {
2107         uint32_t gen;
2108         struct owner_vertex *v;
2109         struct owner_edge *e;
2110         int n;
2111
2112         /*
2113          * We start with a set containing just y. Then for each vertex
2114          * v in the set not yet processed, we add each vertex that v
2115          * has an out-edge to and that is within the affected region
2116          * [y..x]. If we see the vertex x on our travels, stop
2117          * immediately.
2118          */
2119         TAILQ_INIT(delta);
2120         TAILQ_INSERT_TAIL(delta, y, v_link);
2121         v = y;
2122         n = 1;
2123         gen = g->g_gen;
2124         while (v) {
2125                 LIST_FOREACH(e, &v->v_outedges, e_outlink) {
2126                         if (e->e_to == x)
2127                                 return -1;
2128                         if (e->e_to->v_order < x->v_order
2129                             && e->e_to->v_gen != gen) {
2130                                 e->e_to->v_gen = gen;
2131                                 TAILQ_INSERT_TAIL(delta, e->e_to, v_link);
2132                                 n++;
2133                         }
2134                 }
2135                 v = TAILQ_NEXT(v, v_link);
2136         }
2137
2138         return (n);
2139 }
2140
2141 /*
2142  * Calculate the subset of vertices v from the affected region [y..x]
2143  * where v reaches x. Return the number of vertices in this subset.
2144  */
2145 static int
2146 graph_delta_backward(struct owner_graph *g, struct owner_vertex *x,
2147     struct owner_vertex *y, struct owner_vertex_list *delta)
2148 {
2149         uint32_t gen;
2150         struct owner_vertex *v;
2151         struct owner_edge *e;
2152         int n;
2153
2154         /*
2155          * We start with a set containing just x. Then for each vertex
2156          * v in the set not yet processed, we add each vertex that v
2157          * has an in-edge from and that is within the affected region
2158          * [y..x].
2159          */
2160         TAILQ_INIT(delta);
2161         TAILQ_INSERT_TAIL(delta, x, v_link);
2162         v = x;
2163         n = 1;
2164         gen = g->g_gen;
2165         while (v) {
2166                 LIST_FOREACH(e, &v->v_inedges, e_inlink) {
2167                         if (e->e_from->v_order > y->v_order
2168                             && e->e_from->v_gen != gen) {
2169                                 e->e_from->v_gen = gen;
2170                                 TAILQ_INSERT_HEAD(delta, e->e_from, v_link);
2171                                 n++;
2172                         }
2173                 }
2174                 v = TAILQ_PREV(v, owner_vertex_list, v_link);
2175         }
2176
2177         return (n);
2178 }
2179
2180 static int
2181 graph_add_indices(int *indices, int n, struct owner_vertex_list *set)
2182 {
2183         struct owner_vertex *v;
2184         int i, j;
2185
2186         TAILQ_FOREACH(v, set, v_link) {
2187                 for (i = n;
2188                      i > 0 && indices[i - 1] > v->v_order; i--)
2189                         ;
2190                 for (j = n - 1; j >= i; j--)
2191                         indices[j + 1] = indices[j];
2192                 indices[i] = v->v_order;
2193                 n++;
2194         }
2195
2196         return (n);
2197 }
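
/*
 * For example (hypothetical order values): merging a set with orders
 * {7, 3, 9} into indices[] = {2, 5} with n = 2 produces
 * {2, 3, 5, 7, 9} and returns 5; each v_order is slid into place so
 * that indices[] stays sorted.
 */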
2198
2199 static int
2200 graph_assign_indices(struct owner_graph *g, int *indices, int nextunused,
2201     struct owner_vertex_list *set)
2202 {
2203         struct owner_vertex *v, *vlowest;
2204
2205         while (!TAILQ_EMPTY(set)) {
2206                 vlowest = NULL;
2207                 TAILQ_FOREACH(v, set, v_link) {
2208                         if (!vlowest || v->v_order < vlowest->v_order)
2209                                 vlowest = v;
2210                 }
2211                 TAILQ_REMOVE(set, vlowest, v_link);
2212                 vlowest->v_order = indices[nextunused];
2213                 g->g_vertices[vlowest->v_order] = vlowest;
2214                 nextunused++;
2215         }
2216
2217         return (nextunused);
2218 }
2219
2220 static int
2221 graph_add_edge(struct owner_graph *g, struct owner_vertex *x,
2222     struct owner_vertex *y)
2223 {
2224         struct owner_edge *e;
2225         struct owner_vertex_list deltaF, deltaB;
2226         int nF, nB, n, vi, i;
2227         int *indices;
2228
2229         sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2230
2231         LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2232                 if (e->e_to == y) {
2233                         e->e_refs++;
2234                         return (0);
2235                 }
2236         }
2237
2238 #ifdef LOCKF_DEBUG
2239         if (lockf_debug & 8) {
2240                 printf("adding edge %d:", x->v_order);
2241                 lf_print_owner(x->v_owner);
2242                 printf(" -> %d:", y->v_order);
2243                 lf_print_owner(y->v_owner);
2244                 printf("\n");
2245         }
2246 #endif
2247         if (y->v_order < x->v_order) {
2248                 /*
2249                  * The new edge violates the order. First find the set
2250                  * of affected vertices reachable from y (deltaF) and
2251                  * the set of affected vertices that reach x
2252                  * (deltaB), using the graph generation number to
2253                  * detect whether we have visited a given vertex
2254                  * already. We re-order the graph so that each vertex
2255                  * in deltaB appears before each vertex in deltaF.
2256                  *
2257                  * If x is a member of deltaF, then the new edge would
2258                  * create a cycle. Otherwise, we may assume that
2259                  * deltaF and deltaB are disjoint.
2260                  */
2261                 g->g_gen++;
2262                 if (g->g_gen == 0) {
2263                         /*
2264                          * Generation wrap.
2265                          */
2266                         for (vi = 0; vi < g->g_size; vi++) {
2267                                 g->g_vertices[vi]->v_gen = 0;
2268                         }
2269                         g->g_gen++;
2270                 }
2271                 nF = graph_delta_forward(g, x, y, &deltaF);
2272                 if (nF < 0) {
2273 #ifdef LOCKF_DEBUG
2274                         if (lockf_debug & 8) {
2275                                 struct owner_vertex_list path;
2276                                 printf("deadlock: ");
2277                                 TAILQ_INIT(&path);
2278                                 graph_reaches(y, x, &path);
2279                                 graph_print_vertices(&path);
2280                         }
2281 #endif
2282                         return (EDEADLK);
2283                 }
2284
2285 #ifdef LOCKF_DEBUG
2286                 if (lockf_debug & 8) {
2287                         printf("re-ordering graph vertices\n");
2288                         printf("deltaF = ");
2289                         graph_print_vertices(&deltaF);
2290                 }
2291 #endif
2292
2293                 nB = graph_delta_backward(g, x, y, &deltaB);
2294
2295 #ifdef LOCKF_DEBUG
2296                 if (lockf_debug & 8) {
2297                         printf("deltaB = ");
2298                         graph_print_vertices(&deltaB);
2299                 }
2300 #endif
2301
2302                 /*
2303                  * We first build a set of vertex indices (vertex
2304                  * order values) that we may use, then we re-assign
2305                  * orders first to those vertices in deltaB, then to
2306                  * deltaF. Note that the contents of deltaF and deltaB
2307                  * may be partially disordered - we perform an
2308                  * insertion sort while building our index set.
2309                  */
2310                 indices = g->g_indexbuf;
2311                 n = graph_add_indices(indices, 0, &deltaF);
2312                 graph_add_indices(indices, n, &deltaB);
2313
2314                 /*
2315                  * We must also be sure to maintain the relative
2316                  * ordering of deltaF and deltaB when re-assigning
2317                  * vertices. We do this by iteratively removing the
2318                  * lowest ordered element from the set and assigning
2319                  * it the next value from our new ordering.
2320                  */
2321                 i = graph_assign_indices(g, indices, 0, &deltaB);
2322                 graph_assign_indices(g, indices, i, &deltaF);
2323
2324 #ifdef LOCKF_DEBUG
2325                 if (lockf_debug & 8) {
2326                         struct owner_vertex_list set;
2327                         TAILQ_INIT(&set);
2328                         for (i = 0; i < nB + nF; i++)
2329                                 TAILQ_INSERT_TAIL(&set,
2330                                     g->g_vertices[indices[i]], v_link);
2331                         printf("new ordering = ");
2332                         graph_print_vertices(&set);
2333                 }
2334 #endif
2335         }
2336
2337         KASSERT(x->v_order < y->v_order, ("Failed to re-order graph"));
2338
2339 #ifdef LOCKF_DEBUG
2340         if (lockf_debug & 8) {
2341                 graph_check(g, TRUE);
2342         }
2343 #endif
2344
2345         e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK);
2346
2347         LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink);
2348         LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink);
2349         e->e_refs = 1;
2350         e->e_from = x;
2351         e->e_to = y;
2352
2353         return (0);
2354 }
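
/*
 * A small worked example of the re-ordering above (hypothetical
 * vertices): with order a(0) b(1) c(2) d(3) and an existing edge
 * b -> c, adding d -> b violates the order.  The affected region is
 * [b..d]: deltaF = {b, c} (reachable from b) and deltaB = {d}
 * (reaches d), the merged index set is {1, 2, 3}, and deltaB is
 * renumbered before deltaF, giving a(0) d(1) b(2) c(3) so that the
 * new edge again points from a lower order to a higher one.
 */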
2355
2356 /*
2357  * Remove an edge x->y from the graph.
2358  */
2359 static void
2360 graph_remove_edge(struct owner_graph *g, struct owner_vertex *x,
2361     struct owner_vertex *y)
2362 {
2363         struct owner_edge *e;
2364
2365         sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2366
2367         LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2368                 if (e->e_to == y)
2369                         break;
2370         }
2371         KASSERT(e, ("Removing non-existent edge from deadlock graph"));
2372
2373         e->e_refs--;
2374         if (e->e_refs == 0) {
2375 #ifdef LOCKF_DEBUG
2376                 if (lockf_debug & 8) {
2377                         printf("removing edge %d:", x->v_order);
2378                         lf_print_owner(x->v_owner);
2379                         printf(" -> %d:", y->v_order);
2380                         lf_print_owner(y->v_owner);
2381                         printf("\n");
2382                 }
2383 #endif
2384                 LIST_REMOVE(e, e_outlink);
2385                 LIST_REMOVE(e, e_inlink);
2386                 free(e, M_LOCKF);
2387         }
2388 }
2389
2390 /*
2391  * Allocate a new vertex, growing the graph's vertex array and index
2392  * buffer if necessary (the M_WAITOK allocations cannot fail).
2393  */
2394 static struct owner_vertex *
2395 graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo)
2396 {
2397         struct owner_vertex *v;
2398
2399         sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2400
2401         v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK);
2402         if (g->g_size == g->g_space) {
2403                 g->g_vertices = realloc(g->g_vertices,
2404                     2 * g->g_space * sizeof(struct owner_vertex *),
2405                     M_LOCKF, M_WAITOK);
2406                 free(g->g_indexbuf, M_LOCKF);
2407                 g->g_indexbuf = malloc(2 * g->g_space * sizeof(int),
2408                     M_LOCKF, M_WAITOK);
2409                 g->g_space = 2 * g->g_space;
2410         }
2411         v->v_order = g->g_size;
2412         v->v_gen = g->g_gen;
2413         g->g_vertices[g->g_size] = v;
2414         g->g_size++;
2415
2416         LIST_INIT(&v->v_outedges);
2417         LIST_INIT(&v->v_inedges);
2418         v->v_owner = lo;
2419
2420         return (v);
2421 }
2422
2423 static void
2424 graph_free_vertex(struct owner_graph *g, struct owner_vertex *v)
2425 {
2426         struct owner_vertex *w;
2427         int i;
2428
2429         sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2430
2431         KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges"));
2432         KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges"));
2433
2434         /*
2435          * Remove from the graph's array and close up the gap,
2436          * renumbering the other vertices.
2437          */
2438         for (i = v->v_order + 1; i < g->g_size; i++) {
2439                 w = g->g_vertices[i];
2440                 w->v_order--;
2441                 g->g_vertices[i - 1] = w;
2442         }
2443         g->g_size--;
2444
2445         free(v, M_LOCKF);
2446 }
2447
2448 static struct owner_graph *
2449 graph_init(struct owner_graph *g)
2450 {
2451
2452         g->g_vertices = malloc(10 * sizeof(struct owner_vertex *),
2453             M_LOCKF, M_WAITOK);
2454         g->g_size = 0;
2455         g->g_space = 10;
2456         g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK);
2457         g->g_gen = 0;
2458
2459         return (g);
2460 }
2461
2462 #ifdef LOCKF_DEBUG
2463 /*
2464  * Print a description of a lock owner.
2465  */
2466 static void
2467 lf_print_owner(struct lock_owner *lo)
2468 {
2469
2470         if (lo->lo_flags & F_REMOTE) {
2471                 printf("remote pid %d, system %d",
2472                     lo->lo_pid, lo->lo_sysid);
2473         } else if (lo->lo_flags & F_FLOCK) {
2474                 printf("file %p", lo->lo_id);
2475         } else {
2476                 printf("local pid %d", lo->lo_pid);
2477         }
2478 }
2479
2480 /*
2481  * Print out a lock.
2482  */
2483 static void
2484 lf_print(char *tag, struct lockf_entry *lock)
2485 {
2486
2487         printf("%s: lock %p for ", tag, (void *)lock);
2488         lf_print_owner(lock->lf_owner);
2489         if (lock->lf_inode != (struct inode *)0)
2490                 printf(" in ino %ju on dev <%s>,",
2491                     (uintmax_t)lock->lf_inode->i_number,
2492                     devtoname(lock->lf_inode->i_dev));
2493         printf(" %s, start %jd, end ",
2494             lock->lf_type == F_RDLCK ? "shared" :
2495             lock->lf_type == F_WRLCK ? "exclusive" :
2496             lock->lf_type == F_UNLCK ? "unlock" : "unknown",
2497             (intmax_t)lock->lf_start);
2498         if (lock->lf_end == OFF_MAX)
2499                 printf("EOF");
2500         else
2501                 printf("%jd", (intmax_t)lock->lf_end);
2502         if (!LIST_EMPTY(&lock->lf_outedges))
2503                 printf(" block %p\n",
2504                     (void *)LIST_FIRST(&lock->lf_outedges)->le_to);
2505         else
2506                 printf("\n");
2507 }
2508
2509 static void
2510 lf_printlist(char *tag, struct lockf_entry *lock)
2511 {
2512         struct lockf_entry *lf, *blk;
2513         struct lockf_edge *e;
2514
2515         if (lock->lf_inode == (struct inode *)0)
2516                 return;
2517
2518         printf("%s: Lock list for ino %ju on dev <%s>:\n",
2519             tag, (uintmax_t)lock->lf_inode->i_number,
2520             devtoname(lock->lf_inode->i_dev));
2521         LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) {
2522                 printf("\tlock %p for ",(void *)lf);
2523                 lf_print_owner(lock->lf_owner);
2524                 printf(", %s, start %jd, end %jd",
2525                     lf->lf_type == F_RDLCK ? "shared" :
2526                     lf->lf_type == F_WRLCK ? "exclusive" :
2527                     lf->lf_type == F_UNLCK ? "unlock" :
2528                     "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
2529                 LIST_FOREACH(e, &lf->lf_outedges, le_outlink) {
2530                         blk = e->le_to;
2531                         printf("\n\t\tlock request %p for ", (void *)blk);
2532                         lf_print_owner(blk->lf_owner);
2533                         printf(", %s, start %jd, end %jd",
2534                             blk->lf_type == F_RDLCK ? "shared" :
2535                             blk->lf_type == F_WRLCK ? "exclusive" :
2536                             blk->lf_type == F_UNLCK ? "unlock" :
2537                             "unknown", (intmax_t)blk->lf_start,
2538                             (intmax_t)blk->lf_end);
2539                         if (!LIST_EMPTY(&blk->lf_inedges))
2540                                 panic("lf_printlist: bad list");
2541                 }
2542                 printf("\n");
2543         }
2544 }
2545 #endif /* LOCKF_DEBUG */