2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
5 * Authors: Doug Rabson <dfr@rabson.org>
6 * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
33 * This code is derived from software contributed to Berkeley by
34 * Scooter Morris at Genentech Inc.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
60 * @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94
63 #include <sys/cdefs.h>
64 __FBSDID("$FreeBSD$");
66 #include "opt_debug_lockf.h"
68 #include <sys/param.h>
69 #include <sys/systm.h>
72 #include <sys/kernel.h>
73 #include <sys/limits.h>
75 #include <sys/mount.h>
76 #include <sys/mutex.h>
81 #include <sys/unistd.h>
83 #include <sys/vnode.h>
84 #include <sys/malloc.h>
85 #include <sys/fcntl.h>
86 #include <sys/lockf.h>
87 #include <sys/taskqueue.h>
90 #include <sys/sysctl.h>
92 static int lockf_debug = 0; /* control debug output */
93 SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
96 static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");
100 struct owner_vertex_list;
103 #define NOLOCKF (struct lockf_entry *)0
106 static void lf_init(void *);
107 static int lf_hash_owner(caddr_t, struct vnode *, struct flock *, int);
108 static int lf_owner_matches(struct lock_owner *, caddr_t, struct flock *,
110 static struct lockf_entry *
111 lf_alloc_lock(struct lock_owner *);
112 static int lf_free_lock(struct lockf_entry *);
113 static int lf_clearlock(struct lockf *, struct lockf_entry *);
114 static int lf_overlaps(struct lockf_entry *, struct lockf_entry *);
115 static int lf_blocks(struct lockf_entry *, struct lockf_entry *);
116 static void lf_free_edge(struct lockf_edge *);
117 static struct lockf_edge *
119 static void lf_alloc_vertex(struct lockf_entry *);
120 static int lf_add_edge(struct lockf_entry *, struct lockf_entry *);
121 static void lf_remove_edge(struct lockf_edge *);
122 static void lf_remove_outgoing(struct lockf_entry *);
123 static void lf_remove_incoming(struct lockf_entry *);
124 static int lf_add_outgoing(struct lockf *, struct lockf_entry *);
125 static int lf_add_incoming(struct lockf *, struct lockf_entry *);
126 static int lf_findoverlap(struct lockf_entry **, struct lockf_entry *,
128 static struct lockf_entry *
129 lf_getblock(struct lockf *, struct lockf_entry *);
130 static int lf_getlock(struct lockf *, struct lockf_entry *, struct flock *);
131 static void lf_insert_lock(struct lockf *, struct lockf_entry *);
132 static void lf_wakeup_lock(struct lockf *, struct lockf_entry *);
133 static void lf_update_dependancies(struct lockf *, struct lockf_entry *,
134 int all, struct lockf_entry_list *);
135 static void lf_set_start(struct lockf *, struct lockf_entry *, off_t,
136 struct lockf_entry_list*);
137 static void lf_set_end(struct lockf *, struct lockf_entry *, off_t,
138 struct lockf_entry_list*);
139 static int lf_setlock(struct lockf *, struct lockf_entry *,
140 struct vnode *, void **cookiep);
141 static int lf_cancel(struct lockf *, struct lockf_entry *, void *);
142 static void lf_split(struct lockf *, struct lockf_entry *,
143 struct lockf_entry *, struct lockf_entry_list *);
145 static int graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
146 struct owner_vertex_list *path);
147 static void graph_check(struct owner_graph *g, int checkorder);
148 static void graph_print_vertices(struct owner_vertex_list *set);
150 static int graph_delta_forward(struct owner_graph *g,
151 struct owner_vertex *x, struct owner_vertex *y,
152 struct owner_vertex_list *delta);
153 static int graph_delta_backward(struct owner_graph *g,
154 struct owner_vertex *x, struct owner_vertex *y,
155 struct owner_vertex_list *delta);
156 static int graph_add_indices(int *indices, int n,
157 struct owner_vertex_list *set);
158 static int graph_assign_indices(struct owner_graph *g, int *indices,
159 int nextunused, struct owner_vertex_list *set);
160 static int graph_add_edge(struct owner_graph *g,
161 struct owner_vertex *x, struct owner_vertex *y);
162 static void graph_remove_edge(struct owner_graph *g,
163 struct owner_vertex *x, struct owner_vertex *y);
164 static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g,
165 struct lock_owner *lo);
166 static void graph_free_vertex(struct owner_graph *g,
167 struct owner_vertex *v);
168 static struct owner_graph * graph_init(struct owner_graph *g);
170 static void lf_print(char *, struct lockf_entry *);
171 static void lf_printlist(char *, struct lockf_entry *);
172 static void lf_print_owner(struct lock_owner *);
176 * This structure is used to keep track of both local and remote lock
177 * owners. The lf_owner field of the struct lockf_entry points back at
178 * the lock owner structure. Each possible lock owner (local proc for
179 * POSIX fcntl locks, local file for BSD flock locks or <pid,sysid>
180 * pair for remote locks) is represented by a unique instance of
181 * struct lock_owner.
183 * If a lock owner has a lock that blocks some other lock or a lock
184 * that is waiting for some other lock, it also has a vertex in the
185 * owner graph below.
187 * Locking key:
188 * (s) locked by state->ls_lock
189 * (S) locked by lf_lock_states_lock
190 * (g) locked by lf_owner_graph_lock
191 * (c) const until freeing
192 * (l) locked by the lock_owner_chain lock (lf_lock_owners[]->lock)
193 #define LOCK_OWNER_HASH_SIZE 256
196 LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */
197 int lo_refs; /* (l) Number of locks referring to this */
198 int lo_flags; /* (c) Flags passed to lf_advlock */
199 caddr_t lo_id; /* (c) Id value passed to lf_advlock */
200 pid_t lo_pid; /* (c) Process Id of the lock owner */
201 int lo_sysid; /* (c) System Id of the lock owner */
202 int lo_hash; /* (c) Used to lock the appropriate chain */
203 struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */
206 LIST_HEAD(lock_owner_list, lock_owner);
208 struct lock_owner_chain {
210 struct lock_owner_list list;
213 static struct sx lf_lock_states_lock;
214 static struct lockf_list lf_lock_states; /* (S) */
215 static struct lock_owner_chain lf_lock_owners[LOCK_OWNER_HASH_SIZE];
218 * Structures for deadlock detection.
220 * We have two types of directed graph: the first is the set of locks,
221 * both active and pending on a vnode. Within this graph, active locks
222 * are terminal nodes in the graph (i.e. have no out-going
223 * edges). Pending locks have out-going edges to each blocking active
224 * lock that prevents the lock from being granted and also to each
225 * older pending lock that would block them if it was active. The
226 * graph for each vnode is naturally acyclic; new edges are only ever
227 * added to or from new nodes (either new pending locks which only add
228 * out-going edges or new active locks which only add in-coming edges)
229 * therefore they cannot create loops in the lock graph.
231 * The second graph is a global graph of lock owners. Each lock owner
232 * is a vertex in that graph and an edge is added to the graph
233 * whenever an edge is added to a vnode graph, with end points
234 * corresponding to owner of the new pending lock and the owner of the
235 * lock upon which it waits. In order to prevent deadlock, we only add
236 * an edge to this graph if the new edge would not create a cycle.
238 * The lock owner graph is topologically sorted, i.e. if a node has
239 * any outgoing edges, then it has an order strictly less than any
240 * node to which it has an outgoing edge. We preserve this ordering
241 * (and detect cycles) on edge insertion using Algorithm PK from the
242 * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic
243 * Graphs" (ACM Journal of Experimental Algorithmics, Vol 11, Article
244 * 1.7).
245 */
249 LIST_ENTRY(owner_edge) e_outlink; /* (g) link from's out-edge list */
250 LIST_ENTRY(owner_edge) e_inlink; /* (g) link to's in-edge list */
251 int e_refs; /* (g) number of times added */
252 struct owner_vertex *e_from; /* (c) out-going from here */
253 struct owner_vertex *e_to; /* (c) in-coming to here */
255 LIST_HEAD(owner_edge_list, owner_edge);
257 struct owner_vertex {
258 TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */
259 uint32_t v_gen; /* (g) workspace for edge insertion */
260 int v_order; /* (g) order of vertex in graph */
261 struct owner_edge_list v_outedges;/* (g) list of out-edges */
262 struct owner_edge_list v_inedges; /* (g) list of in-edges */
263 struct lock_owner *v_owner; /* (c) corresponding lock owner */
265 TAILQ_HEAD(owner_vertex_list, owner_vertex);
268 struct owner_vertex** g_vertices; /* (g) pointers to vertices */
269 int g_size; /* (g) number of vertices */
270 int g_space; /* (g) space allocated for vertices */
271 int *g_indexbuf; /* (g) workspace for loop detection */
272 uint32_t g_gen; /* (g) increment when re-ordering */
275 static struct sx lf_owner_graph_lock;
276 static struct owner_graph lf_owner_graph;
279 * Initialise various structures and locks.
286 sx_init(&lf_lock_states_lock, "lock states lock");
287 LIST_INIT(&lf_lock_states);
289 for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) {
290 sx_init(&lf_lock_owners[i].lock, "lock owners lock");
291 LIST_INIT(&lf_lock_owners[i].list);
294 sx_init(&lf_owner_graph_lock, "owner graph lock");
295 graph_init(&lf_owner_graph);
297 SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL);
300 * Generate a hash value for a lock owner.
303 lf_hash_owner(caddr_t id, struct vnode *vp, struct flock *fl, int flags)
307 if (flags & F_REMOTE) {
308 h = HASHSTEP(0, fl->l_pid);
309 h = HASHSTEP(h, fl->l_sysid);
310 } else if (flags & F_FLOCK) {
311 h = ((uintptr_t) id) >> 7;
313 h = ((uintptr_t) vp) >> 7;
316 return (h % LOCK_OWNER_HASH_SIZE);
320 * Return true if a lock owner matches the details passed to lf_advlock.
324 lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl,
327 if (flags & F_REMOTE) {
328 return lo->lo_pid == fl->l_pid
329 && lo->lo_sysid == fl->l_sysid;
331 return lo->lo_id == id;
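/*
 * Illustrative sketch (not compiled): the typical lookup pattern that
 * combines lf_hash_owner() and lf_owner_matches(), mirroring the
 * owner lookup performed in lf_advlockasync() below.  A real caller
 * must take a reference (as lf_advlockasync() does) before dropping
 * the chain lock.
 */
#if 0
static struct lock_owner *
example_find_owner(caddr_t id, struct vnode *vp, struct flock *fl, int flags)
{
	struct lock_owner *lo;
	int hash;

	hash = lf_hash_owner(id, vp, fl, flags);
	sx_xlock(&lf_lock_owners[hash].lock);
	LIST_FOREACH(lo, &lf_lock_owners[hash].list, lo_link)
		if (lf_owner_matches(lo, id, fl, flags))
			break;		/* lo is NULL if no existing owner */
	sx_xunlock(&lf_lock_owners[hash].lock);
	return (lo);
}
#endif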
335 static struct lockf_entry *
336 lf_alloc_lock(struct lock_owner *lo)
338 struct lockf_entry *lf;
340 lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO);
344 printf("Allocated lock %p\n", lf);
347 sx_xlock(&lf_lock_owners[lo->lo_hash].lock);
349 sx_xunlock(&lf_lock_owners[lo->lo_hash].lock);
357 lf_free_lock(struct lockf_entry *lock)
359 struct sx *chainlock;
361 KASSERT(lock->lf_refs > 0, ("lockf_entry negative ref count %p", lock));
362 if (--lock->lf_refs > 0)
365 * Adjust the lock_owner reference count and
366 * reclaim the entry if this is the last lock for this owner.
369 struct lock_owner *lo = lock->lf_owner;
371 KASSERT(LIST_EMPTY(&lock->lf_outedges),
372 ("freeing lock with dependencies"));
373 KASSERT(LIST_EMPTY(&lock->lf_inedges),
374 ("freeing lock with dependants"));
375 chainlock = &lf_lock_owners[lo->lo_hash].lock;
377 KASSERT(lo->lo_refs > 0, ("lock owner refcount"));
379 if (lo->lo_refs == 0) {
382 printf("lf_free_lock: freeing lock owner %p\n",
386 sx_xlock(&lf_owner_graph_lock);
387 graph_free_vertex(&lf_owner_graph,
389 sx_xunlock(&lf_owner_graph_lock);
391 LIST_REMOVE(lo, lo_link);
395 printf("Freed lock owner %p\n", lo);
398 sx_unlock(chainlock);
400 if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) {
401 vrele(lock->lf_vnode);
402 lock->lf_vnode = NULL;
406 printf("Freed lock %p\n", lock);
413 * Advisory record locking support
416 lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
420 struct flock *fl = ap->a_fl;
421 struct lockf_entry *lock;
422 struct vnode *vp = ap->a_vp;
423 caddr_t id = ap->a_id;
424 int flags = ap->a_flags;
426 struct lock_owner *lo;
427 off_t start, end, oadd;
431 * Handle the F_UNLCKSYS case first - no need to mess about
432 * creating a lock owner for this one.
434 if (ap->a_op == F_UNLCKSYS) {
435 lf_clearremotesys(fl->l_sysid);
440 * Convert the flock structure into a start and end.
442 switch (fl->l_whence) {
446 * Caller is responsible for adding any necessary offset
447 * when SEEK_CUR is used.
453 if (size > OFF_MAX ||
454 (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
456 start = size + fl->l_start;
471 } else if (fl->l_len == 0) {
474 oadd = fl->l_len - 1;
475 if (oadd > OFF_MAX - start)
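/*
 * Worked example of the conversion above (illustrative): with
 * l_whence = SEEK_SET, l_start = 100 and l_len = 10 the lock covers
 * bytes [100..109]; l_len = 0 means "to end of file", giving
 * [100..OFF_MAX]; and, per POSIX, a negative l_len covers the l_len
 * bytes ending just before l_start, so l_start = 100, l_len = -10
 * gives [90..99].
 */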
483 * Avoid the common case of unlocking when inode has no locks.
485 if (ap->a_op != F_SETLK && (*statep) == NULL) {
487 if ((*statep) == NULL) {
488 fl->l_type = F_UNLCK;
496 * Map our arguments to an existing lock owner or create one
497 * if this is the first time we have seen this owner.
499 hash = lf_hash_owner(id, vp, fl, flags);
500 sx_xlock(&lf_lock_owners[hash].lock);
501 LIST_FOREACH(lo, &lf_lock_owners[hash].list, lo_link)
502 if (lf_owner_matches(lo, id, fl, flags))
506 * We initialise the new lock owner with a reference
507 * count which matches the new lockf_entry
508 * structure created below.
510 lo = malloc(sizeof(struct lock_owner), M_LOCKF,
514 printf("Allocated lock owner %p\n", lo);
518 lo->lo_flags = flags;
521 if (flags & F_REMOTE) {
522 lo->lo_pid = fl->l_pid;
523 lo->lo_sysid = fl->l_sysid;
524 } else if (flags & F_FLOCK) {
528 struct proc *p = (struct proc *) id;
529 lo->lo_pid = p->p_pid;
532 lo->lo_vertex = NULL;
535 if (lockf_debug & 1) {
536 printf("lf_advlockasync: new lock owner %p ", lo);
542 LIST_INSERT_HEAD(&lf_lock_owners[hash].list, lo, lo_link);
545 * We have seen this lock owner before, increase its
546 * reference count to account for the new lockf_entry
547 * structure we create below.
551 sx_xunlock(&lf_lock_owners[hash].lock);
554 * Create the lockf structure. We initialise the lf_owner
555 * field here instead of in lf_alloc_lock() to avoid paying
556 * the lf_lock_owners_lock tax twice.
558 lock = lf_alloc_lock(NULL);
560 lock->lf_start = start;
564 if (flags & F_REMOTE) {
566 * For remote locks, the caller may release its ref to
567 * the vnode at any time - we have to ref it here to
568 * prevent it from being recycled unexpectedly.
573 lock->lf_type = fl->l_type;
574 LIST_INIT(&lock->lf_outedges);
575 LIST_INIT(&lock->lf_inedges);
576 lock->lf_async_task = ap->a_task;
577 lock->lf_flags = ap->a_flags;
580 * Do the requested operation. First find our state structure
581 * and create a new one if necessary - the caller's *statep
582 * variable and the state's ls_threads count is protected by
583 * the vnode interlock.
586 if (VN_IS_DOOMED(vp)) {
593 * Allocate a state structure if necessary.
601 ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO);
602 sx_init(&ls->ls_lock, "ls_lock");
603 LIST_INIT(&ls->ls_active);
604 LIST_INIT(&ls->ls_pending);
607 sx_xlock(&lf_lock_states_lock);
608 LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link);
609 sx_xunlock(&lf_lock_states_lock);
612 * Cope if we lost a race with some other thread while
613 * trying to allocate memory.
616 if (VN_IS_DOOMED(vp)) {
618 sx_xlock(&lf_lock_states_lock);
619 LIST_REMOVE(ls, ls_link);
620 sx_xunlock(&lf_lock_states_lock);
621 sx_destroy(&ls->ls_lock);
626 if ((*statep) == NULL) {
627 state = *statep = ls;
631 MPASS(state->ls_threads >= 0);
635 sx_xlock(&lf_lock_states_lock);
636 LIST_REMOVE(ls, ls_link);
637 sx_xunlock(&lf_lock_states_lock);
638 sx_destroy(&ls->ls_lock);
642 MPASS(state->ls_threads >= 0);
647 sx_xlock(&state->ls_lock);
649 * Recheck the doomed vnode after state->ls_lock is
650 * locked. lf_purgelocks() requires that no new threads add
651 * pending locks when vnode is marked by VIRF_DOOMED flag.
653 if (VN_IS_DOOMED(vp)) {
655 MPASS(state->ls_threads > 0);
659 sx_xunlock(&state->ls_lock);
666 error = lf_setlock(state, lock, vp, ap->a_cookiep);
670 error = lf_clearlock(state, lock);
675 error = lf_getlock(state, lock, fl);
681 error = lf_cancel(state, lock, *ap->a_cookiep);
695 * Check for some "can't happen" conditions: the active
696 * lock list becoming disordered or containing mutually
697 * blocking locks. We also check the pending list for locks
698 * which should be active (i.e. have no out-going edges).
700 LIST_FOREACH(lock, &state->ls_active, lf_link) {
701 struct lockf_entry *lf;
702 if (LIST_NEXT(lock, lf_link))
703 KASSERT((lock->lf_start
704 <= LIST_NEXT(lock, lf_link)->lf_start),
705 ("locks disordered"));
706 LIST_FOREACH(lf, &state->ls_active, lf_link) {
709 KASSERT(!lf_blocks(lock, lf),
710 ("two conflicting active locks"));
711 if (lock->lf_owner == lf->lf_owner)
712 KASSERT(!lf_overlaps(lock, lf),
713 ("two overlapping locks from same owner"));
716 LIST_FOREACH(lock, &state->ls_pending, lf_link) {
717 KASSERT(!LIST_EMPTY(&lock->lf_outedges),
718 ("pending lock which should be active"));
721 sx_xunlock(&state->ls_lock);
724 MPASS(state->ls_threads > 0);
726 if (state->ls_threads != 0) {
731 if (error == EDOOFUS) {
732 KASSERT(ap->a_op == F_SETLK, ("EDOOFUS"));
739 lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size)
741 struct vop_advlockasync_args a;
747 a.a_flags = ap->a_flags;
751 return (lf_advlockasync(&a, statep, size));
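/*
 * Illustrative sketch (not compiled): how a filesystem typically
 * wires its VOP_ADVLOCK implementation to lf_advlock(), passing a
 * pointer to its per-node lock state and the current file size.  The
 * node type and field names below are hypothetical.
 */
#if 0
static int
examplefs_advlock(struct vop_advlock_args *ap)
{
	struct examplefs_node *np = VTOEXAMPLEFS(ap->a_vp);

	return (lf_advlock(ap, &np->n_lockf, np->n_size));
}
#endif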
755 lf_purgelocks(struct vnode *vp, struct lockf **statep)
758 struct lockf_entry *lock, *nlock;
761 * For this to work correctly, the caller must ensure that no
762 * other threads enter the locking system for this vnode,
763 * e.g. by checking VIRF_DOOMED. We wake up any threads that are
764 * sleeping waiting for locks on this vnode and then free all
765 * the remaining locks.
767 KASSERT(VN_IS_DOOMED(vp),
768 ("lf_purgelocks: vp %p has not vgone yet", vp));
775 if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) {
776 KASSERT(LIST_EMPTY(&state->ls_pending),
777 ("freeing state with pending locks"));
781 MPASS(state->ls_threads >= 0);
785 sx_xlock(&state->ls_lock);
786 sx_xlock(&lf_owner_graph_lock);
787 LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) {
788 LIST_REMOVE(lock, lf_link);
789 lf_remove_outgoing(lock);
790 lf_remove_incoming(lock);
793 * If it's an async lock, we can just free it
794 * here, otherwise we let the sleeping thread
795 * free it.
797 if (lock->lf_async_task) {
800 lock->lf_flags |= F_INTR;
804 sx_xunlock(&lf_owner_graph_lock);
805 sx_xunlock(&state->ls_lock);
808 * Wait for all other threads, sleeping and otherwise, to finish.
812 while (state->ls_threads > 1)
813 msleep(state, VI_MTX(vp), 0, "purgelocks", 0);
817 * We can just free all the active locks since they
818 * will have no dependencies (we removed them all
819 * above). We don't need to bother locking since we
820 * are the last thread using this state structure.
822 KASSERT(LIST_EMPTY(&state->ls_pending),
823 ("lock pending for %p", state));
824 LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) {
825 LIST_REMOVE(lock, lf_link);
829 sx_xlock(&lf_lock_states_lock);
830 LIST_REMOVE(state, ls_link);
831 sx_xunlock(&lf_lock_states_lock);
832 sx_destroy(&state->ls_lock);
833 free(state, M_LOCKF);
837 * Return non-zero if locks 'x' and 'y' overlap.
840 lf_overlaps(struct lockf_entry *x, struct lockf_entry *y)
843 return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start);
847 * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa).
850 lf_blocks(struct lockf_entry *x, struct lockf_entry *y)
853 return x->lf_owner != y->lf_owner
854 && (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK)
855 && lf_overlaps(x, y);
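/*
 * In other words (illustrative summary): locks from the same owner
 * never block each other, two overlapping read (shared) locks never
 * block each other, and an overlapping pair from different owners
 * blocks as soon as at least one of the two is a write (exclusive)
 * lock.
 */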
859 * Allocate a lock edge from the free list
861 static struct lockf_edge *
865 return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO));
872 lf_free_edge(struct lockf_edge *e)
879 * Ensure that the lock's owner has a corresponding vertex in the owner graph.
883 lf_alloc_vertex(struct lockf_entry *lock)
885 struct owner_graph *g = &lf_owner_graph;
887 if (!lock->lf_owner->lo_vertex)
888 lock->lf_owner->lo_vertex =
889 graph_alloc_vertex(g, lock->lf_owner);
893 * Attempt to record an edge from lock x to lock y. Return EDEADLK if
894 * the new edge would cause a cycle in the owner graph.
897 lf_add_edge(struct lockf_entry *x, struct lockf_entry *y)
899 struct owner_graph *g = &lf_owner_graph;
900 struct lockf_edge *e;
904 LIST_FOREACH(e, &x->lf_outedges, le_outlink)
905 KASSERT(e->le_to != y, ("adding lock edge twice"));
909 * Make sure the two owners have entries in the owner graph.
914 error = graph_add_edge(g, x->lf_owner->lo_vertex,
915 y->lf_owner->lo_vertex);
920 LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink);
921 LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink);
929 * Remove an edge from the lock graph.
932 lf_remove_edge(struct lockf_edge *e)
934 struct owner_graph *g = &lf_owner_graph;
935 struct lockf_entry *x = e->le_from;
936 struct lockf_entry *y = e->le_to;
938 graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex);
939 LIST_REMOVE(e, le_outlink);
940 LIST_REMOVE(e, le_inlink);
947 * Remove all out-going edges from lock x.
950 lf_remove_outgoing(struct lockf_entry *x)
952 struct lockf_edge *e;
954 while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) {
960 * Remove all in-coming edges from lock x.
963 lf_remove_incoming(struct lockf_entry *x)
965 struct lockf_edge *e;
967 while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) {
973 * Walk the list of locks for the file and create an out-going edge
974 * from lock to each blocking lock.
977 lf_add_outgoing(struct lockf *state, struct lockf_entry *lock)
979 struct lockf_entry *overlap;
982 LIST_FOREACH(overlap, &state->ls_active, lf_link) {
984 * We may assume that the active list is sorted by lf_start.
987 if (overlap->lf_start > lock->lf_end)
989 if (!lf_blocks(lock, overlap))
993 * We've found a blocking lock. Add the corresponding
994 * edge to the graphs and see if it would cause a deadlock.
997 error = lf_add_edge(lock, overlap);
1000 * The only error that lf_add_edge returns is EDEADLK.
1001 * Remove any edges we added and return the error.
1004 lf_remove_outgoing(lock);
1010 * We also need to add edges to sleeping locks that block
1011 * us. This ensures that lf_wakeup_lock cannot grant two
1012 * mutually blocking locks simultaneously and also enforces a
1013 * 'first come, first served' fairness model. Note that this
1014 * only happens if we are blocked by at least one active lock
1015 * due to the call to lf_getblock in lf_setlock below.
1017 LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
1018 if (!lf_blocks(lock, overlap))
1021 * We've found a blocking lock. Add the corresponding
1022 * edge to the graphs and see if it would cause a deadlock.
1025 error = lf_add_edge(lock, overlap);
1028 * The only error that lf_add_edge returns is EDEADLK.
1029 * Remove any edges we added and return the error.
1032 lf_remove_outgoing(lock);
1041 * Walk the list of pending locks for the file and create an in-coming
1042 * edge from lock to each blocking lock.
1045 lf_add_incoming(struct lockf *state, struct lockf_entry *lock)
1047 struct lockf_entry *overlap;
1050 sx_assert(&state->ls_lock, SX_XLOCKED);
1051 if (LIST_EMPTY(&state->ls_pending))
1055 sx_xlock(&lf_owner_graph_lock);
1056 LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
1057 if (!lf_blocks(lock, overlap))
1061 * We've found a blocking lock. Add the corresponding
1062 * edge to the graphs and see if it would cause a deadlock.
1065 error = lf_add_edge(overlap, lock);
1068 * The only error that lf_add_edge returns is EDEADLK.
1069 * Remove any edges we added and return the error.
1072 lf_remove_incoming(lock);
1076 sx_xunlock(&lf_owner_graph_lock);
1081 * Insert lock into the active list, keeping list entries ordered by
1082 * increasing values of lf_start.
1085 lf_insert_lock(struct lockf *state, struct lockf_entry *lock)
1087 struct lockf_entry *lf, *lfprev;
1089 if (LIST_EMPTY(&state->ls_active)) {
1090 LIST_INSERT_HEAD(&state->ls_active, lock, lf_link);
1095 LIST_FOREACH(lf, &state->ls_active, lf_link) {
1096 if (lf->lf_start > lock->lf_start) {
1097 LIST_INSERT_BEFORE(lf, lock, lf_link);
1102 LIST_INSERT_AFTER(lfprev, lock, lf_link);
1106 * Wake up a sleeping lock and remove it from the pending list now
1107 * that all its dependencies have been resolved. The caller should
1108 * arrange for the lock to be added to the active list, adjusting any
1109 * existing locks for the same owner as needed.
1112 lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock)
1116 * Remove from ls_pending list and wake up the caller
1117 * or start the async notification, as appropriate.
1119 LIST_REMOVE(wakelock, lf_link);
1121 if (lockf_debug & 1)
1122 lf_print("lf_wakeup_lock: awakening", wakelock);
1123 #endif /* LOCKF_DEBUG */
1124 if (wakelock->lf_async_task) {
1125 taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task);
1132 * Re-check all dependent locks and remove edges to locks that we no
1133 * longer block. If 'all' is non-zero, the lock has been removed and
1134 * we must remove all the dependencies, otherwise it has simply been
1135 * reduced but remains active. Any pending locks which have been
1136 * unblocked are added to 'granted'.
1139 lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all,
1140 struct lockf_entry_list *granted)
1142 struct lockf_edge *e, *ne;
1143 struct lockf_entry *deplock;
1145 LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) {
1146 deplock = e->le_from;
1147 if (all || !lf_blocks(lock, deplock)) {
1148 sx_xlock(&lf_owner_graph_lock);
1150 sx_xunlock(&lf_owner_graph_lock);
1151 if (LIST_EMPTY(&deplock->lf_outedges)) {
1152 lf_wakeup_lock(state, deplock);
1153 LIST_INSERT_HEAD(granted, deplock, lf_link);
1160 * Set the start of an existing active lock, updating dependencies and
1161 * adding any newly woken locks to 'granted'.
1164 lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start,
1165 struct lockf_entry_list *granted)
1168 KASSERT(new_start >= lock->lf_start, ("can't increase lock"));
1169 lock->lf_start = new_start;
1170 LIST_REMOVE(lock, lf_link);
1171 lf_insert_lock(state, lock);
1172 lf_update_dependancies(state, lock, FALSE, granted);
1176 * Set the end of an existing active lock, updating dependencies and
1177 * adding any newly woken locks to 'granted'.
1180 lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end,
1181 struct lockf_entry_list *granted)
1184 KASSERT(new_end <= lock->lf_end, ("can't increase lock"));
1185 lock->lf_end = new_end;
1186 lf_update_dependancies(state, lock, FALSE, granted);
1190 * Add a lock to the active list, updating or removing any current
1191 * locks owned by the same owner and processing any pending locks that
1192 * become unblocked as a result. This code is also used for unlock
1193 * since the logic for updating existing locks is identical.
1195 * As a result of processing the new lock, we may unblock existing
1196 * pending locks as a result of downgrading/unlocking. We simply
1197 * activate the newly granted locks by looping.
1199 * Since the new lock already has its dependencies set up, we always
1200 * add it to the list (unless it's an unlock request). This may
1201 * fragment the lock list in some pathological cases but it's probably
1202 * not a real problem.
1205 lf_activate_lock(struct lockf *state, struct lockf_entry *lock)
1207 struct lockf_entry *overlap, *lf;
1208 struct lockf_entry_list granted;
1211 LIST_INIT(&granted);
1212 LIST_INSERT_HEAD(&granted, lock, lf_link);
1214 while (!LIST_EMPTY(&granted)) {
1215 lock = LIST_FIRST(&granted);
1216 LIST_REMOVE(lock, lf_link);
1219 * Skip over locks owned by other processes. Handle
1220 * any locks that overlap and are owned by ourselves.
1222 overlap = LIST_FIRST(&state->ls_active);
1224 ovcase = lf_findoverlap(&overlap, lock, SELF);
1227 if (ovcase && (lockf_debug & 2)) {
1228 printf("lf_setlock: overlap %d", ovcase);
1229 lf_print("", overlap);
1235 * 1) overlap == lock
1236 * 2) overlap contains lock
1237 * 3) lock contains overlap
1238 * 4) overlap starts before lock
1239 * 5) overlap ends after lock
1242 case 0: /* no overlap */
1245 case 1: /* overlap == lock */
1247 * We have already setup the
1248 * dependants for the new lock, taking
1249 * into account a possible downgrade
1250 * or unlock. Remove the old lock.
1252 LIST_REMOVE(overlap, lf_link);
1253 lf_update_dependancies(state, overlap, TRUE,
1255 lf_free_lock(overlap);
1258 case 2: /* overlap contains lock */
1260 * Just split the existing lock.
1262 lf_split(state, overlap, lock, &granted);
1265 case 3: /* lock contains overlap */
1267 * Delete the overlap and advance to
1268 * the next entry in the list.
1270 lf = LIST_NEXT(overlap, lf_link);
1271 LIST_REMOVE(overlap, lf_link);
1272 lf_update_dependancies(state, overlap, TRUE,
1274 lf_free_lock(overlap);
1278 case 4: /* overlap starts before lock */
1280 * Just update the overlap end and move on.
1283 lf_set_end(state, overlap, lock->lf_start - 1,
1285 overlap = LIST_NEXT(overlap, lf_link);
1288 case 5: /* overlap ends after lock */
1290 * Change the start of overlap and re-insert.
1293 lf_set_start(state, overlap, lock->lf_end + 1,
1300 if (lockf_debug & 1) {
1301 if (lock->lf_type != F_UNLCK)
1302 lf_print("lf_activate_lock: activated", lock);
1304 lf_print("lf_activate_lock: unlocked", lock);
1305 lf_printlist("lf_activate_lock", lock);
1307 #endif /* LOCKF_DEBUG */
1308 if (lock->lf_type != F_UNLCK)
1309 lf_insert_lock(state, lock);
1314 * Cancel a pending lock request, either as a result of a signal or a
1315 * cancel request for an async lock.
1318 lf_cancel_lock(struct lockf *state, struct lockf_entry *lock)
1320 struct lockf_entry_list granted;
1323 * Note it is theoretically possible that cancelling this lock
1324 * may allow some other pending lock to become
1325 * active. Consider this case:
1327 * Owner        Action          Result          Dependencies
1329 * A:           lock [0..0]     succeeds
1330 * B:           lock [2..2]     succeeds
1331 * C:           lock [1..2]     blocked         C->B
1332 * D:           lock [0..1]     blocked         C->B,D->A,D->C
1333 * A:           unlock [0..0]                   C->B,D->C
1334 * C:           cancel [1..2]
1337 LIST_REMOVE(lock, lf_link);
1340 * Removing out-going edges is simple.
1342 sx_xlock(&lf_owner_graph_lock);
1343 lf_remove_outgoing(lock);
1344 sx_xunlock(&lf_owner_graph_lock);
1347 * Removing in-coming edges may allow some other lock to
1348 * become active - we use lf_update_dependancies to figure out what needs to be done.
1351 LIST_INIT(&granted);
1352 lf_update_dependancies(state, lock, TRUE, &granted);
1356 * Feed any newly active locks to lf_activate_lock.
1358 while (!LIST_EMPTY(&granted)) {
1359 lock = LIST_FIRST(&granted);
1360 LIST_REMOVE(lock, lf_link);
1361 lf_activate_lock(state, lock);
1366 * Set a byte-range lock.
1369 lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
1372 static char lockstr[] = "lockf";
1373 int error, priority, stops_deferred;
1376 if (lockf_debug & 1)
1377 lf_print("lf_setlock", lock);
1378 #endif /* LOCKF_DEBUG */
1384 if (lock->lf_type == F_WRLCK)
1386 if (!(lock->lf_flags & F_NOINTR))
1389 * Scan lock list for this file looking for locks that would block us.
1391 if (lf_getblock(state, lock)) {
1393 * Free the structure and return if nonblocking.
1395 if ((lock->lf_flags & F_WAIT) == 0
1396 && lock->lf_async_task == NULL) {
1403 * For flock type locks, we must first remove
1404 * any shared locks that we hold before we sleep
1405 * waiting for an exclusive lock.
1407 if ((lock->lf_flags & F_FLOCK) &&
1408 lock->lf_type == F_WRLCK) {
1409 lock->lf_type = F_UNLCK;
1410 lf_activate_lock(state, lock);
1411 lock->lf_type = F_WRLCK;
1415 * We are blocked. Create edges to each blocking lock,
1416 * checking for deadlock using the owner graph. For
1417 * simplicity, we run deadlock detection for all
1418 * locks, posix and otherwise.
1420 sx_xlock(&lf_owner_graph_lock);
1421 error = lf_add_outgoing(state, lock);
1422 sx_xunlock(&lf_owner_graph_lock);
1426 if (lockf_debug & 1)
1427 lf_print("lf_setlock: deadlock", lock);
1434 * We have added edges to everything that blocks
1435 * us. Sleep until they all go away.
1437 LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link);
1439 if (lockf_debug & 1) {
1440 struct lockf_edge *e;
1441 LIST_FOREACH(e, &lock->lf_outedges, le_outlink) {
1442 lf_print("lf_setlock: blocking on", e->le_to);
1443 lf_printlist("lf_setlock", e->le_to);
1446 #endif /* LOCKF_DEBUG */
1448 if ((lock->lf_flags & F_WAIT) == 0) {
1450 * The caller requested async notification -
1451 * this callback happens when the blocking
1452 * lock is released, allowing the caller to
1453 * make another attempt to take the lock.
1455 *cookiep = (void *) lock;
1456 error = EINPROGRESS;
1461 stops_deferred = sigdeferstop(SIGDEFERSTOP_ERESTART);
1462 error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0);
1463 sigallowstop(stops_deferred);
1464 if (lf_free_lock(lock)) {
1470 * We may have been awakened by a signal and/or by a
1471 * debugger continuing us (in which cases we must
1472 * remove our lock graph edges) and/or by another
1473 * process releasing a lock (in which case our edges
1474 * have already been removed and we have been moved to
1475 * the active list). We may also have been woken by
1476 * lf_purgelocks which we report to the caller as
1477 * EINTR. In that case, lf_purgelocks will have
1478 * removed our lock graph edges.
1480 * Note that it is possible to receive a signal after
1481 * we were successfully woken (and moved to the active
1482 * list) but before we resumed execution. In this
1483 * case, our lf_outedges list will be clear. We
1484 * pretend there was no error.
1486 * Note also, if we have been sleeping long enough, we
1487 * may now have incoming edges from some newer lock
1488 * which is waiting behind us in the queue.
1490 if (lock->lf_flags & F_INTR) {
1495 if (LIST_EMPTY(&lock->lf_outedges)) {
1498 lf_cancel_lock(state, lock);
1502 if (lockf_debug & 1) {
1503 lf_print("lf_setlock: granted", lock);
1509 * It looks like we are going to grant the lock. First add
1510 * edges from any currently pending lock that the new lock would block.
1513 error = lf_add_incoming(state, lock);
1516 if (lockf_debug & 1)
1517 lf_print("lf_setlock: deadlock", lock);
1524 * No blocks!! Add the lock. Note that we will
1525 * downgrade or upgrade any overlapping locks this
1526 * process already owns.
1528 lf_activate_lock(state, lock);
1535 * Remove a byte-range lock on an inode.
1537 * Generally, find the lock (or an overlap to that lock)
1538 * and remove it (or shrink it), then wake up anyone we can.
1541 lf_clearlock(struct lockf *state, struct lockf_entry *unlock)
1543 struct lockf_entry *overlap;
1545 overlap = LIST_FIRST(&state->ls_active);
1547 if (overlap == NOLOCKF)
1550 if (unlock->lf_type != F_UNLCK)
1551 panic("lf_clearlock: bad type");
1552 if (lockf_debug & 1)
1553 lf_print("lf_clearlock", unlock);
1554 #endif /* LOCKF_DEBUG */
1556 lf_activate_lock(state, unlock);
1562 * Check whether there is a blocking lock, and if so return its details in '*fl'.
1566 lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl)
1568 struct lockf_entry *block;
1571 if (lockf_debug & 1)
1572 lf_print("lf_getlock", lock);
1573 #endif /* LOCKF_DEBUG */
1575 if ((block = lf_getblock(state, lock))) {
1576 fl->l_type = block->lf_type;
1577 fl->l_whence = SEEK_SET;
1578 fl->l_start = block->lf_start;
1579 if (block->lf_end == OFF_MAX)
1582 fl->l_len = block->lf_end - block->lf_start + 1;
1583 fl->l_pid = block->lf_owner->lo_pid;
1584 fl->l_sysid = block->lf_owner->lo_sysid;
1586 fl->l_type = F_UNLCK;
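/*
 * Userland view of the above (illustrative sketch, not compiled): an
 * F_GETLK caller fills in the lock it would like to take and gets
 * back either F_UNLCK or the details of one blocking lock.
 */
#if 0
static void
example_probe_lock(int fd)
{
	struct flock fl;

	memset(&fl, 0, sizeof(fl));
	fl.l_type = F_WRLCK;		/* the lock we would like to take */
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 0;			/* to end of file */
	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
		printf("blocked by pid %d, start %jd, len %jd\n",
		    (int)fl.l_pid, (intmax_t)fl.l_start, (intmax_t)fl.l_len);
}
#endif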
1592 * Cancel an async lock request.
1595 lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie)
1597 struct lockf_entry *reallock;
1600 * We need to match this request with an existing lock request.
1603 LIST_FOREACH(reallock, &state->ls_pending, lf_link) {
1604 if ((void *) reallock == cookie) {
1606 * Double-check that this lock looks right
1607 * (maybe use a rolling ID for the cancel cookie?).
1610 if (!(reallock->lf_vnode == lock->lf_vnode
1611 && reallock->lf_start == lock->lf_start
1612 && reallock->lf_end == lock->lf_end)) {
1617 * Make sure this lock was async and then just
1618 * remove it from its wait lists.
1620 if (!reallock->lf_async_task) {
1625 * Note that since any other thread must take
1626 * state->ls_lock before it can possibly
1627 * trigger the async callback, we are safe
1628 * from a race with lf_wakeup_lock, i.e. we
1629 * can free the lock (actually our caller does this).
1632 lf_cancel_lock(state, reallock);
1638 * We didn't find a matching lock - not much we can do here.
1644 * Walk the list of locks for an inode and
1645 * return the first blocking lock.
1647 static struct lockf_entry *
1648 lf_getblock(struct lockf *state, struct lockf_entry *lock)
1650 struct lockf_entry *overlap;
1652 LIST_FOREACH(overlap, &state->ls_active, lf_link) {
1654 * We may assume that the active list is sorted by lf_start.
1657 if (overlap->lf_start > lock->lf_end)
1659 if (!lf_blocks(lock, overlap))
1667 * Walk the list of locks for an inode to find an overlapping lock (if
1668 * any) and return a classification of that overlap.
1671 * *overlap The place in the lock list to start looking
1672 * lock The lock which is being tested
1673 * type Pass 'SELF' to test only locks with the same
1674 * owner as lock, or 'OTHERS' to test only locks
1675 * with a different owner
1677 * Returns one of six values:
1678 * 0) no overlap
1679 * 1) overlap == lock
1680 * 2) overlap contains lock
1681 * 3) lock contains overlap
1682 * 4) overlap starts before lock
1683 * 5) overlap ends after lock
1685 * If there is an overlapping lock, '*overlap' is set to point at the
1686 * overlapping lock.
1688 * NOTE: this returns only the FIRST overlapping lock. There
1689 * may be more than one.
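/*
 * Worked example (illustrative): testing lock [10..19] against an
 * existing lock 'overlap' gives case 1 for overlap [10..19] (equal),
 * case 2 for [5..25] (overlap contains lock), case 3 for [12..15]
 * (lock contains overlap), case 4 for [5..15] (overlap starts before
 * lock) and case 5 for [15..25] (overlap ends after lock); an
 * existing lock at [25..30] yields case 0, no overlap.
 */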
1692 lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type)
1694 struct lockf_entry *lf;
1698 if ((*overlap) == NOLOCKF) {
1702 if (lockf_debug & 2)
1703 lf_print("lf_findoverlap: looking for overlap in", lock);
1704 #endif /* LOCKF_DEBUG */
1705 start = lock->lf_start;
1710 if (lf->lf_start > end)
1712 if (((type & SELF) && lf->lf_owner != lock->lf_owner) ||
1713 ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) {
1714 *overlap = LIST_NEXT(lf, lf_link);
1718 if (lockf_debug & 2)
1719 lf_print("\tchecking", lf);
1720 #endif /* LOCKF_DEBUG */
1722 * OK, check for overlap
1726 * 1) overlap == lock
1727 * 2) overlap contains lock
1728 * 3) lock contains overlap
1729 * 4) overlap starts before lock
1730 * 5) overlap ends after lock
1732 if (start > lf->lf_end) {
1735 if (lockf_debug & 2)
1736 printf("no overlap\n");
1737 #endif /* LOCKF_DEBUG */
1738 *overlap = LIST_NEXT(lf, lf_link);
1741 if (lf->lf_start == start && lf->lf_end == end) {
1744 if (lockf_debug & 2)
1745 printf("overlap == lock\n");
1746 #endif /* LOCKF_DEBUG */
1750 if (lf->lf_start <= start && lf->lf_end >= end) {
1753 if (lockf_debug & 2)
1754 printf("overlap contains lock\n");
1755 #endif /* LOCKF_DEBUG */
1759 if (start <= lf->lf_start && end >= lf->lf_end) {
1762 if (lockf_debug & 2)
1763 printf("lock contains overlap\n");
1764 #endif /* LOCKF_DEBUG */
1768 if (lf->lf_start < start && lf->lf_end >= start) {
1771 if (lockf_debug & 2)
1772 printf("overlap starts before lock\n");
1773 #endif /* LOCKF_DEBUG */
1777 if (lf->lf_start > start && lf->lf_end > end) {
1780 if (lockf_debug & 2)
1781 printf("overlap ends after lock\n");
1782 #endif /* LOCKF_DEBUG */
1786 panic("lf_findoverlap: default");
1792 * Split the existing 'lock1', based on the extent of the lock
1793 * described by 'lock2'. The existing lock should cover 'lock2'
1794 * entirely.
1796 * Any pending locks which have been unblocked are added to
1797 * 'granted'.
1800 lf_split(struct lockf *state, struct lockf_entry *lock1,
1801 struct lockf_entry *lock2, struct lockf_entry_list *granted)
1803 struct lockf_entry *splitlock;
1806 if (lockf_debug & 2) {
1807 lf_print("lf_split", lock1);
1808 lf_print("splitting from", lock2);
1810 #endif /* LOCKF_DEBUG */
1812 * Check to see if we don't need to split at all.
1814 if (lock1->lf_start == lock2->lf_start) {
1815 lf_set_start(state, lock1, lock2->lf_end + 1, granted);
1818 if (lock1->lf_end == lock2->lf_end) {
1819 lf_set_end(state, lock1, lock2->lf_start - 1, granted);
1823 * Make a new lock consisting of the last part of
1824 * the encompassing lock.
1826 splitlock = lf_alloc_lock(lock1->lf_owner);
1827 memcpy(splitlock, lock1, sizeof *splitlock);
1828 splitlock->lf_refs = 1;
1829 if (splitlock->lf_flags & F_REMOTE)
1830 vref(splitlock->lf_vnode);
1833 * This cannot cause a deadlock since any edges we would add
1834 * to splitlock already exist in lock1. We must be sure to add
1835 * necessary dependencies to splitlock before we reduce lock1
1836 * otherwise we may accidentally grant a pending lock that
1837 * was blocked by the tail end of lock1.
1839 splitlock->lf_start = lock2->lf_end + 1;
1840 LIST_INIT(&splitlock->lf_outedges);
1841 LIST_INIT(&splitlock->lf_inedges);
1842 lf_add_incoming(state, splitlock);
1844 lf_set_end(state, lock1, lock2->lf_start - 1, granted);
1847 * OK, now link it in
1849 lf_insert_lock(state, splitlock);
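/*
 * Worked example (illustrative): if lock1 covers [0..99] and lock2
 * describes an unlock of [40..49], neither end matches, so lock1 is
 * trimmed to [0..39] and a new entry covering [50..99] is created and
 * inserted; had lock2 started at 0 or ended at 99, the existing entry
 * would simply have been shrunk by lf_set_start() or lf_set_end().
 */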
1852 struct lockdesc {
1853 STAILQ_ENTRY(lockdesc) link;
1854 struct vnode *vp;
1855 struct flock fl;
1856 };
1857 STAILQ_HEAD(lockdesclist, lockdesc);
1860 lf_iteratelocks_sysid(int sysid, lf_iterator *fn, void *arg)
1863 struct lockf_entry *lf;
1864 struct lockdesc *ldesc;
1865 struct lockdesclist locks;
1869 * In order to keep the locking simple, we iterate over the
1870 * active lock lists to build a list of locks that need
1871 * releasing. We then call the iterator for each one in turn.
1873 * We take an extra reference to the vnode for the duration to
1874 * make sure it doesn't go away before we are finished.
1876 STAILQ_INIT(&locks);
1877 sx_xlock(&lf_lock_states_lock);
1878 LIST_FOREACH(ls, &lf_lock_states, ls_link) {
1879 sx_xlock(&ls->ls_lock);
1880 LIST_FOREACH(lf, &ls->ls_active, lf_link) {
1881 if (lf->lf_owner->lo_sysid != sysid)
1884 ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
1886 ldesc->vp = lf->lf_vnode;
1888 ldesc->fl.l_start = lf->lf_start;
1889 if (lf->lf_end == OFF_MAX)
1890 ldesc->fl.l_len = 0;
1893 lf->lf_end - lf->lf_start + 1;
1894 ldesc->fl.l_whence = SEEK_SET;
1895 ldesc->fl.l_type = F_UNLCK;
1896 ldesc->fl.l_pid = lf->lf_owner->lo_pid;
1897 ldesc->fl.l_sysid = sysid;
1898 STAILQ_INSERT_TAIL(&locks, ldesc, link);
1900 sx_xunlock(&ls->ls_lock);
1902 sx_xunlock(&lf_lock_states_lock);
1905 * Call the iterator function for each lock in turn. If the
1906 * iterator returns an error code, just free the rest of the
1907 * lockdesc structures.
1910 while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
1911 STAILQ_REMOVE_HEAD(&locks, link);
1913 error = fn(ldesc->vp, &ldesc->fl, arg);
1915 free(ldesc, M_LOCKF);
1922 lf_iteratelocks_vnode(struct vnode *vp, lf_iterator *fn, void *arg)
1925 struct lockf_entry *lf;
1926 struct lockdesc *ldesc;
1927 struct lockdesclist locks;
1931 * In order to keep the locking simple, we iterate over the
1932 * active lock lists to build a list of locks that need
1933 * releasing. We then call the iterator for each one in turn.
1935 * We take an extra reference to the vnode for the duration to
1936 * make sure it doesn't go away before we are finished.
1938 STAILQ_INIT(&locks);
1945 MPASS(ls->ls_threads >= 0);
1949 sx_xlock(&ls->ls_lock);
1950 LIST_FOREACH(lf, &ls->ls_active, lf_link) {
1951 ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
1953 ldesc->vp = lf->lf_vnode;
1955 ldesc->fl.l_start = lf->lf_start;
1956 if (lf->lf_end == OFF_MAX)
1957 ldesc->fl.l_len = 0;
1960 lf->lf_end - lf->lf_start + 1;
1961 ldesc->fl.l_whence = SEEK_SET;
1962 ldesc->fl.l_type = F_UNLCK;
1963 ldesc->fl.l_pid = lf->lf_owner->lo_pid;
1964 ldesc->fl.l_sysid = lf->lf_owner->lo_sysid;
1965 STAILQ_INSERT_TAIL(&locks, ldesc, link);
1967 sx_xunlock(&ls->ls_lock);
1969 MPASS(ls->ls_threads > 0);
1975 * Call the iterator function for each lock in turn. If the
1976 * iterator returns an error code, just free the rest of the
1977 * lockdesc structures.
1980 while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
1981 STAILQ_REMOVE_HEAD(&locks, link);
1983 error = fn(ldesc->vp, &ldesc->fl, arg);
1985 free(ldesc, M_LOCKF);
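/*
 * Illustrative sketch (not compiled): a minimal lf_iterator callback
 * that counts a vnode's active advisory locks.  A non-zero return
 * from the callback stops further calls and the remaining entries are
 * simply freed.  The helper names are hypothetical.
 */
#if 0
static int
example_count_cb(struct vnode *vp, struct flock *fl, void *arg)
{
	int *countp = arg;

	(*countp)++;
	return (0);
}

static int
example_count_locks(struct vnode *vp)
{
	int count = 0;

	lf_iteratelocks_vnode(vp, example_count_cb, &count);
	return (count);
}
#endif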
1992 lf_clearremotesys_iterator(struct vnode *vp, struct flock *fl, void *arg)
1995 VOP_ADVLOCK(vp, 0, F_UNLCK, fl, F_REMOTE);
2000 lf_clearremotesys(int sysid)
2003 KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS"));
2004 lf_iteratelocks_sysid(sysid, lf_clearremotesys_iterator, NULL);
2008 lf_countlocks(int sysid)
2011 struct lock_owner *lo;
2015 for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) {
2016 sx_xlock(&lf_lock_owners[i].lock);
2017 LIST_FOREACH(lo, &lf_lock_owners[i].list, lo_link)
2018 if (lo->lo_sysid == sysid)
2019 count += lo->lo_refs;
2020 sx_xunlock(&lf_lock_owners[i].lock);
2029 * Return non-zero if y is reachable from x using a brute force
2030 * search. If reachable and path is non-null, return the route taken in 'path'.
2034 graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
2035 struct owner_vertex_list *path)
2037 struct owner_edge *e;
2041 TAILQ_INSERT_HEAD(path, x, v_link);
2045 LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2046 if (graph_reaches(e->e_to, y, path)) {
2048 TAILQ_INSERT_HEAD(path, x, v_link);
2056 * Perform consistency checks on the graph. Make sure the values of
2057 * v_order are correct. If checkorder is non-zero, check no vertex can
2058 * reach any other vertex with a smaller order.
2061 graph_check(struct owner_graph *g, int checkorder)
2065 for (i = 0; i < g->g_size; i++) {
2066 if (!g->g_vertices[i]->v_owner)
2068 KASSERT(g->g_vertices[i]->v_order == i,
2069 ("lock graph vertices disordered"));
2071 for (j = 0; j < i; j++) {
2072 if (!g->g_vertices[j]->v_owner)
2074 KASSERT(!graph_reaches(g->g_vertices[i],
2075 g->g_vertices[j], NULL),
2076 ("lock graph vertices disordered"));
2083 graph_print_vertices(struct owner_vertex_list *set)
2085 struct owner_vertex *v;
2088 TAILQ_FOREACH(v, set, v_link) {
2089 printf("%d:", v->v_order);
2090 lf_print_owner(v->v_owner);
2091 if (TAILQ_NEXT(v, v_link))
2100 * Calculate the sub-set of vertices v from the affected region [y..x]
2101 * where v is reachable from y. Return -1 if a loop was detected
2102 * (i.e. x is reachable from y); otherwise return the number of vertices in
2103 * the subset.
2106 graph_delta_forward(struct owner_graph *g, struct owner_vertex *x,
2107 struct owner_vertex *y, struct owner_vertex_list *delta)
2110 struct owner_vertex *v;
2111 struct owner_edge *e;
2115 * We start with a set containing just y. Then for each vertex
2116 * v in the set so far unprocessed, we add each vertex that v
2117 * has an out-edge to and that is within the affected region
2118 * [y..x]. If we see the vertex x on our travels, stop immediately.
2122 TAILQ_INSERT_TAIL(delta, y, v_link);
2127 LIST_FOREACH(e, &v->v_outedges, e_outlink) {
2130 if (e->e_to->v_order < x->v_order
2131 && e->e_to->v_gen != gen) {
2132 e->e_to->v_gen = gen;
2133 TAILQ_INSERT_TAIL(delta, e->e_to, v_link);
2137 v = TAILQ_NEXT(v, v_link);
2144 * Calculate the sub-set of vertices v from the affected region [y..x]
2145 * where v reaches x. Return the number of vertices in this subset.
2148 graph_delta_backward(struct owner_graph *g, struct owner_vertex *x,
2149 struct owner_vertex *y, struct owner_vertex_list *delta)
2152 struct owner_vertex *v;
2153 struct owner_edge *e;
2157 * We start with a set containing just x. Then for each vertex
2158 * v in the set so far unprocessed, we add each vertex that v
2159 * has an in-edge from and that is within the affected region [y..x].
2163 TAILQ_INSERT_TAIL(delta, x, v_link);
2168 LIST_FOREACH(e, &v->v_inedges, e_inlink) {
2169 if (e->e_from->v_order > y->v_order
2170 && e->e_from->v_gen != gen) {
2171 e->e_from->v_gen = gen;
2172 TAILQ_INSERT_HEAD(delta, e->e_from, v_link);
2176 v = TAILQ_PREV(v, owner_vertex_list, v_link);
2183 graph_add_indices(int *indices, int n, struct owner_vertex_list *set)
2185 struct owner_vertex *v;
2188 TAILQ_FOREACH(v, set, v_link) {
2190 i > 0 && indices[i - 1] > v->v_order; i--)
2192 for (j = n - 1; j >= i; j--)
2193 indices[j + 1] = indices[j];
2194 indices[i] = v->v_order;
2202 graph_assign_indices(struct owner_graph *g, int *indices, int nextunused,
2203 struct owner_vertex_list *set)
2205 struct owner_vertex *v, *vlowest;
2207 while (!TAILQ_EMPTY(set)) {
2209 TAILQ_FOREACH(v, set, v_link) {
2210 if (!vlowest || v->v_order < vlowest->v_order)
2213 TAILQ_REMOVE(set, vlowest, v_link);
2214 vlowest->v_order = indices[nextunused];
2215 g->g_vertices[vlowest->v_order] = vlowest;
2219 return (nextunused);
2223 graph_add_edge(struct owner_graph *g, struct owner_vertex *x,
2224 struct owner_vertex *y)
2226 struct owner_edge *e;
2227 struct owner_vertex_list deltaF, deltaB;
2232 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2234 LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2242 if (lockf_debug & 8) {
2243 printf("adding edge %d:", x->v_order);
2244 lf_print_owner(x->v_owner);
2245 printf(" -> %d:", y->v_order);
2246 lf_print_owner(y->v_owner);
2250 if (y->v_order < x->v_order) {
2252 * The new edge violates the order. First find the set
2253 * of affected vertices reachable from y (deltaF) and
2254 * the set of affected vertices that reach x
2255 * (deltaB), using the graph generation number to
2256 * detect whether we have visited a given vertex
2257 * already. We re-order the graph so that each vertex
2258 * in deltaB appears before each vertex in deltaF.
2260 * If x is a member of deltaF, then the new edge would
2261 * create a cycle. Otherwise, we may assume that
2262 * deltaF and deltaB are disjoint.
2265 if (g->g_gen == 0) {
2269 for (vi = 0; vi < g->g_size; vi++) {
2270 g->g_vertices[vi]->v_gen = 0;
2274 nF = graph_delta_forward(g, x, y, &deltaF);
2277 if (lockf_debug & 8) {
2278 struct owner_vertex_list path;
2279 printf("deadlock: ");
2281 graph_reaches(y, x, &path);
2282 graph_print_vertices(&path);
2289 if (lockf_debug & 8) {
2290 printf("re-ordering graph vertices\n");
2291 printf("deltaF = ");
2292 graph_print_vertices(&deltaF);
2296 nB = graph_delta_backward(g, x, y, &deltaB);
2299 if (lockf_debug & 8) {
2300 printf("deltaB = ");
2301 graph_print_vertices(&deltaB);
2306 * We first build a set of vertex indices (vertex
2307 * order values) that we may use, then we re-assign
2308 * orders first to those vertices in deltaB, then to
2309 * deltaF. Note that the contents of deltaF and deltaB
2310 * may be partially disordered - we perform an
2311 * insertion sort while building our index set.
2313 indices = g->g_indexbuf;
2314 n = graph_add_indices(indices, 0, &deltaF);
2315 graph_add_indices(indices, n, &deltaB);
2318 * We must also be sure to maintain the relative
2319 * ordering of deltaF and deltaB when re-assigning
2320 * vertices. We do this by iteratively removing the
2321 * lowest ordered element from the set and assigning
2322 * it the next value from our new ordering.
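/*
 * Worked example (illustrative): if the vertices in deltaF currently
 * have orders {7, 9} and those in deltaB have orders {3, 8}, the
 * combined index set is {3, 7, 8, 9}; deltaB's members are re-assigned
 * orders 3 and 7 (lowest old order first) and deltaF's members get 8
 * and 9, so every vertex in deltaB now precedes every vertex in deltaF
 * while the relative order within each set is preserved.
 */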
2324 i = graph_assign_indices(g, indices, 0, &deltaB);
2325 graph_assign_indices(g, indices, i, &deltaF);
2328 if (lockf_debug & 8) {
2329 struct owner_vertex_list set;
2331 for (i = 0; i < nB + nF; i++)
2332 TAILQ_INSERT_TAIL(&set,
2333 g->g_vertices[indices[i]], v_link);
2334 printf("new ordering = ");
2335 graph_print_vertices(&set);
2340 KASSERT(x->v_order < y->v_order, ("Failed to re-order graph"));
2343 if (lockf_debug & 8) {
2344 graph_check(g, TRUE);
2348 e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK);
2350 LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink);
2351 LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink);
2360 * Remove an edge x->y from the graph.
2363 graph_remove_edge(struct owner_graph *g, struct owner_vertex *x,
2364 struct owner_vertex *y)
2366 struct owner_edge *e;
2368 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2370 LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2374 KASSERT(e, ("Removing non-existent edge from deadlock graph"));
2377 if (e->e_refs == 0) {
2379 if (lockf_debug & 8) {
2380 printf("removing edge %d:", x->v_order);
2381 lf_print_owner(x->v_owner);
2382 printf(" -> %d:", y->v_order);
2383 lf_print_owner(y->v_owner);
2387 LIST_REMOVE(e, e_outlink);
2388 LIST_REMOVE(e, e_inlink);
2394 * Allocate a vertex for the given lock owner, growing the graph's vertex array if necessary.
2397 static struct owner_vertex *
2398 graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo)
2400 struct owner_vertex *v;
2402 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2404 v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK);
2405 if (g->g_size == g->g_space) {
2406 g->g_vertices = realloc(g->g_vertices,
2407 2 * g->g_space * sizeof(struct owner_vertex *),
2409 free(g->g_indexbuf, M_LOCKF);
2410 g->g_indexbuf = malloc(2 * g->g_space * sizeof(int),
2412 g->g_space = 2 * g->g_space;
2414 v->v_order = g->g_size;
2415 v->v_gen = g->g_gen;
2416 g->g_vertices[g->g_size] = v;
2419 LIST_INIT(&v->v_outedges);
2420 LIST_INIT(&v->v_inedges);
2427 graph_free_vertex(struct owner_graph *g, struct owner_vertex *v)
2429 struct owner_vertex *w;
2432 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2434 KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges"));
2435 KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges"));
2438 * Remove from the graph's array and close up the gap,
2439 * renumbering the other vertices.
2441 for (i = v->v_order + 1; i < g->g_size; i++) {
2442 w = g->g_vertices[i];
2444 g->g_vertices[i - 1] = w;
2451 static struct owner_graph *
2452 graph_init(struct owner_graph *g)
2455 g->g_vertices = malloc(10 * sizeof(struct owner_vertex *),
2459 g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK);
2465 struct kinfo_lockf_linked {
2466 struct kinfo_lockf kl;
2467 struct vnode *vp;
2468 STAILQ_ENTRY(kinfo_lockf_linked) link;
2472 vfs_report_lockf(struct mount *mp, struct sbuf *sb)
2475 struct lockf_entry *lf;
2476 struct kinfo_lockf_linked *klf;
2478 struct ucred *ucred;
2479 char *fullpath, *freepath;
2481 STAILQ_HEAD(, kinfo_lockf_linked) locks;
2484 STAILQ_INIT(&locks);
2485 sx_slock(&lf_lock_states_lock);
2486 LIST_FOREACH(ls, &lf_lock_states, ls_link) {
2487 sx_slock(&ls->ls_lock);
2488 LIST_FOREACH(lf, &ls->ls_active, lf_link) {
2490 if (VN_IS_DOOMED(vp) || vp->v_mount != mp)
2493 klf = malloc(sizeof(struct kinfo_lockf_linked),
2494 M_LOCKF, M_WAITOK | M_ZERO);
2496 klf->kl.kl_structsize = sizeof(struct kinfo_lockf);
2497 klf->kl.kl_start = lf->lf_start;
2498 klf->kl.kl_len = lf->lf_end == OFF_MAX ? 0 :
2499 lf->lf_end - lf->lf_start + 1;
2500 klf->kl.kl_rw = lf->lf_type == F_RDLCK ?
2501 KLOCKF_RW_READ : KLOCKF_RW_WRITE;
2502 if (lf->lf_owner->lo_sysid != 0) {
2503 klf->kl.kl_pid = lf->lf_owner->lo_pid;
2504 klf->kl.kl_sysid = lf->lf_owner->lo_sysid;
2505 klf->kl.kl_type = KLOCKF_TYPE_REMOTE;
2506 } else if (lf->lf_owner->lo_pid == -1) {
2507 klf->kl.kl_pid = -1;
2508 klf->kl.kl_sysid = 0;
2509 klf->kl.kl_type = KLOCKF_TYPE_FLOCK;
2511 klf->kl.kl_pid = lf->lf_owner->lo_pid;
2512 klf->kl.kl_sysid = 0;
2513 klf->kl.kl_type = KLOCKF_TYPE_PID;
2515 STAILQ_INSERT_TAIL(&locks, klf, link);
2517 sx_sunlock(&ls->ls_lock);
2519 sx_sunlock(&lf_lock_states_lock);
2522 ucred = curthread->td_ucred;
2523 while ((klf = STAILQ_FIRST(&locks)) != NULL) {
2524 STAILQ_REMOVE_HEAD(&locks, link);
2526 if (gerror == 0 && vn_lock(vp, LK_SHARED) == 0) {
2527 error = prison_canseemount(ucred, vp->v_mount);
2529 error = VOP_STAT(vp, &stt, ucred, NOCRED);
2532 klf->kl.kl_file_fsid = stt.st_dev;
2533 klf->kl.kl_file_rdev = stt.st_rdev;
2534 klf->kl.kl_file_fileid = stt.st_ino;
2537 error = vn_fullpath(vp, &fullpath, &freepath);
2539 strlcpy(klf->kl.kl_path, fullpath,
2540 sizeof(klf->kl.kl_path));
2541 free(freepath, M_TEMP);
2542 if (sbuf_bcat(sb, &klf->kl,
2543 klf->kl.kl_structsize) != 0) {
2544 gerror = sbuf_error(sb);
2556 sysctl_kern_lockf_run(struct sbuf *sb)
2562 mtx_lock(&mountlist_mtx);
2563 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2564 error = vfs_busy(mp, MBF_MNTLSTLOCK);
2567 error = mp->mnt_op->vfs_report_lockf(mp, sb);
2568 mtx_lock(&mountlist_mtx);
2573 mtx_unlock(&mountlist_mtx);
2578 sysctl_kern_lockf(SYSCTL_HANDLER_ARGS)
2583 sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_lockf) * 5, req);
2584 sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
2585 error = sysctl_kern_lockf_run(&sb);
2586 error2 = sbuf_finish(&sb);
2588 return (error != 0 ? error : error2);
2590 SYSCTL_PROC(_kern, KERN_LOCKF, lockf,
2591 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE,
2592 0, 0, sysctl_kern_lockf, "S,lockf",
2593 "Advisory locks table");
2597 * Print description of a lock owner
2600 lf_print_owner(struct lock_owner *lo)
2603 if (lo->lo_flags & F_REMOTE) {
2604 printf("remote pid %d, system %d",
2605 lo->lo_pid, lo->lo_sysid);
2606 } else if (lo->lo_flags & F_FLOCK) {
2607 printf("file %p", lo->lo_id);
2609 printf("local pid %d", lo->lo_pid);
2617 lf_print(char *tag, struct lockf_entry *lock)
2620 printf("%s: lock %p for ", tag, (void *)lock);
2621 lf_print_owner(lock->lf_owner);
2622 printf("\nvnode %p", lock->lf_vnode);
2623 VOP_PRINT(lock->lf_vnode);
2624 printf(" %s, start %jd, end ",
2625 lock->lf_type == F_RDLCK ? "shared" :
2626 lock->lf_type == F_WRLCK ? "exclusive" :
2627 lock->lf_type == F_UNLCK ? "unlock" : "unknown",
2628 (intmax_t)lock->lf_start);
2629 if (lock->lf_end == OFF_MAX)
2632 printf("%jd", (intmax_t)lock->lf_end);
2633 if (!LIST_EMPTY(&lock->lf_outedges))
2634 printf(" block %p\n",
2635 (void *)LIST_FIRST(&lock->lf_outedges)->le_to);
2641 lf_printlist(char *tag, struct lockf_entry *lock)
2643 struct lockf_entry *lf, *blk;
2644 struct lockf_edge *e;
2646 printf("%s: Lock list for vnode %p:\n", tag, lock->lf_vnode);
2647 LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) {
2648 printf("\tlock %p for ",(void *)lf);
2649 lf_print_owner(lock->lf_owner);
2650 printf(", %s, start %jd, end %jd",
2651 lf->lf_type == F_RDLCK ? "shared" :
2652 lf->lf_type == F_WRLCK ? "exclusive" :
2653 lf->lf_type == F_UNLCK ? "unlock" :
2654 "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
2655 LIST_FOREACH(e, &lf->lf_outedges, le_outlink) {
2657 printf("\n\t\tlock request %p for ", (void *)blk);
2658 lf_print_owner(blk->lf_owner);
2659 printf(", %s, start %jd, end %jd",
2660 blk->lf_type == F_RDLCK ? "shared" :
2661 blk->lf_type == F_WRLCK ? "exclusive" :
2662 blk->lf_type == F_UNLCK ? "unlock" :
2663 "unknown", (intmax_t)blk->lf_start,
2664 (intmax_t)blk->lf_end);
2665 if (!LIST_EMPTY(&blk->lf_inedges))
2666 panic("lf_printlist: bad list");
2671 #endif /* LOCKF_DEBUG */