/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident   "%Z%%M% %I%     %E% SMI"

#include "libuutil_common.h"

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>

#define ELEM_TO_NODE(lp, e) \
        ((uu_list_node_impl_t *)((uintptr_t)(e) + (lp)->ul_offset))

#define NODE_TO_ELEM(lp, n) \
        ((void *)((uintptr_t)(n) - (lp)->ul_offset))

/*
 * uu_list_index_ts define a location for insertion.  They are simply a
 * pointer to the object after the insertion point.  We store a mark
 * in the low-bits of the index, to help prevent mistakes.
 *
 * When debugging, the index mark changes on every insert and delete, to
 * catch stale references.
 */
#define INDEX_MAX               (sizeof (uintptr_t) - 1)
#define INDEX_NEXT(m)           (((m) == INDEX_MAX)? 1 : ((m) + 1) & INDEX_MAX)

#define INDEX_TO_NODE(i)        ((uu_list_node_impl_t *)((i) & ~INDEX_MAX))
#define NODE_TO_INDEX(p, n)     (((uintptr_t)(n) & ~INDEX_MAX) | (p)->ul_index)
#define INDEX_VALID(p, i)       (((i) & INDEX_MAX) == (p)->ul_index)
#define INDEX_CHECK(i)          (((i) & INDEX_MAX) != 0)

#define POOL_TO_MARKER(pp) ((void *)((uintptr_t)(pp) | 1))
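
/*
 * Illustration of the encodings above (on a 64-bit system, where
 * sizeof (uintptr_t) == 8, so INDEX_MAX == 7): for a node at address
 * 0x1000 on a list whose current mark (ul_index) is 3,
 *
 *      NODE_TO_INDEX(lp, np)   == 0x1003
 *      INDEX_TO_NODE(0x1003)   == (uu_list_node_impl_t *)0x1000
 *      INDEX_VALID(lp, 0x1003) holds until the list's mark changes
 *                              (on every insert/remove, in debug mode).
 *
 * POOL_TO_MARKER(pp) sets the low bit of the pool pointer; the result is
 * stored in uln_next to tag a node that is initialized but not currently
 * on any list.
 */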

static uu_list_pool_t   uu_null_lpool = { &uu_null_lpool, &uu_null_lpool };
static pthread_mutex_t  uu_lpool_list_lock = PTHREAD_MUTEX_INITIALIZER;

uu_list_pool_t *
uu_list_pool_create(const char *name, size_t objsize,
    size_t nodeoffset, uu_compare_fn_t *compare_func, uint32_t flags)
{
        uu_list_pool_t *pp, *next, *prev;

        if (name == NULL ||
            uu_check_name(name, UU_NAME_DOMAIN) == -1 ||
            nodeoffset + sizeof (uu_list_node_t) > objsize) {
                uu_set_error(UU_ERROR_INVALID_ARGUMENT);
                return (NULL);
        }

        if (flags & ~UU_LIST_POOL_DEBUG) {
                uu_set_error(UU_ERROR_UNKNOWN_FLAG);
                return (NULL);
        }

        pp = uu_zalloc(sizeof (uu_list_pool_t));
        if (pp == NULL) {
                uu_set_error(UU_ERROR_NO_MEMORY);
                return (NULL);
        }

        (void) strlcpy(pp->ulp_name, name, sizeof (pp->ulp_name));
        pp->ulp_nodeoffset = nodeoffset;
        pp->ulp_objsize = objsize;
        pp->ulp_cmp = compare_func;
        if (flags & UU_LIST_POOL_DEBUG)
                pp->ulp_debug = 1;
        pp->ulp_last_index = 0;

        (void) pthread_mutex_init(&pp->ulp_lock, NULL);

        pp->ulp_null_list.ul_next_enc = UU_PTR_ENCODE(&pp->ulp_null_list);
        pp->ulp_null_list.ul_prev_enc = UU_PTR_ENCODE(&pp->ulp_null_list);

        (void) pthread_mutex_lock(&uu_lpool_list_lock);
        pp->ulp_next = next = &uu_null_lpool;
        pp->ulp_prev = prev = next->ulp_prev;
        next->ulp_prev = pp;
        prev->ulp_next = pp;
        (void) pthread_mutex_unlock(&uu_lpool_list_lock);

        return (pp);
}
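
/*
 * Usage sketch (illustrative only; "struct myobj", "mo_node", "mo_key",
 * and myobj_compare() are hypothetical, not part of this library):
 *
 *      struct myobj {
 *              uu_list_node_t  mo_node;
 *              int             mo_key;
 *      };
 *
 *      static int
 *      myobj_compare(const void *l, const void *r, void *private)
 *      {
 *              const struct myobj *a = l;
 *              const struct myobj *b = r;
 *
 *              return (a->mo_key < b->mo_key ? -1 : a->mo_key > b->mo_key);
 *      }
 *
 *      uu_list_pool_t *pool = uu_list_pool_create("myobj_pool",
 *          sizeof (struct myobj), offsetof(struct myobj, mo_node),
 *          myobj_compare, UU_LIST_POOL_DEBUG);
 *
 * The comparison function is only required if lists created from the
 * pool use UU_LIST_SORTED or uu_list_find(); otherwise NULL is fine.
 */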

void
uu_list_pool_destroy(uu_list_pool_t *pp)
{
        if (pp->ulp_debug) {
                if (pp->ulp_null_list.ul_next_enc !=
                    UU_PTR_ENCODE(&pp->ulp_null_list) ||
                    pp->ulp_null_list.ul_prev_enc !=
                    UU_PTR_ENCODE(&pp->ulp_null_list)) {
                        uu_panic("uu_list_pool_destroy: Pool \"%.*s\" (%p) has "
                            "outstanding lists, or is corrupt.\n",
                            (int)sizeof (pp->ulp_name), pp->ulp_name, pp);
                }
        }
        (void) pthread_mutex_lock(&uu_lpool_list_lock);
        pp->ulp_next->ulp_prev = pp->ulp_prev;
        pp->ulp_prev->ulp_next = pp->ulp_next;
        (void) pthread_mutex_unlock(&uu_lpool_list_lock);
        pp->ulp_prev = NULL;
        pp->ulp_next = NULL;
        uu_free(pp);
}

void
uu_list_node_init(void *base, uu_list_node_t *np_arg, uu_list_pool_t *pp)
{
        uu_list_node_impl_t *np = (uu_list_node_impl_t *)np_arg;

        if (pp->ulp_debug) {
                uintptr_t offset = (uintptr_t)np - (uintptr_t)base;
                if (offset + sizeof (*np) > pp->ulp_objsize) {
                        uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): "
                            "offset %ld doesn't fit in object (size %ld)\n",
                            base, np, pp, pp->ulp_name, offset,
                            pp->ulp_objsize);
                }
                if (offset != pp->ulp_nodeoffset) {
                        uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): "
                            "offset %ld doesn't match pool's offset (%ld)\n",
                            base, np, pp, pp->ulp_name, offset,
                            pp->ulp_nodeoffset);
                }
        }
        np->uln_next = POOL_TO_MARKER(pp);
        np->uln_prev = NULL;
}

void
uu_list_node_fini(void *base, uu_list_node_t *np_arg, uu_list_pool_t *pp)
{
        uu_list_node_impl_t *np = (uu_list_node_impl_t *)np_arg;

        if (pp->ulp_debug) {
                if (np->uln_next == NULL &&
                    np->uln_prev == NULL) {
                        uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): "
                            "node already finied\n",
                            base, np_arg, pp, pp->ulp_name);
                }
                if (np->uln_next != POOL_TO_MARKER(pp) ||
                    np->uln_prev != NULL) {
                        uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): "
                            "node corrupt or on list\n",
                            base, np_arg, pp, pp->ulp_name);
                }
        }
        np->uln_next = NULL;
        np->uln_prev = NULL;
}

uu_list_t *
uu_list_create(uu_list_pool_t *pp, void *parent, uint32_t flags)
{
        uu_list_t *lp, *next, *prev;

        if (flags & ~(UU_LIST_DEBUG | UU_LIST_SORTED)) {
                uu_set_error(UU_ERROR_UNKNOWN_FLAG);
                return (NULL);
        }

        if ((flags & UU_LIST_SORTED) && pp->ulp_cmp == NULL) {
                if (pp->ulp_debug)
                        uu_panic("uu_list_create(%p, ...): requested "
                            "UU_LIST_SORTED, but pool has no comparison func\n",
                            pp);
                uu_set_error(UU_ERROR_NOT_SUPPORTED);
                return (NULL);
        }

        lp = uu_zalloc(sizeof (*lp));
        if (lp == NULL) {
                uu_set_error(UU_ERROR_NO_MEMORY);
                return (NULL);
        }

        lp->ul_pool = pp;
        lp->ul_parent_enc = UU_PTR_ENCODE(parent);
        lp->ul_offset = pp->ulp_nodeoffset;
        lp->ul_debug = pp->ulp_debug || (flags & UU_LIST_DEBUG);
        lp->ul_sorted = (flags & UU_LIST_SORTED);
        lp->ul_numnodes = 0;
        lp->ul_index = (pp->ulp_last_index = INDEX_NEXT(pp->ulp_last_index));

        lp->ul_null_node.uln_next = &lp->ul_null_node;
        lp->ul_null_node.uln_prev = &lp->ul_null_node;

        lp->ul_null_walk.ulw_next = &lp->ul_null_walk;
        lp->ul_null_walk.ulw_prev = &lp->ul_null_walk;

        (void) pthread_mutex_lock(&pp->ulp_lock);
        next = &pp->ulp_null_list;
        prev = UU_PTR_DECODE(next->ul_prev_enc);
        lp->ul_next_enc = UU_PTR_ENCODE(next);
        lp->ul_prev_enc = UU_PTR_ENCODE(prev);
        next->ul_prev_enc = UU_PTR_ENCODE(lp);
        prev->ul_next_enc = UU_PTR_ENCODE(lp);
        (void) pthread_mutex_unlock(&pp->ulp_lock);

        return (lp);
}
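
/*
 * Continuing the sketch above (see the comment after uu_list_pool_create();
 * the names are hypothetical): a sorted, debug-checked list is created with
 *
 *      uu_list_t *list = uu_list_create(pool, parent,
 *          UU_LIST_SORTED | UU_LIST_DEBUG);
 *
 * and each object's embedded node must be initialized before the object
 * is placed on the list:
 *
 *      uu_list_node_init(obj, &obj->mo_node, pool);
 */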

void
uu_list_destroy(uu_list_t *lp)
{
        uu_list_pool_t *pp = lp->ul_pool;

        if (lp->ul_debug) {
                if (lp->ul_null_node.uln_next != &lp->ul_null_node ||
                    lp->ul_null_node.uln_prev != &lp->ul_null_node) {
                        uu_panic("uu_list_destroy(%p):  list not empty\n",
                            lp);
                }
                if (lp->ul_numnodes != 0) {
                        uu_panic("uu_list_destroy(%p):  numnodes is nonzero, "
                            "but list is empty\n", lp);
                }
                if (lp->ul_null_walk.ulw_next != &lp->ul_null_walk ||
                    lp->ul_null_walk.ulw_prev != &lp->ul_null_walk) {
                        uu_panic("uu_list_destroy(%p):  outstanding walkers\n",
                            lp);
                }
        }

        (void) pthread_mutex_lock(&pp->ulp_lock);
        UU_LIST_PTR(lp->ul_next_enc)->ul_prev_enc = lp->ul_prev_enc;
        UU_LIST_PTR(lp->ul_prev_enc)->ul_next_enc = lp->ul_next_enc;
        (void) pthread_mutex_unlock(&pp->ulp_lock);
        lp->ul_prev_enc = UU_PTR_ENCODE(NULL);
        lp->ul_next_enc = UU_PTR_ENCODE(NULL);
        lp->ul_pool = NULL;
        uu_free(lp);
}

static void
list_insert(uu_list_t *lp, uu_list_node_impl_t *np, uu_list_node_impl_t *prev,
    uu_list_node_impl_t *next)
{
        if (lp->ul_debug) {
                if (next->uln_prev != prev || prev->uln_next != next)
                        uu_panic("insert(%p): internal error: %p and %p not "
                            "neighbors\n", lp, next, prev);

                if (np->uln_next != POOL_TO_MARKER(lp->ul_pool) ||
                    np->uln_prev != NULL) {
                        uu_panic("insert(%p): elem %p node %p corrupt, "
                            "not initialized, or already in a list.\n",
                            lp, NODE_TO_ELEM(lp, np), np);
                }
                /*
                 * invalidate outstanding uu_list_index_ts.
                 */
                lp->ul_index = INDEX_NEXT(lp->ul_index);
        }
        np->uln_next = next;
        np->uln_prev = prev;
        next->uln_prev = np;
        prev->uln_next = np;

        lp->ul_numnodes++;
}

void
uu_list_insert(uu_list_t *lp, void *elem, uu_list_index_t idx)
{
        uu_list_node_impl_t *np;

        np = INDEX_TO_NODE(idx);
        if (np == NULL)
                np = &lp->ul_null_node;

        if (lp->ul_debug) {
                if (!INDEX_VALID(lp, idx))
                        uu_panic("uu_list_insert(%p, %p, %p): %s\n",
                            lp, elem, idx,
                            INDEX_CHECK(idx)? "outdated index" :
                            "invalid index");
                if (np->uln_prev == NULL)
                        uu_panic("uu_list_insert(%p, %p, %p): out-of-date "
                            "index\n", lp, elem, idx);
        }

        list_insert(lp, ELEM_TO_NODE(lp, elem), np->uln_prev, np);
}

void *
uu_list_find(uu_list_t *lp, void *elem, void *private, uu_list_index_t *out)
{
        int sorted = lp->ul_sorted;
        uu_compare_fn_t *func = lp->ul_pool->ulp_cmp;
        uu_list_node_impl_t *np;

        if (func == NULL) {
                if (out != NULL)
                        *out = 0;
                uu_set_error(UU_ERROR_NOT_SUPPORTED);
                return (NULL);
        }
        for (np = lp->ul_null_node.uln_next; np != &lp->ul_null_node;
            np = np->uln_next) {
                void *ep = NODE_TO_ELEM(lp, np);
                int cmp = func(ep, elem, private);
                if (cmp == 0) {
                        if (out != NULL)
                                *out = NODE_TO_INDEX(lp, np);
                        return (ep);
                }
                if (sorted && cmp > 0) {
                        if (out != NULL)
                                *out = NODE_TO_INDEX(lp, np);
                        return (NULL);
                }
        }
        if (out != NULL)
                *out = NODE_TO_INDEX(lp, 0);
        return (NULL);
}
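
/*
 * The usual insertion idiom for sorted lists (illustrative): ask
 * uu_list_find() for an index describing the insertion point, then
 * insert only if no equal element is already present:
 *
 *      uu_list_index_t idx;
 *
 *      if (uu_list_find(list, obj, NULL, &idx) == NULL)
 *              uu_list_insert(list, obj, idx);
 */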

void *
uu_list_nearest_next(uu_list_t *lp, uu_list_index_t idx)
{
        uu_list_node_impl_t *np = INDEX_TO_NODE(idx);

        if (np == NULL)
                np = &lp->ul_null_node;

        if (lp->ul_debug) {
                if (!INDEX_VALID(lp, idx))
                        uu_panic("uu_list_nearest_next(%p, %p): %s\n",
                            lp, idx, INDEX_CHECK(idx)? "outdated index" :
                            "invalid index");
                if (np->uln_prev == NULL)
                        uu_panic("uu_list_nearest_next(%p, %p): out-of-date "
                            "index\n", lp, idx);
        }

        if (np == &lp->ul_null_node)
                return (NULL);
        else
                return (NODE_TO_ELEM(lp, np));
}

void *
uu_list_nearest_prev(uu_list_t *lp, uu_list_index_t idx)
{
        uu_list_node_impl_t *np = INDEX_TO_NODE(idx);

        if (np == NULL)
                np = &lp->ul_null_node;

        if (lp->ul_debug) {
                if (!INDEX_VALID(lp, idx))
                        uu_panic("uu_list_nearest_prev(%p, %p): %s\n",
                            lp, idx, INDEX_CHECK(idx)? "outdated index" :
                            "invalid index");
                if (np->uln_prev == NULL)
                        uu_panic("uu_list_nearest_prev(%p, %p): out-of-date "
                            "index\n", lp, idx);
        }

        if ((np = np->uln_prev) == &lp->ul_null_node)
                return (NULL);
        else
                return (NODE_TO_ELEM(lp, np));
}

static void
list_walk_init(uu_list_walk_t *wp, uu_list_t *lp, uint32_t flags)
{
        uu_list_walk_t *next, *prev;

        int robust = (flags & UU_WALK_ROBUST);
        int direction = (flags & UU_WALK_REVERSE)? -1 : 1;

        (void) memset(wp, 0, sizeof (*wp));
        wp->ulw_list = lp;
        wp->ulw_robust = robust;
        wp->ulw_dir = direction;
        if (direction > 0)
                wp->ulw_next_result = lp->ul_null_node.uln_next;
        else
                wp->ulw_next_result = lp->ul_null_node.uln_prev;

        if (lp->ul_debug || robust) {
                wp->ulw_next = next = &lp->ul_null_walk;
                wp->ulw_prev = prev = next->ulw_prev;
                next->ulw_prev = wp;
                prev->ulw_next = wp;
        }
}

static uu_list_node_impl_t *
list_walk_advance(uu_list_walk_t *wp, uu_list_t *lp)
{
        uu_list_node_impl_t *np = wp->ulw_next_result;
        uu_list_node_impl_t *next;

        if (np == &lp->ul_null_node)
                return (NULL);

        next = (wp->ulw_dir > 0)? np->uln_next : np->uln_prev;

        wp->ulw_next_result = next;
        return (np);
}

static void
list_walk_fini(uu_list_walk_t *wp)
{
        /* GLXXX debugging? */
        if (wp->ulw_next != NULL) {
                wp->ulw_next->ulw_prev = wp->ulw_prev;
                wp->ulw_prev->ulw_next = wp->ulw_next;
                wp->ulw_next = NULL;
                wp->ulw_prev = NULL;
        }
        wp->ulw_list = NULL;
        wp->ulw_next_result = NULL;
}

uu_list_walk_t *
uu_list_walk_start(uu_list_t *lp, uint32_t flags)
{
        uu_list_walk_t *wp;

        if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
                uu_set_error(UU_ERROR_UNKNOWN_FLAG);
                return (NULL);
        }

        wp = uu_zalloc(sizeof (*wp));
        if (wp == NULL) {
                uu_set_error(UU_ERROR_NO_MEMORY);
                return (NULL);
        }

        list_walk_init(wp, lp, flags);
        return (wp);
}

void *
uu_list_walk_next(uu_list_walk_t *wp)
{
        uu_list_t *lp = wp->ulw_list;
        uu_list_node_impl_t *np = list_walk_advance(wp, lp);

        if (np == NULL)
                return (NULL);

        return (NODE_TO_ELEM(lp, np));
}

void
uu_list_walk_end(uu_list_walk_t *wp)
{
        list_walk_fini(wp);
        uu_free(wp);
}
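
/*
 * Example (illustrative; should_drop() is a hypothetical predicate): a
 * UU_WALK_ROBUST walker may have elements removed out from under it,
 * including the element it was just handed, because uu_list_remove()
 * advances robust walkers past the node being removed:
 *
 *      uu_list_walk_t *wp = uu_list_walk_start(list, UU_WALK_ROBUST);
 *      void *e;
 *
 *      while ((e = uu_list_walk_next(wp)) != NULL) {
 *              if (should_drop(e))
 *                      uu_list_remove(list, e);
 *      }
 *      uu_list_walk_end(wp);
 */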

int
uu_list_walk(uu_list_t *lp, uu_walk_fn_t *func, void *private, uint32_t flags)
{
        uu_list_node_impl_t *np;

        int status = UU_WALK_NEXT;

        int robust = (flags & UU_WALK_ROBUST);
        int reverse = (flags & UU_WALK_REVERSE);

        if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
                uu_set_error(UU_ERROR_UNKNOWN_FLAG);
                return (-1);
        }

        if (lp->ul_debug || robust) {
                uu_list_walk_t my_walk;
                void *e;

                list_walk_init(&my_walk, lp, flags);
                while (status == UU_WALK_NEXT &&
                    (e = uu_list_walk_next(&my_walk)) != NULL)
                        status = (*func)(e, private);
                list_walk_fini(&my_walk);
        } else {
                if (!reverse) {
                        for (np = lp->ul_null_node.uln_next;
                            status == UU_WALK_NEXT && np != &lp->ul_null_node;
                            np = np->uln_next) {
                                status = (*func)(NODE_TO_ELEM(lp, np), private);
                        }
                } else {
                        for (np = lp->ul_null_node.uln_prev;
                            status == UU_WALK_NEXT && np != &lp->ul_null_node;
                            np = np->uln_prev) {
                                status = (*func)(NODE_TO_ELEM(lp, np), private);
                        }
                }
        }
        if (status >= 0)
                return (0);
        uu_set_error(UU_ERROR_CALLBACK_FAILED);
        return (-1);
}
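
/*
 * Callback-style walk (illustrative): the walk function receives each
 * element plus the caller's private pointer.  Returning UU_WALK_NEXT
 * continues the walk; any other non-negative status (UU_WALK_DONE)
 * stops it early with a 0 return; a negative status makes uu_list_walk()
 * fail with UU_ERROR_CALLBACK_FAILED.
 *
 *      static int
 *      count_cb(void *e, void *private)
 *      {
 *              (*(size_t *)private)++;
 *              return (UU_WALK_NEXT);
 *      }
 *
 *      size_t n = 0;
 *      (void) uu_list_walk(list, count_cb, &n, 0);
 */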

void
uu_list_remove(uu_list_t *lp, void *elem)
{
        uu_list_node_impl_t *np = ELEM_TO_NODE(lp, elem);
        uu_list_walk_t *wp;

        if (lp->ul_debug) {
                if (np->uln_prev == NULL)
                        uu_panic("uu_list_remove(%p, %p): elem not on list\n",
                            lp, elem);
                /*
                 * invalidate outstanding uu_list_index_ts.
                 */
                lp->ul_index = INDEX_NEXT(lp->ul_index);
        }

        /*
         * robust walkers must be advanced.  In debug mode, non-robust
         * walkers are also on the list.  If there are any, it's an error.
         */
        for (wp = lp->ul_null_walk.ulw_next; wp != &lp->ul_null_walk;
            wp = wp->ulw_next) {
                if (wp->ulw_robust) {
                        if (np == wp->ulw_next_result)
                                (void) list_walk_advance(wp, lp);
                } else if (wp->ulw_next_result != NULL) {
                        uu_panic("uu_list_remove(%p, %p): active non-robust "
                            "walker\n", lp, elem);
                }
        }

        np->uln_next->uln_prev = np->uln_prev;
        np->uln_prev->uln_next = np->uln_next;

        lp->ul_numnodes--;

        np->uln_next = POOL_TO_MARKER(lp->ul_pool);
        np->uln_prev = NULL;
}

void *
uu_list_teardown(uu_list_t *lp, void **cookie)
{
        void *ep;

        /*
         * XXX: disable list modification until list is empty
         */
        if (lp->ul_debug && *cookie != NULL)
                uu_panic("uu_list_teardown(%p, %p): unexpected cookie\n", lp,
                    cookie);

        ep = uu_list_first(lp);
        if (ep)
                uu_list_remove(lp, ep);
        return (ep);
}
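
/*
 * Typical teardown loop (illustrative; myobj_free() is hypothetical):
 *
 *      void *cookie = NULL;
 *      void *e;
 *
 *      while ((e = uu_list_teardown(list, &cookie)) != NULL)
 *              myobj_free(e);
 *      uu_list_destroy(list);
 */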

int
uu_list_insert_before(uu_list_t *lp, void *target, void *elem)
{
        uu_list_node_impl_t *np = ELEM_TO_NODE(lp, target);

        if (target == NULL)
                np = &lp->ul_null_node;

        if (lp->ul_debug) {
                if (np->uln_prev == NULL)
                        uu_panic("uu_list_insert_before(%p, %p, %p): %p is "
                            "not currently on a list\n",
                            lp, target, elem, target);
        }
        if (lp->ul_sorted) {
                if (lp->ul_debug)
                        uu_panic("uu_list_insert_before(%p, ...): list is "
                            "UU_LIST_SORTED\n", lp);
                uu_set_error(UU_ERROR_NOT_SUPPORTED);
                return (-1);
        }

        list_insert(lp, ELEM_TO_NODE(lp, elem), np->uln_prev, np);
        return (0);
}

int
uu_list_insert_after(uu_list_t *lp, void *target, void *elem)
{
        uu_list_node_impl_t *np = ELEM_TO_NODE(lp, target);

        if (target == NULL)
                np = &lp->ul_null_node;

        if (lp->ul_debug) {
                if (np->uln_prev == NULL)
                        uu_panic("uu_list_insert_after(%p, %p, %p): %p is "
                            "not currently on a list\n",
                            lp, target, elem, target);
        }
        if (lp->ul_sorted) {
                if (lp->ul_debug)
                        uu_panic("uu_list_insert_after(%p, ...): list is "
                            "UU_LIST_SORTED\n", lp);
                uu_set_error(UU_ERROR_NOT_SUPPORTED);
                return (-1);
        }

        list_insert(lp, ELEM_TO_NODE(lp, elem), np, np->uln_next);
        return (0);
}

size_t
uu_list_numnodes(uu_list_t *lp)
{
        return (lp->ul_numnodes);
}

void *
uu_list_first(uu_list_t *lp)
{
        uu_list_node_impl_t *n = lp->ul_null_node.uln_next;
        if (n == &lp->ul_null_node)
                return (NULL);
        return (NODE_TO_ELEM(lp, n));
}

void *
uu_list_last(uu_list_t *lp)
{
        uu_list_node_impl_t *n = lp->ul_null_node.uln_prev;
        if (n == &lp->ul_null_node)
                return (NULL);
        return (NODE_TO_ELEM(lp, n));
}

void *
uu_list_next(uu_list_t *lp, void *elem)
{
        uu_list_node_impl_t *n = ELEM_TO_NODE(lp, elem);

        n = n->uln_next;
        if (n == &lp->ul_null_node)
                return (NULL);
        return (NODE_TO_ELEM(lp, n));
}

void *
uu_list_prev(uu_list_t *lp, void *elem)
{
        uu_list_node_impl_t *n = ELEM_TO_NODE(lp, elem);

        n = n->uln_prev;
        if (n == &lp->ul_null_node)
                return (NULL);
        return (NODE_TO_ELEM(lp, n));
}

/*
 * called from uu_lockup() and uu_release(), as part of our fork1()-safety.
 */
void
uu_list_lockup(void)
{
        uu_list_pool_t *pp;

        (void) pthread_mutex_lock(&uu_lpool_list_lock);
        for (pp = uu_null_lpool.ulp_next; pp != &uu_null_lpool;
            pp = pp->ulp_next)
                (void) pthread_mutex_lock(&pp->ulp_lock);
}

void
uu_list_release(void)
{
        uu_list_pool_t *pp;

        for (pp = uu_null_lpool.ulp_next; pp != &uu_null_lpool;
            pp = pp->ulp_next)
                (void) pthread_mutex_unlock(&pp->ulp_lock);
        (void) pthread_mutex_unlock(&uu_lpool_list_lock);
}