4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License"). You may not use this file except in compliance
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
27 #pragma ident "%Z%%M% %I% %E% SMI"
29 #include "libuutil_common.h"
/*
 * Element <-> node conversions: each user object embeds a uu_list_node_t
 * at a fixed byte offset (lp->ul_offset), so conversion is plain pointer
 * arithmetic in both directions.
 */
36 #define ELEM_TO_NODE(lp, e) \
37 ((uu_list_node_impl_t *)((uintptr_t)(e) + (lp)->ul_offset))
39 #define NODE_TO_ELEM(lp, n) \
40 ((void *)((uintptr_t)(n) - (lp)->ul_offset))
43 * uu_list_index_ts define a location for insertion. They are simply a
44 * pointer to the object after the insertion point. We store a mark
45 * in the low-bits of the index, to help prevent mistakes.
47 * When debugging, the index mark changes on every insert and delete, to
48 * catch stale references.
/*
 * INDEX_MAX is the low-bit mask used for the mark (sizeof (uintptr_t) - 1,
 * i.e. 3 bits on LP64, 2 on ILP32).  This relies on list nodes being at
 * least pointer-aligned so those bits of a node address are always zero.
 * INDEX_NEXT cycles the mark through 1..INDEX_MAX, deliberately skipping 0
 * so that a marked index is never all-zero in the low bits (INDEX_CHECK).
 */
50 #define INDEX_MAX (sizeof (uintptr_t) - 1)
51 #define INDEX_NEXT(m) (((m) == INDEX_MAX)? 1 : ((m) + 1) & INDEX_MAX)
/*
 * Strip the mark to recover the node pointer; rebuild an index by OR-ing
 * the list's current mark back in.  INDEX_VALID tests the mark against
 * the list's current value; INDEX_CHECK merely tests "has some mark".
 */
53 #define INDEX_TO_NODE(i) ((uu_list_node_impl_t *)((i) & ~INDEX_MAX))
54 #define NODE_TO_INDEX(p, n) (((uintptr_t)(n) & ~INDEX_MAX) | (p)->ul_index)
55 #define INDEX_VALID(p, i) (((i) & INDEX_MAX) == (p)->ul_index)
56 #define INDEX_CHECK(i) (((i) & INDEX_MAX) != 0)
/*
 * An initialized-but-unlinked node stores its pool pointer with the low
 * bit set in uln_next; inserts verify this marker to catch uninitialized
 * or foreign-pool nodes.
 */
58 #define POOL_TO_MARKER(pp) ((void *)((uintptr_t)(pp) | 1))
/*
 * Dummy head of the global circular list of pools, and the lock that
 * protects it (also taken for fork1()-safety, see lockup/release below).
 */
60 static uu_list_pool_t uu_null_lpool = { &uu_null_lpool, &uu_null_lpool };
61 static pthread_mutex_t uu_lpool_list_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * Create a list pool: the shared descriptor (name, object size, node
 * offset, comparison function, debug flag) that individual lists are
 * later created from.  Returns the new pool, or NULL with uu_error()
 * set on invalid arguments, unknown flags, or allocation failure.
 */
64 uu_list_pool_create(const char *name, size_t objsize,
65     size_t nodeoffset, uu_compare_fn_t *compare_func, uint32_t flags)
67 	uu_list_pool_t *pp, *next, *prev;
/* Reject bad names and nodes that would not fit inside the object. */
70 	    uu_check_name(name, UU_NAME_DOMAIN) == -1 ||
71 	    nodeoffset + sizeof (uu_list_node_t) > objsize) {
72 		uu_set_error(UU_ERROR_INVALID_ARGUMENT);
/* Only UU_LIST_POOL_DEBUG is understood at pool level. */
76 	if (flags & ~UU_LIST_POOL_DEBUG) {
77 		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
81 	pp = uu_zalloc(sizeof (uu_list_pool_t));
83 		uu_set_error(UU_ERROR_NO_MEMORY);
/* strlcpy truncates over-long names but always NUL-terminates. */
87 	(void) strlcpy(pp->ulp_name, name, sizeof (pp->ulp_name));
88 	pp->ulp_nodeoffset = nodeoffset;
89 	pp->ulp_objsize = objsize;
90 	pp->ulp_cmp = compare_func;
91 	if (flags & UU_LIST_POOL_DEBUG)
/* Rolling per-pool counter seeding each list's index mark. */
93 	pp->ulp_last_index = 0;
95 	(void) pthread_mutex_init(&pp->ulp_lock, NULL);
/* The pool's own list-of-lists starts out as an empty (self-linked) ring. */
97 	pp->ulp_null_list.ul_next_enc = UU_PTR_ENCODE(&pp->ulp_null_list);
98 	pp->ulp_null_list.ul_prev_enc = UU_PTR_ENCODE(&pp->ulp_null_list);
/* Splice the new pool onto the tail of the global pool ring. */
100 	(void) pthread_mutex_lock(&uu_lpool_list_lock);
101 	pp->ulp_next = next = &uu_null_lpool;
102 	pp->ulp_prev = prev = next->ulp_prev;
105 	(void) pthread_mutex_unlock(&uu_lpool_list_lock);
/*
 * Destroy a pool.  Panics if any lists created from it still exist
 * (the pool's null-list ring is non-empty) and unlinks the pool from
 * the global pool ring under uu_lpool_list_lock.
 */
111 uu_list_pool_destroy(uu_list_pool_t *pp)
114 	if (pp->ulp_null_list.ul_next_enc !=
115 	    UU_PTR_ENCODE(&pp->ulp_null_list) ||
116 	    pp->ulp_null_list.ul_prev_enc !=
117 	    UU_PTR_ENCODE(&pp->ulp_null_list)) {
118 		uu_panic("uu_list_pool_destroy: Pool \"%.*s\" (%p) has "
119 		    "outstanding lists, or is corrupt.\n",
120 		    sizeof (pp->ulp_name), pp->ulp_name, pp);
123 	(void) pthread_mutex_lock(&uu_lpool_list_lock);
124 	pp->ulp_next->ulp_prev = pp->ulp_prev;
125 	pp->ulp_prev->ulp_next = pp->ulp_next;
126 	(void) pthread_mutex_unlock(&uu_lpool_list_lock);
/*
 * Initialize a node embedded in object `base`.  In debug mode, panics
 * if the node lies outside the object or at the wrong offset for this
 * pool.  Stamps uln_next with the pool marker so a later insert can
 * detect uninitialized or wrong-pool nodes.
 */
133 uu_list_node_init(void *base, uu_list_node_t *np_arg, uu_list_pool_t *pp)
135 	uu_list_node_impl_t *np = (uu_list_node_impl_t *)np_arg;
138 		uintptr_t offset = (uintptr_t)np - (uintptr_t)base;
139 		if (offset + sizeof (*np) > pp->ulp_objsize) {
140 			uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): "
141 			    "offset %ld doesn't fit in object (size %ld)\n",
142 			    base, np, pp, pp->ulp_name, offset,
145 		if (offset != pp->ulp_nodeoffset) {
146 			uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): "
147 			    "offset %ld doesn't match pool's offset (%ld)\n",
148 			    base, np, pp, pp->ulp_name, offset,
/* Marker encodes "initialized against pool pp, not on any list". */
152 	np->uln_next = POOL_TO_MARKER(pp);
/*
 * Finalize a node.  In debug mode, panics on double-fini (both link
 * pointers already NULL) and on nodes that are corrupt or still linked
 * into a list (uln_next is not this pool's marker).
 */
157 uu_list_node_fini(void *base, uu_list_node_t *np_arg, uu_list_pool_t *pp)
159 	uu_list_node_impl_t *np = (uu_list_node_impl_t *)np_arg;
162 		if (np->uln_next == NULL &&
163 		    np->uln_prev == NULL) {
164 			uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): "
165 			    "node already finied\n",
166 			    base, np_arg, pp, pp->ulp_name);
168 		if (np->uln_next != POOL_TO_MARKER(pp) ||
169 		    np->uln_prev != NULL) {
170 			uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): "
171 			    "node corrupt or on list\n",
172 			    base, np_arg, pp, pp->ulp_name);
/*
 * Create a list from pool `pp`.  UU_LIST_SORTED requires the pool to
 * have a comparison function (panic in debug mode, UU_ERROR_NOT_SUPPORTED
 * otherwise).  The new list gets a fresh index mark from the pool's
 * rolling counter, self-linked node and walker sentinels (empty ring),
 * and is linked onto the pool's list of lists under ulp_lock.
 */
180 uu_list_create(uu_list_pool_t *pp, void *parent, uint32_t flags)
182 	uu_list_t *lp, *next, *prev;
184 	if (flags & ~(UU_LIST_DEBUG | UU_LIST_SORTED)) {
185 		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
189 	if ((flags & UU_LIST_SORTED) && pp->ulp_cmp == NULL) {
191 			uu_panic("uu_list_create(%p, ...): requested "
192 			    "UU_LIST_SORTED, but pool has no comparison func\n",
194 		uu_set_error(UU_ERROR_NOT_SUPPORTED);
198 	lp = uu_zalloc(sizeof (*lp));
200 		uu_set_error(UU_ERROR_NO_MEMORY);
205 	lp->ul_parent_enc = UU_PTR_ENCODE(parent);
206 	lp->ul_offset = pp->ulp_nodeoffset;
/* Per-list debug can be forced by the pool or requested per list. */
207 	lp->ul_debug = pp->ulp_debug || (flags & UU_LIST_DEBUG);
208 	lp->ul_sorted = (flags & UU_LIST_SORTED);
/* Take the next mark from the pool counter; cycles 1..INDEX_MAX. */
210 	lp->ul_index = (pp->ulp_last_index = INDEX_NEXT(pp->ulp_last_index));
/* Empty list: the sentinel node points at itself in both directions. */
212 	lp->ul_null_node.uln_next = &lp->ul_null_node;
213 	lp->ul_null_node.uln_prev = &lp->ul_null_node;
/* Likewise the sentinel for the ring of registered walkers. */
215 	lp->ul_null_walk.ulw_next = &lp->ul_null_walk;
216 	lp->ul_null_walk.ulw_prev = &lp->ul_null_walk;
/* Splice the list onto the tail of the pool's list-of-lists ring. */
218 	(void) pthread_mutex_lock(&pp->ulp_lock);
219 	next = &pp->ulp_null_list;
220 	prev = UU_PTR_DECODE(next->ul_prev_enc);
221 	lp->ul_next_enc = UU_PTR_ENCODE(next);
222 	lp->ul_prev_enc = UU_PTR_ENCODE(prev);
223 	next->ul_prev_enc = UU_PTR_ENCODE(lp);
224 	prev->ul_next_enc = UU_PTR_ENCODE(lp);
225 	(void) pthread_mutex_unlock(&pp->ulp_lock);
/*
 * Destroy a list.  Debug-mode panics if the list is non-empty, if the
 * element count disagrees with the (empty) ring, or if walkers are still
 * registered.  Unlinks the list from its pool under ulp_lock and poisons
 * the encoded link pointers.
 */
231 uu_list_destroy(uu_list_t *lp)
233 	uu_list_pool_t *pp = lp->ul_pool;
236 		if (lp->ul_null_node.uln_next != &lp->ul_null_node ||
237 		    lp->ul_null_node.uln_prev != &lp->ul_null_node) {
238 			uu_panic("uu_list_destroy(%p):  list not empty\n",
241 		if (lp->ul_numnodes != 0) {
242 			uu_panic("uu_list_destroy(%p):  numnodes is nonzero, "
243 			    "but list is empty\n", lp);
245 		if (lp->ul_null_walk.ulw_next != &lp->ul_null_walk ||
246 		    lp->ul_null_walk.ulw_prev != &lp->ul_null_walk) {
247 			uu_panic("uu_list_destroy(%p):  outstanding walkers\n",
252 	(void) pthread_mutex_lock(&pp->ulp_lock);
253 	UU_LIST_PTR(lp->ul_next_enc)->ul_prev_enc = lp->ul_prev_enc;
254 	UU_LIST_PTR(lp->ul_prev_enc)->ul_next_enc = lp->ul_next_enc;
255 	(void) pthread_mutex_unlock(&pp->ulp_lock);
/* Poison the links so use-after-destroy is detectable. */
256 	lp->ul_prev_enc = UU_PTR_ENCODE(NULL);
257 	lp->ul_next_enc = UU_PTR_ENCODE(NULL);
/*
 * Internal insert of node `np` between `prev` and `next`.  In debug
 * mode, verifies that prev/next really are adjacent and that np carries
 * this pool's marker (i.e. is initialized and not already on a list).
 * Bumps ul_index so outstanding uu_list_index_ts become stale.
 */
263 list_insert(uu_list_t *lp, uu_list_node_impl_t *np, uu_list_node_impl_t *prev,
264     uu_list_node_impl_t *next)
267 		if (next->uln_prev != prev || prev->uln_next != next)
268 			uu_panic("insert(%p): internal error: %p and %p not "
269 			    "neighbors\n", lp, next, prev);
271 		if (np->uln_next != POOL_TO_MARKER(lp->ul_pool) ||
272 		    np->uln_prev != NULL) {
273 			uu_panic("insert(%p): elem %p node %p corrupt, "
274 			    "not initialized, or already in a list.\n",
275 			    lp, NODE_TO_ELEM(lp, np), np);
278 	 * invalidate outstanding uu_list_index_ts.
280 	lp->ul_index = INDEX_NEXT(lp->ul_index);
/*
 * Insert `elem` at the location described by index `idx` (from
 * uu_list_find() or uu_list_index_*()).  A NULL-decoding index means
 * "end of list" (the sentinel node).  Debug mode panics on stale or
 * malformed indexes before inserting immediately before the indexed node.
 */
291 uu_list_insert(uu_list_t *lp, void *elem, uu_list_index_t idx)
293 	uu_list_node_impl_t *np;
295 	np = INDEX_TO_NODE(idx);
297 		np = &lp->ul_null_node;
300 		if (!INDEX_VALID(lp, idx))
301 			uu_panic("uu_list_insert(%p, %p, %p): %s\n",
303 			    INDEX_CHECK(idx)? "outdated index" :
305 		if (np->uln_prev == NULL)
306 			uu_panic("uu_list_insert(%p, %p, %p): out-of-date "
307 			    "index\n", lp, elem, idx);
310 	list_insert(lp, ELEM_TO_NODE(lp, elem), np->uln_prev, np);
/*
 * Linear search for `elem` using the pool's comparison function (fails
 * with UU_ERROR_NOT_SUPPORTED if the pool has none).  On a match, stores
 * the matching node's index in *out.  On a sorted list the scan stops at
 * the first larger element, so *out is a valid insertion point either
 * way; "not found" stores an index that decodes to NULL (list end).
 */
314 uu_list_find(uu_list_t *lp, void *elem, void *private, uu_list_index_t *out)
316 	int sorted = lp->ul_sorted;
317 	uu_compare_fn_t *func = lp->ul_pool->ulp_cmp;
318 	uu_list_node_impl_t *np;
323 		uu_set_error(UU_ERROR_NOT_SUPPORTED);
326 	for (np = lp->ul_null_node.uln_next; np != &lp->ul_null_node;
328 		void *ep = NODE_TO_ELEM(lp, np);
329 		int cmp = func(ep, elem, private);
332 				*out = NODE_TO_INDEX(lp, np);
/* Sorted list: passed the insertion point, stop early. */
335 		if (sorted && cmp > 0) {
337 				*out = NODE_TO_INDEX(lp, np);
/* Not found: index of NULL still carries the mark, decodes to list end. */
342 		*out = NODE_TO_INDEX(lp, 0);
/*
 * Return the element at or after index `idx`, or NULL at the end of the
 * list.  A NULL-decoding index maps to the sentinel (list end).  Debug
 * mode panics on stale or out-of-date indexes.
 */
347 uu_list_nearest_next(uu_list_t *lp, uu_list_index_t idx)
349 	uu_list_node_impl_t *np = INDEX_TO_NODE(idx);
352 		np = &lp->ul_null_node;
355 		if (!INDEX_VALID(lp, idx))
356 			uu_panic("uu_list_nearest_next(%p, %p): %s\n",
357 			    lp, idx, INDEX_CHECK(idx)? "outdated index" :
359 		if (np->uln_prev == NULL)
360 			uu_panic("uu_list_nearest_next(%p, %p): out-of-date "
364 	if (np == &lp->ul_null_node)
367 		return (NODE_TO_ELEM(lp, np));
/*
 * Return the element strictly before index `idx` (note the np->uln_prev
 * step), or NULL if that would be the sentinel.  Same index validation
 * as uu_list_nearest_next().
 */
371 uu_list_nearest_prev(uu_list_t *lp, uu_list_index_t idx)
373 	uu_list_node_impl_t *np = INDEX_TO_NODE(idx);
376 		np = &lp->ul_null_node;
379 		if (!INDEX_VALID(lp, idx))
380 			uu_panic("uu_list_nearest_prev(%p, %p): %s\n",
381 			    lp, idx, INDEX_CHECK(idx)? "outdated index" :
383 		if (np->uln_prev == NULL)
384 			uu_panic("uu_list_nearest_prev(%p, %p): out-of-date "
388 	if ((np = np->uln_prev) == &lp->ul_null_node)
391 	return (NODE_TO_ELEM(lp, np));
/*
 * Initialize walker state: record robustness and direction, and point
 * ulw_next_result at the first (or, reversed, last) element.  Robust
 * walkers — and all walkers on debug lists — are additionally linked
 * onto the list's walker ring so uu_list_remove() can advance or detect
 * them.
 */
395 list_walk_init(uu_list_walk_t *wp, uu_list_t *lp, uint32_t flags)
397 	uu_list_walk_t *next, *prev;
399 	int robust = (flags & UU_WALK_ROBUST);
400 	int direction = (flags & UU_WALK_REVERSE)? -1 : 1;
402 	(void) memset(wp, 0, sizeof (*wp));
404 	wp->ulw_robust = robust;
405 	wp->ulw_dir = direction;
407 		wp->ulw_next_result = lp->ul_null_node.uln_next;
409 		wp->ulw_next_result = lp->ul_null_node.uln_prev;
411 	if (lp->ul_debug || robust) {
412 		wp->ulw_next = next = &lp->ul_null_walk;
413 		wp->ulw_prev = prev = next->ulw_prev;
/*
 * Advance the walker one step: return the pending result node and stash
 * its successor (direction-dependent) as the next result.  Reaching the
 * sentinel ends the walk (NULL result — return on the elided line 426,
 * not visible here).
 */
419 static uu_list_node_impl_t *
420 list_walk_advance(uu_list_walk_t *wp, uu_list_t *lp)
422 	uu_list_node_impl_t *np = wp->ulw_next_result;
423 	uu_list_node_impl_t *next;
425 	if (np == &lp->ul_null_node)
428 	next = (wp->ulw_dir > 0)? np->uln_next : np->uln_prev;
430 	wp->ulw_next_result = next;
/*
 * Tear down walker state: unlink it from the list's walker ring if it
 * was registered (ulw_next non-NULL), and clear the pending result.
 */
435 list_walk_fini(uu_list_walk_t *wp)
437 	/* GLXXX debugging? */
438 	if (wp->ulw_next != NULL) {
439 		wp->ulw_next->ulw_prev = wp->ulw_prev;
440 		wp->ulw_prev->ulw_next = wp->ulw_next;
445 	wp->ulw_next_result = NULL;
/*
 * Start an explicit (caller-driven) walk.  Allocates a walker, rejects
 * unknown flags, and initializes it via list_walk_init().  Returns NULL
 * with uu_error() set on failure.
 */
449 uu_list_walk_start(uu_list_t *lp, uint32_t flags)
453 	if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
454 		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
458 	wp = uu_zalloc(sizeof (*wp));
460 		uu_set_error(UU_ERROR_NO_MEMORY);
464 	list_walk_init(wp, lp, flags);
/*
 * Return the next element of an explicit walk, or NULL when exhausted
 * (list_walk_advance() returns the sentinel-terminated NULL).
 */
469 uu_list_walk_next(uu_list_walk_t *wp)
471 	uu_list_t *lp = wp->ulw_list;
472 	uu_list_node_impl_t *np = list_walk_advance(wp, lp);
477 	return (NODE_TO_ELEM(lp, np));
/*
 * End an explicit walk; body elided here — presumably finalizes and
 * frees the walker allocated by uu_list_walk_start() (TODO confirm
 * against the full source).
 */
481 uu_list_walk_end(uu_list_walk_t *wp)
/*
 * Callback-driven walk: invoke `func` on each element until it returns
 * something other than UU_WALK_NEXT.  Robust or debug walks go through
 * a registered stack walker (safe against removal by the callback);
 * otherwise the node ring is traversed directly, forward or reverse.
 * A callback failure sets UU_ERROR_CALLBACK_FAILED.
 */
488 uu_list_walk(uu_list_t *lp, uu_walk_fn_t *func, void *private, uint32_t flags)
490 	uu_list_node_impl_t *np;
492 	int status = UU_WALK_NEXT;
494 	int robust = (flags & UU_WALK_ROBUST);
495 	int reverse = (flags & UU_WALK_REVERSE);
497 	if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
498 		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
/* Registered walker on the stack: survives element removal in func. */
502 	if (lp->ul_debug || robust) {
503 		uu_list_walk_t my_walk;
506 		list_walk_init(&my_walk, lp, flags);
507 		while (status == UU_WALK_NEXT &&
508 		    (e = uu_list_walk_next(&my_walk)) != NULL)
509 			status = (*func)(e, private);
510 		list_walk_fini(&my_walk);
/* Fast path: direct traversal, forward... */
513 			for (np = lp->ul_null_node.uln_next;
514 			    status == UU_WALK_NEXT && np != &lp->ul_null_node;
516 				status = (*func)(NODE_TO_ELEM(lp, np), private);
/* ...or reverse. */
519 			for (np = lp->ul_null_node.uln_prev;
520 			    status == UU_WALK_NEXT && np != &lp->ul_null_node;
522 				status = (*func)(NODE_TO_ELEM(lp, np), private);
528 		uu_set_error(UU_ERROR_CALLBACK_FAILED);
/*
 * Remove `elem` from the list.  Debug mode panics if the element is not
 * actually linked.  Bumps ul_index to invalidate outstanding indexes,
 * advances any robust walkers that were about to return this node, and
 * panics on active non-robust walkers (debug lists register them too).
 * Finally unlinks the node and restores its pool-marker state so it can
 * be re-inserted or finalized.
 */
533 uu_list_remove(uu_list_t *lp, void *elem)
535 	uu_list_node_impl_t *np = ELEM_TO_NODE(lp, elem);
539 		if (np->uln_prev == NULL)
540 			uu_panic("uu_list_remove(%p, %p): elem not on list\n",
543 		 * invalidate outstanding uu_list_index_ts.
545 		lp->ul_index = INDEX_NEXT(lp->ul_index);
549 	 * robust walkers must be advanced.  In debug mode, non-robust
550 	 * walkers are also on the list.  If there are any, it's an error.
552 	for (wp = lp->ul_null_walk.ulw_next; wp != &lp->ul_null_walk;
554 		if (wp->ulw_robust) {
555 			if (np == wp->ulw_next_result)
556 				(void) list_walk_advance(wp, lp);
557 		} else if (wp->ulw_next_result != NULL) {
558 			uu_panic("uu_list_remove(%p, %p): active non-robust "
559 			    "walker\n", lp, elem);
563 	np->uln_next->uln_prev = np->uln_prev;
564 	np->uln_prev->uln_next = np->uln_next;
/* Back to the initialized-but-unlinked state. */
568 	np->uln_next = POOL_TO_MARKER(lp->ul_pool);
/*
 * Iterator-style teardown: each call removes and returns the first
 * element (NULL when empty), for use in a while loop that frees
 * elements.  The cookie must start NULL; debug mode panics otherwise.
 */
573 uu_list_teardown(uu_list_t *lp, void **cookie)
578 	 * XXX: disable list modification until list is empty
580 	if (lp->ul_debug && *cookie != NULL)
581 		uu_panic("uu_list_teardown(%p, %p): unexpected cookie\n", lp,
584 	ep = uu_list_first(lp);
586 		uu_list_remove(lp, ep);
/*
 * Insert `elem` immediately before `target` (NULL target means end of
 * list).  Not allowed on sorted lists (panic in debug, NOT_SUPPORTED
 * otherwise).  Debug mode panics if target is not actually on a list.
 */
591 uu_list_insert_before(uu_list_t *lp, void *target, void *elem)
593 	uu_list_node_impl_t *np = ELEM_TO_NODE(lp, target);
596 		np = &lp->ul_null_node;
599 		if (np->uln_prev == NULL)
600 			uu_panic("uu_list_insert_before(%p, %p, %p): %p is "
601 			    "not currently on a list\n",
602 			    lp, target, elem, target);
606 			uu_panic("uu_list_insert_before(%p, ...): list is "
607 			    "UU_LIST_SORTED\n", lp);
608 		uu_set_error(UU_ERROR_NOT_SUPPORTED);
612 	list_insert(lp, ELEM_TO_NODE(lp, elem), np->uln_prev, np);
/*
 * Insert `elem` immediately after `target` (NULL target means start of
 * list).  Mirror image of uu_list_insert_before(): same sorted-list and
 * not-on-list checks, but links after np instead of before.
 */
617 uu_list_insert_after(uu_list_t *lp, void *target, void *elem)
619 	uu_list_node_impl_t *np = ELEM_TO_NODE(lp, target);
622 		np = &lp->ul_null_node;
625 		if (np->uln_prev == NULL)
626 			uu_panic("uu_list_insert_after(%p, %p, %p): %p is "
627 			    "not currently on a list\n",
628 			    lp, target, elem, target);
632 			uu_panic("uu_list_insert_after(%p, ...): list is "
633 			    "UU_LIST_SORTED\n", lp);
634 		uu_set_error(UU_ERROR_NOT_SUPPORTED);
638 	list_insert(lp, ELEM_TO_NODE(lp, elem), np, np->uln_next);
/* Return the cached element count; maintained by insert/remove. */
643 uu_list_numnodes(uu_list_t *lp)
645 	return (lp->ul_numnodes);
/* Return the first element, or NULL (elided line 653) if the list is empty. */
649 uu_list_first(uu_list_t *lp)
651 	uu_list_node_impl_t *n = lp->ul_null_node.uln_next;
652 	if (n == &lp->ul_null_node)
654 	return (NODE_TO_ELEM(lp, n));
/* Return the last element, or NULL (elided line 662) if the list is empty. */
658 uu_list_last(uu_list_t *lp)
660 	uu_list_node_impl_t *n = lp->ul_null_node.uln_prev;
661 	if (n == &lp->ul_null_node)
663 	return (NODE_TO_ELEM(lp, n));
/*
 * Return the element after `elem`, or NULL at the end of the list.
 * (The uln_next step is on an elided line between 669 and 672.)
 */
667 uu_list_next(uu_list_t *lp, void *elem)
669 	uu_list_node_impl_t *n = ELEM_TO_NODE(lp, elem);
672 	if (n == &lp->ul_null_node)
674 	return (NODE_TO_ELEM(lp, n));
/*
 * Return the element before `elem`, or NULL at the start of the list.
 * (The uln_prev step is on an elided line between 680 and 683.)
 */
678 uu_list_prev(uu_list_t *lp, void *elem)
680 	uu_list_node_impl_t *n = ELEM_TO_NODE(lp, elem);
683 	if (n == &lp->ul_null_node)
685 	return (NODE_TO_ELEM(lp, n));
689  * called from uu_lockup() and uu_release(), as part of our fork1()-safety.
/*
 * Lockup half (function header line elided — presumably uu_list_lockup):
 * take the global pool-list lock, then every pool's lock, so no list
 * lock is held mid-operation across fork1().  Released in lock order by
 * uu_list_release() below.
 */
696 	(void) pthread_mutex_lock(&uu_lpool_list_lock);
697 	for (pp = uu_null_lpool.ulp_next; pp != &uu_null_lpool;
699 		(void) pthread_mutex_lock(&pp->ulp_lock);
/*
 * Release half of fork1()-safety: drop every pool's lock, then the
 * global pool-list lock acquired by the lockup routine above.
 */
703 uu_list_release(void)
707 	for (pp = uu_null_lpool.ulp_next; pp != &uu_null_lpool;
709 		(void) pthread_mutex_unlock(&pp->ulp_lock);
710 	(void) pthread_mutex_unlock(&uu_lpool_list_lock);