4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 #pragma ident "%Z%%M% %I% %E% SMI"
28 #include "libuutil_common.h"
/*
 * NOTE(review): this is a line-numbered listing with gaps (embedded
 * original line numbers jump, e.g. 36 -> 39); lines in between are
 * missing from this view.  Code below is left byte-identical.
 */
/* Sentinel head of the circular doubly-linked list of all avl pools. */
35 static uu_avl_pool_t uu_null_apool = { &uu_null_apool, &uu_null_apool };
/* Serializes walks/updates of the global pool list (uap_next/uap_prev). */
36 static pthread_mutex_t uu_apool_list_lock = PTHREAD_MUTEX_INITIALIZER;
39  * The index marks change on every insert and delete, to catch stale
42  * We leave the low bit alone, since the avl code uses it.
/*
 * Mask of the "mark" bits embedded in a uu_avl_index_t.  With an 8-byte
 * uintptr_t this is 6 (binary 110): marks cycle through the even values
 * 2, 4, 6, leaving bit 0 for the avl code (see comment above).
 */
44 #define INDEX_MAX (sizeof (uintptr_t) - 2)
/* Advance a mark to the next nonzero even value in [2, INDEX_MAX]. */
45 #define INDEX_NEXT(m) (((m) == INDEX_MAX)? 2 : ((m) + 2) & INDEX_MAX)
/* Strip the mark bits, recovering the raw avl_find()-style index. */
47 #define INDEX_DECODE(i) ((i) & ~INDEX_MAX)
/* Combine a raw avl index n with the tree's current mark. */
48 #define INDEX_ENCODE(p, n) (((n) & ~INDEX_MAX) | (p)->ua_index)
/* True iff the mark in i matches the tree's current mark (not stale). */
49 #define INDEX_VALID(p, i) (((i) & INDEX_MAX) == (p)->ua_index)
/* True iff i carries any mark at all, i.e. was produced by INDEX_ENCODE. */
50 #define INDEX_CHECK(i) (((i) & INDEX_MAX) != 0)
53  * When an element is inactive (not in a tree), we keep a marked pointer to
54  * its containing pool in its first word, and a NULL pointer in its second.
56  * On insert, we use these to verify that it comes from the correct pool.
/*
 * NOTE(review): the macro parameter is named "p" but the body references
 * "pp" -- it silently captures the caller's local named pp.  All visible
 * call sites do pass a variable named pp, so it works, but renaming a
 * caller's local would break it; the body should use (p) instead.
 */
58 #define NODE_ARRAY(p, n) ((uintptr_t *)((uintptr_t)(n) + \
59 (pp)->uap_nodeoffset))
/* Tag a pool pointer (low bit set) for storage in an inactive node. */
61 #define POOL_TO_MARKER(pp) (((uintptr_t)(pp) | 1))
/* Value stored in both node words by uu_avl_node_fini() to catch reuse. */
63 #define DEAD_MARKER 0xc4
/*
 * Create an avl pool: a template (object size, node offset, comparator)
 * shared by the avl trees later created from it.
 *
 * name         - must satisfy uu_check_name(name, UU_NAME_DOMAIN)
 * objsize      - size of each element object
 * nodeoffset   - offset of the uu_avl_node_t inside each object; the
 *                node must fit entirely within the object
 * compare_func - three-way comparator, must be non-NULL
 * flags        - only UU_AVL_POOL_DEBUG is accepted
 *
 * On failure sets uu_error (INVALID_ARGUMENT / UNKNOWN_FLAG / NO_MEMORY)
 * and (per the visible error paths) returns NULL; returns the new pool
 * on success.  NOTE(review): listing has gaps -- the return type, the
 * "return (NULL)" / "return (pp)" lines and some braces are not visible.
 */
66 uu_avl_pool_create(const char *name, size_t objsize, size_t nodeoffset,
67 uu_compare_fn_t *compare_func, uint32_t flags)
69 uu_avl_pool_t *pp, *next, *prev;
/* Argument validation (first condition line missing from this view). */
72 uu_check_name(name, UU_NAME_DOMAIN) == -1 ||
73 nodeoffset + sizeof (uu_avl_node_t) > objsize ||
74 compare_func == NULL) {
75 uu_set_error(UU_ERROR_INVALID_ARGUMENT);
79 if (flags & ~UU_AVL_POOL_DEBUG) {
80 uu_set_error(UU_ERROR_UNKNOWN_FLAG);
/* uu_zalloc() returns zeroed memory; NULL check line not visible. */
84 pp = uu_zalloc(sizeof (uu_avl_pool_t));
86 uu_set_error(UU_ERROR_NO_MEMORY);
90 (void) strlcpy(pp->uap_name, name, sizeof (pp->uap_name));
91 pp->uap_nodeoffset = nodeoffset;
92 pp->uap_objsize = objsize;
93 pp->uap_cmp = compare_func;
/* Debug-flag body (presumably pp->uap_debug = 1) not visible. */
94 if (flags & UU_AVL_POOL_DEBUG)
96 pp->uap_last_index = 0;
98 (void) pthread_mutex_init(&pp->uap_lock, NULL);
/* Empty circular list of this pool's avls, headed by uap_null_avl. */
100 pp->uap_null_avl.ua_next_enc = UU_PTR_ENCODE(&pp->uap_null_avl);
101 pp->uap_null_avl.ua_prev_enc = UU_PTR_ENCODE(&pp->uap_null_avl);
/*
 * Link the pool into the global pool list, just before the
 * uu_null_apool sentinel (the two back-link stores are not visible).
 */
103 (void) pthread_mutex_lock(&uu_apool_list_lock);
104 pp->uap_next = next = &uu_null_apool;
105 pp->uap_prev = prev = next->uap_prev;
108 (void) pthread_mutex_unlock(&uu_apool_list_lock);
/*
 * Destroy a pool.  Panics (in the visible debug path) if any avls
 * created from it still exist -- i.e. the pool's uap_null_avl list is
 * non-empty -- then unlinks the pool from the global list, destroys its
 * mutex, and (presumably, in a line not visible here) frees it.
 */
114 uu_avl_pool_destroy(uu_avl_pool_t *pp)
/* The enclosing "if (pp->uap_debug)" guard line is not visible. */
117 if (pp->uap_null_avl.ua_next_enc !=
118 UU_PTR_ENCODE(&pp->uap_null_avl) ||
119 pp->uap_null_avl.ua_prev_enc !=
120 UU_PTR_ENCODE(&pp->uap_null_avl)) {
121 uu_panic("uu_avl_pool_destroy: Pool \"%.*s\" (%p) has "
122 "outstanding avls, or is corrupt.\n",
123 (int)sizeof (pp->uap_name), pp->uap_name,
/* Standard doubly-linked-list unlink under the global list lock. */
127 (void) pthread_mutex_lock(&uu_apool_list_lock);
128 pp->uap_next->uap_prev = pp->uap_prev;
129 pp->uap_prev->uap_next = pp->uap_next;
130 (void) pthread_mutex_unlock(&uu_apool_list_lock);
131 (void) pthread_mutex_destroy(&pp->uap_lock);
/*
 * Initialize the avl node embedded in an object, marking it "inactive":
 * word 0 holds the marked pool pointer, word 1 (set on a line not
 * visible here) should be 0 -- exactly what uu_avl_insert() verifies.
 * In debug pools, first validates that np sits inside the object at the
 * pool's configured node offset.
 */
138 uu_avl_node_init(void *base, uu_avl_node_t *np, uu_avl_pool_t *pp)
140 uintptr_t *na = (uintptr_t *)np;
/* Debug checks (the "if (pp->uap_debug)" guard line is not visible). */
143 uintptr_t offset = (uintptr_t)np - (uintptr_t)base;
144 if (offset + sizeof (*np) > pp->uap_objsize) {
145 uu_panic("uu_avl_node_init(%p, %p, %p (\"%s\")): "
146 "offset %ld doesn't fit in object (size %ld)\n",
147 base, (void *)np, (void *)pp, pp->uap_name,
148 (long)offset, (long)pp->uap_objsize);
150 if (offset != pp->uap_nodeoffset) {
151 uu_panic("uu_avl_node_init(%p, %p, %p (\"%s\")): "
152 "offset %ld doesn't match pool's offset (%ld)\n",
153 base, (void *)np, (void *)pp, pp->uap_name,
/*
 * BUG(review): the message claims to print the "pool's offset" but the
 * argument passed is pp->uap_objsize; it should be pp->uap_nodeoffset.
 * The panic fires correctly, but its diagnostic value is wrong.
 */
154 (long)offset, (long)pp->uap_objsize);
158 na[0] = POOL_TO_MARKER(pp);
/*
 * Finalize an avl node.  In debug pools, panics on double-fini (both
 * words already DEAD_MARKER) and on a node that is corrupt, currently
 * in a tree, or initialized against a different pool (word 0 must be
 * this pool's marker and word 1 must be 0).  The final stores writing
 * DEAD_MARKER into both words are not visible in this listing.
 */
163 uu_avl_node_fini(void *base, uu_avl_node_t *np, uu_avl_pool_t *pp)
165 uintptr_t *na = (uintptr_t *)np;
/* Debug-only checks (the guard line itself is not visible). */
168 if (na[0] == DEAD_MARKER && na[1] == DEAD_MARKER) {
169 uu_panic("uu_avl_node_fini(%p, %p, %p (\"%s\")): "
170 "node already finied\n",
171 base, (void *)np, (void *)pp, pp->uap_name);
173 if (na[0] != POOL_TO_MARKER(pp) || na[1] != 0) {
174 uu_panic("uu_avl_node_fini(%p, %p, %p (\"%s\")): "
175 "node corrupt, in tree, or in different pool\n",
176 base, (void *)np, (void *)pp, pp->uap_name);
/*
 * Adapter state for driving avl_find() with the pool's three-argument
 * comparator.  The "left" argument avl_find passes is really a pointer
 * to this struct; ac_right is the element being searched for and
 * ac_private the caller's opaque argument.  Remaining fields
 * (ac_right, ac_private, ac_found) are on lines not visible here.
 */
185 struct uu_avl_node_compare_info {
186 uu_compare_fn_t *ac_compare;
/*
 * avl-style comparator shim: l is the info struct, r a tree element.
 * Records the first element that compares equal in ac_found.  The
 * visible panic in uu_avl_find() ("avl_find succeeded") implies this
 * function never returns 0 -- presumably steering avl_find to "not
 * found" while capturing the match; the return paths are not visible.
 */
193 uu_avl_node_compare(const void *l, const void *r)
195 struct uu_avl_node_compare_info *info =
196 (struct uu_avl_node_compare_info *)l;
198 int res = info->ac_compare(r, info->ac_right, info->ac_private);
/* Remember only the first equal element (the res==0 guard not visible). */
201 if (info->ac_found == NULL)
202 info->ac_found = (void *)r;
/*
 * Create an avl tree from a pool.  Only UU_AVL_DEBUG is accepted in
 * flags; debug mode is inherited from the pool or requested per-tree.
 * On failure sets uu_error (UNKNOWN_FLAG / NO_MEMORY); the return
 * statements and the NULL check after uu_zalloc are not visible here.
 */
211 uu_avl_create(uu_avl_pool_t *pp, void *parent, uint32_t flags)
213 uu_avl_t *ap, *next, *prev;
215 if (flags & ~UU_AVL_DEBUG) {
216 uu_set_error(UU_ERROR_UNKNOWN_FLAG);
220 ap = uu_zalloc(sizeof (*ap));
222 uu_set_error(UU_ERROR_NO_MEMORY);
227 ap->ua_parent_enc = UU_PTR_ENCODE(parent);
228 ap->ua_debug = pp->uap_debug || (flags & UU_AVL_DEBUG);
/* Seed this tree's index mark from the pool's rolling counter. */
229 ap->ua_index = (pp->uap_last_index = INDEX_NEXT(pp->uap_last_index));
/* Underlying avl tree; the node-offset argument line is not visible. */
231 avl_create(&ap->ua_tree, &uu_avl_node_compare, pp->uap_objsize,
/* Empty circular list of active walkers, headed by ua_null_walk. */
234 ap->ua_null_walk.uaw_next = &ap->ua_null_walk;
235 ap->ua_null_walk.uaw_prev = &ap->ua_null_walk;
/* Link the new avl into the pool's avl list, before the sentinel. */
237 (void) pthread_mutex_lock(&pp->uap_lock);
238 next = &pp->uap_null_avl;
239 prev = UU_PTR_DECODE(next->ua_prev_enc);
240 ap->ua_next_enc = UU_PTR_ENCODE(next);
241 ap->ua_prev_enc = UU_PTR_ENCODE(prev);
242 next->ua_prev_enc = UU_PTR_ENCODE(ap);
243 prev->ua_next_enc = UU_PTR_ENCODE(ap);
244 (void) pthread_mutex_unlock(&pp->uap_lock);
/*
 * Destroy an avl tree.  In debug mode (guard line not visible) panics
 * if the tree still has nodes or active walkers; then unlinks the tree
 * from its pool's list, poisons its list links with encoded NULLs, and
 * destroys the underlying avl tree.  The final uu_free(ap) -- if
 * present upstream -- is on a line not visible here.
 */
250 uu_avl_destroy(uu_avl_t *ap)
252 uu_avl_pool_t *pp = ap->ua_pool;
255 if (avl_numnodes(&ap->ua_tree) != 0) {
256 uu_panic("uu_avl_destroy(%p): tree not empty\n",
/* Walker list must be empty (only the sentinel pointing at itself). */
259 if (ap->ua_null_walk.uaw_next != &ap->ua_null_walk ||
260 ap->ua_null_walk.uaw_prev != &ap->ua_null_walk) {
261 uu_panic("uu_avl_destroy(%p): outstanding walkers\n",
265 (void) pthread_mutex_lock(&pp->uap_lock);
266 UU_AVL_PTR(ap->ua_next_enc)->ua_prev_enc = ap->ua_prev_enc;
267 UU_AVL_PTR(ap->ua_prev_enc)->ua_next_enc = ap->ua_next_enc;
268 (void) pthread_mutex_unlock(&pp->uap_lock);
269 ap->ua_prev_enc = UU_PTR_ENCODE(NULL);
270 ap->ua_next_enc = UU_PTR_ENCODE(NULL);
273 avl_destroy(&ap->ua_tree);
/* Number of elements currently in the tree (thin avl_numnodes wrapper). */
279 uu_avl_numnodes(uu_avl_t *ap)
281 return (avl_numnodes(&ap->ua_tree));
/* First (lowest-sorting) element, or NULL if the tree is empty. */
285 uu_avl_first(uu_avl_t *ap)
287 return (avl_first(&ap->ua_tree));
/* Last (highest-sorting) element, or NULL if the tree is empty. */
291 uu_avl_last(uu_avl_t *ap)
293 return (avl_last(&ap->ua_tree));
/* In-order successor of node, or NULL if node is the last element. */
297 uu_avl_next(uu_avl_t *ap, void *node)
299 return (AVL_NEXT(&ap->ua_tree, node));
/* In-order predecessor of node, or NULL if node is the first element. */
303 uu_avl_prev(uu_avl_t *ap, void *node)
305 return (AVL_PREV(&ap->ua_tree, node));
/*
 * Initialize a walker over ap.  Direction and robustness come from
 * flags (UU_WALK_REVERSE, UU_WALK_ROBUST); the first result is the
 * tree's first or last element accordingly.  Robust walkers -- and in
 * debug mode all walkers -- are linked onto the tree's walker list so
 * uu_avl_remove() can advance or detect them.
 */
309 _avl_walk_init(uu_avl_walk_t *wp, uu_avl_t *ap, uint32_t flags)
311 uu_avl_walk_t *next, *prev;
313 int robust = (flags & UU_WALK_ROBUST);
314 int direction = (flags & UU_WALK_REVERSE)? -1 : 1;
316 (void) memset(wp, 0, sizeof (*wp));
/* The wp->uaw_avl = ap assignment is on a line not visible here. */
318 wp->uaw_robust = robust;
319 wp->uaw_dir = direction;
/* Forward walks start at the first element... */
322 wp->uaw_next_result = avl_first(&ap->ua_tree);
/* ...reverse walks at the last (the if/else lines are not visible). */
324 wp->uaw_next_result = avl_last(&ap->ua_tree);
/* Link onto the walker list, just after the sentinel. */
326 if (ap->ua_debug || robust) {
327 wp->uaw_next = next = &ap->ua_null_walk;
328 wp->uaw_prev = prev = next->uaw_prev;
/*
 * Return the walker's pending result and precompute the next one in
 * the walk direction (AVL_NEXT for forward, presumably AVL_PREV on the
 * continuation line not visible here).  The NULL-at-end check and the
 * "return (np)" are also on lines missing from this listing.
 */
335 _avl_walk_advance(uu_avl_walk_t *wp, uu_avl_t *ap)
337 void *np = wp->uaw_next_result;
339 avl_tree_t *t = &ap->ua_tree;
344 wp->uaw_next_result = (wp->uaw_dir > 0)? AVL_NEXT(t, np) :
/*
 * Tear down a walker: if it was linked onto the tree's walker list
 * (uaw_next non-NULL), unlink it, then clear its pending result so a
 * stale walker cannot yield an element.
 */
351 _avl_walk_fini(uu_avl_walk_t *wp)
353 if (wp->uaw_next != NULL) {
354 wp->uaw_next->uaw_prev = wp->uaw_prev;
355 wp->uaw_prev->uaw_next = wp->uaw_next;
/* The uaw_next/uaw_prev = NULL stores are on lines not visible here. */
360 wp->uaw_next_result = NULL;
/*
 * Heap-allocated walker API: validate flags, allocate a walker, and
 * initialize it with _avl_walk_init().  On failure sets uu_error
 * (UNKNOWN_FLAG / NO_MEMORY) and returns NULL; the NULL check after
 * uu_zalloc and both return statements are not visible in this listing.
 */
364 uu_avl_walk_start(uu_avl_t *ap, uint32_t flags)
368 if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
369 uu_set_error(UU_ERROR_UNKNOWN_FLAG);
373 wp = uu_zalloc(sizeof (*wp));
375 uu_set_error(UU_ERROR_NO_MEMORY);
379 _avl_walk_init(wp, ap, flags);
/* Return the next element of the walk, or NULL when exhausted. */
384 uu_avl_walk_next(uu_avl_walk_t *wp)
386 return (_avl_walk_advance(wp, wp->uaw_avl));
/*
 * End a walk started with uu_avl_walk_start().  Body not visible in
 * this listing; presumably _avl_walk_fini(wp) plus uu_free(wp) --
 * confirm against the full source.
 */
390 uu_avl_walk_end(uu_avl_walk_t *wp)
/*
 * Callback-style walk: invoke func(elem, private) on each element
 * until it returns something other than UU_WALK_NEXT or the tree is
 * exhausted, using a stack-allocated walker.  Sets
 * UU_ERROR_CALLBACK_FAILED when the callback aborted the walk (the
 * surrounding status test and the return statement are not visible).
 */
397 uu_avl_walk(uu_avl_t *ap, uu_walk_fn_t *func, void *private, uint32_t flags)
400 uu_avl_walk_t my_walk;
402 int status = UU_WALK_NEXT;
404 if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
405 uu_set_error(UU_ERROR_UNKNOWN_FLAG);
409 _avl_walk_init(&my_walk, ap, flags);
410 while (status == UU_WALK_NEXT &&
411 (e = _avl_walk_advance(&my_walk, ap)) != NULL)
412 status = (*func)(e, private);
413 _avl_walk_fini(&my_walk);
417 uu_set_error(UU_ERROR_CALLBACK_FAILED);
/*
 * Remove elem from the tree.  Bumps the tree's index mark so any
 * outstanding uu_avl_index_ts become invalid, advances robust walkers
 * parked on elem, panics on active non-robust walkers (debug mode),
 * removes the node, and re-marks it inactive (na[0] = pool marker;
 * the na[1] = 0 store is on a line not visible here).
 */
422 uu_avl_remove(uu_avl_t *ap, void *elem)
425 uu_avl_pool_t *pp = ap->ua_pool;
426 uintptr_t *na = NODE_ARRAY(pp, elem);
430 * invalidate outstanding uu_avl_index_ts.
432 ap->ua_index = INDEX_NEXT(ap->ua_index);
436 * Robust walkers must be advanced, if we are removing the node
437 * they are currently using. In debug mode, non-robust walkers
438 * are also on the walker list.
/* The loop-increment line (wp = wp->uaw_next) is not visible here. */
440 for (wp = ap->ua_null_walk.uaw_next; wp != &ap->ua_null_walk;
442 if (wp->uaw_robust) {
443 if (elem == wp->uaw_next_result)
444 (void) _avl_walk_advance(wp, ap);
445 } else if (wp->uaw_next_result != NULL) {
446 uu_panic("uu_avl_remove(%p, %p): active non-robust "
447 "walker\n", (void *)ap, elem);
451 avl_remove(&ap->ua_tree, elem);
453 na[0] = POOL_TO_MARKER(pp);
/*
 * Fast bulk-empty of a tree via avl_destroy_nodes(): each call yields
 * the next element (NULL when done) and re-marks its node inactive so
 * it can be reinserted or finalized.  *cookie must start at NULL and
 * is opaque avl_destroy_nodes() state.  The NULL check on elem and the
 * return statement are on lines not visible in this listing.
 */
458 uu_avl_teardown(uu_avl_t *ap, void **cookie)
460 void *elem = avl_destroy_nodes(&ap->ua_tree, cookie);
463 uu_avl_pool_t *pp = ap->ua_pool;
464 uintptr_t *na = NODE_ARRAY(pp, elem);
466 na[0] = POOL_TO_MARKER(pp);
/*
 * Find an element equal to elem (per the pool's comparator, with the
 * caller's private argument).  Drives avl_find() through the
 * uu_avl_node_compare shim, which records the first match in ac_found
 * instead of letting avl_find succeed -- hence the debug panic if
 * avl_find ever returns non-NULL.  If out is non-NULL it receives the
 * insertion index, tagged with the tree's current mark via
 * INDEX_ENCODE (the "if (out != NULL)" guard is not visible here).
 * Returns the matching element, or NULL if none.
 */
473 uu_avl_find(uu_avl_t *ap, void *elem, void *private, uu_avl_index_t *out)
475 struct uu_avl_node_compare_info info;
478 info.ac_compare = ap->ua_pool->uap_cmp;
479 info.ac_private = private;
480 info.ac_right = elem;
481 info.ac_found = NULL;
483 result = avl_find(&ap->ua_tree, &info, out);
485 *out = INDEX_ENCODE(ap, *out);
487 if (ap->ua_debug && result != NULL)
488 uu_panic("uu_avl_find: internal error: avl_find succeeded\n");
490 return (info.ac_found);
/*
 * Insert elem at the position named by idx (obtained from a preceding
 * uu_avl_find() on the same tree with no intervening insert/remove).
 * Debug mode verifies the node's inactive marker words -- word 1 must
 * be 0 (its check line is not visible), word 0 must be this pool's
 * marker -- and that idx carries the tree's current index mark.
 * Inserting invalidates all other outstanding indexes by bumping the
 * mark, then strips the mark bits for the real avl_insert().
 */
494 uu_avl_insert(uu_avl_t *ap, void *elem, uu_avl_index_t idx)
497 uu_avl_pool_t *pp = ap->ua_pool;
498 uintptr_t *na = NODE_ARRAY(pp, elem);
/* Guard conditions for these two panics are on lines not visible. */
501 uu_panic("uu_avl_insert(%p, %p, %p): node already "
502 "in tree, or corrupt\n",
503 (void *)ap, elem, (void *)idx);
505 uu_panic("uu_avl_insert(%p, %p, %p): node not "
507 (void *)ap, elem, (void *)idx);
508 if (na[0] != POOL_TO_MARKER(pp))
509 uu_panic("uu_avl_insert(%p, %p, %p): node from "
510 "other pool, or corrupt\n",
511 (void *)ap, elem, (void *)idx);
512 if (!INDEX_VALID(ap, idx))
514 uu_panic("uu_avl_insert(%p, %p, %p): %s\n",
515 (void *)ap, elem, (void *)idx,
/* The alternative "invalid index" string is on a line not visible. */
516 INDEX_CHECK(idx)? "outdated index" :
520 * invalidate outstanding uu_avl_index_ts.
522 ap->ua_index = INDEX_NEXT(ap->ua_index);
524 avl_insert(&ap->ua_tree, elem, INDEX_DECODE(idx));
/*
 * Element immediately after the position named by idx (from a failed
 * uu_avl_find()), or NULL.  Debug mode panics on a stale or invalid
 * index before delegating to avl_nearest(..., AVL_AFTER).
 */
528 uu_avl_nearest_next(uu_avl_t *ap, uu_avl_index_t idx)
530 if (ap->ua_debug && !INDEX_VALID(ap, idx))
531 uu_panic("uu_avl_nearest_next(%p, %p): %s\n",
532 (void *)ap, (void *)idx, INDEX_CHECK(idx)?
533 "outdated index" : "invalid index");
534 return (avl_nearest(&ap->ua_tree, INDEX_DECODE(idx), AVL_AFTER));
/*
 * Element immediately before the position named by idx (from a failed
 * uu_avl_find()), or NULL.  Mirror of uu_avl_nearest_next(), using
 * AVL_BEFORE.
 */
538 uu_avl_nearest_prev(uu_avl_t *ap, uu_avl_index_t idx)
540 if (ap->ua_debug && !INDEX_VALID(ap, idx))
541 uu_panic("uu_avl_nearest_prev(%p, %p): %s\n",
542 (void *)ap, (void *)idx, INDEX_CHECK(idx)?
543 "outdated index" : "invalid index");
544 return (avl_nearest(&ap->ua_tree, INDEX_DECODE(idx), AVL_BEFORE));
548 * called from uu_lockup() and uu_release(), as part of our fork1()-safety.
/*
 * NOTE(review): the function headers are missing from this listing.
 * Based on the comment above, these bodies are presumably
 * uu_avl_lockup() and uu_avl_release() -- pre-/post-fork handlers that
 * take, then drop, every pool lock while holding the global list lock,
 * so no mutex is held mid-operation across fork1().  Confirm names and
 * the missing loop-increment lines against the full source.
 */
/* Lockup: global list lock first, then each pool lock, in list order. */
555 (void) pthread_mutex_lock(&uu_apool_list_lock);
556 for (pp = uu_null_apool.uap_next; pp != &uu_null_apool;
558 (void) pthread_mutex_lock(&pp->uap_lock);
/* Release: drop pool locks in the same order, then the list lock. */
566 for (pp = uu_null_apool.uap_next; pp != &uu_null_apool;
568 (void) pthread_mutex_unlock(&pp->uap_lock);
569 (void) pthread_mutex_unlock(&uu_apool_list_lock);