2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2004 Poul-Henning Kamp
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * Unit number allocation functions.
33 * These functions implement a mixed run-length/bitmap management of unit
34 * number spaces in a very memory efficient manner.
36 * Allocation policy is always lowest free number first.
38 * A return value of -1 signals that no more unit numbers are available.
40 * There is no cost associated with the range of unitnumbers, so unless
41 * the resource really is finite, specify INT_MAX to new_unrhdr() and
42 * forget about checking the return value.
44 * If a mutex is not provided when the unit number space is created, a
45 * default global mutex is used. The advantage to passing a mutex in, is
46 * that the alloc_unrl() function can be called with the mutex already
47 * held (it will not be released by alloc_unrl()).
49 * The allocation function alloc_unr{l}() never sleeps (but it may block on
50 * the mutex of course).
52 * Freeing a unit number may require allocating memory, and can therefore
53 * sleep so the free_unr() function does not come in a pre-locked variant.
55 * A userland test program is included.
57 * Memory usage is a very complex function of the exact allocation
58 * pattern, but always very compact:
59 * * For the very typical case where a single unbroken run of unit
60 * numbers are allocated 44 bytes are used on i386.
61 * * For a unit number space of 1000 units and the random pattern
62 * in the usermode test program included, the worst case usage
63 * was 252 bytes on i386 for 500 allocated and 500 free units.
64 * * For a unit number space of 10000 units and the random pattern
65 * in the usermode test program included, the worst case usage
66 * was 798 bytes on i386 for 5000 allocated and 5000 free units.
67 * * The worst case is where every other unit number is allocated and
68 * the rest are free. In that case 44 + N/4 bytes are used where
69 * N is the number of the highest unit allocated.
72 #include <sys/param.h>
73 #include <sys/types.h>
74 #include <sys/_unrhdr.h>
78 #include <sys/bitstring.h>
79 #include <sys/malloc.h>
80 #include <sys/kernel.h>
81 #include <sys/systm.h>
82 #include <sys/limits.h>
84 #include <sys/mutex.h>
87 * In theory it would be smarter to allocate the individual blocks
88 * with the zone allocator, but at this time the expectation is that
89 * there will typically not even be enough allocations to fill a single
90 * page, so we stick with malloc for now.
92 static MALLOC_DEFINE(M_UNIT, "Unitno", "Unit number allocation");
94 #define Malloc(foo) malloc(foo, M_UNIT, M_WAITOK | M_ZERO)
95 #define Free(foo) free(foo, M_UNIT)
97 static struct mtx unitmtx;
99 MTX_SYSINIT(unit, &unitmtx, "unit# allocation", MTX_DEF);
103 alloc_unr64(struct unrhdr64 *unr64)
108 item = unr64->counter++;
109 mtx_unlock(&unitmtx);
114 #else /* ...USERLAND */
116 #include <bitstring.h>
125 #define KASSERT(cond, arg) \
134 #define Malloc(foo) _Malloc(foo, __LINE__)
136 _Malloc(size_t foo, int line)
139 KASSERT(no_alloc == 0, ("malloc in wrong place() line %d", line));
140 return (calloc(foo, 1));
142 #define Free(foo) free(foo)
146 #define UNR_NO_MTX ((void *)(uintptr_t)-1)
153 mtx_lock(struct mtx *mp)
155 KASSERT(mp->state == 0, ("mutex already locked"));
160 mtx_unlock(struct mtx *mp)
162 KASSERT(mp->state == 1, ("mutex not locked"));
169 mtx_assert(struct mtx *mp, int flag)
171 if (flag == MA_OWNED) {
172 KASSERT(mp->state == 1, ("mtx_assert(MA_OWNED) not true"));
176 #define CTASSERT(foo)
177 #define WITNESS_WARN(flags, lock, fmt, ...) (void)0
179 #endif /* USERLAND */
182 * This is our basic building block.
184 * It can be used in three different ways depending on the value of the ptr
186 * If ptr is NULL, it represents a run of free items.
187 * If ptr points to the unrhdr it represents a run of allocated items.
188 * Otherwise it points to a bitstring of allocated items.
190 * For runs the len field is the length of the run.
191 * For bitmaps the len field represents the number of allocated items.
193 * The bitmap is the same size as struct unr to optimize memory management.
195 * Two special ranges are not covered by unrs:
196 * - at the start of the allocator space, all elements in [low, low + first)
198 * - at the end of the allocator space, all elements in [high - last, high]
202 TAILQ_ENTRY(unr) list;
208 bitstr_t map[sizeof(struct unr) / sizeof(bitstr_t)];
211 CTASSERT((sizeof(struct unr) % sizeof(bitstr_t)) == 0);
213 /* Number of bits we can store in the bitmap */
214 #define NBITS (NBBY * sizeof(((struct unrb *)NULL)->map))
217 is_bitmap(struct unrhdr *uh, struct unr *up)
219 return (up->ptr != uh && up->ptr != NULL);
222 /* Is the unrb empty in at least the first len bits? */
224 ub_empty(struct unrb *ub, int len) {
227 bit_ffs(ub->map, len, &first_set);
228 return (first_set == -1);
231 /* Is the unrb full? That is, is the number of set elements equal to len? */
233 ub_full(struct unrb *ub, int len)
237 bit_ffc(ub->map, len, &first_clear);
238 return (first_clear == -1);
242 * start: ipos = -1, upos = NULL;
243 * end: ipos = -1, upos = uh
253 create_iter_unr(struct unrhdr *uh)
255 struct unrhdr_iter *iter;
257 iter = Malloc(sizeof(*iter));
261 iter->upos_first_item = -1;
266 next_iter_unrl(struct unrhdr *uh, struct unrhdr_iter *iter)
273 if (iter->ipos == -1) {
274 if (iter->upos == uh)
277 if (uh->first == 0) {
278 up = TAILQ_FIRST(&uh->head);
287 iter->upos_first_item = uh->low;
295 /* Special case for the compacted [low, first) run. */
297 if (y + 1 < uh->low + uh->first) {
301 up = iter->upos = TAILQ_FIRST(&uh->head);
302 iter->upos_first_item = uh->low + uh->first;
306 if (y + 1 < iter->upos_first_item + up->len) {
310 } else if (is_bitmap(uh, up)) {
312 bit_ffs_at(&ub->map[0],
313 y + 1 - iter->upos_first_item,
316 iter->ipos = iter->upos_first_item + c;
321 iter->upos_first_item += up->len;
322 y = iter->upos_first_item - 1;
323 up = iter->upos = TAILQ_NEXT((struct unr *)iter->upos, list);
324 if (iter->upos == NULL) {
333 * returns -1 on end, otherwise the next element
336 next_iter_unr(void *handle)
339 struct unrhdr_iter *iter;
345 next_iter_unrl(uh, iter);
/*
 * Release an iterator handle obtained from create_iter_unr().
 */
void
free_iter_unr(void *handle)
{
	Free(handle);
}
357 #if defined(DIAGNOSTIC) || !defined(_KERNEL)
359 * Consistency check function.
361 * Checks the internal consistency as well as we can.
363 * Called at all boundaries of this API.
366 check_unrhdr(struct unrhdr *uh, int line)
375 TAILQ_FOREACH(up, &uh->head, list) {
377 if (is_bitmap(uh, up)) {
379 KASSERT (up->len <= NBITS,
380 ("UNR inconsistency: len %u max %zd (line %d)\n",
381 up->len, NBITS, line));
384 bit_count(ub->map, 0, up->len, &w);
386 } else if (up->ptr != NULL)
389 KASSERT (y == uh->busy,
390 ("UNR inconsistency: items %u found %u (line %d)\n",
392 KASSERT (z == uh->alloc,
393 ("UNR inconsistency: chunks %u found %u (line %d)\n",
394 uh->alloc, z, line));
400 check_unrhdr(struct unrhdr *uh __unused, int line __unused)
408 * Userland memory management. Just use calloc and keep track of how
409 * many elements we have allocated for check_unrhdr().
412 static __inline void *
413 new_unr(struct unrhdr *uh, void **p1, void **p2)
418 KASSERT(*p1 != NULL || *p2 != NULL, ("Out of cached memory"));
431 delete_unr(struct unrhdr *uh, void *ptr)
437 TAILQ_INSERT_TAIL(&uh->ppfree, up, list);
441 clean_unrhdrl(struct unrhdr *uh)
446 mtx_assert(uh->mtx, MA_OWNED);
447 while ((up = TAILQ_FIRST(&uh->ppfree)) != NULL) {
448 TAILQ_REMOVE(&uh->ppfree, up, list);
459 clean_unrhdr(struct unrhdr *uh)
470 init_unrhdr(struct unrhdr *uh, int low, int high, struct mtx *mutex)
473 KASSERT(low >= 0 && low <= high,
474 ("UNR: use error: new_unrhdr(%d, %d)", low, high));
475 if (mutex == UNR_NO_MTX)
477 else if (mutex != NULL)
481 TAILQ_INIT(&uh->head);
482 TAILQ_INIT(&uh->ppfree);
486 uh->last = 1 + (high - low);
489 check_unrhdr(uh, __LINE__);
493 * Allocate a new unrheader set.
495 * Highest and lowest valid values given as parameters.
499 new_unrhdr(int low, int high, struct mtx *mutex)
503 uh = Malloc(sizeof *uh);
504 init_unrhdr(uh, low, high, mutex);
509 delete_unrhdr(struct unrhdr *uh)
512 check_unrhdr(uh, __LINE__);
513 KASSERT(uh->busy == 0, ("unrhdr has %u allocations", uh->busy));
514 KASSERT(uh->alloc == 0, ("UNR memory leak in delete_unrhdr"));
515 KASSERT(TAILQ_FIRST(&uh->ppfree) == NULL,
516 ("unrhdr has postponed item for free"));
521 clear_unrhdr(struct unrhdr *uh)
525 KASSERT(TAILQ_EMPTY(&uh->ppfree),
526 ("unrhdr has postponed item for free"));
527 TAILQ_FOREACH_SAFE(up, &uh->head, list, uq) {
535 init_unrhdr(uh, uh->low, uh->high, uh->mtx);
537 check_unrhdr(uh, __LINE__);
541 * Look for sequence of items which can be combined into a bitmap, if
542 * multiple are present, take the one which saves most memory.
544 * Return (1) if a sequence was found to indicate that another call
545 * might be able to do more. Return (0) if we found no suitable sequence.
547 * NB: called from alloc_unr(), no new memory allocation allowed.
550 optimize_unr(struct unrhdr *uh)
552 struct unr *up, *uf, *us;
553 struct unrb *ub, *ubf;
557 * Look for the run of items (if any) which when collapsed into
558 * a bitmap would save most memory.
562 TAILQ_FOREACH(uf, &uh->head, list) {
563 if (uf->len >= NBITS)
566 if (is_bitmap(uh, uf))
571 up = TAILQ_NEXT(up, list);
574 if ((up->len + l) > NBITS)
577 if (is_bitmap(uh, up))
590 * If the first element is not a bitmap, make it one.
591 * Trying to do so without allocating more memory complicates things
594 if (!is_bitmap(uh, us)) {
595 uf = TAILQ_NEXT(us, list);
596 TAILQ_REMOVE(&uh->head, us, list);
598 l = us->ptr == uh ? 1 : 0;
600 bit_nclear(ub->map, 0, NBITS - 1);
602 bit_nset(ub->map, 0, a);
603 if (!is_bitmap(uh, uf)) {
605 bit_nclear(ub->map, a, a + uf->len - 1);
607 bit_nset(ub->map, a, a + uf->len - 1);
613 for (l = 0; l < uf->len; l++, a++) {
614 if (bit_test(ubf->map, l))
617 bit_clear(ub->map, a);
620 delete_unr(uh, uf->ptr);
627 uf = TAILQ_NEXT(us, list);
630 if (uf->len + us->len > NBITS)
632 if (uf->ptr == NULL) {
633 bit_nclear(ub->map, us->len, us->len + uf->len - 1);
635 TAILQ_REMOVE(&uh->head, uf, list);
637 } else if (uf->ptr == uh) {
638 bit_nset(ub->map, us->len, us->len + uf->len - 1);
640 TAILQ_REMOVE(&uh->head, uf, list);
644 for (l = 0; l < uf->len; l++, us->len++) {
645 if (bit_test(ubf->map, l))
646 bit_set(ub->map, us->len);
648 bit_clear(ub->map, us->len);
650 TAILQ_REMOVE(&uh->head, uf, list);
658 * See if a given unr should be collapsed with a neighbor.
660 * NB: called from alloc_unr(), no new memory allocation allowed.
663 collapse_unr(struct unrhdr *uh, struct unr *up)
668 /* If bitmap is all set or clear, change it to runlength */
669 if (is_bitmap(uh, up)) {
671 if (ub_full(ub, up->len)) {
672 delete_unr(uh, up->ptr);
674 } else if (ub_empty(ub, up->len)) {
675 delete_unr(uh, up->ptr);
680 /* If nothing left in runlength, delete it */
682 upp = TAILQ_PREV(up, unrhd, list);
684 upp = TAILQ_NEXT(up, list);
685 TAILQ_REMOVE(&uh->head, up, list);
690 /* If we have "hot-spot" still, merge with neighbor if possible */
692 upp = TAILQ_PREV(up, unrhd, list);
693 if (upp != NULL && up->ptr == upp->ptr) {
695 TAILQ_REMOVE(&uh->head, upp, list);
698 upp = TAILQ_NEXT(up, list);
699 if (upp != NULL && up->ptr == upp->ptr) {
701 TAILQ_REMOVE(&uh->head, upp, list);
706 /* Merge into ->first if possible */
707 upp = TAILQ_FIRST(&uh->head);
708 if (upp != NULL && upp->ptr == uh) {
709 uh->first += upp->len;
710 TAILQ_REMOVE(&uh->head, upp, list);
716 /* Merge into ->last if possible */
717 upp = TAILQ_LAST(&uh->head, unrhd);
718 if (upp != NULL && upp->ptr == NULL) {
719 uh->last += upp->len;
720 TAILQ_REMOVE(&uh->head, upp, list);
726 /* Try to make bitmaps */
727 while (optimize_unr(uh))
732 * Allocate a free unr.
735 alloc_unrl(struct unrhdr *uh)
743 mtx_assert(uh->mtx, MA_OWNED);
744 check_unrhdr(uh, __LINE__);
745 x = uh->low + uh->first;
747 up = TAILQ_FIRST(&uh->head);
750 * If we have an ideal split, just adjust the first+last
752 if (up == NULL && uh->last > 0) {
760 * We can always allocate from the first list element, so if we have
761 * nothing on the list, we must have run out of unit numbers.
766 KASSERT(up->ptr != uh, ("UNR first element is allocated"));
768 if (up->ptr == NULL) { /* free run */
771 } else { /* bitmap */
773 bit_ffc(ub->map, up->len, &y);
774 KASSERT(y != -1, ("UNR corruption: No clear bit in bitmap."));
779 collapse_unr(uh, up);
784 alloc_unr(struct unrhdr *uh)
798 alloc_unr_specificl(struct unrhdr *uh, u_int item, void **p1, void **p2)
800 struct unr *up, *upn;
805 mtx_assert(uh->mtx, MA_OWNED);
807 if (item < uh->low + uh->first || item > uh->high)
810 up = TAILQ_FIRST(&uh->head);
812 if (up == NULL && item - uh->low == uh->first) {
816 check_unrhdr(uh, __LINE__);
820 i = item - uh->low - uh->first;
823 up = new_unr(uh, p1, p2);
826 TAILQ_INSERT_TAIL(&uh->head, up, list);
827 up = new_unr(uh, p1, p2);
830 TAILQ_INSERT_TAIL(&uh->head, up, list);
831 uh->last = uh->high - uh->low - i;
833 check_unrhdr(uh, __LINE__);
836 /* Find the item which contains the unit we want to allocate. */
837 TAILQ_FOREACH(up, &uh->head, list) {
846 up = new_unr(uh, p1, p2);
849 TAILQ_INSERT_TAIL(&uh->head, up, list);
851 up = new_unr(uh, p1, p2);
854 TAILQ_INSERT_TAIL(&uh->head, up, list);
858 if (is_bitmap(uh, up)) {
860 if (bit_test(ub->map, i) == 0) {
865 } else if (up->ptr == uh)
868 KASSERT(up->ptr == NULL,
869 ("alloc_unr_specificl: up->ptr != NULL (up=%p)", up));
871 /* Split off the tail end, if any. */
872 tl = up->len - (1 + i);
874 upn = new_unr(uh, p1, p2);
877 TAILQ_INSERT_AFTER(&uh->head, up, upn, list);
880 /* Split off head end, if any */
882 upn = new_unr(uh, p1, p2);
885 TAILQ_INSERT_BEFORE(up, upn, list);
891 last = uh->high - uh->low - (item - uh->low);
895 collapse_unr(uh, up);
896 check_unrhdr(uh, __LINE__);
901 alloc_unr_specific(struct unrhdr *uh, u_int item)
906 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "alloc_unr_specific");
908 p1 = Malloc(sizeof(struct unr));
909 p2 = Malloc(sizeof(struct unr));
913 i = alloc_unr_specificl(uh, item, &p1, &p2);
928 * If we can save unrs by using a bitmap, do so.
931 free_unrl(struct unrhdr *uh, u_int item, void **p1, void **p2)
933 struct unr *up, *upp, *upn;
937 KASSERT(item >= uh->low && item <= uh->high,
938 ("UNR: free_unr(%u) out of range [%u...%u]",
939 item, uh->low, uh->high));
940 check_unrhdr(uh, __LINE__);
942 upp = TAILQ_FIRST(&uh->head);
944 * Freeing in the ideal split case
946 if (item + 1 == uh->first && upp == NULL) {
950 check_unrhdr(uh, __LINE__);
954 * Freeing in the ->first section. Create a run starting at the
955 * freed item. The code below will subdivide it.
957 if (item < uh->first) {
958 up = new_unr(uh, p1, p2);
960 up->len = uh->first - item;
961 TAILQ_INSERT_HEAD(&uh->head, up, list);
962 uh->first -= up->len;
967 /* Find the item which contains the unit we want to free */
968 TAILQ_FOREACH(up, &uh->head, list) {
974 /* Handle bitmap items */
975 if (is_bitmap(uh, up)) {
978 KASSERT(bit_test(ub->map, item) != 0,
979 ("UNR: Freeing free item %d (bitmap)\n", item));
980 bit_clear(ub->map, item);
982 collapse_unr(uh, up);
986 KASSERT(up->ptr == uh, ("UNR Freeing free item %d (run))\n", item));
988 /* Just this one left, reap it */
992 collapse_unr(uh, up);
996 /* Check if we can shift the item into the previous 'free' run */
997 upp = TAILQ_PREV(up, unrhd, list);
998 if (item == 0 && upp != NULL && upp->ptr == NULL) {
1002 collapse_unr(uh, up);
1006 /* Check if we can shift the item to the next 'free' run */
1007 upn = TAILQ_NEXT(up, list);
1008 if (item == up->len - 1 && upn != NULL && upn->ptr == NULL) {
1012 collapse_unr(uh, up);
1016 /* Split off the tail end, if any. */
1017 pl = up->len - (1 + item);
1019 upp = new_unr(uh, p1, p2);
1022 TAILQ_INSERT_AFTER(&uh->head, up, upp, list);
1025 /* Split off head end, if any */
1027 upp = new_unr(uh, p1, p2);
1030 TAILQ_INSERT_BEFORE(up, upp, list);
1035 collapse_unr(uh, up);
1039 free_unr(struct unrhdr *uh, u_int item)
1043 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "free_unr");
1044 p1 = Malloc(sizeof(struct unr));
1045 p2 = Malloc(sizeof(struct unr));
1046 if (uh->mtx != NULL)
1048 free_unrl(uh, item, &p1, &p2);
1050 if (uh->mtx != NULL)
1051 mtx_unlock(uh->mtx);
1059 #include "opt_ddb.h"
1061 #include <ddb/ddb.h>
1065 #if (defined(_KERNEL) && defined(DDB)) || !defined(_KERNEL)
1067 #if !defined(_KERNEL)
1068 #define db_printf printf
1072 print_unr(struct unrhdr *uh, struct unr *up)
1077 db_printf(" %p len = %5u ", up, up->len);
1078 if (up->ptr == NULL)
1079 db_printf("free\n");
1080 else if (up->ptr == uh)
1081 db_printf("alloc\n");
1084 db_printf("bitmap [");
1085 for (x = 0; x < up->len; x++) {
1086 if (bit_test(ub->map, x))
1096 print_unrhdr(struct unrhdr *uh)
1102 "%p low = %u high = %u first = %u last = %u busy %u chunks = %u\n",
1103 uh, uh->low, uh->high, uh->first, uh->last, uh->busy, uh->alloc);
1104 x = uh->low + uh->first;
1105 TAILQ_FOREACH(up, &uh->head, list) {
1106 db_printf(" from = %5u", x);
1108 if (up->ptr == NULL || up->ptr == uh)
1117 #if defined(_KERNEL) && defined(DDB)
1118 DB_SHOW_COMMAND(unrhdr, unrhdr_print_unrhdr)
1121 db_printf("show unrhdr addr\n");
1125 print_unrhdr((struct unrhdr *)addr);
1129 print_unrhdr_iter(struct unrhdr_iter *iter)
1131 db_printf("iter %p unrhdr %p ipos %d upos %p ufi %d\n",
1132 iter, iter->uh, iter->ipos, iter->upos, iter->upos_first_item);
1135 DB_SHOW_COMMAND(unrhdr_iter, unrhdr_print_iter)
1138 db_printf("show unrhdr_iter addr\n");
1142 print_unrhdr_iter((struct unrhdr_iter *)addr);
1146 #ifndef _KERNEL /* USERLAND test driver */
1149 * Simple stochastic test driver for the above functions. The code resides
1150 * here so that it can access static functions and structures.
1153 static bool verbose;
1154 #define VPRINTF(...) {if (verbose) printf(__VA_ARGS__);}
1157 test_alloc_unr(struct unrhdr *uh, u_int i, char a[])
1162 VPRINTF("F %u\n", i);
1170 VPRINTF("A %d\n", j);
1177 test_alloc_unr_specific(struct unrhdr *uh, u_int i, char a[])
1181 j = alloc_unr_specific(uh, i);
1183 VPRINTF("F %u\n", i);
1188 VPRINTF("A %d\n", j);
/*
 * qsort(3) comparator for ints used by the iterator test.
 *
 * Returns negative, zero, or positive as *a is less than, equal
 * to, or greater than *b.
 *
 * Uses the (x > y) - (x < y) idiom rather than plain subtraction:
 * "x - y" is undefined behavior (signed overflow) whenever the
 * operands differ by more than INT_MAX, e.g. comparing INT_MIN
 * against a positive value, and can then report the wrong order.
 */
static int
test_iter_compar(const void *a, const void *b)
{
	const int x = *(const int *)a;
	const int y = *(const int *)b;

	return ((x > y) - (x < y));
}
1203 test_iter_fill(int *vals, struct unrhdr *uh, int i, int v, int *res)
1208 x = alloc_unr_specific(uh, v);
1210 VPRINTF("alloc_unr_specific failed %d %d\n", x, v);
1221 int i, j, v, x, res;
1224 uh = new_unrhdr(TBASE, INT_MAX, NULL);
1225 for (i = 0; i < XSIZE; i++) {
1226 vals[i] = i + TBASE;
1227 x = alloc_unr_specific(uh, i + TBASE);
1228 if (x != i + TBASE) {
1229 VPRINTF("alloc_unr_specific failed %d %d\n", x,
1234 for (; i < ISIZE; i++) {
1237 v = arc4random_uniform(INT_MAX);
1240 for (j = 0; j < i; j++) {
1241 if (v == vals[j] || v + 1 == vals[j])
1246 test_iter_fill(vals, uh, i, v, &res);
1249 test_iter_fill(vals, uh, i, v, &res);
1251 qsort(vals, ISIZE, sizeof(vals[0]), test_iter_compar);
1253 ihandle = create_iter_unr(uh);
1255 while ((v = next_iter_unr(ihandle)) != -1) {
1257 VPRINTF("iter %d: iter %d != val %d\n", i, v, vals[i]);
1264 VPRINTF("iter %d: val %d\n", i, v);
1268 free_iter_unr(ihandle);
/*
 * Print the test driver's command line synopsis.
 */
static void
usage(char **argv)
{
	printf("%s [-h] [-i] [-r REPETITIONS] [-v]\n", argv[0]);
}
1282 main(int argc, char **argv)
1286 long count = 10000; /* Number of unrs to test */
1293 testing_iter = false;
1295 while ((ch = getopt(argc, argv, "hir:v")) != -1) {
1298 testing_iter = true;
1302 reps = strtol(optarg, NULL, 0);
1303 if (errno == ERANGE || errno == EINVAL) {
1319 setbuf(stdout, NULL);
1324 uh = new_unrhdr(0, count - 1, NULL);
1327 a = calloc(count, sizeof(char));
1329 err(1, "calloc failed");
1331 printf("sizeof(struct unr) %zu\n", sizeof(struct unr));
1332 printf("sizeof(struct unrb) %zu\n", sizeof(struct unrb));
1333 printf("sizeof(struct unrhdr) %zu\n", sizeof(struct unrhdr));
1334 printf("NBITS %lu\n", (unsigned long)NBITS);
1335 for (m = 0; m < count * reps; m++) {
1336 i = arc4random_uniform(count);
1338 if (a[i] && (j & 1))
1341 if ((arc4random() & 1) != 0)
1342 test_alloc_unr(uh, i, a);
1344 test_alloc_unr_specific(uh, i, a);
1348 check_unrhdr(uh, __LINE__);
1350 for (i = 0; i < (u_int)count; i++) {
1353 printf("C %u\n", i);