/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2004 Poul-Henning Kamp
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Unit number allocation functions.
 *
 * These functions implement a mixed run-length/bitmap management of unit
 * number spaces in a very memory efficient manner.
 *
 * Allocation policy is always lowest free number first.
 *
 * A return value of -1 signals that no more unit numbers are available.
 *
 * There is no cost associated with the range of unit numbers, so unless
 * the resource really is finite, specify INT_MAX to new_unrhdr() and
 * forget about checking the return value.
 *
 * If a mutex is not provided when the unit number space is created, a
 * default global mutex is used. The advantage of passing a mutex in is
 * that the alloc_unrl() function can be called with the mutex already
 * held (it will not be released by alloc_unrl()).
 *
 * The allocation function alloc_unr{l}() never sleeps (but it may block on
 * the mutex of course).
 *
 * Freeing a unit number may require allocating memory, and can therefore
 * sleep, so the free_unr() function does not come in a pre-locked variant.
 *
 * A userland test program is included.
 *
 * Memory usage is a very complex function of the exact allocation
 * pattern, but always very compact:
 * * For the very typical case where a single unbroken run of unit
 *   numbers is allocated, 44 bytes are used on i386.
 * * For a unit number space of 1000 units and the random pattern
 *   in the usermode test program included, the worst case usage
 *   was 252 bytes on i386 for 500 allocated and 500 free units.
 * * For a unit number space of 10000 units and the random pattern
 *   in the usermode test program included, the worst case usage
 *   was 798 bytes on i386 for 5000 allocated and 5000 free units.
 * * The worst case is where every other unit number is allocated and
 *   the rest are free. In that case 44 + N/4 bytes are used, where
 *   N is the number of the highest unit allocated.
 */
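
/*
 * Illustrative usage sketch, not part of the original file.  The "foo"
 * names are hypothetical; the calls follow the contract described above
 * (alloc_unr{l}() never sleeps, free_unr() may sleep, and alloc_unrl()
 * expects the supplied mutex to be held by the caller).
 */
#if 0
static struct mtx foo_mtx;
static struct unrhdr *foo_unrs;

static void
foo_init(void)
{
	mtx_init(&foo_mtx, "foo units", NULL, MTX_DEF);
	/* Unit numbers cost nothing, so ask for the whole range. */
	foo_unrs = new_unrhdr(0, INT_MAX, &foo_mtx);
}

static int
foo_attach(int *unitp)
{
	int unit;

	unit = alloc_unr(foo_unrs);	/* lowest free number, or -1 */
	if (unit == -1)
		return (ENOSPC);
	*unitp = unit;
	return (0);
}

static int
foo_attach_locked(void)
{
	int unit;

	mtx_lock(&foo_mtx);		/* pre-locked variant */
	unit = alloc_unrl(foo_unrs);	/* the mutex is not released here */
	mtx_unlock(&foo_mtx);
	return (unit);
}

static void
foo_detach(int unit)
{
	/* May sleep; takes the unrhdr mutex itself, so do not hold it here. */
	free_unr(foo_unrs, unit);
}
#endif
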
#include <sys/param.h>
#include <sys/types.h>
#include <sys/_unrhdr.h>

#ifdef _KERNEL

#include <sys/bitstring.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/*
 * In theory it would be smarter to allocate the individual blocks
 * with the zone allocator, but at this time the expectation is that
 * there will typically not even be enough allocations to fill a single
 * page, so we stick with malloc for now.
 */
static MALLOC_DEFINE(M_UNIT, "Unitno", "Unit number allocation");

#define Malloc(foo) malloc(foo, M_UNIT, M_WAITOK | M_ZERO)
#define Free(foo) free(foo, M_UNIT)

static struct mtx unitmtx;

MTX_SYSINIT(unit, &unitmtx, "unit# allocation", MTX_DEF);

#else /* ...USERLAND */

#include <bitstring.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define KASSERT(cond, arg) \
	do { \
		if (!(cond)) { \
			printf arg; \
			abort(); \
		} \
	} while (0)

static int no_alloc;

#define Malloc(foo) _Malloc(foo, __LINE__)
static void *
_Malloc(size_t foo, int line)
{
	KASSERT(no_alloc == 0, ("malloc in wrong place() line %d", line));
	return (calloc(foo, 1));
}
#define Free(foo) free(foo)

#define UNR_NO_MTX ((void *)(uintptr_t)-1)

static void
mtx_lock(struct mtx *mp)
{
	KASSERT(mp->state == 0, ("mutex already locked"));
	mp->state = 1;
}

static void
mtx_unlock(struct mtx *mp)
{
	KASSERT(mp->state == 1, ("mutex not locked"));
	mp->state = 0;
}

static void
mtx_assert(struct mtx *mp, int flag)
{
	if (flag == MA_OWNED) {
		KASSERT(mp->state == 1, ("mtx_assert(MA_OWNED) not true"));
	}
}

#define CTASSERT(foo)
#define WITNESS_WARN(flags, lock, fmt, ...) (void)0

#endif /* USERLAND */

/*
 * This is our basic building block.
 *
 * It can be used in three different ways depending on the value of the ptr
 * element:
 * If ptr is NULL, it represents a run of free items.
 * If ptr points to the unrhdr it represents a run of allocated items.
 * Otherwise it points to a bitstring of allocated items.
 *
 * For runs the len field is the length of the run.
 * For bitmaps the len field represents the number of allocated items.
 *
 * The bitmap is the same size as struct unr to optimize memory management.
 *
 * Two special ranges are not covered by unrs:
 * - at the start of the allocator space, all elements in [low, low + first)
 *   are allocated;
 * - at the end of the allocator space, all elements in [high - last, high]
 *   are free.
 */
struct unr {
	TAILQ_ENTRY(unr)	list;
	u_int			len;
	void			*ptr;
};

struct unrb {
	bitstr_t		map[sizeof(struct unr) / sizeof(bitstr_t)];
};

CTASSERT((sizeof(struct unr) % sizeof(bitstr_t)) == 0);

/* Number of bits we can store in the bitmap */
#define NBITS (NBBY * sizeof(((struct unrb *)NULL)->map))

static __inline bool
is_bitmap(struct unrhdr *uh, struct unr *up)
{
	return (up->ptr != uh && up->ptr != NULL);
}

/* Is the unrb empty in at least the first len bits? */
static __inline bool
ub_empty(struct unrb *ub, int len)
{
	int first_set;

	bit_ffs(ub->map, len, &first_set);
	return (first_set == -1);
}

/* Is the unrb full? That is, is the number of set elements equal to len? */
static __inline bool
ub_full(struct unrb *ub, int len)
{
	int first_clear;

	bit_ffc(ub->map, len, &first_clear);
	return (first_clear == -1);
}
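
/*
 * Illustrative sketch, not part of the original file: how the mixed
 * run/bitmap representation described above is interpreted when asking
 * whether a given unit is currently allocated.  The helper itself is
 * hypothetical and exists only to spell out the meaning of first, last,
 * runs and bitmap chunks.
 */
#if 0
static bool
unr_is_allocated_sketch(struct unrhdr *uh, u_int item)
{
	struct unr *up;
	struct unrb *ub;
	u_int x;

	/* [low, low + first) is an implicit run of allocated units. */
	if (item < uh->low + uh->first)
		return (true);
	/* The trailing 'last' units of the space are an implicit free run. */
	if (item > uh->high - uh->last)
		return (false);

	/* The explicit chunks cover the middle of the space, in order. */
	x = uh->low + uh->first;
	TAILQ_FOREACH(up, &uh->head, list) {
		if (item < x + up->len) {
			if (up->ptr == NULL)	/* free run */
				return (false);
			if (up->ptr == uh)	/* allocated run */
				return (true);
			ub = up->ptr;		/* bitmap chunk */
			return (bit_test(ub->map, item - x) != 0);
		}
		x += up->len;
	}
	return (false);
}
#endif
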
/*
 * start: ipos = -1, upos = NULL;
 * end:   ipos = -1, upos = uh
 */
create_iter_unr(struct unrhdr *uh)
	struct unrhdr_iter *iter;

	iter = Malloc(sizeof(*iter));
	iter->upos_first_item = -1;

next_iter_unrl(struct unrhdr *uh, struct unrhdr_iter *iter)
	if (iter->ipos == -1) {
		if (iter->upos == uh)
		if (uh->first == 0) {
			up = TAILQ_FIRST(&uh->head);
		iter->upos_first_item = uh->low;
	/* Special case for the compacted [low, first) run. */
	if (y + 1 < uh->low + uh->first) {
	up = iter->upos = TAILQ_FIRST(&uh->head);
	iter->upos_first_item = uh->low + uh->first;
	if (y + 1 < iter->upos_first_item + up->len) {
	} else if (is_bitmap(uh, up)) {
		bit_ffs_at(&ub->map[0],
		    y + 1 - iter->upos_first_item,
		iter->ipos = iter->upos_first_item + c;
	iter->upos_first_item += up->len;
	y = iter->upos_first_item - 1;
	up = iter->upos = TAILQ_NEXT((struct unr *)iter->upos, list);
	if (iter->upos == NULL) {

/*
 * returns -1 on end, otherwise the next element
 */
next_iter_unr(void *handle)
	struct unrhdr_iter *iter;

	next_iter_unrl(uh, iter);

free_iter_unr(void *handle)
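
/*
 * Illustrative sketch, not part of the original file: typical use of the
 * iterator interface above, mirroring what the userland test driver below
 * does.  The function and variable names are made up for the example.
 */
#if 0
static void
foo_show_allocated(struct unrhdr *uh)
{
	void *ihandle;
	int unit;

	ihandle = create_iter_unr(uh);
	/* next_iter_unr() returns each allocated unit, then -1 at the end. */
	while ((unit = next_iter_unr(ihandle)) != -1)
		printf("unit %d is allocated\n", unit);
	free_iter_unr(ihandle);
}
#endif
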
#if defined(DIAGNOSTIC) || !defined(_KERNEL)
/*
 * Consistency check function.
 *
 * Checks the internal consistency as well as we can.
 *
 * Called at all boundaries of this API.
 */
check_unrhdr(struct unrhdr *uh, int line)
	u_int y __diagused, z __diagused;

	TAILQ_FOREACH(up, &uh->head, list) {
		if (is_bitmap(uh, up)) {
			KASSERT (up->len <= NBITS,
			    ("UNR inconsistency: len %u max %zd (line %d)\n",
			    up->len, NBITS, line));
			bit_count(ub->map, 0, up->len, &w);
		} else if (up->ptr != NULL)
	KASSERT (y == uh->busy,
	    ("UNR inconsistency: items %u found %u (line %d)\n",
	    uh->busy, y, line));
	KASSERT (z == uh->alloc,
	    ("UNR inconsistency: chunks %u found %u (line %d)\n",
	    uh->alloc, z, line));

check_unrhdr(struct unrhdr *uh __unused, int line __unused)

/*
 * Userland memory management. Just use calloc and keep track of how
 * many elements we have allocated for check_unrhdr().
 */

static __inline void *
new_unr(struct unrhdr *uh, void **p1, void **p2)
	KASSERT(*p1 != NULL || *p2 != NULL, ("Out of cached memory"));

delete_unr(struct unrhdr *uh, void *ptr)
	TAILQ_INSERT_TAIL(&uh->ppfree, up, list);

clean_unrhdrl(struct unrhdr *uh)
	mtx_assert(uh->mtx, MA_OWNED);
	while ((up = TAILQ_FIRST(&uh->ppfree)) != NULL) {
		TAILQ_REMOVE(&uh->ppfree, up, list);

clean_unrhdr(struct unrhdr *uh)

init_unrhdr(struct unrhdr *uh, int low, int high, struct mtx *mutex)
	KASSERT(low >= 0 && low <= high,
	    ("UNR: use error: new_unrhdr(%d, %d)", low, high));
	if (mutex == UNR_NO_MTX)
	else if (mutex != NULL)
	TAILQ_INIT(&uh->head);
	TAILQ_INIT(&uh->ppfree);
	uh->last = 1 + (high - low);
	check_unrhdr(uh, __LINE__);

/*
 * Allocate a new unrheader set.
 *
 * Highest and lowest valid values given as parameters.
 */
new_unrhdr(int low, int high, struct mtx *mutex)
	uh = Malloc(sizeof *uh);
	init_unrhdr(uh, low, high, mutex);

delete_unrhdr(struct unrhdr *uh)
	check_unrhdr(uh, __LINE__);
	KASSERT(uh->busy == 0, ("unrhdr has %u allocations", uh->busy));
	KASSERT(uh->alloc == 0, ("UNR memory leak in delete_unrhdr"));
	KASSERT(TAILQ_FIRST(&uh->ppfree) == NULL,
	    ("unrhdr has postponed item for free"));

clear_unrhdr(struct unrhdr *uh)
	KASSERT(TAILQ_EMPTY(&uh->ppfree),
	    ("unrhdr has postponed item for free"));
	TAILQ_FOREACH_SAFE(up, &uh->head, list, uq) {
	init_unrhdr(uh, uh->low, uh->high, uh->mtx);
	check_unrhdr(uh, __LINE__);

/*
 * Look for a sequence of items which can be combined into a bitmap; if
 * multiple are present, take the one which saves most memory.
 *
 * Return (1) if a sequence was found to indicate that another call
 * might be able to do more. Return (0) if we found no suitable sequence.
 *
 * NB: called from alloc_unr(), no new memory allocation allowed.
 */
optimize_unr(struct unrhdr *uh)
	struct unr *up, *uf, *us;
	struct unrb *ub, *ubf;

	/*
	 * Look for the run of items (if any) which when collapsed into
	 * a bitmap would save most memory.
	 */
	TAILQ_FOREACH(uf, &uh->head, list) {
		if (uf->len >= NBITS)
		if (is_bitmap(uh, uf))
		up = TAILQ_NEXT(up, list);
		if ((up->len + l) > NBITS)
		if (is_bitmap(uh, up))

	/*
	 * If the first element is not a bitmap, make it one.
	 * Trying to do so without allocating more memory complicates things
	 */
	if (!is_bitmap(uh, us)) {
		uf = TAILQ_NEXT(us, list);
		TAILQ_REMOVE(&uh->head, us, list);
		l = us->ptr == uh ? 1 : 0;
		bit_nclear(ub->map, 0, NBITS - 1);
		bit_nset(ub->map, 0, a);
		if (!is_bitmap(uh, uf)) {
			bit_nclear(ub->map, a, a + uf->len - 1);
			bit_nset(ub->map, a, a + uf->len - 1);
			for (l = 0; l < uf->len; l++, a++) {
				if (bit_test(ubf->map, l))
				bit_clear(ub->map, a);
		delete_unr(uh, uf->ptr);

	uf = TAILQ_NEXT(us, list);
	if (uf->len + us->len > NBITS)
	if (uf->ptr == NULL) {
		bit_nclear(ub->map, us->len, us->len + uf->len - 1);
		TAILQ_REMOVE(&uh->head, uf, list);
	} else if (uf->ptr == uh) {
		bit_nset(ub->map, us->len, us->len + uf->len - 1);
		TAILQ_REMOVE(&uh->head, uf, list);
		for (l = 0; l < uf->len; l++, us->len++) {
			if (bit_test(ubf->map, l))
				bit_set(ub->map, us->len);
			bit_clear(ub->map, us->len);
		TAILQ_REMOVE(&uh->head, uf, list);

/*
 * See if a given unr should be collapsed with a neighbor.
 *
 * NB: called from alloc_unr(), no new memory allocation allowed.
 */
collapse_unr(struct unrhdr *uh, struct unr *up)
	/* If bitmap is all set or clear, change it to runlength */
	if (is_bitmap(uh, up)) {
		if (ub_full(ub, up->len)) {
			delete_unr(uh, up->ptr);
		} else if (ub_empty(ub, up->len)) {
			delete_unr(uh, up->ptr);

	/* If nothing left in runlength, delete it */
	upp = TAILQ_PREV(up, unrhd, list);
	upp = TAILQ_NEXT(up, list);
	TAILQ_REMOVE(&uh->head, up, list);

	/* If we have "hot-spot" still, merge with neighbor if possible */
	upp = TAILQ_PREV(up, unrhd, list);
	if (upp != NULL && up->ptr == upp->ptr) {
		TAILQ_REMOVE(&uh->head, upp, list);
	upp = TAILQ_NEXT(up, list);
	if (upp != NULL && up->ptr == upp->ptr) {
		TAILQ_REMOVE(&uh->head, upp, list);

	/* Merge into ->first if possible */
	upp = TAILQ_FIRST(&uh->head);
	if (upp != NULL && upp->ptr == uh) {
		uh->first += upp->len;
		TAILQ_REMOVE(&uh->head, upp, list);

	/* Merge into ->last if possible */
	upp = TAILQ_LAST(&uh->head, unrhd);
	if (upp != NULL && upp->ptr == NULL) {
		uh->last += upp->len;
		TAILQ_REMOVE(&uh->head, upp, list);

	/* Try to make bitmaps */
	while (optimize_unr(uh))

/*
 * Allocate a free unr.
 */
alloc_unrl(struct unrhdr *uh)
	mtx_assert(uh->mtx, MA_OWNED);
	check_unrhdr(uh, __LINE__);
	x = uh->low + uh->first;
	up = TAILQ_FIRST(&uh->head);

	/*
	 * If we have an ideal split, just adjust the first+last
	 */
	if (up == NULL && uh->last > 0) {

	/*
	 * We can always allocate from the first list element, so if we have
	 * nothing on the list, we must have run out of unit numbers.
	 */
	KASSERT(up->ptr != uh, ("UNR first element is allocated"));

	if (up->ptr == NULL) { /* free run */
	} else { /* bitmap */
		bit_ffc(ub->map, up->len, &y);
		KASSERT(y != -1, ("UNR corruption: No clear bit in bitmap."));

	collapse_unr(uh, up);

alloc_unr(struct unrhdr *uh)

alloc_unr_specificl(struct unrhdr *uh, u_int item, void **p1, void **p2)
	struct unr *up, *upn;

	mtx_assert(uh->mtx, MA_OWNED);
	if (item < uh->low + uh->first || item > uh->high)
	up = TAILQ_FIRST(&uh->head);
	if (up == NULL && item - uh->low == uh->first) {
		check_unrhdr(uh, __LINE__);
	i = item - uh->low - uh->first;
	up = new_unr(uh, p1, p2);
	TAILQ_INSERT_TAIL(&uh->head, up, list);
	up = new_unr(uh, p1, p2);
	TAILQ_INSERT_TAIL(&uh->head, up, list);
	uh->last = uh->high - uh->low - i;
	check_unrhdr(uh, __LINE__);

	/* Find the item which contains the unit we want to allocate. */
	TAILQ_FOREACH(up, &uh->head, list) {
	up = new_unr(uh, p1, p2);
	TAILQ_INSERT_TAIL(&uh->head, up, list);
	up = new_unr(uh, p1, p2);
	TAILQ_INSERT_TAIL(&uh->head, up, list);
	if (is_bitmap(uh, up)) {
		if (bit_test(ub->map, i) == 0) {
	} else if (up->ptr == uh)
	KASSERT(up->ptr == NULL,
	    ("alloc_unr_specificl: up->ptr != NULL (up=%p)", up));

	/* Split off the tail end, if any. */
	tl = up->len - (1 + i);
	upn = new_unr(uh, p1, p2);
	TAILQ_INSERT_AFTER(&uh->head, up, upn, list);

	/* Split off head end, if any */
	upn = new_unr(uh, p1, p2);
	TAILQ_INSERT_BEFORE(up, upn, list);

	last = uh->high - uh->low - (item - uh->low);
	collapse_unr(uh, up);
	check_unrhdr(uh, __LINE__);

alloc_unr_specific(struct unrhdr *uh, u_int item)
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "alloc_unr_specific");
	p1 = Malloc(sizeof(struct unr));
	p2 = Malloc(sizeof(struct unr));
	i = alloc_unr_specificl(uh, item, &p1, &p2);
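
/*
 * Illustrative sketch, not part of the original file: claiming one specific
 * unit number.  alloc_unr_specific() returns the requested unit on success
 * and -1 if it is out of range or already taken, as the test driver below
 * also relies on.  The "foo" name and the EEXIST choice are hypothetical.
 */
#if 0
static int
foo_claim_unit(struct unrhdr *uh, u_int wanted)
{

	/* May sleep: it preallocates struct unr memory with M_WAITOK. */
	if (alloc_unr_specific(uh, wanted) == -1)
		return (EEXIST);
	return (0);
}
#endif
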
/*
 * If we can save unrs by using a bitmap, do so.
 */
free_unrl(struct unrhdr *uh, u_int item, void **p1, void **p2)
	struct unr *up, *upp, *upn;

	KASSERT(item >= uh->low && item <= uh->high,
	    ("UNR: free_unr(%u) out of range [%u...%u]",
	    item, uh->low, uh->high));
	check_unrhdr(uh, __LINE__);
	upp = TAILQ_FIRST(&uh->head);

	/*
	 * Freeing in the ideal split case
	 */
	if (item + 1 == uh->first && upp == NULL) {
		check_unrhdr(uh, __LINE__);

	/*
	 * Freeing in the ->first section. Create a run starting at the
	 * freed item. The code below will subdivide it.
	 */
	if (item < uh->first) {
		up = new_unr(uh, p1, p2);
		up->len = uh->first - item;
		TAILQ_INSERT_HEAD(&uh->head, up, list);
		uh->first -= up->len;

	/* Find the item which contains the unit we want to free */
	TAILQ_FOREACH(up, &uh->head, list) {

	/* Handle bitmap items */
	if (is_bitmap(uh, up)) {
		KASSERT(bit_test(ub->map, item) != 0,
		    ("UNR: Freeing free item %d (bitmap)\n", item));
		bit_clear(ub->map, item);
		collapse_unr(uh, up);

	KASSERT(up->ptr == uh, ("UNR Freeing free item %d (run)\n", item));

	/* Just this one left, reap it */
	collapse_unr(uh, up);

	/* Check if we can shift the item into the previous 'free' run */
	upp = TAILQ_PREV(up, unrhd, list);
	if (item == 0 && upp != NULL && upp->ptr == NULL) {
		collapse_unr(uh, up);

	/* Check if we can shift the item to the next 'free' run */
	upn = TAILQ_NEXT(up, list);
	if (item == up->len - 1 && upn != NULL && upn->ptr == NULL) {
		collapse_unr(uh, up);

	/* Split off the tail end, if any. */
	pl = up->len - (1 + item);
	upp = new_unr(uh, p1, p2);
	TAILQ_INSERT_AFTER(&uh->head, up, upp, list);

	/* Split off head end, if any */
	upp = new_unr(uh, p1, p2);
	TAILQ_INSERT_BEFORE(up, upp, list);

	collapse_unr(uh, up);

free_unr(struct unrhdr *uh, u_int item)
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "free_unr");
	p1 = Malloc(sizeof(struct unr));
	p2 = Malloc(sizeof(struct unr));
	if (uh->mtx != NULL)
	free_unrl(uh, item, &p1, &p2);
	if (uh->mtx != NULL)
		mtx_unlock(uh->mtx);

#include "opt_ddb.h"
#include <ddb/ddb.h>

#if (defined(_KERNEL) && defined(DDB)) || !defined(_KERNEL)

#if !defined(_KERNEL)
#define db_printf printf
#endif

print_unr(struct unrhdr *uh, struct unr *up)
	db_printf(" %p len = %5u ", up, up->len);
	if (up->ptr == NULL)
		db_printf("free\n");
	else if (up->ptr == uh)
		db_printf("alloc\n");
	db_printf("bitmap [");
	for (x = 0; x < up->len; x++) {
		if (bit_test(ub->map, x))

print_unrhdr(struct unrhdr *uh)
	db_printf(
	    "%p low = %u high = %u first = %u last = %u busy %u chunks = %u\n",
	    uh, uh->low, uh->high, uh->first, uh->last, uh->busy, uh->alloc);
	x = uh->low + uh->first;
	TAILQ_FOREACH(up, &uh->head, list) {
		db_printf(" from = %5u", x);
		if (up->ptr == NULL || up->ptr == uh)

#if defined(_KERNEL) && defined(DDB)
DB_SHOW_COMMAND(unrhdr, unrhdr_print_unrhdr)
	db_printf("show unrhdr addr\n");
	print_unrhdr((struct unrhdr *)addr);

print_unrhdr_iter(struct unrhdr_iter *iter)
	db_printf("iter %p unrhdr %p ipos %d upos %p ufi %d\n",
	    iter, iter->uh, iter->ipos, iter->upos, iter->upos_first_item);

DB_SHOW_COMMAND(unrhdr_iter, unrhdr_print_iter)
	db_printf("show unrhdr_iter addr\n");
	print_unrhdr_iter((struct unrhdr_iter *)addr);

#ifndef _KERNEL /* USERLAND test driver */

/*
 * Simple stochastic test driver for the above functions. The code resides
 * here so that it can access static functions and structures.
 */

static bool verbose;
#define VPRINTF(...) {if (verbose) printf(__VA_ARGS__);}

test_alloc_unr(struct unrhdr *uh, u_int i, char a[])
	VPRINTF("F %u\n", i);
	VPRINTF("A %d\n", j);

test_alloc_unr_specific(struct unrhdr *uh, u_int i, char a[])
	j = alloc_unr_specific(uh, i);
	VPRINTF("F %u\n", i);
	VPRINTF("A %d\n", j);

test_iter_compar(const void *a, const void *b)
	return (*(const int *)a - *(const int *)b);

test_iter_fill(int *vals, struct unrhdr *uh, int i, int v, int *res)
	x = alloc_unr_specific(uh, v);
	VPRINTF("alloc_unr_specific failed %d %d\n", x, v);

	int i, j, v, x, res;

	uh = new_unrhdr(TBASE, INT_MAX, NULL);
	for (i = 0; i < XSIZE; i++) {
		vals[i] = i + TBASE;
		x = alloc_unr_specific(uh, i + TBASE);
		if (x != i + TBASE) {
			VPRINTF("alloc_unr_specific failed %d %d\n", x,
	for (; i < ISIZE; i++) {
		v = arc4random_uniform(INT_MAX);
		for (j = 0; j < i; j++) {
			if (v == vals[j] || v + 1 == vals[j])
		test_iter_fill(vals, uh, i, v, &res);
		test_iter_fill(vals, uh, i, v, &res);
	qsort(vals, ISIZE, sizeof(vals[0]), test_iter_compar);

	ihandle = create_iter_unr(uh);
	while ((v = next_iter_unr(ihandle)) != -1) {
		VPRINTF("iter %d: iter %d != val %d\n", i, v, vals[i]);
		VPRINTF("iter %d: val %d\n", i, v);
	free_iter_unr(ihandle);

	printf("%s [-h] [-i] [-r REPETITIONS] [-v]\n", argv[0]);

main(int argc, char **argv)
	long count = 10000; /* Number of unrs to test */

	testing_iter = false;
	while ((ch = getopt(argc, argv, "hir:v")) != -1) {
		testing_iter = true;
		reps = strtol(optarg, NULL, 0);
		if (errno == ERANGE || errno == EINVAL) {

	setbuf(stdout, NULL);
	uh = new_unrhdr(0, count - 1, NULL);
	a = calloc(count, sizeof(char));
		err(1, "calloc failed");
	printf("sizeof(struct unr) %zu\n", sizeof(struct unr));
	printf("sizeof(struct unrb) %zu\n", sizeof(struct unrb));
	printf("sizeof(struct unrhdr) %zu\n", sizeof(struct unrhdr));
	printf("NBITS %lu\n", (unsigned long)NBITS);
	for (m = 0; m < count * reps; m++) {
		i = arc4random_uniform(count);
		if (a[i] && (j & 1))
		if ((arc4random() & 1) != 0)
			test_alloc_unr(uh, i, a);
			test_alloc_unr_specific(uh, i, a);
		check_unrhdr(uh, __LINE__);
	for (i = 0; i < (u_int)count; i++) {
		printf("C %u\n", i);