/*-
 * Copyright (c) 2004 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Unit number allocation functions.
 *
 * These functions implement a mixed run-length/bitmap management of unit
 * number spaces in a very memory efficient manner.
 *
 * Allocation policy is always lowest free number first.
 *
 * A return value of -1 signals that no more unit numbers are available.
 *
 * There is no cost associated with the range of unit numbers, so unless
 * the resource really is finite, specify INT_MAX to new_unrhdr() and
 * forget about checking the return value.
 *
 * If a mutex is not provided when the unit number space is created, a
 * default global mutex is used.  The advantage of passing a mutex in is
 * that the alloc_unrl() function can be called with the mutex already
 * held (it will not be released by alloc_unrl()).
 *
 * The allocation function alloc_unr{l}() never sleeps (but it may block
 * on the mutex, of course).
 *
 * Freeing a unit number may require allocating memory, and can therefore
 * sleep, so the free_unr() function does not come in a pre-locked variant.
 *
 * A userland test program is included.
 *
 * Memory usage is a very complex function of the exact allocation
 * pattern, but always very compact:
 *    * For the very typical case where a single unbroken run of unit
 *      numbers is allocated, 44 bytes are used on i386.
 *    * For a unit number space of 1000 units and the random pattern
 *      in the userland test program included, the worst case usage
 *      was 252 bytes on i386 for 500 allocated and 500 free units.
 *    * For a unit number space of 10000 units and the random pattern
 *      in the userland test program included, the worst case usage
 *      was 798 bytes on i386 for 5000 allocated and 5000 free units.
 *    * The worst case is where every other unit number is allocated and
 *      the rest are free.  In that case 44 + N/4 bytes are used, where
 *      N is the number of the highest unit allocated.
 */
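/*
 * A minimal usage sketch (illustrative only, not part of the original
 * file; "foo_units" is a made-up name):
 *
 *	static struct unrhdr *foo_units;
 *	int unit;
 *
 *	foo_units = new_unrhdr(0, INT_MAX, NULL);	(default global mutex)
 *	unit = alloc_unr(foo_units);			(lowest free number)
 *	...
 *	free_unr(foo_units, unit);			(may sleep)
 *	delete_unrhdr(foo_units);			(all units must be free)
 *
 * With a finite range, check alloc_unr() for a -1 return.
 */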
#include <sys/param.h>
#include <sys/types.h>
#include <sys/_unrhdr.h>

#ifdef _KERNEL

#include <sys/bitstring.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
/*
 * In theory it would be smarter to allocate the individual blocks
 * with the zone allocator, but at this time the expectation is that
 * there will typically not even be enough allocations to fill a single
 * page, so we stick with malloc for now.
 */
static MALLOC_DEFINE(M_UNIT, "Unitno", "Unit number allocation");

#define Malloc(foo) malloc(foo, M_UNIT, M_WAITOK | M_ZERO)
#define Free(foo) free(foo, M_UNIT)

static struct mtx unitmtx;

MTX_SYSINIT(unit, &unitmtx, "unit# allocation", MTX_DEF);
#else /* ...USERLAND */

#include <bitstring.h>
#include <err.h>
#include <errno.h>
#include <getopt.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define KASSERT(cond, arg) \
	do { \
		if (!(cond)) { \
			printf arg; \
			abort(); \
		} \
	} while (0)

static int no_alloc;
#define Malloc(foo) _Malloc(foo, __LINE__)
static void *
_Malloc(size_t foo, int line)
{

	KASSERT(no_alloc == 0, ("malloc in wrong place() line %d", line));
	return (calloc(foo, 1));
}
#define Free(foo) free(foo)
static struct mtx {
	int	state;
} unitmtx;

#define MA_OWNED	1

static void
mtx_lock(struct mtx *mp)
{
	KASSERT(mp->state == 0, ("mutex already locked"));
	mp->state = 1;
}

static void
mtx_unlock(struct mtx *mp)
{
	KASSERT(mp->state == 1, ("mutex not locked"));
	mp->state = 0;
}

static void
mtx_assert(struct mtx *mp, int flag)
{
	if (flag == MA_OWNED) {
		KASSERT(mp->state == 1, ("mtx_assert(MA_OWNED) not true"));
	}
}

#define CTASSERT(foo)
#define WITNESS_WARN(flags, lock, fmt, ...)	(void)0

#endif /* USERLAND */
/*
 * This is our basic building block.
 *
 * It can be used in three different ways depending on the value of the ptr
 * element:
 *     If ptr is NULL, it represents a run of free items.
 *     If ptr points to the unrhdr it represents a run of allocated items.
 *     Otherwise it points to a bitstring of allocated items.
 *
 * For runs the len field is the length of the run.
 * For bitmaps the len field represents the number of allocated items.
 *
 * The bitmap is the same size as struct unr to optimize memory management.
 */
struct unr {
	TAILQ_ENTRY(unr)	list;
	u_int			len;
	void			*ptr;
};

struct unrb {
	bitstr_t		map[sizeof(struct unr) / sizeof(bitstr_t)];
};

CTASSERT((sizeof(struct unr) % sizeof(bitstr_t)) == 0);
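/*
 * Illustration (not part of the original source): for a space created
 * with new_unrhdr(0, 999, NULL) where units 0..99 and 200..299 are
 * allocated, the header and list could look like
 *
 *	uh->first = 100		implicit run of allocated units 0..99
 *	head: [ptr == NULL, len = 100]	free units 100..199
 *	      [ptr == uh,   len = 100]	allocated units 200..299
 *	uh->last = 700		implicit run of free units 300..999
 *
 * The list only ever describes the region between the lowest and the
 * highest "interesting" unit; the edges are held in first and last.
 */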
/* Number of bits we can store in the bitmap */
#define NBITS	(8 * sizeof(((struct unrb *)NULL)->map))
194 /* Is the unrb empty in at least the first len bits? */
196 ub_empty(struct unrb *ub, int len) {
199 bit_ffs(ub->map, len, &first_set);
200 return (first_set == -1);
203 /* Is the unrb full? That is, is the number of set elements equal to len? */
205 ub_full(struct unrb *ub, int len)
209 bit_ffc(ub->map, len, &first_clear);
210 return (first_clear == -1);
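/*
 * Example (illustrative only): with bits 0 and 1 set and bits 2 and 3
 * clear, ub_full(ub, 2) is true (no clear bit among the first two),
 * while ub_full(ub, 3) and ub_empty(ub, 2) are both false.
 */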
#if defined(DIAGNOSTIC) || !defined(_KERNEL)
/*
 * Consistency check function.
 *
 * Checks the internal consistency as well as we can.
 *
 * Called at all boundaries of this API.
 */
static void
check_unrhdr(struct unrhdr *uh, int line)
{
	struct unr *up;
	struct unrb *ub;
	u_int x, y, z;

	y = uh->first;
	z = 0;
	TAILQ_FOREACH(up, &uh->head, list) {
		z++;
		if (up->ptr != uh && up->ptr != NULL) {
			ub = up->ptr;
			KASSERT (up->len <= NBITS,
			    ("UNR inconsistency: len %u max %zd (line %d)\n",
			    up->len, NBITS, line));
			z++;	/* The bitmap is a separate allocation */
			for (x = 0; x < up->len; x++)
				if (bit_test(ub->map, x))
					y++;
		} else if (up->ptr != NULL)
			y += up->len;
	}
	KASSERT (y == uh->busy,
	    ("UNR inconsistency: items %u found %u (line %d)\n",
	    uh->busy, y, line));
	KASSERT (z == uh->alloc,
	    ("UNR inconsistency: chunks %u found %u (line %d)\n",
	    uh->alloc, z, line));
}

#else

static __inline void
check_unrhdr(struct unrhdr *uh __unused, int line __unused)
{

}

#endif
/*
 * Userland memory management.  Just use calloc and keep track of how
 * many elements we have allocated for check_unrhdr().
 */

static __inline void *
new_unr(struct unrhdr *uh, void **p1, void **p2)
{
	void *p;

	uh->alloc++;
	KASSERT(*p1 != NULL || *p2 != NULL, ("Out of cached memory"));
	if (*p1 != NULL) {
		p = *p1;
		*p1 = NULL;
	} else {
		p = *p2;
		*p2 = NULL;
	}
	return (p);
}

static __inline void
delete_unr(struct unrhdr *uh, void *ptr)
{
	struct unr *up;

	uh->alloc--;
	up = ptr;
	TAILQ_INSERT_TAIL(&uh->ppfree, up, list);
}
void
clean_unrhdrl(struct unrhdr *uh)
{
	struct unr *up;

	mtx_assert(uh->mtx, MA_OWNED);
	while ((up = TAILQ_FIRST(&uh->ppfree)) != NULL) {
		TAILQ_REMOVE(&uh->ppfree, up, list);
		mtx_unlock(uh->mtx);
		Free(up);
		mtx_lock(uh->mtx);
	}
}

void
clean_unrhdr(struct unrhdr *uh)
{

	mtx_lock(uh->mtx);
	clean_unrhdrl(uh);
	mtx_unlock(uh->mtx);
}
void
init_unrhdr(struct unrhdr *uh, int low, int high, struct mtx *mutex)
{

	KASSERT(low >= 0 && low <= high,
	    ("UNR: use error: new_unrhdr(%d, %d)", low, high));
	if (mutex != NULL)
		uh->mtx = mutex;
	else
		uh->mtx = &unitmtx;
	TAILQ_INIT(&uh->head);
	TAILQ_INIT(&uh->ppfree);
	uh->low = low;
	uh->high = high;
	uh->first = 0;
	uh->last = 1 + (high - low);
	check_unrhdr(uh, __LINE__);
}
/*
 * Allocate a new unrheader set.
 *
 * Highest and lowest valid values given as parameters.
 */
struct unrhdr *
new_unrhdr(int low, int high, struct mtx *mutex)
{
	struct unrhdr *uh;

	uh = Malloc(sizeof *uh);
	init_unrhdr(uh, low, high, mutex);
	return (uh);
}

void
delete_unrhdr(struct unrhdr *uh)
{

	check_unrhdr(uh, __LINE__);
	KASSERT(uh->busy == 0, ("unrhdr has %u allocations", uh->busy));
	KASSERT(uh->alloc == 0, ("UNR memory leak in delete_unrhdr"));
	KASSERT(TAILQ_FIRST(&uh->ppfree) == NULL,
	    ("unrhdr has postponed item for free"));
	Free(uh);
}
static __inline int
is_bitmap(struct unrhdr *uh, struct unr *up)
{
	return (up->ptr != uh && up->ptr != NULL);
}
/*
 * Look for a sequence of items which can be combined into a bitmap; if
 * multiple are present, take the one which saves most memory.
 *
 * Return (1) if a sequence was found to indicate that another call
 * might be able to do more.  Return (0) if we found no suitable sequence.
 *
 * NB: called from alloc_unr(), no new memory allocation allowed.
 */
static int
optimize_unr(struct unrhdr *uh)
{
	struct unr *up, *uf, *us;
	struct unrb *ub, *ubf;
	u_int a, l, b;

	/*
	 * Look for the run of items (if any) which when collapsed into
	 * a bitmap would save most memory.
	 */
	us = NULL;
	b = 0;
	TAILQ_FOREACH(uf, &uh->head, list) {
		if (uf->len >= NBITS)
			continue;
		a = 1;	/* Includes possible bitmap */
		if (is_bitmap(uh, uf))
			a++;
		l = uf->len;
		up = uf;
		while (1) {
			up = TAILQ_NEXT(up, list);
			if (up == NULL)
				break;
			if ((up->len + l) > NBITS)
				break;
			a++;	/* Another chunk */
			if (is_bitmap(uh, up))
				a++;	/* Another bitmap */
			l += up->len;
		}
		if (a > b) {
			b = a;
			us = uf;
		}
	}
	if (b < 3)
		return (0);

	/*
	 * If the first element is not a bitmap, make it one.
	 * Trying to do so without allocating more memory complicates things
	 * a bit.
	 */
	if (!is_bitmap(uh, us)) {
		uf = TAILQ_NEXT(us, list);
		TAILQ_REMOVE(&uh->head, us, list);
		a = us->len;
		l = us->ptr == uh ? 1 : 0;
		ub = (void *)us;
		bit_nclear(ub->map, 0, NBITS - 1);
		if (l)
			bit_nset(ub->map, 0, a);
		if (!is_bitmap(uh, uf)) {
			if (uf->ptr == NULL)
				bit_nclear(ub->map, a, a + uf->len - 1);
			else
				bit_nset(ub->map, a, a + uf->len - 1);
			uf->ptr = ub;
			uf->len += a;
			us = uf;
		} else {
			ubf = uf->ptr;
			for (l = 0; l < uf->len; l++, a++) {
				if (bit_test(ubf->map, l))
					bit_set(ub->map, a);
				else
					bit_clear(ub->map, a);
			}
			uf->len = a;
			delete_unr(uh, uf->ptr);
			uf->ptr = ub;
			us = uf;
		}
	}
	ub = us->ptr;
	while (1) {
		uf = TAILQ_NEXT(us, list);
		if (uf == NULL)
			return (1);
		if (uf->len + us->len > NBITS)
			return (1);
		if (uf->ptr == NULL) {
			bit_nclear(ub->map, us->len, us->len + uf->len - 1);
			us->len += uf->len;
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, uf);
		} else if (uf->ptr == uh) {
			bit_nset(ub->map, us->len, us->len + uf->len - 1);
			us->len += uf->len;
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, uf);
		} else {
			ubf = uf->ptr;
			for (l = 0; l < uf->len; l++, us->len++) {
				if (bit_test(ubf->map, l))
					bit_set(ub->map, us->len);
				else
					bit_clear(ub->map, us->len);
			}
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, ubf);
			delete_unr(uh, uf);
		}
	}
}
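/*
 * Illustration (not from the original source): three adjacent chunks
 *
 *	[ptr == uh, len = 10] -> [ptr == NULL, len = 5] -> [ptr == uh, len = 3]
 *
 * together span 18 <= NBITS units, so optimize_unr() can merge them
 * into a single bitmap entry with bits 0..9 and 15..17 set.  Three
 * struct unr allocations become two: one list entry plus one bitmap,
 * where the bitmap reuses the memory of a struct unr.
 */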
/*
 * See if a given unr should be collapsed with a neighbor.
 *
 * NB: called from alloc_unr(), no new memory allocation allowed.
 */
static void
collapse_unr(struct unrhdr *uh, struct unr *up)
{
	struct unr *upp;
	struct unrb *ub;

	/* If bitmap is all set or clear, change it to runlength */
	if (is_bitmap(uh, up)) {
		ub = up->ptr;
		if (ub_full(ub, up->len)) {
			delete_unr(uh, up->ptr);
			up->ptr = uh;
		} else if (ub_empty(ub, up->len)) {
			delete_unr(uh, up->ptr);
			up->ptr = NULL;
		}
	}

	/* If nothing left in runlength, delete it */
	if (up->len == 0) {
		upp = TAILQ_PREV(up, unrhd, list);
		if (upp == NULL)
			upp = TAILQ_NEXT(up, list);
		TAILQ_REMOVE(&uh->head, up, list);
		delete_unr(uh, up);
		up = upp;
	}

	/* If we have "hot-spot" still, merge with neighbor if possible */
	if (up != NULL) {
		upp = TAILQ_PREV(up, unrhd, list);
		if (upp != NULL && up->ptr == upp->ptr) {
			up->len += upp->len;
			TAILQ_REMOVE(&uh->head, upp, list);
			delete_unr(uh, upp);
		}
		upp = TAILQ_NEXT(up, list);
		if (upp != NULL && up->ptr == upp->ptr) {
			up->len += upp->len;
			TAILQ_REMOVE(&uh->head, upp, list);
			delete_unr(uh, upp);
		}
	}

	/* Merge into ->first if possible */
	upp = TAILQ_FIRST(&uh->head);
	if (upp != NULL && upp->ptr == uh) {
		uh->first += upp->len;
		TAILQ_REMOVE(&uh->head, upp, list);
		delete_unr(uh, upp);
		if (up == upp)
			up = NULL;
	}

	/* Merge into ->last if possible */
	upp = TAILQ_LAST(&uh->head, unrhd);
	if (upp != NULL && upp->ptr == NULL) {
		uh->last += upp->len;
		TAILQ_REMOVE(&uh->head, upp, list);
		delete_unr(uh, upp);
		if (up == upp)
			up = NULL;
	}

	/* Try to make bitmaps */
	while (optimize_unr(uh))
		continue;
}
/*
 * Allocate a free unr.
 */
int
alloc_unrl(struct unrhdr *uh)
{
	struct unr *up;
	struct unrb *ub;
	u_int x;
	int y;

	mtx_assert(uh->mtx, MA_OWNED);
	check_unrhdr(uh, __LINE__);
	x = uh->low + uh->first;

	up = TAILQ_FIRST(&uh->head);

	/*
	 * If we have an ideal split, just adjust the first+last
	 */
	if (up == NULL && uh->last > 0) {
		uh->first++;
		uh->last--;
		uh->busy++;
		return (x);
	}

	/*
	 * We can always allocate from the first list element, so if we have
	 * nothing on the list, we must have run out of unit numbers.
	 */
	if (up == NULL)
		return (-1);

	KASSERT(up->ptr != uh, ("UNR first element is allocated"));

	if (up->ptr == NULL) {	/* free run */
		uh->first++;
		up->len--;
	} else {		/* bitmap */
		ub = up->ptr;
		bit_ffc(ub->map, up->len, &y);
		KASSERT(y != -1, ("UNR corruption: No clear bit in bitmap."));
		bit_set(ub->map, y);
		x += y;
	}
	uh->busy++;
	collapse_unr(uh, up);
	return (x);
}
int
alloc_unr(struct unrhdr *uh)
{
	int i;

	mtx_lock(uh->mtx);
	i = alloc_unrl(uh);
	clean_unrhdrl(uh);
	mtx_unlock(uh->mtx);
	return (i);
}
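/*
 * Sketch of the pre-locked variant mentioned in the header comment
 * (illustrative only; "sc->mtx" is a hypothetical caller-supplied
 * mutex that was passed to new_unrhdr()):
 *
 *	mtx_lock(&sc->mtx);
 *	unit = alloc_unrl(uh);		mutex stays held on return
 *	...use unit while still locked...
 *	clean_unrhdrl(uh);		reap postponed frees, as alloc_unr() does
 *	mtx_unlock(&sc->mtx);
 */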
static int
alloc_unr_specificl(struct unrhdr *uh, u_int item, void **p1, void **p2)
{
	struct unr *up, *upn;
	struct unrb *ub;
	u_int i, last, tl;

	mtx_assert(uh->mtx, MA_OWNED);

	if (item < uh->low + uh->first || item > uh->high)
		return (-1);

	up = TAILQ_FIRST(&uh->head);
	/* Ideal split. */
	if (up == NULL && item - uh->low == uh->first) {
		uh->first++;
		uh->last--;
		uh->busy++;
		check_unrhdr(uh, __LINE__);
		return (item);
	}

	i = item - uh->low - uh->first;

	if (up == NULL) {
		up = new_unr(uh, p1, p2);
		up->ptr = NULL;
		up->len = i;
		TAILQ_INSERT_TAIL(&uh->head, up, list);
		up = new_unr(uh, p1, p2);
		up->ptr = uh;
		up->len = 1;
		TAILQ_INSERT_TAIL(&uh->head, up, list);
		uh->last = uh->high - uh->low - i;
		uh->busy++;
		check_unrhdr(uh, __LINE__);
		return (item);
	} else {
		/* Find the item which contains the unit we want to allocate. */
		TAILQ_FOREACH(up, &uh->head, list) {
			if (up->len > i)
				break;
			i -= up->len;
		}
	}

	if (up == NULL) {
		if (i > 0) {
			up = new_unr(uh, p1, p2);
			up->ptr = NULL;
			up->len = i;
			TAILQ_INSERT_TAIL(&uh->head, up, list);
		}
		up = new_unr(uh, p1, p2);
		up->ptr = uh;
		up->len = 1;
		TAILQ_INSERT_TAIL(&uh->head, up, list);
		goto done;
	}

	if (is_bitmap(uh, up)) {
		ub = up->ptr;
		if (bit_test(ub->map, i) == 0) {
			bit_set(ub->map, i);
			goto done;
		} else
			return (-1);
	} else if (up->ptr == uh)
		return (-1);

	KASSERT(up->ptr == NULL,
	    ("alloc_unr_specificl: up->ptr != NULL (up=%p)", up));

	/* Split off the tail end, if any. */
	tl = up->len - (1 + i);
	if (tl > 0) {
		upn = new_unr(uh, p1, p2);
		upn->ptr = NULL;
		upn->len = tl;
		TAILQ_INSERT_AFTER(&uh->head, up, upn, list);
	}

	/* Split off head end, if any */
	if (i > 0) {
		upn = new_unr(uh, p1, p2);
		upn->len = i;
		upn->ptr = NULL;
		TAILQ_INSERT_BEFORE(up, upn, list);
	}
	up->len = 1;
	up->ptr = uh;

done:
	last = uh->high - uh->low - (item - uh->low);
	if (uh->last > last)
		uh->last = last;
	uh->busy++;
	collapse_unr(uh, up);
	check_unrhdr(uh, __LINE__);
	return (item);
}
int
alloc_unr_specific(struct unrhdr *uh, u_int item)
{
	void *p1, *p2;
	int i;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "alloc_unr_specific");

	p1 = Malloc(sizeof(struct unr));
	p2 = Malloc(sizeof(struct unr));

	mtx_lock(uh->mtx);
	i = alloc_unr_specificl(uh, item, &p1, &p2);
	mtx_unlock(uh->mtx);

	if (p1 != NULL)
		Free(p1);
	if (p2 != NULL)
		Free(p2);

	return (i);
}
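/*
 * Example (illustrative only): a caller that honors wired unit numbers
 * can try the wanted number first and fall back to any free unit:
 *
 *	unit = alloc_unr_specific(uh, wanted);
 *	if (unit == -1)
 *		unit = alloc_unr(uh);
 *
 * alloc_unr_specific() returns the requested number, or -1 if it is
 * out of range or already allocated.
 */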
/*
 * Free a unr.
 *
 * If we can save unrs by using a bitmap, do so.
 */
static void
free_unrl(struct unrhdr *uh, u_int item, void **p1, void **p2)
{
	struct unr *up, *upp, *upn;
	struct unrb *ub;
	u_int pl;

	KASSERT(item >= uh->low && item <= uh->high,
	    ("UNR: free_unr(%u) out of range [%u...%u]",
	    item, uh->low, uh->high));
	check_unrhdr(uh, __LINE__);
	item -= uh->low;
	upp = TAILQ_FIRST(&uh->head);
	/*
	 * Freeing in the ideal split case
	 */
	if (item + 1 == uh->first && upp == NULL) {
		uh->last++;
		uh->first--;
		uh->busy--;
		check_unrhdr(uh, __LINE__);
		return;
	}
	/*
	 * Freeing in the ->first section.  Create a run starting at the
	 * freed item.  The code below will subdivide it.
	 */
	if (item < uh->first) {
		up = new_unr(uh, p1, p2);
		up->ptr = uh;
		up->len = uh->first - item;
		TAILQ_INSERT_HEAD(&uh->head, up, list);
		uh->first -= up->len;
	}

	item -= uh->first;

	/* Find the item which contains the unit we want to free */
	TAILQ_FOREACH(up, &uh->head, list) {
		if (up->len > item)
			break;
		item -= up->len;
	}

	/* Handle bitmap items */
	if (is_bitmap(uh, up)) {
		ub = up->ptr;

		KASSERT(bit_test(ub->map, item) != 0,
		    ("UNR: Freeing free item %d (bitmap)\n", item));
		bit_clear(ub->map, item);
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	KASSERT(up->ptr == uh, ("UNR Freeing free item %d (run))\n", item));

	/* Just this one left, reap it */
	if (up->len == 1) {
		up->ptr = NULL;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Check if we can shift the item into the previous 'free' run */
	upp = TAILQ_PREV(up, unrhd, list);
	if (item == 0 && upp != NULL && upp->ptr == NULL) {
		upp->len++;
		up->len--;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Check if we can shift the item to the next 'free' run */
	upn = TAILQ_NEXT(up, list);
	if (item == up->len - 1 && upn != NULL && upn->ptr == NULL) {
		upn->len++;
		up->len--;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Split off the tail end, if any. */
	pl = up->len - (1 + item);
	if (pl > 0) {
		upp = new_unr(uh, p1, p2);
		upp->ptr = uh;
		upp->len = pl;
		TAILQ_INSERT_AFTER(&uh->head, up, upp, list);
	}

	/* Split off head end, if any */
	if (item > 0) {
		upp = new_unr(uh, p1, p2);
		upp->len = item;
		upp->ptr = uh;
		TAILQ_INSERT_BEFORE(up, upp, list);
	}
	up->len = 1;
	up->ptr = NULL;
	uh->busy--;
	collapse_unr(uh, up);
}
void
free_unr(struct unrhdr *uh, u_int item)
{
	void *p1, *p2;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "free_unr");
	p1 = Malloc(sizeof(struct unr));
	p2 = Malloc(sizeof(struct unr));
	mtx_lock(uh->mtx);
	free_unrl(uh, item, &p1, &p2);
	clean_unrhdrl(uh);
	mtx_unlock(uh->mtx);
	if (p1 != NULL)
		Free(p1);
	if (p2 != NULL)
		Free(p2);
}
#ifndef _KERNEL	/* USERLAND test driver */

/*
 * Simple stochastic test driver for the above functions.  The code resides
 * here so that it can access static functions and structures.
 */

static bool verbose;
#define VPRINTF(...)	{if (verbose) printf(__VA_ARGS__);}
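/*
 * One plausible way to build and run the test driver on FreeBSD (the
 * command and file name are illustrative, not part of the original):
 *
 *	cc -O2 -o unrtest subr_unit.c
 *	./unrtest -r 10 -v
 *
 * -r sets the number of repetitions, -v prints each operation.
 */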
static void
print_unr(struct unrhdr *uh, struct unr *up)
{
	u_int x;
	struct unrb *ub;

	printf("  %p len = %5u ", up, up->len);
	if (up->ptr == NULL)
		printf("free\n");
	else if (up->ptr == uh)
		printf("alloc\n");
	else {
		ub = up->ptr;
		printf("bitmap [");
		for (x = 0; x < up->len; x++) {
			if (bit_test(ub->map, x))
				printf("#");
			else
				printf(" ");
		}
		printf("]\n");
	}
}

static void
print_unrhdr(struct unrhdr *uh)
{
	struct unr *up;
	u_int x;

	printf(
	    "%p low = %u high = %u first = %u last = %u busy %u chunks = %u\n",
	    uh, uh->low, uh->high, uh->first, uh->last, uh->busy, uh->alloc);
	x = uh->low + uh->first;
	TAILQ_FOREACH(up, &uh->head, list) {
		printf("  from = %5u", x);
		print_unr(uh, up);
		if (up->ptr == NULL || up->ptr == uh)
			x += up->len;
		else
			x += NBITS;
	}
}
static void
test_alloc_unr(struct unrhdr *uh, u_int i, char a[])
{
	int j;

	if (a[i]) {
		VPRINTF("F %u\n", i);
		free_unr(uh, i);
		a[i] = 0;
	} else {
		no_alloc = 1;
		j = alloc_unr(uh);
		if (j != -1) {
			a[j] = 1;
			VPRINTF("A %d\n", j);
		}
		no_alloc = 0;
	}
}

static void
test_alloc_unr_specific(struct unrhdr *uh, u_int i, char a[])
{
	u_int j;

	j = alloc_unr_specific(uh, i);
	if (j == -1) {
		VPRINTF("F %u\n", i);
		a[i] = 0;
		free_unr(uh, i);
	} else {
		a[i] = 1;
		VPRINTF("A %d\n", j);
	}
}
static void
usage(char **argv)
{
	printf("%s [-h] [-r REPETITIONS] [-v]\n", argv[0]);
}
int
main(int argc, char **argv)
{
	struct unrhdr *uh;
	char *a;
	long count = 10000;	/* Number of unrs to test */
	long reps = 1, m;
	int ch;
	u_int i, j;

	verbose = false;

	while ((ch = getopt(argc, argv, "hr:v")) != -1) {
		switch (ch) {
		case 'r':
			errno = 0;
			reps = strtol(optarg, NULL, 0);
			if (errno == ERANGE || errno == EINVAL) {
				usage(argv);
				exit(2);
			}
			break;
		case 'v':
			verbose = true;
			break;
		case 'h':
		default:
			usage(argv);
			exit(2);
		}
	}

	setbuf(stdout, NULL);
	uh = new_unrhdr(0, count - 1, NULL);
	print_unrhdr(uh);

	a = calloc(count, sizeof(char));
	if (a == NULL)
		err(1, "calloc failed");

	printf("sizeof(struct unr) %zu\n", sizeof(struct unr));
	printf("sizeof(struct unrb) %zu\n", sizeof(struct unrb));
	printf("sizeof(struct unrhdr) %zu\n", sizeof(struct unrhdr));
	printf("NBITS %lu\n", (unsigned long)NBITS);

	for (m = 0; m < count * reps; m++) {
		j = random();
		i = (j >> 1) % count;
#if 0
		if (a[i] && (j & 1))
			continue;
#endif
		if ((random() & 1) != 0)
			test_alloc_unr(uh, i, a);
		else
			test_alloc_unr_specific(uh, i, a);
		if (verbose)
			print_unrhdr(uh);
		check_unrhdr(uh, __LINE__);
	}

	/* Free all remaining allocations and tear the space down. */
	for (i = 0; i < count; i++) {
		if (a[i]) {
			if (verbose)
				printf("C %u\n", i);
			free_unr(uh, i);
		}
	}
	check_unrhdr(uh, __LINE__);
	print_unrhdr(uh);
	delete_unrhdr(uh);
	free(a);
	return (0);
}
#endif /* !_KERNEL */