/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission. M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose. It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * The kernel resource manager. This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly. Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code. The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order. Most of the resources
 * are of this type, as it is the most familiar. The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance). The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share. RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices. That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
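/*
 * Illustrative sketch (not part of the original file): a typical bus driver
 * creates one rman per address space that it decodes and then hands out
 * sub-ranges of it to its children.  The softc layout, function name, and
 * the address range below are hypothetical; the rman calls themselves are
 * the API implemented in this file.
 *
 *	static int
 *	foo_attach_rman(struct foo_softc *sc)
 *	{
 *		sc->sc_mem_rman.rm_type = RMAN_ARRAY;
 *		sc->sc_mem_rman.rm_descr = "foo memory window";
 *		if (rman_init(&sc->sc_mem_rman) != 0)
 *			return (ENXIO);
 *		if (rman_manage_region(&sc->sc_mem_rman,
 *		    0xf0000000ul, 0xf7fffffful) != 0) {
 *			rman_fini(&sc->sc_mem_rman);
 *			return (ENXIO);
 *		}
 *		return (0);
 *	}
 */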
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space). That is also why the indices are defined to have type
 * `unsigned long' -- that being the largest integral type in ISO C (1990).
 * The 1999 version of C allows `long long'; we may need to switch to that
 * at some point in the future, particularly if we want to support 36-bit
 * addresses on IA32 hardware.
 */
struct resource_i {
        struct resource r_r;
        TAILQ_ENTRY(resource_i) r_link;
        LIST_ENTRY(resource_i) r_sharelink;
        LIST_HEAD(, resource_i) *r_sharehead;
        u_long r_start;         /* index of the first entry in this resource */
        u_long r_end;           /* index of the last entry (inclusive) */
        u_int r_flags;
        void *r_virtual;        /* virtual address of this resource */
        struct device *r_dev;   /* device which has allocated this resource */
        struct rman *r_rm;      /* resource manager from whence this came */
        int r_rid;              /* optional rid for this resource. */
};
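/*
 * Descriptive note: each rman keeps its resource_i entries on rm_list sorted
 * by r_start, with allocated and free segments sitting side by side.  Two
 * entries are candidates for merging only when they are exactly adjacent
 * (the earlier one's r_end + 1 equals the later one's r_start) and neither
 * is allocated; the region-management and release code below relies on this
 * ordering.
 */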
static int rman_debug = 0;
TUNABLE_INT("debug.rman_debug", &rman_debug);
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
    &rman_debug, 0, "rman debug");

#define DPRINTF(params) if (rman_debug) printf params
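/*
 * Usage note: rman_debug can be set at boot through the loader tunable
 * "debug.rman_debug" or at run time through the read-write sysctl of the
 * same name; any non-zero value turns the DPRINTF() calls below into
 * console printf()s.
 */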
static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct rman_head rman_head;
static struct mtx rman_mtx;     /* mutex to protect rman_head */
static int int_rman_activate_resource(struct rman *rm, struct resource_i *r,
    struct resource_i **whohas);
static int int_rman_deactivate_resource(struct resource_i *r);
static int int_rman_release_resource(struct rman *rm, struct resource_i *r);
static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
        struct resource_i *r;

        r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
rman_init(struct rman *rm)
        TAILQ_INIT(&rman_head);
        mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);

        if (rm->rm_start == 0 && rm->rm_end == 0)
        if (rm->rm_type == RMAN_UNINIT)
        if (rm->rm_type == RMAN_GAUGE)
                panic("implement RMAN_GAUGE");

        TAILQ_INIT(&rm->rm_list);
        rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
        if (rm->rm_mtx == NULL)
        mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

        TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
        mtx_unlock(&rman_mtx);
rman_manage_region(struct rman *rm, u_long start, u_long end)
        struct resource_i *r, *s, *t;

        DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
            rm->rm_descr, start, end));
        if (start < rm->rm_start || end > rm->rm_end)
        r = int_alloc_resource(M_NOWAIT);

        mtx_lock(rm->rm_mtx);

        /* Skip entries before us. */
        TAILQ_FOREACH(s, &rm->rm_list, r_link) {
                if (s->r_end == ULONG_MAX)
                if (s->r_end + 1 >= r->r_start)

        /* If we ran off the end of the list, insert at the tail. */
                TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);

                /* Check for any overlap with the current region. */
                if (r->r_start <= s->r_end && r->r_end >= s->r_start)

                /* Check for any overlap with the next region. */
                t = TAILQ_NEXT(s, r_link);
                if (t && r->r_start <= t->r_end && r->r_end >= t->r_start)

                /*
                 * See if this region can be merged with the next region.  If
                 * not, clear the pointer.
                 */
                if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))

                /* See if we can merge with the current region. */
                if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
                        /* Can we merge all 3 regions? */
                                TAILQ_REMOVE(&rm->rm_list, t, r_link);
                } else if (t != NULL) {
                        /* Can we merge with just the next region? */
                        t->r_start = r->r_start;
                } else if (s->r_end < r->r_start) {
                        TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
                        TAILQ_INSERT_BEFORE(s, r, r_link);

        mtx_unlock(rm->rm_mtx);
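/*
 * For illustration (hypothetical values): a bus driver that calls
 * rman_manage_region(rm, 0x100, 0x1ff) and later
 * rman_manage_region(rm, 0x200, 0x2ff) ends up with a single free segment
 * [0x100, 0x2ff] on rm_list, because the second region is exactly adjacent
 * to the first and neither is allocated, so the merge path above coalesces
 * them.
 */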
rman_init_from_resource(struct rman *rm, struct resource *r)
        if ((rv = rman_init(rm)) != 0)
        return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
rman_fini(struct rman *rm)
        struct resource_i *r;

        mtx_lock(rm->rm_mtx);
        TAILQ_FOREACH(r, &rm->rm_list, r_link) {
                if (r->r_flags & RF_ALLOCATED) {
                        mtx_unlock(rm->rm_mtx);

        /*
         * There really should only be one of these if we are in this
         * state and the code is working properly, but it can't hurt.
         */
        while (!TAILQ_EMPTY(&rm->rm_list)) {
                r = TAILQ_FIRST(&rm->rm_list);
                TAILQ_REMOVE(&rm->rm_list, r, r_link);
        mtx_unlock(rm->rm_mtx);
        TAILQ_REMOVE(&rman_head, rm, rm_link);
        mtx_unlock(&rman_mtx);
        mtx_destroy(rm->rm_mtx);
        free(rm->rm_mtx, M_RMAN);
rman_first_free_region(struct rman *rm, u_long *start, u_long *end)
        struct resource_i *r;

        mtx_lock(rm->rm_mtx);
        TAILQ_FOREACH(r, &rm->rm_list, r_link) {
                if (!(r->r_flags & RF_ALLOCATED)) {
                        mtx_unlock(rm->rm_mtx);
        mtx_unlock(rm->rm_mtx);
rman_last_free_region(struct rman *rm, u_long *start, u_long *end)
        struct resource_i *r;

        mtx_lock(rm->rm_mtx);
        TAILQ_FOREACH_REVERSE(r, &rm->rm_list, resource_head, r_link) {
                if (!(r->r_flags & RF_ALLOCATED)) {
                        mtx_unlock(rm->rm_mtx);
        mtx_unlock(rm->rm_mtx);
/* Shrink or extend one or both ends of an allocated resource. */
rman_adjust_resource(struct resource *rr, u_long start, u_long end)
        struct resource_i *r, *s, *t, *new;

        /* Not supported for shared resources. */
        if (r->r_flags & (RF_TIMESHARE | RF_SHAREABLE))

        /*
         * This does not support wholesale moving of a resource.  At
         * least part of the desired new range must overlap with the
         * existing range.
         */
        if (end < r->r_start || r->r_end < start)

        /*
         * Find the two resource regions immediately adjacent to the
         * allocated resource.
         */
        mtx_lock(rm->rm_mtx);

        TAILQ_FOREACH(s, &rm->rm_list, r_link) {
                panic("resource not in list");

        s = TAILQ_PREV(r, resource_head, r_link);
        t = TAILQ_NEXT(r, r_link);
        KASSERT(s == NULL || s->r_end + 1 == r->r_start,
            ("prev resource mismatch"));
        KASSERT(t == NULL || r->r_end + 1 == t->r_start,
            ("next resource mismatch"));

        /*
         * See if the changes are permitted.  Shrinking is always allowed,
         * but growing requires sufficient room in the adjacent region.
         */
        if (start < r->r_start && (s == NULL || (s->r_flags & RF_ALLOCATED) ||
            s->r_start > start)) {
                mtx_unlock(rm->rm_mtx);
        if (end > r->r_end && (t == NULL || (t->r_flags & RF_ALLOCATED) ||
                mtx_unlock(rm->rm_mtx);

        /*
         * While holding the lock, grow either end of the resource as
         * needed and shrink either end if the shrinking does not require
         * allocating a new resource.  We can safely drop the lock and then
         * insert a new range to handle the shrinking case afterwards.
         */
        if (start < r->r_start ||
            (start > r->r_start && s != NULL && !(s->r_flags & RF_ALLOCATED))) {
                KASSERT(s->r_flags == 0, ("prev is busy"));
                if (s->r_start == start) {
                        TAILQ_REMOVE(&rm->rm_list, s, r_link);
                        s->r_end = start - 1;
        if (end > r->r_end ||
            (end < r->r_end && t != NULL && !(t->r_flags & RF_ALLOCATED))) {
                KASSERT(t->r_flags == 0, ("next is busy"));
                if (t->r_end == end) {
                        TAILQ_REMOVE(&rm->rm_list, t, r_link);
                        t->r_start = end + 1;
        mtx_unlock(rm->rm_mtx);

        /*
         * Handle the shrinking cases that require allocating a new
         * resource to hold the newly-free region.  We have to recheck
         * if we still need this new region after acquiring the lock.
         */
        if (start > r->r_start) {
                new = int_alloc_resource(M_WAITOK);
                new->r_start = r->r_start;
                new->r_end = start - 1;

                mtx_lock(rm->rm_mtx);
                s = TAILQ_PREV(r, resource_head, r_link);
                if (s != NULL && !(s->r_flags & RF_ALLOCATED)) {
                        s->r_end = start - 1;
                        TAILQ_INSERT_BEFORE(r, new, r_link);
                mtx_unlock(rm->rm_mtx);
        if (end < r->r_end) {
                new = int_alloc_resource(M_WAITOK);
                new->r_start = end + 1;
                new->r_end = r->r_end;

                mtx_lock(rm->rm_mtx);
                t = TAILQ_NEXT(r, r_link);
                if (t != NULL && !(t->r_flags & RF_ALLOCATED)) {
                        t->r_start = end + 1;
                        TAILQ_INSERT_AFTER(&rm->rm_list, r, new, r_link);
                mtx_unlock(rm->rm_mtx);
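/*
 * For illustration (hypothetical values): a caller holding an allocated
 * resource that currently covers [0x1000, 0x1fff] could grow it downward
 * with rman_adjust_resource(res, 0x0800, 0x1fff), provided the segment
 * immediately below it on rm_list is free and reaches down to 0x0800;
 * shrinking, e.g. rman_adjust_resource(res, 0x1000, 0x17ff), is always
 * permitted for non-shared resources.
 */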
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
    u_long count, u_long bound, u_int flags,
        struct resource_i *r, *s, *rv;
        u_long rstart, rend, amask, bmask;

        DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#lx, %#lx], "
            "length %#lx, flags %u, device %s\n", rm->rm_descr, start, end,
            dev == NULL ? "<null>" : device_get_nameunit(dev)));
        want_activate = (flags & RF_ACTIVE);

        mtx_lock(rm->rm_mtx);

        for (r = TAILQ_FIRST(&rm->rm_list);
             r && r->r_end < start;
             r = TAILQ_NEXT(r, r_link))
                DPRINTF(("could not find a region\n"));

        amask = (1ul << RF_ALIGNMENT(flags)) - 1;
        /* If bound is 0, bmask will also be 0 */
        bmask = ~(bound - 1);
        /*
         * First try to find an acceptable totally-unshared region.
         */
        for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
                DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
                if (s->r_start + count - 1 > end) {
                        DPRINTF(("s->r_start (%#lx) + count - 1 > end (%#lx)\n",
                if (s->r_flags & RF_ALLOCATED) {
                        DPRINTF(("region is allocated\n"));
                rstart = ulmax(s->r_start, start);
                /*
                 * Try to find a region by adjusting to boundary and alignment
                 * until both conditions are satisfied.  This is not an optimal
                 * algorithm, but in most cases it isn't really bad, either.
                 */
                        rstart = (rstart + amask) & ~amask;
                        if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
                                rstart += bound - (rstart & ~bmask);
                } while ((rstart & amask) != 0 && rstart < end &&
                rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
                        DPRINTF(("adjusted start exceeds end\n"));
                DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
                    rstart, rend, (rend - rstart + 1), count));

                if ((rend - rstart + 1) >= count) {
                        DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
                            rstart, rend, (rend - rstart + 1)));
                        if ((s->r_end - s->r_start + 1) == count) {
                                DPRINTF(("candidate region is entire chunk\n"));
                                rv->r_flags |= RF_ALLOCATED | flags;
                        /*
                         * If s->r_start < rstart and
                         * s->r_end > rstart + count - 1, then
                         * we need to split the region into three pieces
                         * (the middle one will get returned to the user).
                         * Otherwise, we are allocating at either the
                         * beginning or the end of s, so we only need to
                         * split it in two.  The first case requires
                         * two new allocations; the second requires but one.
                         */
                        rv = int_alloc_resource(M_NOWAIT);
                        rv->r_start = rstart;
                        rv->r_end = rstart + count - 1;
                        rv->r_flags = flags | RF_ALLOCATED;

                        if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
                                DPRINTF(("splitting region in three parts: "
                                    "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
                                    s->r_start, rv->r_start - 1,
                                    rv->r_start, rv->r_end,
                                    rv->r_end + 1, s->r_end));
                                /*
                                 * We are allocating in the middle.
                                 */
                                r = int_alloc_resource(M_NOWAIT);
                                r->r_start = rv->r_end + 1;
                                r->r_flags = s->r_flags;
                                s->r_end = rv->r_start - 1;
                                TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
                                TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
                        } else if (s->r_start == rv->r_start) {
                                DPRINTF(("allocating from the beginning\n"));
                                /*
                                 * We are allocating at the beginning.
                                 */
                                s->r_start = rv->r_end + 1;
                                TAILQ_INSERT_BEFORE(s, rv, r_link);
                                DPRINTF(("allocating at the end\n"));
                                /*
                                 * We are allocating at the end.
                                 */
                                s->r_end = rv->r_start - 1;
                                TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
        /*
         * Now find an acceptable shared region, if the client's requirements
         * allow sharing.  By our implementation restriction, a candidate
         * region must match exactly by both size and sharing type in order
         * to be considered compatible with the client's request.  (The
         * former restriction could probably be lifted without too much
         * additional work, but this does not seem warranted.)
         */
        DPRINTF(("no unshared regions found\n"));
        if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)

        for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
                if (s->r_start > end)
                if ((s->r_flags & flags) != flags)
                rstart = ulmax(s->r_start, start);
                rend = ulmin(s->r_end, ulmax(start + count - 1, end));
                if (s->r_start >= start && s->r_end <= end
                    && (s->r_end - s->r_start + 1) == count &&
                    (s->r_start & amask) == 0 &&
                    ((s->r_start ^ s->r_end) & bmask) == 0) {
                        rv = int_alloc_resource(M_NOWAIT);
                        rv->r_start = s->r_start;
                        rv->r_end = s->r_end;
                        rv->r_flags = s->r_flags &
                            (RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
                        if (s->r_sharehead == NULL) {
                                s->r_sharehead = malloc(sizeof *s->r_sharehead,
                                    M_RMAN, M_NOWAIT | M_ZERO);
                                if (s->r_sharehead == NULL) {
                                LIST_INIT(s->r_sharehead);
                                LIST_INSERT_HEAD(s->r_sharehead, s,
                                s->r_flags |= RF_FIRSTSHARE;
                        rv->r_sharehead = s->r_sharehead;
                        LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);

        /*
         * We couldn't find anything.
         */

        /*
         * If the user specified RF_ACTIVE in the initial flags,
         * which is reflected in `want_activate', we attempt to atomically
         * activate the resource.  If this fails, we release the resource
         * and indicate overall failure.  (This behavior probably doesn't
         * make sense for RF_TIMESHARE-type resources.)
         */
        if (rv && want_activate) {
                struct resource_i *whohas;
                if (int_rman_activate_resource(rm, rv, &whohas)) {
                        int_rman_release_resource(rm, rv);

        mtx_unlock(rm->rm_mtx);
        return (rv == NULL ? NULL : &rv->r_r);
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
    u_int flags, struct device *dev)
        return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
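/*
 * Illustrative sketch (hypothetical softc, child device, and values): a
 * bridge driver that needs a 4 KB, 4 KB-aligned window for a child could
 * combine rman_make_alignment_flags() with rman_reserve_resource_bound():
 *
 *	struct resource *res;
 *	u_int flags;
 *
 *	flags = RF_ACTIVE | rman_make_alignment_flags(0x1000);
 *	res = rman_reserve_resource_bound(&sc->sc_mem_rman,
 *	    0xf0000000ul, 0xf7fffffful, 0x1000, 0, flags, child);
 *	if (res == NULL)
 *		return (ENXIO);
 */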
int_rman_activate_resource(struct rman *rm, struct resource_i *r,
    struct resource_i **whohas)
        struct resource_i *s;

        /*
         * If we are not timesharing, then there is nothing much to do.
         * If we already have the resource, then there is nothing at all to do.
         * If we are not on a sharing list with anybody else, then there is
         * little to do.
         */
        if ((r->r_flags & RF_TIMESHARE) == 0
            || (r->r_flags & RF_ACTIVE) != 0
            || r->r_sharehead == NULL) {
                r->r_flags |= RF_ACTIVE;

        for (s = LIST_FIRST(r->r_sharehead); s && ok;
             s = LIST_NEXT(s, r_sharelink)) {
                if ((s->r_flags & RF_ACTIVE) != 0) {
                r->r_flags |= RF_ACTIVE;
rman_activate_resource(struct resource *re)
        struct resource_i *r, *whohas;

        mtx_lock(rm->rm_mtx);
        rv = int_rman_activate_resource(rm, r, &whohas);
        mtx_unlock(rm->rm_mtx);
rman_await_resource(struct resource *re, int pri, int timo)
        struct resource_i *r, *whohas;

        mtx_lock(rm->rm_mtx);
                rv = int_rman_activate_resource(rm, r, &whohas);
                        return (rv);    /* returns with mutex held */
                if (r->r_sharehead == NULL)
                        panic("rman_await_resource");
                whohas->r_flags |= RF_WANTED;
                rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
                        mtx_unlock(rm->rm_mtx);
int_rman_deactivate_resource(struct resource_i *r)
        r->r_flags &= ~RF_ACTIVE;
        if (r->r_flags & RF_WANTED) {
                r->r_flags &= ~RF_WANTED;
                wakeup(r->r_sharehead);
rman_deactivate_resource(struct resource *r)
        mtx_lock(rm->rm_mtx);
        int_rman_deactivate_resource(r->__r_i);
        mtx_unlock(rm->rm_mtx);
int_rman_release_resource(struct rman *rm, struct resource_i *r)
        struct resource_i *s, *t;

        if (r->r_flags & RF_ACTIVE)
                int_rman_deactivate_resource(r);

        /*
         * Check for a sharing list first.  If there is one, then we don't
         * have to think as hard.
         */
        if (r->r_sharehead) {
                /*
                 * If a sharing list exists, then we know there are at
                 * least two sharers.
                 *
                 * If we are in the main circleq, appoint someone else.
                 */
                LIST_REMOVE(r, r_sharelink);
                s = LIST_FIRST(r->r_sharehead);
                if (r->r_flags & RF_FIRSTSHARE) {
                        s->r_flags |= RF_FIRSTSHARE;
                        TAILQ_INSERT_BEFORE(r, s, r_link);
                        TAILQ_REMOVE(&rm->rm_list, r, r_link);

                /*
                 * Make sure that the sharing list goes away completely
                 * if the resource is no longer being shared at all.
                 */
                if (LIST_NEXT(s, r_sharelink) == NULL) {
                        free(s->r_sharehead, M_RMAN);
                        s->r_sharehead = NULL;
                        s->r_flags &= ~RF_FIRSTSHARE;

        /*
         * Look at the adjacent resources in the list and see if our
         * segment can be merged with any of them.  If either of the
         * resources is allocated or is not exactly adjacent then they
         * cannot be merged with our segment.
         */
        s = TAILQ_PREV(r, resource_head, r_link);
        if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
            s->r_end + 1 != r->r_start))
        t = TAILQ_NEXT(r, r_link);
        if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
            r->r_end + 1 != t->r_start))

        if (s != NULL && t != NULL) {
                /*
                 * Merge all three segments.
                 */
                TAILQ_REMOVE(&rm->rm_list, r, r_link);
                TAILQ_REMOVE(&rm->rm_list, t, r_link);
        } else if (s != NULL) {
                /*
                 * Merge previous segment with ours.
                 */
                TAILQ_REMOVE(&rm->rm_list, r, r_link);
        } else if (t != NULL) {
                /*
                 * Merge next segment with ours.
                 */
                t->r_start = r->r_start;
                TAILQ_REMOVE(&rm->rm_list, r, r_link);
                /*
                 * At this point, we know there is nothing we
                 * can potentially merge with, because on each
                 * side, there is either nothing there or what is
                 * there is still allocated.  In that case, we don't
                 * want to remove r from the list; we simply want to
                 * change it to an unallocated region and return
                 * without freeing anything.
                 */
                r->r_flags &= ~RF_ALLOCATED;
rman_release_resource(struct resource *re)
        struct resource_i *r;

        mtx_lock(rm->rm_mtx);
        rv = int_rman_release_resource(rm, r);
        mtx_unlock(rm->rm_mtx);
rman_make_alignment_flags(uint32_t size)
        /*
         * Find the highest bit set, and add one if more than one bit is
         * set.  We're effectively computing the ceil(log2(size)) here.
         */
        for (i = 31; i > 0; i--)
        if (~(1 << i) & size)

        return (RF_ALIGNMENT_LOG2(i));
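/*
 * Worked example (values chosen for illustration): for size = 0x1000 only
 * bit 12 is set, so the loop stops at i = 12, no other bits remain, and the
 * result encodes a 2^12 (4 KB) alignment.  For size = 0x1800 the highest
 * set bit is 12 but bit 11 is also set, so i is bumped to 13 and the result
 * encodes a 2^13 (8 KB) alignment -- i.e. the alignment is rounded up to
 * the next power of two that covers the requested size.
 */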
rman_set_start(struct resource *r, u_long start)
        r->__r_i->r_start = start;

rman_get_start(struct resource *r)
        return (r->__r_i->r_start);

rman_set_end(struct resource *r, u_long end)
        r->__r_i->r_end = end;

rman_get_end(struct resource *r)
        return (r->__r_i->r_end);

rman_get_size(struct resource *r)
        return (r->__r_i->r_end - r->__r_i->r_start + 1);

rman_get_flags(struct resource *r)
        return (r->__r_i->r_flags);

rman_set_virtual(struct resource *r, void *v)
        r->__r_i->r_virtual = v;

rman_get_virtual(struct resource *r)
        return (r->__r_i->r_virtual);

rman_set_bustag(struct resource *r, bus_space_tag_t t)

rman_get_bustag(struct resource *r)
        return (r->r_bustag);

rman_set_bushandle(struct resource *r, bus_space_handle_t h)

rman_get_bushandle(struct resource *r)
        return (r->r_bushandle);

rman_set_rid(struct resource *r, int rid)
        r->__r_i->r_rid = rid;

rman_get_rid(struct resource *r)
        return (r->__r_i->r_rid);

rman_set_device(struct resource *r, struct device *dev)
        r->__r_i->r_dev = dev;

rman_get_device(struct resource *r)
        return (r->__r_i->r_dev);

rman_is_region_manager(struct resource *r, struct rman *rm)

        return (r->__r_i->r_rm == rm);
/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take two input parameters; the index into the list of resource
 * managers, and the resource offset into the list.
 */
sysctl_rman(SYSCTL_HANDLER_ARGS)
        int *name = (int *)arg1;
        u_int namelen = arg2;
        int rman_idx, res_idx;
        struct resource_i *res;
        struct resource_i *sres;
        struct u_resource ures;

        if (bus_data_generation_check(name[0]))

        /*
         * Find the indexed resource manager
         */
        mtx_lock(&rman_mtx);
        TAILQ_FOREACH(rm, &rman_head, rm_link) {
                if (rman_idx-- == 0)
        mtx_unlock(&rman_mtx);

        /*
         * If the resource index is -1, we want details on the
         * resource manager.
         */
        if (res_idx == -1) {
                bzero(&urm, sizeof(urm));
                urm.rm_handle = (uintptr_t)rm;
                if (rm->rm_descr != NULL)
                        strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
                urm.rm_start = rm->rm_start;
                urm.rm_size = rm->rm_end - rm->rm_start + 1;
                urm.rm_type = rm->rm_type;

                error = SYSCTL_OUT(req, &urm, sizeof(urm));

        /*
         * Find the indexed resource and return it.
         */
        mtx_lock(rm->rm_mtx);
        TAILQ_FOREACH(res, &rm->rm_list, r_link) {
                if (res->r_sharehead != NULL) {
                        LIST_FOREACH(sres, res->r_sharehead, r_sharelink)
                                if (res_idx-- == 0) {
                else if (res_idx-- == 0)
        mtx_unlock(rm->rm_mtx);

        bzero(&ures, sizeof(ures));
        ures.r_handle = (uintptr_t)res;
        ures.r_parent = (uintptr_t)res->r_rm;
        ures.r_device = (uintptr_t)res->r_dev;
        if (res->r_dev != NULL) {
                if (device_get_name(res->r_dev) != NULL) {
                        snprintf(ures.r_devname, RM_TEXTLEN,
                            device_get_name(res->r_dev),
                            device_get_unit(res->r_dev));
                        strlcpy(ures.r_devname, "nomatch",
                ures.r_devname[0] = '\0';
        ures.r_start = res->r_start;
        ures.r_size = res->r_end - res->r_start + 1;
        ures.r_flags = res->r_flags;

        mtx_unlock(rm->rm_mtx);
        error = SYSCTL_OUT(req, &ures, sizeof(ures));

SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");
dump_rman(struct rman *rm)
        struct resource_i *r;
        const char *devname;

        db_printf("rman: %s\n", rm->rm_descr);
        db_printf("    0x%lx-0x%lx (full range)\n", rm->rm_start, rm->rm_end);
        TAILQ_FOREACH(r, &rm->rm_list, r_link) {
                if (r->r_dev != NULL) {
                        devname = device_get_nameunit(r->r_dev);
                        if (devname == NULL)
                                devname = "nomatch";
                db_printf("    0x%lx-0x%lx ", r->r_start, r->r_end);
                if (devname != NULL)
                        db_printf("(%s)\n", devname);
        db_printf("----\n");
DB_SHOW_COMMAND(rman, db_show_rman)
        dump_rman((struct rman *)addr);

DB_SHOW_ALL_COMMAND(rman, db_show_all_rman)
        TAILQ_FOREACH(rm, &rman_head, rm_link)

DB_SHOW_ALIAS(allrman, db_show_all_rman);