/*
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
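/*
 * A minimal usage sketch, for orientation only.  The rman calls below are
 * the real API; the "foo" driver context, its softc field, and the literal
 * ranges are hypothetical.
 *
 * Set up a manager for a 16-bit I/O port space:
 *
 *	struct rman *rm = &sc->foo_io_rman;
 *
 *	rm->rm_type = RMAN_ARRAY;
 *	rm->rm_descr = "foo bus I/O ports";
 *	if (rman_init(rm) != 0 ||
 *	    rman_manage_region(rm, 0x0, 0xffff) != 0)
 *		panic("foo: cannot set up I/O port rman");
 *
 * On behalf of a child device, carve out 0x20 ports anywhere in
 * [0x100, 0x1ff], unshared, and eventually give them back:
 *
 *	struct resource *res = rman_reserve_resource(rm, 0x100, 0x1ff,
 *	    0x20, 0, child);
 *	...
 *	if (res != NULL)
 *		rman_release_resource(res);
 */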
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).
 */
struct resource_i {
	struct resource		r_r;
	TAILQ_ENTRY(resource_i)	r_link;
	LIST_ENTRY(resource_i)	r_sharelink;
	LIST_HEAD(, resource_i)	*r_sharehead;
	rman_res_t	r_start;	/* index of the first entry in this resource */
	rman_res_t	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;
	void	*r_virtual;	/* virtual address of this resource */
	void	*r_irq_cookie;	/* interrupt cookie for this (interrupt) resource */
	device_t r_dev;	/* device which has allocated this resource */
	struct rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
	int	r_type;		/* optional type for this resource. */
};
static int rman_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RWTUN,
    &rman_debug, 0, "rman debug");
#define	DPRINTF(params)	do { if (rman_debug) printf params; } while (0)
static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct rman_head rman_head;
static struct mtx rman_mtx;	/* mutex to protect rman_head */
static int int_rman_release_resource(struct rman *rm, struct resource_i *r);
static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL)
		r->r_r.__r_i = r;	/* back-pointer used by the accessors below */
	return (r);
}
int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}
	if (rm->rm_start == 0 && rm->rm_end == 0)
		rm->rm_end = ~0;
	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return (ENOMEM);
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);
	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return (0);
}
int
rman_manage_region(struct rman *rm, rman_res_t start, rman_res_t end)
{
	struct resource_i *r, *s, *t;
	int rv = 0;

	DPRINTF(("rman_manage_region: <%s> request: start %#jx, end %#jx\n",
	    rm->rm_descr, start, end));
	if (start < rm->rm_start || end > rm->rm_end)
		return (EINVAL);
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return (ENOMEM);
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);

	/* Skip entries before us. */
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s->r_end == ~0)
			break;
		if (s->r_end + 1 >= r->r_start)
			break;
	}

	/* If we ran off the end of the list, insert at the tail. */
	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		/* Check for any overlap with the current region. */
		if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
			rv = EBUSY;
			goto out;
		}

		/* Check for any overlap with the next region. */
		t = TAILQ_NEXT(s, r_link);
		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
			rv = EBUSY;
			goto out;
		}

		/*
		 * See if this region can be merged with the next region.  If
		 * not, clear the pointer.
		 */
		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
			t = NULL;

		/* See if we can merge with the current region. */
		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
			/* Can we merge all 3 regions? */
			if (t != NULL) {
				s->r_end = t->r_end;
				TAILQ_REMOVE(&rm->rm_list, t, r_link);
				free(r, M_RMAN);
				free(t, M_RMAN);
			} else {
				s->r_end = r->r_end;
				free(r, M_RMAN);
			}
		} else if (t != NULL) {
			/* Can we merge with just the next region? */
			t->r_start = r->r_start;
			free(r, M_RMAN);
		} else if (s->r_end < r->r_start) {
			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
		} else {
			TAILQ_INSERT_BEFORE(s, r, r_link);
		}
	}
out:
	mtx_unlock(rm->rm_mtx);
	return (rv);
}
int
rman_init_from_resource(struct rman *rm, struct resource *r)
{
	int rv;

	if ((rv = rman_init(rm)) != 0)
		return (rv);
	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
}
int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return (EBUSY);
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);
	return (0);
}
int
rman_first_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}
int
rman_last_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH_REVERSE(r, &rm->rm_list, resource_head, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}
/*
 * Shrink or extend one or both ends of an allocated resource.
 */
int
rman_adjust_resource(struct resource *rr, rman_res_t start, rman_res_t end)
{
	struct resource_i *r, *s, *t, *new;
	struct rman *rm;

	/* Not supported for shared resources. */
	r = rr->__r_i;
	if (r->r_flags & RF_SHAREABLE)
		return (EINVAL);

	/*
	 * This does not support wholesale moving of a resource.  At
	 * least part of the desired new range must overlap with the
	 * existing resource.
	 */
	if (end < r->r_start || r->r_end < start)
		return (EINVAL);

	/*
	 * Find the two resource regions immediately adjacent to the
	 * allocated resource.
	 */
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
#ifdef INVARIANTS
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s == r)
			break;
	}
	if (s == NULL)
		panic("resource not in list");
#endif
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);
	KASSERT(s == NULL || s->r_end + 1 == r->r_start,
	    ("prev resource mismatch"));
	KASSERT(t == NULL || r->r_end + 1 == t->r_start,
	    ("next resource mismatch"));

	/*
	 * See if the changes are permitted.  Shrinking is always allowed,
	 * but growing requires sufficient room in the adjacent region.
	 */
	if (start < r->r_start && (s == NULL || (s->r_flags & RF_ALLOCATED) ||
	    s->r_start > start)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}
	if (end > r->r_end && (t == NULL || (t->r_flags & RF_ALLOCATED) ||
	    t->r_end < end)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}

	/*
	 * While holding the lock, grow either end of the resource as
	 * needed and shrink either end if the shrinking does not require
	 * allocating a new resource.  We can safely drop the lock and then
	 * insert a new range to handle the shrinking case afterwards.
	 */
	if (start < r->r_start ||
	    (start > r->r_start && s != NULL && !(s->r_flags & RF_ALLOCATED))) {
		KASSERT(s->r_flags == 0, ("prev is busy"));
		r->r_start = start;
		if (s->r_start == start) {
			TAILQ_REMOVE(&rm->rm_list, s, r_link);
			free(s, M_RMAN);
		} else
			s->r_end = start - 1;
	}
	if (end > r->r_end ||
	    (end < r->r_end && t != NULL && !(t->r_flags & RF_ALLOCATED))) {
		KASSERT(t->r_flags == 0, ("next is busy"));
		r->r_end = end;
		if (t->r_end == end) {
			TAILQ_REMOVE(&rm->rm_list, t, r_link);
			free(t, M_RMAN);
		} else
			t->r_start = end + 1;
	}
	mtx_unlock(rm->rm_mtx);

	/*
	 * Handle the shrinking cases that require allocating a new
	 * resource to hold the newly-free region.  We have to recheck
	 * if we still need this new region after acquiring the lock.
	 */
	if (start > r->r_start) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = r->r_start;
		new->r_end = start - 1;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_start = start;
		s = TAILQ_PREV(r, resource_head, r_link);
		if (s != NULL && !(s->r_flags & RF_ALLOCATED)) {
			s->r_end = start - 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_BEFORE(r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	if (end < r->r_end) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = end + 1;
		new->r_end = r->r_end;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_end = end;
		t = TAILQ_NEXT(r, r_link);
		if (t != NULL && !(t->r_flags & RF_ALLOCATED)) {
			t->r_start = end + 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_AFTER(&rm->rm_list, r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	return (0);
}
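/*
 * Example (illustrative only; PAGE_SIZE stands in for whatever growth the
 * caller needs): extend an existing allocation by one page at its end,
 * keeping the start fixed:
 *
 *	error = rman_adjust_resource(res, rman_get_start(res),
 *	    rman_get_end(res) + PAGE_SIZE);
 *
 * Per the checks above, this fails with EBUSY if the next region is
 * allocated or too small, and with EINVAL if res is shared.
 */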
#define	SHARE_TYPE(f)	(f & (RF_SHAREABLE | RF_PREFETCHABLE))
struct resource *
rman_reserve_resource_bound(struct rman *rm, rman_res_t start, rman_res_t end,
    rman_res_t count, rman_res_t bound, u_int flags,
    device_t dev)
{
	u_int new_rflags;
	struct resource_i *r, *s, *rv;
	rman_res_t rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#jx, %#jx], "
	    "length %#jx, flags %x, device %s\n", rm->rm_descr, start, end,
	    count, flags,
	    dev == NULL ? "<null>" : device_get_nameunit(dev)));
	KASSERT(count != 0, ("%s: attempted to allocate an empty range",
	    __func__));
	KASSERT((flags & RF_FIRSTSHARE) == 0,
	    ("invalid flags %#x", flags));
	new_rflags = (flags & ~RF_FIRSTSHARE) | RF_ALLOCATED;

	mtx_lock(rm->rm_mtx);

	r = TAILQ_FIRST(&rm->rm_list);
	if (r == NULL) {
		DPRINTF(("NULL list head\n"));
	} else {
		DPRINTF(("rman_reserve_resource_bound: trying %#jx <%#jx,%#jx>\n",
		    r->r_end, start, count-1));
	}
	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start + count - 1;
	     r = TAILQ_NEXT(r, r_link)) {
		DPRINTF(("rman_reserve_resource_bound: tried %#jx <%#jx,%#jx>\n",
		    r->r_end, start, count-1));
	}

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ull << RF_ALIGNMENT(flags)) - 1;
	KASSERT(start <= RM_MAX_END - amask,
	    ("start (%#jx) + amask (%#jx) would wrap around", start, amask));

	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#jx, %#jx]\n", s->r_start, s->r_end));
		/*
		 * The resource list is sorted, so there is no point in
		 * searching further once r_start is too large.
		 */
		if (s->r_start > end - (count - 1)) {
			DPRINTF(("s->r_start (%#jx) + count - 1 > end (%#jx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_start > RM_MAX_END - amask) {
			DPRINTF(("s->r_start (%#jx) + amask (%#jx) too large\n",
			    s->r_start, amask));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ummax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied.  This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
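		/*
		 * Worked example (illustrative values): with 4KB alignment
		 * (amask == 0xfff), a 64KB boundary (bmask == ~0xffff),
		 * count 0x2000, and rstart 0xf800, the first pass rounds
		 * rstart up to 0x10000; that range is aligned and no longer
		 * crosses a 64KB boundary, so the loop terminates there.
		 */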
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ummin(s->r_end, ummax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#jx, %#jx]; size %#jx (requested %#jx)\n",
		    rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart) >= (count - 1)) {
			DPRINTF(("candidate region: [%#jx, %#jx], size %#jx\n",
			    rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags = new_rflags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 * s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
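			/*
			 * Schematically (illustrative values), allocating in
			 * the middle of a free region:
			 *
			 *	before:	[s: 0x000-0xfff, free]
			 *	after:	[s: 0x000-0x0ff] [rv: 0x100-0x2ff] [r: 0x300-0xfff]
			 *
			 * where rv is returned allocated and s and r stay free.
			 */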
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				    "[%#jx, %#jx]; [%#jx, %#jx]; [%#jx, %#jx]\n",
				    s->r_start, rv->r_start - 1,
				    rv->r_start, rv->r_end,
				    rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
				    r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
			}
			goto out;
		}
	}
	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & RF_SHAREABLE) == 0)
		goto out;
	for (s = r; s && s->r_end <= end; s = TAILQ_NEXT(s, r_link)) {
		if (SHARE_TYPE(s->r_flags) == SHARE_TYPE(flags) &&
		    s->r_start >= start &&
		    (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
				    M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
				    r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}
	/*
	 * We couldn't find anything.
	 */
out:
	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}
struct resource *
rman_reserve_resource(struct rman *rm, rman_res_t start, rman_res_t end,
    rman_res_t count, u_int flags, device_t dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}
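/*
 * Example (illustrative values): reserve a 4KB-aligned, shareable 4KB
 * region anywhere below 4GB on behalf of a hypothetical child device:
 *
 *	res = rman_reserve_resource_bound(rm, 0, 0xffffffff, 0x1000,
 *	    0, RF_SHAREABLE | RF_ALIGNMENT_LOG2(12), child);
 *
 * A later caller requesting the same size and sharing type can be handed
 * the identical range, per the exact-match sharing rule above.
 */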
int
rman_activate_resource(struct resource *re)
{
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	r->r_flags |= RF_ACTIVE;
	mtx_unlock(rm->rm_mtx);
	return (0);
}
int
rman_deactivate_resource(struct resource *r)
{
	struct rman *rm;

	rm = r->__r_i->r_rm;
	mtx_lock(rm->rm_mtx);
	r->__r_i->r_flags &= ~RF_ACTIVE;
	mtx_unlock(rm->rm_mtx);
	return (0);
}
static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		r->r_flags &= ~RF_ACTIVE;

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main circleq, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		r->r_dev = NULL;
		return (0);
	}

out:
	free(r, M_RMAN);
	return (0);
}
int
rman_release_resource(struct resource *re)
{
	int rv;
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}
uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int i;

	/*
	 * Find the highest bit set, and add one if more than one bit
	 * set.  We're effectively computing the ceil(log2(size)) here.
	 */
	for (i = 31; i > 0; i--)
		if ((1 << i) & size)
			break;
	if (~(1 << i) & size)
		i++;

	return (RF_ALIGNMENT_LOG2(i));
}
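/*
 * For example, size 0x1000 has only bit 12 set, so i ends up 12 (a 4KB
 * alignment); size 0x1001 has low bits set besides bit 12, so i is bumped
 * to 13, i.e. ceil(log2(0x1001)) == 13 (an 8KB alignment).
 */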
rman_res_t
rman_get_start(struct resource *r)
{

	return (r->__r_i->r_start);
}

rman_res_t
rman_get_end(struct resource *r)
{

	return (r->__r_i->r_end);
}

rman_res_t
rman_get_size(struct resource *r)
{

	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{

	return (r->__r_i->r_flags);
}
void
rman_set_virtual(struct resource *r, void *v)
{

	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{

	return (r->__r_i->r_virtual);
}

void
rman_set_irq_cookie(struct resource *r, void *c)
{

	r->__r_i->r_irq_cookie = c;
}

void *
rman_get_irq_cookie(struct resource *r)
{

	return (r->__r_i->r_irq_cookie);
}
void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{

	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{

	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{

	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{

	return (r->r_bushandle);
}
void
rman_set_mapping(struct resource *r, struct resource_map *map)
{

	KASSERT(rman_get_size(r) == map->r_size,
	    ("rman_set_mapping: size mismatch"));
	rman_set_bustag(r, map->r_bustag);
	rman_set_bushandle(r, map->r_bushandle);
	rman_set_virtual(r, map->r_vaddr);
}
void
rman_get_mapping(struct resource *r, struct resource_map *map)
{

	map->r_bustag = rman_get_bustag(r);
	map->r_bushandle = rman_get_bushandle(r);
	map->r_size = rman_get_size(r);
	map->r_vaddr = rman_get_virtual(r);
}
void
rman_set_rid(struct resource *r, int rid)
{

	r->__r_i->r_rid = rid;
}

int
rman_get_rid(struct resource *r)
{

	return (r->__r_i->r_rid);
}

void
rman_set_type(struct resource *r, int type)
{

	r->__r_i->r_type = type;
}

int
rman_get_type(struct resource *r)
{

	return (r->__r_i->r_type);
}

void
rman_set_device(struct resource *r, device_t dev)
{

	r->__r_i->r_dev = dev;
}

device_t
rman_get_device(struct resource *r)
{

	return (r->__r_i->r_dev);
}

int
rman_is_region_manager(struct resource *r, struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}
/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take two input parameters; the index into the list of resource
 * managers, and the resource offset into the list.
 */
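/*
 * (Userland consumers such as devinfo(8) walk resource usage through
 * this interface.)
 */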
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct resource_i	*sres;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		if (rm->rm_descr != NULL)
			strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res->r_sharehead != NULL) {
			LIST_FOREACH(sres, res->r_sharehead, r_sharelink)
				if (res_idx-- == 0) {
					res = sres;
					goto found;
				}
		}
		else if (res_idx-- == 0)
			goto found;
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);

found:
	bzero(&ures, sizeof(ures));
	ures.r_handle = (uintptr_t)res;
	ures.r_parent = (uintptr_t)res->r_rm;
	ures.r_device = (uintptr_t)res->r_dev;
	if (res->r_dev != NULL) {
		if (device_get_name(res->r_dev) != NULL) {
			snprintf(ures.r_devname, RM_TEXTLEN,
			    "%s%d",
			    device_get_name(res->r_dev),
			    device_get_unit(res->r_dev));
		} else {
			strlcpy(ures.r_devname, "nomatch",
			    RM_TEXTLEN);
		}
	} else {
		ures.r_devname[0] = '\0';
	}
	ures.r_start = res->r_start;
	ures.r_size = res->r_end - res->r_start + 1;
	ures.r_flags = res->r_flags;

	mtx_unlock(rm->rm_mtx);
	error = SYSCTL_OUT(req, &ures, sizeof(ures));
	return (error);
}
static SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD | CTLFLAG_MPSAFE,
    sysctl_rman,
    "kernel resource manager");
#ifdef DDB
#include <ddb/ddb.h>

static void
dump_rman_header(struct rman *rm)
{

	if (db_pager_quit)
		return;
	db_printf("rman %p: %s (0x%jx-0x%jx full range)\n",
	    rm, rm->rm_descr, (rman_res_t)rm->rm_start, (rman_res_t)rm->rm_end);
}
static void
dump_rman(struct rman *rm)
{
	struct resource_i *r;
	const char *devname;

	if (db_pager_quit)
		return;
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_dev != NULL) {
			devname = device_get_nameunit(r->r_dev);
			if (devname == NULL)
				devname = "nomatch";
		} else
			devname = NULL;
		db_printf("    0x%jx-0x%jx (RID=%d) ",
		    r->r_start, r->r_end, r->r_rid);
		if (devname != NULL)
			db_printf("(%s)\n", devname);
		else
			db_printf("----\n");
		if (db_pager_quit)
			return;
	}
}
DB_SHOW_COMMAND(rman, db_show_rman)
{

	if (have_addr) {
		dump_rman_header((struct rman *)addr);
		dump_rman((struct rman *)addr);
	}
}
DB_SHOW_COMMAND_FLAGS(rmans, db_show_rmans, DB_CMD_MEMSAFE)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
	}
}
DB_SHOW_ALL_COMMAND(rman, db_show_all_rman)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
		dump_rman(rm);
	}
}
DB_SHOW_ALIAS_FLAGS(allrman, db_show_all_rman, DB_CMD_MEMSAFE);
#endif