/*
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
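
/*
 * Illustrative sketch (not part of this file): the typical life cycle a
 * bus driver takes through this interface.  The rman, region, and device
 * names below are hypothetical.
 *
 *	static struct rman io_rman;
 *
 *	io_rman.rm_type = RMAN_ARRAY;
 *	io_rman.rm_descr = "Example I/O ports";
 *	if (rman_init(&io_rman) != 0)
 *		panic("io_rman");
 *	rman_manage_region(&io_rman, 0x0, 0xffff);
 *
 *	(later, on behalf of a child device: 32 ports, 32-byte aligned)
 *	res = rman_reserve_resource_bound(&io_rman, 0x0, 0xffff, 32, 0,
 *	    rman_make_alignment_flags(32) | RF_ACTIVE, child);
 *	...
 *	rman_release_resource(res);
 */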

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).  That is also why the indices are defined to have type
 * `unsigned long' -- that being the largest integral type in ISO C (1990).
 * The 1999 version of C allows `long long'; we may need to switch to that
 * at some point in the future, particularly if we want to support 36-bit
 * addresses on IA32 hardware.
 */
struct resource_i {
	struct resource r_r;
	TAILQ_ENTRY(resource_i)	r_link;
	LIST_ENTRY(resource_i)	r_sharelink;
	LIST_HEAD(, resource_i)	*r_sharehead;
	u_long	r_start;	/* index of the first entry in this resource */
	u_long	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;	/* state flags (RF_*) */
	void	*r_virtual;	/* virtual address of this resource */
	struct	device *r_dev;	/* device which has allocated this resource */
	struct	rman *r_rm;	/* resource manager from whence this came */
	void	*r_spare1;	/* Spare pointer 1 */
	void	*r_spare2;	/* Spare pointer 2 */
	int	r_rid;		/* optional rid for this resource. */
};
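
/*
 * `struct resource' is the public handle given out to drivers, while
 * `struct resource_i' is the representation private to this file; the
 * two point at each other (int_alloc_resource() sets r->r_r.__r_i = r,
 * and the accessors near the bottom of this file go through r->__r_i).
 */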

static int rman_debug = 0;
TUNABLE_INT("debug.rman_debug", &rman_debug);
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
    &rman_debug, 0, "rman debug");

#define	DPRINTF(params) do { if (rman_debug) printf params; } while (0)

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct	rman_head rman_head;
static	struct mtx rman_mtx; /* mutex to protect rman_head */
static	int int_rman_activate_resource(struct rman *rm, struct resource_i *r,
	    struct resource_i **whohas);
static	int int_rman_deactivate_resource(struct resource_i *r);
static	int int_rman_release_resource(struct rman *rm, struct resource_i *r);

static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL) {
		r->r_r.__r_i = r;
	}
	return (r);
}

/*
 * XXX: puc.c is a big hack.
 * XXX: it should be rewritten to act like a bridge and offer
 * XXX: its own resource manager.
 * XXX: until somebody has time, help it out with these two functions
 */
struct resource *
rman_secret_puc_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = int_alloc_resource(malloc_flag);
	if (r != NULL)
		return (&r->r_r);
	return (NULL);
}

void
rman_secret_puc_free_resource(struct resource *r)
{

	free(r->__r_i, M_RMAN);
}

int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}

/*
 * NB: this interface is not robust against programming errors which
 * add multiple copies of the same region.
 */
int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
	struct resource_i *r, *s;

	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
	    rm->rm_descr, start, end));
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);
	/* Skip entries that end before our region starts. */
	for (s = TAILQ_FIRST(&rm->rm_list);
	     s && s->r_end < r->r_start;
	     s = TAILQ_NEXT(s, r_link))
		;

	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		TAILQ_INSERT_BEFORE(s, r, r_link);
	}

	mtx_unlock(rm->rm_mtx);
	return 0;
}

int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}

struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
    u_long count, u_long bound, u_int flags, struct device *dev)
{
	u_int	want_activate;
	struct	resource_i *r, *s, *rv;
	u_long	rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#lx, %#lx], "
	    "length %#lx, flags %u, device %s\n", rm->rm_descr, start, end,
	    count, flags, dev == NULL ? "<null>" : device_get_nameunit(dev)));
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	mtx_lock(rm->rm_mtx);

	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
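
	/*
	 * Worked example (illustrative): RF_ALIGNMENT(flags) == 4 gives
	 * amask = 0xf, so candidate starts are rounded up to 16-byte
	 * multiples.  bound == 0x100 gives bmask = ~0xff; a candidate
	 * range [rstart, rstart + count - 1] stays within one 0x100
	 * boundary exactly when (rstart ^ (rstart + count - 1)) & bmask
	 * is zero, which is the test used in the loop below.
	 */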
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		if (s->r_start + count - 1 > end) {
			DPRINTF(("s->r_start (%#lx) + count - 1 > end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied.  This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		    rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			    rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags |= RF_ALLOCATED | flags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 * s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
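			/*
			 * Pictorially (middle-allocation case), the original
			 * chunk s is carved up as:
			 *
			 *  before: s:[s_start ...................... s_end]
			 *  after:  s:[s_start .. rstart-1]
			 *          rv:[rstart .. rstart+count-1]
			 *          r:[rv->r_end+1 .. s_end]
			 */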
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = flags | RF_ALLOCATED;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				    "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				    s->r_start, rv->r_start - 1,
				    rv->r_start, rv->r_end,
				    rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
				    r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
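	/*
	 * For instance (hypothetical), two devices may both pass
	 * RF_SHAREABLE when reserving the same interrupt line; the second
	 * request is satisfied below by cloning the existing region and
	 * chaining both clones onto one r_sharehead list.
	 */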
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		if (s->r_start > end)
			break;
		if ((s->r_flags & flags) != flags)
			continue;
		rstart = ulmax(s->r_start, start);
		rend = ulmin(s->r_end, ulmax(start + count - 1, end));
		if (s->r_start >= start && s->r_end <= end
		    && (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = s->r_flags &
			    (RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
				    M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
				    r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in the initial flags,
	 * which is reflected in `want_activate', we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && want_activate) {
		struct resource_i *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = NULL;
		}
	}

	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}

struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
    u_int flags, struct device *dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}

static int
int_rman_activate_resource(struct rman *rm, struct resource_i *r,
    struct resource_i **whohas)
{
	struct resource_i *s;
	int ok;

	/*
	 * If we are not timesharing, then there is nothing much to do.
	 * If we already have the resource, then there is nothing at all to do.
	 * If we are not on a sharing list with anybody else, then there is
	 * little to do.
	 */
	if ((r->r_flags & RF_TIMESHARE) == 0
	    || (r->r_flags & RF_ACTIVE) != 0
	    || r->r_sharehead == NULL) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}

	ok = 1;
	for (s = LIST_FIRST(r->r_sharehead); s && ok;
	     s = LIST_NEXT(s, r_sharelink)) {
		if ((s->r_flags & RF_ACTIVE) != 0) {
			ok = 0;
			*whohas = s;
		}
	}
	if (ok) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}
	return EBUSY;
}

int
rman_activate_resource(struct resource *re)
{
	int rv;
	struct resource_i *r, *whohas;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_activate_resource(rm, r, &whohas);
	mtx_unlock(rm->rm_mtx);
	return rv;
}

int
rman_await_resource(struct resource *re, int pri, int timo)
{
	int rv;
	struct resource_i *r, *whohas;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	for (;;) {
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with mutex held */

		if (r->r_sharehead == NULL)
			panic("rman_await_resource");
		whohas->r_flags |= RF_WANTED;
		rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
		if (rv) {
			mtx_unlock(rm->rm_mtx);
			return (rv);
		}
	}
}

static int
int_rman_deactivate_resource(struct resource_i *r)
{

	r->r_flags &= ~RF_ACTIVE;
	if (r->r_flags & RF_WANTED) {
		r->r_flags &= ~RF_WANTED;
		/* Wake anyone sleeping in rman_await_resource(). */
		wakeup(r->r_sharehead);
	}
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct rman *rm;

	rm = r->__r_i->r_rm;
	mtx_lock(rm->rm_mtx);
	int_rman_deactivate_resource(r->__r_i);
	mtx_unlock(rm->rm_mtx);
	return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main circleq, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *re)
{
	int rv;
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}

uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int i;

	/*
	 * Find the highest bit set, and add one if more than one bit
	 * set.  We're effectively computing the ceil(log2(size)) here.
	 */
	for (i = 31; i > 0; i--)
		if ((1 << i) & size)
			break;
	if (~(1 << i) & size)
		i++;

	return (RF_ALIGNMENT_LOG2(i));
}
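
/*
 * Examples (illustrative): rman_make_alignment_flags(0x1000) encodes an
 * alignment of 2^12, since 0x1000 is a power of two.  For a non-power-of-two
 * size such as 0x600, the highest set bit is 10 and the remainder bumps the
 * result to 2^11; i.e. the alignment is rounded up to the next power of two.
 */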

u_long
rman_get_start(struct resource *r)
{
	return (r->__r_i->r_start);
}

u_long
rman_get_end(struct resource *r)
{
	return (r->__r_i->r_end);
}

u_long
rman_get_size(struct resource *r)
{
	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{
	return (r->__r_i->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{
	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{
	return (r->__r_i->r_virtual);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{
	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{
	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{
	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{
	return (r->r_bushandle);
}

void
rman_set_rid(struct resource *r, int rid)
{
	r->__r_i->r_rid = rid;
}

void
rman_set_start(struct resource *r, u_long start)
{
	r->__r_i->r_start = start;
}

void
rman_set_end(struct resource *r, u_long end)
{
	r->__r_i->r_end = end;
}

int
rman_get_rid(struct resource *r)
{
	return (r->__r_i->r_rid);
}

struct device *
rman_get_device(struct resource *r)
{
	return (r->__r_i->r_dev);
}

void
rman_set_device(struct resource *r, struct device *dev)
{
	r->__r_i->r_dev = dev;
}

int
rman_is_region_manager(struct resource *r, struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take two input parameters: the index into the list of resource
 * managers, and the resource offset into the list.
 */
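/*
 * Concretely, the OID is expected to carry three name components:
 * {bus generation, resource-manager index, resource index}, with a
 * resource index of -1 selecting the resource manager itself (see the
 * namelen and res_idx checks below).  Userland tools such as devinfo(8)
 * are plausible consumers of this interface.
 */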
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res_idx-- == 0) {
			bzero(&ures, sizeof(ures));
			ures.r_handle = (uintptr_t)res;
			ures.r_parent = (uintptr_t)res->r_rm;
			ures.r_device = (uintptr_t)res->r_dev;
			if (res->r_dev != NULL) {
				if (device_get_name(res->r_dev) != NULL) {
					snprintf(ures.r_devname, RM_TEXTLEN,
					    "%s%d",
					    device_get_name(res->r_dev),
					    device_get_unit(res->r_dev));
				} else {
					strlcpy(ures.r_devname, "nomatch",
					    RM_TEXTLEN);
				}
			} else {
				ures.r_devname[0] = '\0';
			}
			ures.r_start = res->r_start;
			ures.r_size = res->r_end - res->r_start + 1;
			ures.r_flags = res->r_flags;

			mtx_unlock(rm->rm_mtx);
			error = SYSCTL_OUT(req, &ures, sizeof(ures));
			return (error);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");