/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
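/*
 * A minimal usage sketch of this allocator, for orientation only; the
 * caller-side names below ("mgr", "hole", "node") are hypothetical and
 * error handling is abbreviated.  A typical consumer initialises a manager
 * over an address range, carves blocks out of the holes it finds, and
 * eventually returns them:
 *
 *      struct drm_mm mgr;
 *      struct drm_mm_node *hole, *node;
 *
 *      drm_mm_init(&mgr, 0, 16 * 1024 * 1024);        // manage [0, 16MB)
 *      hole = drm_mm_search_free(&mgr, 4096, 4096, 0); // 4KB, 4KB-aligned
 *      if (hole != NULL)
 *              node = drm_mm_get_block_generic(hole, 4096, 4096, 0);
 *      ...
 *      drm_mm_put_block(node);         // give the space back
 *      drm_mm_takedown(&mgr);          // manager must be empty here
 */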
#include <dev/drm2/drmP.h>
#include <dev/drm2/drm_mm.h>

#define MM_UNUSED_TARGET 4
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
        struct drm_mm_node *child;

        child = malloc(sizeof(*child), DRM_MEM_MM, M_ZERO |
            (atomic ? M_NOWAIT : M_WAITOK));

        if (unlikely(child == NULL)) {
                mtx_lock(&mm->unused_lock);
                if (list_empty(&mm->unused_nodes))
                        child = NULL;
                else {
                        child =
                            list_entry(mm->unused_nodes.next,
                                       struct drm_mm_node, node_list);
                        list_del(&child->node_list);
                        --mm->num_unused;
                }
                mtx_unlock(&mm->unused_lock);
        }
        return child;
}
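/*
 * Pre-fill the cache of unused nodes up to MM_UNUSED_TARGET so that a
 * subsequent atomic allocation (drm_mm_kmalloc() with atomic != 0) can
 * fall back on a cached node if its M_NOWAIT malloc fails.
 */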
int drm_mm_pre_get(struct drm_mm *mm)
{
        struct drm_mm_node *node;

        mtx_lock(&mm->unused_lock);
        while (mm->num_unused < MM_UNUSED_TARGET) {
                mtx_unlock(&mm->unused_lock);
                node = malloc(sizeof(*node), DRM_MEM_MM, M_WAITOK);
                mtx_lock(&mm->unused_lock);

                if (unlikely(node == NULL)) {
                        int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
                        mtx_unlock(&mm->unused_lock);
                        return ret;
                }
                ++mm->num_unused;
                list_add_tail(&node->node_list, &mm->unused_nodes);
        }
        mtx_unlock(&mm->unused_lock);
        return 0;
}
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
        return hole_node->start + hole_node->size;
}
static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
        struct drm_mm_node *next_node =
                list_entry(hole_node->node_list.next, struct drm_mm_node,
                           node_list);

        return next_node->start;
}
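/*
 * Free space is not tracked with separate nodes.  Each allocated node whose
 * hole_follows flag is set owns the gap behind it: the hole runs from
 * node->start + node->size up to the start of the next node on node_list,
 * which is exactly what the two helpers above compute.  Nodes with a hole
 * behind them are additionally linked on mm->hole_stack, so the search
 * routines only ever walk nodes that actually have free space to offer.
 */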
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
                                 struct drm_mm_node *node,
                                 unsigned long size, unsigned alignment)
{
        struct drm_mm *mm = hole_node->mm;
        unsigned long tmp = 0, wasted = 0;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);

        KASSERT(hole_node->hole_follows && !node->allocated, ("hole_node"));

        if (alignment)
                tmp = hole_start % alignment;

        if (!tmp) {
                hole_node->hole_follows = 0;
                list_del_init(&hole_node->hole_stack);
        } else
                wasted = alignment - tmp;

        node->start = hole_start + wasted;
        node->size = size;
        node->mm = mm;
        node->allocated = 1;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        KASSERT(node->start + node->size <= hole_end, ("hole pos"));

        if (node->start + node->size < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        } else {
                node->hole_follows = 0;
        }
}
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
                                             unsigned long size,
                                             unsigned alignment,
                                             int atomic)
{
        struct drm_mm_node *node;

        node = drm_mm_kmalloc(hole_node->mm, atomic);
        if (unlikely(node == NULL))
                return NULL;

        drm_mm_insert_helper(hole_node, node, size, alignment);

        return node;
}
int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
                       unsigned long size, unsigned alignment)
{
        struct drm_mm_node *hole_node;

        hole_node = drm_mm_search_free(mm, size, alignment, 0);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper(hole_node, node, size, alignment);

        return 0;
}
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
                                       struct drm_mm_node *node,
                                       unsigned long size, unsigned alignment,
                                       unsigned long start, unsigned long end)
{
        struct drm_mm *mm = hole_node->mm;
        unsigned long tmp = 0, wasted = 0;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);

        KASSERT(hole_node->hole_follows && !node->allocated, ("hole_node"));

        if (hole_start < start)
                wasted += start - hole_start;
        if (alignment)
                tmp = (hole_start + wasted) % alignment;

        if (tmp)
                wasted += alignment - tmp;

        if (!wasted) {
                hole_node->hole_follows = 0;
                list_del_init(&hole_node->hole_stack);
        }

        node->start = hole_start + wasted;
        node->size = size;
        node->mm = mm;
        node->allocated = 1;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        KASSERT(node->start + node->size <= hole_end, ("hole_end"));
        KASSERT(node->start + node->size <= end, ("end"));

        if (node->start + node->size < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        } else {
                node->hole_follows = 0;
        }
}
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
                                                   unsigned long size,
                                                   unsigned alignment,
                                                   unsigned long start,
                                                   unsigned long end,
                                                   int atomic)
{
        struct drm_mm_node *node;

        node = drm_mm_kmalloc(hole_node->mm, atomic);
        if (unlikely(node == NULL))
                return NULL;

        drm_mm_insert_helper_range(hole_node, node, size, alignment,
                                   start, end);

        return node;
}
int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
                                unsigned long size, unsigned alignment,
                                unsigned long start, unsigned long end)
{
        struct drm_mm_node *hole_node;

        hole_node = drm_mm_search_free_in_range(mm, size, alignment,
                                                start, end, 0);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper_range(hole_node, node, size, alignment,
                                   start, end);

        return 0;
}
void drm_mm_remove_node(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        KASSERT(!node->scanned_block && !node->scanned_prev_free
            && !node->scanned_next_free, ("node"));

        prev_node =
            list_entry(node->node_list.prev, struct drm_mm_node, node_list);

        if (node->hole_follows) {
                KASSERT(drm_mm_hole_node_start(node)
                    != drm_mm_hole_node_end(node), ("hole_follows"));
                list_del(&node->hole_stack);
        } else
                KASSERT(drm_mm_hole_node_start(node)
                    == drm_mm_hole_node_end(node), ("!hole_follows"));

        if (!prev_node->hole_follows) {
                prev_node->hole_follows = 1;
                list_add(&prev_node->hole_stack, &mm->hole_stack);
        } else
                list_move(&prev_node->hole_stack, &mm->hole_stack);

        list_del(&node->node_list);
        node->allocated = 0;
}
/*
 * Put a block.  The space it occupied rejoins the hole behind the previous
 * node (implicitly merging with any neighbouring free space), and the node
 * itself is either cached on the unused list or freed.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;

        drm_mm_remove_node(node);

        mtx_lock(&mm->unused_lock);
        if (mm->num_unused < MM_UNUSED_TARGET) {
                list_add(&node->node_list, &mm->unused_nodes);
                ++mm->num_unused;
        } else
                free(node, DRM_MEM_MM);
        mtx_unlock(&mm->unused_lock);
}
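/*
 * Sketch of the alternative, embedded-node interface, assuming a hypothetical
 * caller object ("struct my_buffer" and "mgr" below are illustrative only)
 * that embeds its own struct drm_mm_node.  This avoids the separate node
 * allocation performed by drm_mm_get_block_generic()/drm_mm_put_block().
 * The embedded node must be zeroed before first use, since the insert
 * helpers assert !node->allocated:
 *
 *      struct my_buffer {
 *              struct drm_mm_node mm_node;
 *              ...
 *      };
 *
 *      error = drm_mm_insert_node(&mgr, &buf->mm_node, 4096, 4096);
 *      if (error == 0) {
 *              ... use buf->mm_node.start ...
 *              drm_mm_remove_node(&buf->mm_node);
 *      }
 */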
static int check_free_hole(unsigned long start, unsigned long end,
                           unsigned long size, unsigned alignment)
{
        unsigned wasted = 0;

        if (end - start < size)
                return 0;

        if (alignment) {
                unsigned tmp = start % alignment;
                if (tmp)
                        wasted = alignment - tmp;
        }

        if (end >= start + size + wasted) {
                return 1;
        }

        return 0;
}
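/*
 * Worked example for the alignment handling above (illustrative numbers
 * only): for a hole starting at 0x1003 and a request of size 0x800 with
 * alignment 0x1000, tmp = 0x1003 % 0x1000 = 0x3, so wasted = 0xffd and the
 * block would actually be placed at 0x2000.  The hole therefore has to
 * extend to at least 0x1003 + 0x800 + 0xffd = 0x2800; a hole ending at
 * 0x2000 passes the initial size check (0xffd >= 0x800) but is correctly
 * rejected by the final test.
 */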
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
                                       unsigned long size,
                                       unsigned alignment, int best_match)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;

        KASSERT(!mm->scanned_blocks, ("scanned"));

        best = NULL;
        best_size = ~0UL;

        list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
                KASSERT(entry->hole_follows, ("hole_follows"));
                if (!check_free_hole(drm_mm_hole_node_start(entry),
                                     drm_mm_hole_node_end(entry),
                                     size, alignment))
                        continue;

                if (!best_match)
                        return entry;

                if (entry->size < best_size) {
                        best = entry;
                        best_size = entry->size;
                }
        }

        return best;
}
struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
                                                unsigned long size,
                                                unsigned alignment,
                                                unsigned long start,
                                                unsigned long end,
                                                int best_match)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;

        KASSERT(!mm->scanned_blocks, ("scanned"));

        best = NULL;
        best_size = ~0UL;

        list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
                unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
                        start : drm_mm_hole_node_start(entry);
                unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
                        end : drm_mm_hole_node_end(entry);

                KASSERT(entry->hole_follows, ("hole_follows"));
                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!best_match)
                        return entry;

                if (entry->size < best_size) {
                        best = entry;
                        best_size = entry->size;
                }
        }

        return best;
}
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
        list_replace(&old->node_list, &new->node_list);
        list_replace(&old->hole_stack, &new->hole_stack);
        new->hole_follows = old->hole_follows;
        new->mm = old->mm;
        new->start = old->start;
        new->size = old->size;

        old->allocated = 0;
        new->allocated = 1;
}
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
                      unsigned alignment)
{
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_size = 0;
        mm->scan_check_range = 0;
        mm->prev_scanned_node = NULL;
}
void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
                                 unsigned alignment,
                                 unsigned long start,
                                 unsigned long end)
{
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_size = 0;
        mm->scan_start = start;
        mm->scan_end = end;
        mm->scan_check_range = 1;
        mm->prev_scanned_node = NULL;
}
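/*
 * Hedged sketch of the eviction-scan protocol implemented by the two
 * functions below; "obj" and the candidate list are hypothetical caller-side
 * constructs.  Candidate blocks are added to the scan roster until
 * drm_mm_scan_add_block() reports that a large enough hole would open up,
 * then every added block must be removed again in the reverse order of
 * addition; blocks for which drm_mm_scan_remove_block() returns non-zero lie
 * inside the hole that was found and are the ones that actually have to be
 * evicted before the new allocation can be made:
 *
 *      drm_mm_init_scan(&mgr, size, alignment);
 *
 *      // Add candidates (e.g. from an LRU list) until a hole is found:
 *      //      if (drm_mm_scan_add_block(&obj->mm_node)) -> hole found
 *
 *      // Then remove every candidate again, newest first:
 *      //      if (drm_mm_scan_remove_block(&obj->mm_node))
 *      //              the object overlaps the hole and must be evicted
 */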
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;
        unsigned long hole_start, hole_end;
        unsigned long adj_start;
        unsigned long adj_end;

        mm->scanned_blocks++;

        KASSERT(!node->scanned_block, ("node->scanned_block"));
        node->scanned_block = 1;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        node->scanned_preceeds_hole = prev_node->hole_follows;
        prev_node->hole_follows = 1;
        list_del(&node->node_list);
        node->node_list.prev = &prev_node->node_list;
        node->node_list.next = &mm->prev_scanned_node->node_list;
        mm->prev_scanned_node = node;

        hole_start = drm_mm_hole_node_start(prev_node);
        hole_end = drm_mm_hole_node_end(prev_node);
        if (mm->scan_check_range) {
                adj_start = hole_start < mm->scan_start ?
                        mm->scan_start : hole_start;
                adj_end = hole_end > mm->scan_end ?
                        mm->scan_end : hole_end;
        } else {
                adj_start = hole_start;
                adj_end = hole_end;
        }

        if (check_free_hole(adj_start, adj_end,
                            mm->scan_size, mm->scan_alignment)) {
                mm->scan_hit_start = hole_start;
                mm->scan_hit_size = hole_end;

                return 1;
        }

        return 0;
}
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        mm->scanned_blocks--;

        KASSERT(node->scanned_block, ("scanned_block"));
        node->scanned_block = 0;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        prev_node->hole_follows = node->scanned_preceeds_hole;
        INIT_LIST_HEAD(&node->node_list);
        list_add(&node->node_list, &prev_node->node_list);

        /* Only need to check for containment because start & size of the
         * complete resulting free block (not just the desired part) are
         * stored. */
        if (node->start >= mm->scan_hit_start &&
            node->start + node->size
                        <= mm->scan_hit_start + mm->scan_hit_size) {
                return 1;
        }

        return 0;
}
int drm_mm_clean(struct drm_mm * mm)
{
        struct list_head *head = &mm->head_node.node_list;

        return (head->next->next == head);
}
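/*
 * Note on the initialisation below: mm->head_node is a sentinel that never
 * represents an allocation.  Its start and size are chosen so that
 * drm_mm_hole_node_start() on it yields `start' (head_node.start +
 * head_node.size wraps back to the base of the managed range) and
 * drm_mm_hole_node_end() yields the start of the first real node.  The
 * entire managed range therefore appears as one initial hole on
 * mm->hole_stack without any special-casing in the allocation paths.
 */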
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
        INIT_LIST_HEAD(&mm->hole_stack);
        INIT_LIST_HEAD(&mm->unused_nodes);
        mm->num_unused = 0;
        mm->scanned_blocks = 0;
        mtx_init(&mm->unused_lock, "drm_unused", NULL, MTX_DEF);

        INIT_LIST_HEAD(&mm->head_node.node_list);
        INIT_LIST_HEAD(&mm->head_node.hole_stack);
        mm->head_node.hole_follows = 1;
        mm->head_node.scanned_block = 0;
        mm->head_node.scanned_prev_free = 0;
        mm->head_node.scanned_next_free = 0;
        mm->head_node.mm = mm;
        mm->head_node.start = start + size;
        mm->head_node.size = start - mm->head_node.start;
        list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

        return 0;
}
void drm_mm_takedown(struct drm_mm * mm)
{
        struct drm_mm_node *entry, *next;

        if (!list_empty(&mm->head_node.node_list)) {
                DRM_ERROR("Memory manager not clean. Delaying takedown\n");
                return;
        }

        mtx_lock(&mm->unused_lock);
        list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
                list_del(&entry->node_list);
                free(entry, DRM_MEM_MM);
                --mm->num_unused;
        }
        mtx_unlock(&mm->unused_lock);

        mtx_destroy(&mm->unused_lock);

        KASSERT(mm->num_unused == 0, ("num_unused != 0"));
}
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
        struct drm_mm_node *entry;
        unsigned long total_used = 0, total_free = 0, total = 0;
        unsigned long hole_start, hole_end, hole_size;

        hole_start = drm_mm_hole_node_start(&mm->head_node);
        hole_end = drm_mm_hole_node_end(&mm->head_node);
        hole_size = hole_end - hole_start;
        if (hole_size)
                printf("%s 0x%08lx-0x%08lx: %8lu: free\n",
                        prefix, hole_start, hole_end,
                        hole_size);
        total_free += hole_size;

        drm_mm_for_each_node(entry, mm) {
                printf("%s 0x%08lx-0x%08lx: %8lu: used\n",
                        prefix, entry->start, entry->start + entry->size,
                        entry->size);
                total_used += entry->size;

                if (entry->hole_follows) {
                        hole_start = drm_mm_hole_node_start(entry);
                        hole_end = drm_mm_hole_node_end(entry);
                        hole_size = hole_end - hole_start;
                        printf("%s 0x%08lx-0x%08lx: %8lu: free\n",
                                prefix, hole_start, hole_end,
                                hole_size);
                        total_free += hole_size;
                }
        }
        total = total_free + total_used;

        printf("%s total: %lu, used %lu free %lu\n", prefix, total,
                total_used, total_free);
}