/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

#include <sys/cdefs.h>
/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list were implemented. Currently it is
 * just an unordered stack of free regions; this could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm_mm.h>

#define MM_UNUSED_TARGET 4

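/*
 * For orientation, a minimal usage sketch (illustrative only; the names
 * "my_mm" and "my_node" and the 1 MB range are invented for the example).
 * It shows the embedded-node path, which needs no pool allocations:
 *
 *	struct drm_mm my_mm;
 *	struct drm_mm_node my_node = { 0 };	(must be cleared)
 *
 *	drm_mm_init(&my_mm, 0, 1024 * 1024);
 *	if (drm_mm_insert_node(&my_mm, &my_node, 4096, 0) == 0) {
 *		... use [my_node.start, my_node.start + my_node.size) ...
 *		drm_mm_remove_node(&my_node);
 *	}
 *	drm_mm_takedown(&my_mm);
 */
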
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
        struct drm_mm_node *child;

        child = malloc(sizeof(*child), DRM_MEM_MM, M_NOWAIT | M_ZERO);

        if (unlikely(child == NULL)) {
                /* Fall back to the pre-allocated pool, if it is non-empty. */
                mtx_lock(&mm->unused_lock);
                if (list_empty(&mm->unused_nodes))
                        child = NULL;
                else {
                        child =
                            list_entry(mm->unused_nodes.next,
                                       struct drm_mm_node, node_list);
                        list_del(&child->node_list);
                        --mm->num_unused;
                }
                mtx_unlock(&mm->unused_lock);
        }
        return child;
}

/* drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * mm:		memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
        struct drm_mm_node *node;

        mtx_lock(&mm->unused_lock);
        while (mm->num_unused < MM_UNUSED_TARGET) {
                mtx_unlock(&mm->unused_lock);
                node = malloc(sizeof(*node), DRM_MEM_MM, M_NOWAIT | M_ZERO);
                mtx_lock(&mm->unused_lock);

                if (unlikely(node == NULL)) {
                        int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
                        mtx_unlock(&mm->unused_lock);
                        return ret;
                }
                ++mm->num_unused;
                list_add_tail(&node->node_list, &mm->unused_nodes);
        }
        mtx_unlock(&mm->unused_lock);
        return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);

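/*
 * Illustrative sketch only: a caller that cannot sleep tops up the node
 * pool first, then carves a block out of a hole found by the search
 * helpers defined further below (error handling elided):
 *
 *	struct drm_mm_node *hole, *block;
 *
 *	if (drm_mm_pre_get(mm))
 *		return -ENOMEM;
 *	hole = drm_mm_search_free_generic(mm, size, alignment, 0, false);
 *	if (hole != NULL)
 *		block = drm_mm_get_block_generic(hole, size, alignment, 0, 1);
 *	...
 *	drm_mm_put_block(block);	(recycles the node into the pool, or frees it)
 */
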
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
        return hole_node->start + hole_node->size;
}

static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
        struct drm_mm_node *next_node =
                list_entry(hole_node->node_list.next, struct drm_mm_node,
                           node_list);

        return next_node->start;
}

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
                                 struct drm_mm_node *node,
                                 unsigned long size, unsigned alignment,
                                 unsigned long color)
{
        struct drm_mm *mm = hole_node->mm;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);
        unsigned long adj_start = hole_start;
        unsigned long adj_end = hole_end;

        BUG_ON(!hole_node->hole_follows || node->allocated);

        if (mm->color_adjust)
                mm->color_adjust(hole_node, color, &adj_start, &adj_end);

        if (alignment) {
                unsigned tmp = adj_start % alignment;
                if (tmp)
                        adj_start += alignment - tmp;
        }

        if (adj_start == hole_start) {
                hole_node->hole_follows = 0;
                list_del(&hole_node->hole_stack);
        }

        node->start = adj_start;
        node->size = size;
        node->mm = mm;
        node->color = color;
        node->allocated = 1;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        BUG_ON(node->start + node->size > adj_end);

        node->hole_follows = 0;
        if (node->start + node->size < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }
}

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
                                             unsigned long size,
                                             unsigned alignment,
                                             unsigned long color,
                                             int atomic)
{
        struct drm_mm_node *node;

        node = drm_mm_kmalloc(hole_node->mm, atomic);
        if (unlikely(node == NULL))
                return NULL;

        drm_mm_insert_helper(hole_node, node, size, alignment, color);

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
                               unsigned long size, unsigned alignment,
                               unsigned long color)
{
        struct drm_mm_node *hole_node;

        hole_node = drm_mm_search_free_generic(mm, size, alignment,
                                               color, 0);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper(hole_node, node, size, alignment, color);
        return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
                       unsigned long size, unsigned alignment)
{
        return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
}
EXPORT_SYMBOL(drm_mm_insert_node);

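/*
 * Illustrative sketch only: with the insert_node family the caller embeds
 * the node in its own object, so no pool allocation is needed. The struct
 * and field names below are invented for the example:
 *
 *	struct my_buffer {
 *		struct drm_mm_node mm_node;	(must be zeroed before use)
 *	};
 *
 *	ret = drm_mm_insert_node(mm, &buf->mm_node, size, alignment);
 *	if (ret == -ENOSPC)
 *		... evict something (see the scan API below) and retry ...
 */
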
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
                                       struct drm_mm_node *node,
                                       unsigned long size, unsigned alignment,
                                       unsigned long color,
                                       unsigned long start, unsigned long end)
{
        struct drm_mm *mm = hole_node->mm;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);
        unsigned long adj_start = hole_start;
        unsigned long adj_end = hole_end;

        BUG_ON(!hole_node->hole_follows || node->allocated);

        if (adj_start < start)
                adj_start = start;
        if (adj_end > end)
                adj_end = end;

        if (mm->color_adjust)
                mm->color_adjust(hole_node, color, &adj_start, &adj_end);

        if (alignment) {
                unsigned tmp = adj_start % alignment;
                if (tmp)
                        adj_start += alignment - tmp;
        }

        if (adj_start == hole_start) {
                hole_node->hole_follows = 0;
                list_del(&hole_node->hole_stack);
        }

        node->start = adj_start;
        node->size = size;
        node->mm = mm;
        node->color = color;
        node->allocated = 1;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        BUG_ON(node->start + node->size > adj_end);
        BUG_ON(node->start + node->size > end);

        node->hole_follows = 0;
        if (node->start + node->size < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
                                                unsigned long size,
                                                unsigned alignment,
                                                unsigned long color,
                                                unsigned long start,
                                                unsigned long end,
                                                int atomic)
{
        struct drm_mm_node *node;

        node = drm_mm_kmalloc(hole_node->mm, atomic);
        if (unlikely(node == NULL))
                return NULL;

        drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
                                   start, end);

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range-restricted
 * allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
                                        unsigned long size, unsigned alignment, unsigned long color,
                                        unsigned long start, unsigned long end)
{
        struct drm_mm_node *hole_node;

        hole_node = drm_mm_search_free_in_range_generic(mm,
                                                        size, alignment, color,
                                                        start, end, 0);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper_range(hole_node, node,
                                   size, alignment, color,
                                   start, end);
        return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
                                unsigned long size, unsigned alignment,
                                unsigned long start, unsigned long end)
{
        return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);

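/*
 * Illustrative sketch only: range-restricted inserts confine the node to
 * [start, end), which helps when only part of the managed space is usable.
 * The 256 KB window below is an invented example:
 *
 *	ret = drm_mm_insert_node_in_range(mm, &buf->mm_node, size, alignment,
 *					  0, 256 * 1024);
 */
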
/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        BUG_ON(node->scanned_block || node->scanned_prev_free
                                   || node->scanned_next_free);

        prev_node =
            list_entry(node->node_list.prev, struct drm_mm_node, node_list);

        if (node->hole_follows) {
                BUG_ON(drm_mm_hole_node_start(node)
                                == drm_mm_hole_node_end(node));
                list_del(&node->hole_stack);
        } else
                BUG_ON(drm_mm_hole_node_start(node)
                                != drm_mm_hole_node_end(node));

        if (!prev_node->hole_follows) {
                prev_node->hole_follows = 1;
                list_add(&prev_node->hole_stack, &mm->hole_stack);
        } else
                list_move(&prev_node->hole_stack, &mm->hole_stack);

        list_del(&node->node_list);
        node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;

        drm_mm_remove_node(node);

        mtx_lock(&mm->unused_lock);
        if (mm->num_unused < MM_UNUSED_TARGET) {
                list_add(&node->node_list, &mm->unused_nodes);
                ++mm->num_unused;
        } else
                free(node, DRM_MEM_MM);
        mtx_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);

static int check_free_hole(unsigned long start, unsigned long end,
                           unsigned long size, unsigned alignment)
{
        if (end - start < size)
                return 0;

        if (alignment) {
                unsigned tmp = start % alignment;
                if (tmp)
                        start += alignment - tmp;
        }

        return end >= start + size;
}

struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
                                               unsigned long size,
                                               unsigned alignment,
                                               unsigned long color,
                                               bool best_match)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
                unsigned long adj_start = drm_mm_hole_node_start(entry);
                unsigned long adj_end = drm_mm_hole_node_end(entry);

                if (mm->color_adjust) {
                        mm->color_adjust(entry, color, &adj_start, &adj_end);
                        if (adj_end <= adj_start)
                                continue;
                }

                BUG_ON(!entry->hole_follows);
                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!best_match)
                        return entry;

                if (entry->size < best_size) {
                        best = entry;
                        best_size = entry->size;
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);

struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
                                                        unsigned long size,
                                                        unsigned alignment,
                                                        unsigned long color,
                                                        unsigned long start,
                                                        unsigned long end,
                                                        bool best_match)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
                unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
                        start : drm_mm_hole_node_start(entry);
                unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
                        end : drm_mm_hole_node_end(entry);

                BUG_ON(!entry->hole_follows);

                if (mm->color_adjust) {
                        mm->color_adjust(entry, color, &adj_start, &adj_end);
                        if (adj_end <= adj_start)
                                continue;
                }

                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!best_match)
                        return entry;

                if (entry->size < best_size) {
                        best = entry;
                        best_size = entry->size;
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);

/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
        list_replace(&old->node_list, &new->node_list);
        list_replace(&old->hole_stack, &new->hole_stack);
        new->hole_follows = old->hole_follows;
        new->mm = old->mm;
        new->start = old->start;
        new->size = old->size;
        new->color = old->color;

        old->allocated = 0;
        new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

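/*
 * Illustrative sketch only: drm_mm_replace_node() takes over an existing
 * allocation in place, without a remove + insert cycle that could lose the
 * spot to another allocation. "new_node" below is a caller-owned, zeroed
 * node invented for the example:
 *
 *	drm_mm_replace_node(old_node, new_node);
 *	(old_node is now unallocated and may be freed by its owner)
 */
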
/**
 * Initialize lru scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. See the eviction sketch following drm_mm_scan_remove_block() below for
 * the intended calling sequence.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
                      unsigned long size,
                      unsigned alignment,
                      unsigned long color)
{
        mm->scan_color = color;
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_end = 0;
        mm->scan_check_range = 0;
        mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize lru scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
                                 unsigned long size,
                                 unsigned alignment,
                                 unsigned long color,
                                 unsigned long start,
                                 unsigned long end)
{
        mm->scan_color = color;
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_end = 0;
        mm->scan_start = start;
        mm->scan_end = end;
        mm->scan_check_range = 1;
        mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;
        unsigned long hole_start, hole_end;
        unsigned long adj_start, adj_end;

        mm->scanned_blocks++;

        BUG_ON(node->scanned_block);
        node->scanned_block = 1;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        node->scanned_preceeds_hole = prev_node->hole_follows;
        prev_node->hole_follows = 1;
        list_del(&node->node_list);
        node->node_list.prev = &prev_node->node_list;
        node->node_list.next = &mm->prev_scanned_node->node_list;
        mm->prev_scanned_node = node;

        adj_start = hole_start = drm_mm_hole_node_start(prev_node);
        adj_end = hole_end = drm_mm_hole_node_end(prev_node);

        if (mm->scan_check_range) {
                if (adj_start < mm->scan_start)
                        adj_start = mm->scan_start;
                if (adj_end > mm->scan_end)
                        adj_end = mm->scan_end;
        }

        if (mm->color_adjust)
                mm->color_adjust(prev_node, mm->scan_color,
                                 &adj_start, &adj_end);

        if (check_free_hole(adj_start, adj_end,
                            mm->scan_size, mm->scan_alignment)) {
                mm->scan_hit_start = hole_start;
                mm->scan_hit_end = hole_end;
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then return
 * the just freed block (because it's at the top of the free_stack list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        mm->scanned_blocks--;

        BUG_ON(!node->scanned_block);
        node->scanned_block = 0;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        prev_node->hole_follows = node->scanned_preceeds_hole;
        list_add(&node->node_list, &prev_node->node_list);

        return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
                node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

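/*
 * Illustrative sketch only: the scan API is intended to be driven by an
 * eviction loop over some driver LRU list. The names "obj", "lru_list" and
 * "lru_link" below are invented for the example:
 *
 *	drm_mm_init_scan(mm, size, alignment, 0);
 *	list_for_each_entry(obj, &lru_list, lru_link) {
 *		if (drm_mm_scan_add_block(&obj->mm_node))
 *			break;			(a suitable hole was found)
 *	}
 *
 * Afterwards every scanned block must be passed to
 * drm_mm_scan_remove_block() again, honoring the ordering requirement
 * documented above; blocks for which it returns 1 overlap the chosen hole
 * and should be evicted. Once the scan list is empty, evict them and retry
 * the allocation.
 */
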
int drm_mm_clean(struct drm_mm *mm)
{
        struct list_head *head = &mm->head_node.node_list;

        /* Clean means no allocated nodes: only the head node is on the ring. */
        return (list_empty(head));
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
        INIT_LIST_HEAD(&mm->hole_stack);
        INIT_LIST_HEAD(&mm->unused_nodes);
        mm->num_unused = 0;
        mm->scanned_blocks = 0;
        mtx_init(&mm->unused_lock, "drm_unused", NULL, MTX_DEF);

        /*
         * Clever trick to avoid a special case in the free hole tracking:
         * head_node.size wraps to -size, so drm_mm_hole_node_start() on the
         * head node yields 'start' and the hole that follows it spans the
         * entire managed range [start, start + size).
         */
        INIT_LIST_HEAD(&mm->head_node.node_list);
        INIT_LIST_HEAD(&mm->head_node.hole_stack);
        mm->head_node.hole_follows = 1;
        mm->head_node.scanned_block = 0;
        mm->head_node.scanned_prev_free = 0;
        mm->head_node.scanned_next_free = 0;
        mm->head_node.mm = mm;
        mm->head_node.start = start + size;
        mm->head_node.size = start - mm->head_node.start;
        list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

        mm->color_adjust = NULL;

        return 0;
}
EXPORT_SYMBOL(drm_mm_init);

void drm_mm_takedown(struct drm_mm *mm)
{
        struct drm_mm_node *entry, *next;

        if (!list_empty(&mm->head_node.node_list)) {
                DRM_ERROR("Memory manager not clean. Delaying takedown\n");
                return;
        }

        mtx_lock(&mm->unused_lock);
        list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
                list_del(&entry->node_list);
                free(entry, DRM_MEM_MM);
                --mm->num_unused;
        }
        mtx_unlock(&mm->unused_lock);

        BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
        struct drm_mm_node *entry;
        unsigned long total_used = 0, total_free = 0, total = 0;
        unsigned long hole_start, hole_end, hole_size;

        hole_start = drm_mm_hole_node_start(&mm->head_node);
        hole_end = drm_mm_hole_node_end(&mm->head_node);
        hole_size = hole_end - hole_start;
        if (hole_size)
                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
                        prefix, hole_start, hole_end,
                        hole_size);
        total_free += hole_size;

        drm_mm_for_each_node(entry, mm) {
                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
                        prefix, entry->start, entry->start + entry->size,
                        entry->size);
                total_used += entry->size;

                if (entry->hole_follows) {
                        hole_start = drm_mm_hole_node_start(entry);
                        hole_end = drm_mm_hole_node_end(entry);
                        hole_size = hole_end - hole_start;
                        printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
                                prefix, hole_start, hole_end,
                                hole_size);
                        total_free += hole_size;
                }
        }
        total = total_free + total_used;

        printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
                total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);