/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Implementation of the ioctls for setup of DRM mappings and DMA buffers.
 */

#include "dev/pci/pcireg.h"

#include "dev/drm/drmP.h"
/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
 * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no virtual
 * address for accessing them.  Cleaned up at unload.
 */
static int drm_alloc_resource(struct drm_device *dev, int resource)
{
	struct resource *res;
	int rid;

	DRM_SPINLOCK_ASSERT(&dev->dev_lock);

	if (resource >= DRM_MAX_PCI_RESOURCE) {
		DRM_ERROR("Resource %d too large\n", resource);
		return 1;
	}

	if (dev->pcir[resource] != NULL) {
		/* Already allocated. */
		return 0;
	}

	DRM_UNLOCK();
	rid = PCIR_BAR(resource);
	res = bus_alloc_resource_any(dev->device, SYS_RES_MEMORY, &rid,
	    RF_SHAREABLE);
	DRM_LOCK();
	if (res == NULL) {
		DRM_ERROR("Couldn't find resource 0x%x\n", resource);
		return 1;
	}

	if (dev->pcir[resource] == NULL) {
		dev->pcirid[resource] = rid;
		dev->pcir[resource] = res;
	}

	return 0;
}

unsigned long drm_get_resource_start(struct drm_device *dev,
				     unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return rman_get_start(dev->pcir[resource]);
}

unsigned long drm_get_resource_len(struct drm_device *dev,
				   unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return rman_get_size(dev->pcir[resource]);
}

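/* Create a new mapping of the requested type (registers, framebuffer, SHM,
 * AGP, scatter/gather or consistent memory), or hand back an existing
 * kernel-allocated map that already covers it.  Called with the DRM lock
 * held; the lock is dropped and reacquired around the allocations below.
 */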
int drm_addmap(struct drm_device *dev, unsigned long offset,
	       unsigned long size,
	       enum drm_map_type type, enum drm_map_flags flags,
	       drm_local_map_t **map_ptr)
{
	drm_local_map_t *map;
	int align;
	/*drm_agp_mem_t *entry;
	int valid;*/

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
		return EINVAL;
	}
	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
		DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}
	if (offset + size < offset) {
		DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}

	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset,
	    size, type);

	/* Check if this is just another version of a kernel-allocated map, and
	 * just hand that back if so.
	 */
	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
	    type == _DRM_SHM) {
		TAILQ_FOREACH(map, &dev->maplist, link) {
			if (map->type == type && (map->offset == offset ||
			    (map->type == _DRM_SHM &&
			    map->flags == _DRM_CONTAINS_LOCK))) {
				map->size = size;
				DRM_DEBUG("Found kernel map %d\n", type);
				goto done;
			}
		}
	}
	DRM_UNLOCK();

	/* Allocate a new map structure, fill it in, and do any type-specific
	 * initialization necessary.
	 */
	map = malloc(sizeof(*map), DRM_MEM_MAPS, M_ZERO | M_NOWAIT);
	if (map == NULL) {
		DRM_LOCK();
		return ENOMEM;
	}

	map->offset = offset;
	map->size = size;
	map->type = type;
	map->flags = flags;

	switch (map->type) {
	case _DRM_REGISTERS:
		map->handle = drm_ioremap(dev, map);
		if (!(map->flags & _DRM_WRITE_COMBINING))
			break;
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
			map->mtrr = 1;
		break;
	case _DRM_SHM:
		map->handle = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
		DRM_DEBUG("%lu %d %p\n",
		    map->size, drm_order(map->size), map->handle);
		if (map->handle == NULL) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			DRM_LOCK();
			if (dev->lock.hw_lock != NULL) {
				DRM_UNLOCK();
				free(map->handle, DRM_MEM_MAPS);
				free(map, DRM_MEM_MAPS);
				return EBUSY;
			}
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
			DRM_UNLOCK();
		}
		break;
	case _DRM_AGP:
		/*valid = 0;*/
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->info.ai_aperture_size - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->mtrr; /* for getmap */
		/*for (entry = dev->agp->memory; entry; entry = entry->next) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <=
			    entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!valid) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return EACCES;
		}*/
		break;
	case _DRM_SCATTER_GATHER:
		if (dev->sg == NULL) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return EINVAL;
		}
		map->offset += dev->sg->handle;
		break;
	case _DRM_CONSISTENT:
		/* Unfortunately, we don't get any alignment specification from
		 * the caller, so we have to guess.  drm_pci_alloc requires
		 * a power-of-two alignment, so try to align the bus address of
		 * the map to its size if possible, otherwise just assume
		 * PAGE_SIZE alignment.
		 */
		align = map->size;
		if ((align & (align - 1)) != 0)
			align = PAGE_SIZE;
		map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
		if (map->dmah == NULL) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return ENOMEM;
		}
		map->handle = map->dmah->vaddr;
		map->offset = map->dmah->busaddr;
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		free(map, DRM_MEM_MAPS);
		DRM_LOCK();
		return EINVAL;
	}

	DRM_LOCK();
	TAILQ_INSERT_TAIL(&dev->maplist, map, link);

done:
	/* Jumped to, with lock held, when a kernel map is found. */
	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
	    map->size);

	*map_ptr = map;

	return 0;
}

int drm_addmap_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_map *request = data;
	drm_local_map_t *map;
	int err;

	if (!(dev->flags & (FREAD|FWRITE)))
		return EACCES; /* Require read/write */

	if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
		return EACCES;

	DRM_LOCK();
	err = drm_addmap(dev, request->offset, request->size, request->type,
	    request->flags, &map);
	DRM_UNLOCK();
	if (err != 0)
		return err;

	request->offset = map->offset;
	request->size = map->size;
	request->type = map->type;
	request->flags = map->flags;
	request->mtrr = map->mtrr;
	request->handle = map->handle;

	if (request->type != _DRM_SHM) {
		request->handle = (void *)request->offset;
	}

	return 0;
}

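/* Tear down a single map: release the MTRR, I/O mapping, SHM allocation or
 * consistent memory backing it, then free the map structure itself.
 * Caller must hold the DRM lock.
 */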
void drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
{
	DRM_SPINLOCK_ASSERT(&dev->dev_lock);

	if (map == NULL)
		return;

	TAILQ_REMOVE(&dev->maplist, map, link);

	switch (map->type) {
	case _DRM_REGISTERS:
		if (map->bsr == NULL)
			drm_ioremapfree(map);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (map->mtrr) {
			int __unused retcode;

			retcode = drm_mtrr_del(0, map->offset, map->size,
			    DRM_MTRR_WC);
			DRM_DEBUG("mtrr_del = %d\n", retcode);
		}
		break;
	case _DRM_SHM:
		free(map->handle, DRM_MEM_MAPS);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		drm_pci_free(dev, map->dmah);
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		break;
	}

	if (map->bsr != NULL) {
		bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid,
		    map->bsr);
	}

	free(map, DRM_MEM_MAPS);
}

/* Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 */
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_local_map_t *map;
	struct drm_map *request = data;

	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (map->handle == request->handle &&
		    map->flags & _DRM_REMOVABLE)
			break;
	}

	/* No match found. */
	if (map == NULL) {
		DRM_UNLOCK();
		return EINVAL;
	}

	drm_rmmap(dev, map);

	DRM_UNLOCK();

	return 0;
}

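/* Undo a partially completed addbufs operation: free any DMA segments and
 * per-buffer private data allocated so far, leaving the entry empty.
 */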
static void drm_cleanup_buf_error(struct drm_device *dev,
				  drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			drm_pci_free(dev, entry->seglist[i]);
		}
		free(entry->seglist, DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			free(entry->buflist[i].dev_private, DRM_MEM_BUFS);
		}
		free(entry->buflist, DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}

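/* Worker for drm_addbufs_agp(): carves count buffers of 1 << order bytes
 * out of AGP space starting at request->agp_start and appends them to the
 * device-wide buffer list.  Called with dma_lock held.
 */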
static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	/*drm_agp_mem_t *agp_entry;
	int valid;*/
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	/* Make sure buffers are located in AGP memory that we own */
	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
	 * memory.  Safe to ignore for now because these ioctls are still
	 * root-only.
	 */
	/*valid = 0;
	for (agp_entry = dev->agp->memory; agp_entry;
	    agp_entry = agp_entry->next) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <=
		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!valid) {
		DRM_DEBUG("zone invalid\n");
		return EINVAL;
	}*/

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	if (entry->buflist == NULL) {
		return ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);

			return ENOMEM;
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	return 0;
}

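/* Worker for drm_addbufs_pci(): allocates DMA segments with drm_pci_alloc()
 * and slices them into buffers, replacing the global page list only once
 * every allocation has succeeded.  Called with dma_lock held; the lock is
 * dropped around the drm_pci_alloc() call.
 */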
static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
	    request->count, request->size, size, order);

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	entry->seglist = malloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS,
	    M_NOWAIT | M_ZERO);

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = malloc((dma->page_count + (count << page_order)) *
	    sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT);

	if (entry->buflist == NULL || entry->seglist == NULL ||
	    temp_pagelist == NULL) {
		free(temp_pagelist, DRM_MEM_PAGES);
		free(entry->seglist, DRM_MEM_SEGS);
		free(entry->buflist, DRM_MEM_BUFS);
		return ENOMEM;
	}

	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
	    sizeof(*dma->pagelist));

	DRM_DEBUG("pagelist: %d entries\n",
	    dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
		    0xfffffffful);
		DRM_SPINLOCK(&dev->dma_lock);
		if (dmah == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			free(temp_pagelist, DRM_MEM_PAGES);
			return ENOMEM;
		}

		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ %p\n",
			    dma->page_count + page_count,
			    (char *)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++] =
			    (long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		    offset + size <= total && entry->buf_count < count;
		    offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = ((char *)dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->buf_priv_size;
			buf->dev_private = malloc(buf->dev_priv_size,
			    DRM_MEM_BUFS, M_NOWAIT | M_ZERO);
			if (buf->dev_private == NULL) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				free(temp_pagelist, DRM_MEM_PAGES);
				return ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
			    entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		free(temp_pagelist, DRM_MEM_PAGES);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	free(dma->pagelist, DRM_MEM_PAGES);
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	request->count = entry->buf_count;
	request->size = size;

	return 0;
}

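/* Worker for drm_addbufs_sg(): like the AGP path, but the buffers live in
 * the scatter/gather area, so kernel virtual addresses are offset by
 * dev->sg->handle rather than the AGP aperture base.  Called with dma_lock
 * held.
 */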
static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	if (entry->buflist == NULL)
		return ENOMEM;

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);

			return ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n",
		    entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	return 0;
}

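/* The drm_addbufs_* wrappers below share the same validation: a sane buffer
 * count, an order within [DRM_MIN_ORDER, DRM_MAX_ORDER], no allocations
 * after the first buffer-using ioctl, and at most one allocation per order.
 */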
int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_agp(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}

int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_sg(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}

int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_pci(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}

int drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int err;

	if (request->flags & _DRM_AGP_BUFFER)
		err = drm_addbufs_agp(dev, request);
	else if (request->flags & _DRM_SG_BUFFER)
		err = drm_addbufs_sg(dev, request);
	else
		err = drm_addbufs_pci(dev, request);

	return err;
}

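/* Ioctl handler: report how many buffer pools exist and, if the supplied
 * list is large enough, a descriptor for each non-empty order.  Also bumps
 * buf_use, so no further allocations are possible afterwards.
 */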
int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;
	int retcode = 0;

	DRM_SPINLOCK(&dev->dma_lock);
	++dev->buf_use;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc from;

				from.count = dma->bufs[i].buf_count;
				from.size = dma->bufs[i].buf_size;
				from.low_mark = dma->bufs[i].freelist.low_mark;
				from.high_mark = dma->bufs[i].freelist.high_mark;

				if (DRM_COPY_TO_USER(&request->list[count], &from,
				    sizeof(struct drm_buf_desc)) != 0) {
					retcode = EFAULT;
					break;
				}

				DRM_DEBUG("%d %d %d %d %d\n",
				    i, dma->bufs[i].buf_count,
				    dma->bufs[i].buf_size,
				    dma->bufs[i].freelist.low_mark,
				    dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return retcode;
}

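/* Ioctl handler: set the freelist low/high water marks for the pool of
 * buffers of the given size, bounded by that pool's buffer count.
 */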
int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;

	DRM_DEBUG("%d, %d, %d\n",
	    request->size, request->low_mark, request->high_mark);

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
	    request->low_mark < 0 || request->high_mark < 0) {
		return EINVAL;
	}

	DRM_SPINLOCK(&dev->dma_lock);
	if (request->low_mark > dma->bufs[order].buf_count ||
	    request->high_mark > dma->bufs[order].buf_count) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EINVAL;
	}

	dma->bufs[order].freelist.low_mark = request->low_mark;
	dma->bufs[order].freelist.high_mark = request->high_mark;
	DRM_SPINUNLOCK(&dev->dma_lock);

	return 0;
}

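/* Ioctl handler: return a list of buffers, identified by index, to the
 * free pool.  Each buffer must be owned by the calling file handle.
 */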
int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	drm_buf_t *buf;
	int retcode = 0;

	DRM_DEBUG("%d\n", request->count);

	DRM_SPINLOCK(&dev->dma_lock);
	for (i = 0; i < request->count; i++) {
		if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
			retcode = EFAULT;
			break;
		}
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
			    idx, dma->buf_count - 1);
			retcode = EINVAL;
			break;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
			    DRM_CURRENTPID);
			retcode = EINVAL;
			break;
		}
		drm_free_buffer(dev, buf);
	}
	DRM_SPINUNLOCK(&dev->dma_lock);

	return retcode;
}

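/* Ioctl handler: map the whole buffer pool (the AGP/SG buffer map, or the
 * PCI buffers) into the calling process's address space and copy out
 * per-buffer index, size and user virtual address information.
 */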
int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	vm_offset_t address;
	struct vmspace *vms;
	vm_ooffset_t foff;
	vm_size_t size;
	vm_offset_t vaddr;
	struct drm_buf_map *request = data;
	int i;

	vms = DRM_CURPROC->td_proc->p_vmspace;

	DRM_SPINLOCK(&dev->dma_lock);
	dev->buf_use++;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	if (request->count < dma->buf_count)
		goto done;

	if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (drm_core_check_feature(dev, DRIVER_SG) &&
	    (dma->flags & _DRM_DMA_USE_SG))) {
		drm_local_map_t *map = dev->agp_buffer_map;

		if (map == NULL) {
			retcode = EINVAL;
			goto done;
		}
		size = round_page(map->size);
		foff = map->offset;
	} else {
		size = round_page(dma->byte_count);
		foff = 0;
	}

	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
#if __FreeBSD_version >= 600023
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC, OBJT_DEVICE,
	    dev->devnode, foff);
#else
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
	    SLIST_FIRST(&dev->devnode->si_hlist), foff);
#endif
	if (retcode)
		goto done;

	request->virtual = (void *)vaddr;

	for (i = 0; i < dma->buf_count; i++) {
		if (DRM_COPY_TO_USER(&request->list[i].idx,
		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].total,
		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
		    sizeof(zero))) {
			retcode = EFAULT;
			goto done;
		}
		/* User virtual address of this buffer within the mapping. */
		address = vaddr + dma->buflist[i]->offset;
		if (DRM_COPY_TO_USER(&request->list[i].address, &address,
		    sizeof(address))) {
			retcode = EFAULT;
			goto done;
		}
	}

done:
	request->count = dma->buf_count;

	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}

/*
 * Compute order.  Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;

	if (size == 0)
		return 0;

	order = flsl(size) - 1;
	if (size & ~(1ul << order))
		order++;

	return order;
}

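/* Example: drm_order(PAGE_SIZE) == PAGE_SHIFT; with 4K pages,
 * drm_order(4096) == 12 and drm_order(4097) == 13, i.e. the smallest order
 * such that (1ul << order) >= size.
 */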