 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 * Rickard E. (Rik) Faith <faith@valinux.com>
 * Gareth Hughes <gareth@valinux.com>

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

 * Implementation of the ioctls for setup of DRM mappings and DMA buffers.

#include "dev/pci/pcireg.h"

#include "dev/drm/drmP.h"

/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
 * drm_get_resource_*. Note that they are not RF_ACTIVE, so there's no virtual
 * address for accessing them. Cleaned up at unload.
static int drm_alloc_resource(struct drm_device *dev, int resource)
	if (resource >= DRM_MAX_PCI_RESOURCE) {
		DRM_ERROR("Resource %d too large\n", resource);

	if (dev->pcir[resource] != NULL) {

	dev->pcirid[resource] = PCIR_BAR(resource);
	dev->pcir[resource] = bus_alloc_resource_any(dev->device,
	    SYS_RES_MEMORY, &dev->pcirid[resource], RF_SHAREABLE);
	if (dev->pcir[resource] == NULL) {
		DRM_ERROR("Couldn't find resource 0x%x\n", resource);

unsigned long drm_get_resource_start(struct drm_device *dev,
    unsigned int resource)
	if (drm_alloc_resource(dev, resource) != 0)

	return rman_get_start(dev->pcir[resource]);

unsigned long drm_get_resource_len(struct drm_device *dev,
    unsigned int resource)
	if (drm_alloc_resource(dev, resource) != 0)

	return rman_get_size(dev->pcir[resource]);
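
/*
 * Create a DRM mapping of the requested type (registers, frame buffer,
 * shared memory, AGP, scatter/gather, or consistent memory) and link it
 * onto dev->maplist. A hypothetical driver-side call might look like the
 * following (sketch only; real drivers add their own error handling):
 *
 *	drm_local_map_t *map;
 *	int err = drm_addmap(dev, drm_get_resource_start(dev, 0),
 *	    drm_get_resource_len(dev, 0), _DRM_REGISTERS, 0, &map);
 */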
int drm_addmap(struct drm_device *dev, unsigned long offset,
    enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t **map_ptr)
	/*drm_agp_mem_t *entry;

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
		DRM_ERROR("Requested removable map for non-DRM_SHM\n");

	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
		DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n",

	if (offset + size < offset) {
		DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n",

	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset,

	/* Check if this is just another version of a kernel-allocated map, and
	 * just hand that back if so.
	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
		TAILQ_FOREACH(map, &dev->maplist, link) {
			if (map->type == type && (map->offset == offset ||
			    (map->type == _DRM_SHM &&
			    map->flags == _DRM_CONTAINS_LOCK))) {
				DRM_DEBUG("Found kernel map %d\n", type);

	/* Allocate a new map structure, fill it in, and do any type-specific
	 * initialization necessary.
	map = malloc(sizeof(*map), DRM_MEM_MAPS, M_ZERO | M_NOWAIT);

	map->offset = offset;

		map->handle = drm_ioremap(dev, map);
		if (!(map->flags & _DRM_WRITE_COMBINING))
	case _DRM_FRAME_BUFFER:
		if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
		map->handle = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
		DRM_DEBUG("%lu %d %p\n",
		    map->size, drm_order(map->size), map->handle);
			free(map, DRM_MEM_MAPS);
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				free(map->handle, DRM_MEM_MAPS);
				free(map, DRM_MEM_MAPS);
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable. So, only add the base
		 * address if the map's offset isn't already within the
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->info.ai_aperture_size - 1) {
			map->offset += dev->agp->base;
		map->mtrr = dev->agp->mtrr; /* for getmap */
		/*for (entry = dev->agp->memory; entry; entry = entry->next) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <=
			    entry->bound + entry->pages * PAGE_SIZE)) {
			free(map, DRM_MEM_MAPS);
	case _DRM_SCATTER_GATHER:
			free(map, DRM_MEM_MAPS);
		map->offset += dev->sg->handle;
	case _DRM_CONSISTENT:
		/* Unfortunately, we don't get any alignment specification from
		 * the caller, so we have to guess. drm_pci_alloc requires
		 * a power-of-two alignment, so try to align the bus address of
		 * the map to its size if possible, otherwise just assume
		 * PAGE_SIZE alignment.
		if ((align & (align - 1)) != 0)
		map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
		if (map->dmah == NULL) {
			free(map, DRM_MEM_MAPS);
		map->handle = map->dmah->vaddr;
		map->offset = map->dmah->busaddr;
		DRM_ERROR("Bad map type %d\n", map->type);
		free(map, DRM_MEM_MAPS);

	TAILQ_INSERT_TAIL(&dev->maplist, map, link);

	/* Jumped to, with lock held, when a kernel map is found. */
	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
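
/*
 * ioctl wrapper around drm_addmap(): checks that the fd was opened
 * read/write and that non-root callers only request AGP maps, then copies
 * the resulting map description back to user space.
 */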
int drm_addmap_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
	struct drm_map *request = data;
	drm_local_map_t *map;

	if (!(dev->flags & (FREAD|FWRITE)))
		return EACCES; /* Require read/write */

	if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)

	err = drm_addmap(dev, request->offset, request->size, request->type,
	    request->flags, &map);

	request->offset = map->offset;
	request->size = map->size;
	request->type = map->type;
	request->flags = map->flags;
	request->mtrr = map->mtrr;
	request->handle = map->handle;

	if (request->type != _DRM_SHM) {
		request->handle = (void *)request->offset;
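
/*
 * Tear down a single mapping: unlink it from dev->maplist and release its
 * type-specific resources (ioremap, MTRR, malloc'd memory, or DMA handle).
 * Must be called with dev->dev_lock held.
 */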
void drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
	DRM_SPINLOCK_ASSERT(&dev->dev_lock);

	TAILQ_REMOVE(&dev->maplist, map, link);

		if (map->bsr == NULL)
			drm_ioremapfree(map);
	case _DRM_FRAME_BUFFER:
			int __unused retcode;

			retcode = drm_mtrr_del(0, map->offset, map->size,
			DRM_DEBUG("mtrr_del = %d\n", retcode);
		free(map->handle, DRM_MEM_MAPS);
	case _DRM_SCATTER_GATHER:
	case _DRM_CONSISTENT:
		drm_pci_free(dev, map->dmah);
		DRM_ERROR("Bad map type %d\n", map->type);

	if (map->bsr != NULL) {
		bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid,

	free(map, DRM_MEM_MAPS);

/* Remove a map private from list and deallocate resources if the mapping
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
	drm_local_map_t *map;
	struct drm_map *request = data;

	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (map->handle == request->handle &&
		    map->flags & _DRM_REMOVABLE)

	/* No match found. */
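
/*
 * Undo a partially completed buffer allocation: free any DMA segments and
 * per-buffer private data already attached to this entry and reset its
 * counts to zero.
 */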
static void drm_cleanup_buf_error(struct drm_device *dev,
    drm_buf_entry_t *entry)
	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			drm_pci_free(dev, entry->seglist[i]);
		free(entry->seglist, DRM_MEM_SEGS);

		entry->seg_count = 0;

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			free(entry->buflist[i].dev_private, DRM_MEM_BUFS);
		free(entry->buflist, DRM_MEM_BUFS);

		entry->buf_count = 0;
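
/*
 * Allocate DMA buffers backed by AGP memory. The buffers are carved out
 * of the aperture region starting at request->agp_start; only bookkeeping
 * structures are allocated here, not the AGP pages themselves.
 */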
static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	/*drm_agp_mem_t *agp_entry;
	unsigned long offset;
	unsigned long agp_offset;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	/* Make sure buffers are located in AGP memory that we own */
	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
	 * memory. Safe to ignore for now because these ioctls are still
	for (agp_entry = dev->agp->memory; agp_entry;
	    agp_entry = agp_entry->next) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <=
		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
		DRM_DEBUG("zone invalid\n");

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	if (!entry->buflist) {

	entry->buf_size = size;
	entry->page_order = page_order;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);

		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);

		byte_count += PAGE_SIZE << page_order;

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;
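
/*
 * Allocate DMA buffers backed by PCI (system) memory. Each segment comes
 * from drm_pci_alloc(), and the device-wide pagelist is rebuilt in a
 * temporary copy so it can be swapped in only after every allocation has
 * succeeded.
 */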
static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	unsigned long offset;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
	    request->count, request->size, size, order);

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	entry->seglist = malloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS,

	/* Keep the original pagelist until we know all the allocations
	temp_pagelist = malloc((dma->page_count + (count << page_order)) *
	    sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT);

	if (entry->buflist == NULL || entry->seglist == NULL ||
	    temp_pagelist == NULL) {
		free(temp_pagelist, DRM_MEM_PAGES);
		free(entry->seglist, DRM_MEM_SEGS);
		free(entry->buflist, DRM_MEM_BUFS);

	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
	    sizeof(*dma->pagelist));

	DRM_DEBUG("pagelist: %d entries\n",
	    dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;

	while (entry->buf_count < count) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
		DRM_SPINLOCK(&dev->dma_lock);

			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			free(temp_pagelist, DRM_MEM_PAGES);

		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ %p\n",
			    dma->page_count + page_count,
			    (char *)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++] =
			    (long)dmah->vaddr + PAGE_SIZE * i;

		    offset + size <= total && entry->buf_count < count;
		    offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;

			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = ((char *)dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;

			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->buf_priv_size;
			buf->dev_private = malloc(buf->dev_priv_size,
			    DRM_MEM_BUFS, M_NOWAIT | M_ZERO);
			if (buf->dev_private == NULL) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				free(temp_pagelist, DRM_MEM_PAGES);

			DRM_DEBUG("buffer %d @ %p\n",
			    entry->buf_count, buf->address);

		byte_count += PAGE_SIZE << page_order;

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		free(temp_pagelist, DRM_MEM_PAGES);
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];

	/* No allocations failed, so now we can replace the original pagelist
	free(dma->pagelist, DRM_MEM_PAGES);
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	request->count = entry->buf_count;
	request->size = size;
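
/*
 * Allocate DMA buffers that live in scatter/gather memory. Buffer offsets
 * are relative to request->agp_start within the SG area, and the kernel
 * virtual address is derived from dev->sg->handle.
 */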
static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	unsigned long offset;
	unsigned long agp_offset;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	if (entry->buflist == NULL)

	entry->buf_size = size;
	entry->page_order = page_order;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);

		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);

		DRM_DEBUG("buffer %d @ %p\n",
		    entry->buf_count, buf->address);

		byte_count += PAGE_SIZE << page_order;

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;
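
/*
 * The drm_addbufs_agp/sg/pci entry points below share the same sanity
 * checks: a buffer count in range, an order within [DRM_MIN_ORDER,
 * DRM_MAX_ORDER], no allocations after the first buffer-using ioctl, and
 * at most one allocation per order. Each takes dma_lock around the
 * type-specific worker above.
 */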
int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
	if (request->count < 0 || request->count > 4096)

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);

	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);

	ret = drm_do_addbufs_agp(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
	if (!DRM_SUSER(DRM_CURPROC))

	if (request->count < 0 || request->count > 4096)

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);

	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);

	ret = drm_do_addbufs_sg(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
	if (!DRM_SUSER(DRM_CURPROC))

	if (request->count < 0 || request->count > 4096)

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);

	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);

	ret = drm_do_addbufs_pci(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

int drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
	struct drm_buf_desc *request = data;

	if (request->flags & _DRM_AGP_BUFFER)
		err = drm_addbufs_agp(dev, request);
	else if (request->flags & _DRM_SG_BUFFER)
		err = drm_addbufs_sg(dev, request);
		err = drm_addbufs_pci(dev, request);
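
/*
 * Report the currently allocated buffer pools (count, size, and freelist
 * watermarks per order) back to user space.
 */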
int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_info *request = data;

	DRM_SPINLOCK(&dev->dma_lock);
	++dev->buf_use;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc from;

				from.count = dma->bufs[i].buf_count;
				from.size = dma->bufs[i].buf_size;
				from.low_mark = dma->bufs[i].freelist.low_mark;
				from.high_mark = dma->bufs[i].freelist.high_mark;

				if (DRM_COPY_TO_USER(&request->list[count], &from,
				    sizeof(struct drm_buf_desc)) != 0) {

				DRM_DEBUG("%d %d %d %d %d\n",
				    i, dma->bufs[i].buf_count,
				    dma->bufs[i].buf_size,
				    dma->bufs[i].freelist.low_mark,
				    dma->bufs[i].freelist.high_mark);

	request->count = count;
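
/*
 * Set the freelist low/high watermarks for the pool whose buffer size maps
 * to the requested order, after validating them against that pool's buffer
 * count.
 */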
int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_desc *request = data;

	DRM_DEBUG("%d, %d, %d\n",
	    request->size, request->low_mark, request->high_mark);

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
	    request->low_mark < 0 || request->high_mark < 0) {

	DRM_SPINLOCK(&dev->dma_lock);
	if (request->low_mark > dma->bufs[order].buf_count ||
	    request->high_mark > dma->bufs[order].buf_count) {
		DRM_SPINUNLOCK(&dev->dma_lock);

	dma->bufs[order].freelist.low_mark = request->low_mark;
	dma->bufs[order].freelist.high_mark = request->high_mark;
	DRM_SPINUNLOCK(&dev->dma_lock);
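
/*
 * Release a list of buffers named by index on behalf of the caller; each
 * buffer must be owned by the requesting file.
 */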
int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_free *request = data;

	DRM_DEBUG("%d\n", request->count);

	DRM_SPINLOCK(&dev->dma_lock);
	for (i = 0; i < request->count; i++) {
		if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
			    idx, dma->buf_count - 1);
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
		drm_free_buffer(dev, buf);
	DRM_SPINUNLOCK(&dev->dma_lock);
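
/*
 * Map the DMA buffer pool into the calling process and fill in each
 * buffer's index, size, and user virtual address. AGP and SG buffers are
 * mapped through dev->agp_buffer_map; PCI buffers are mapped by the pool's
 * total byte count.
 */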
int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
	drm_device_dma_t *dma = dev->dma;
	vm_offset_t address;
	struct vmspace *vms;
	struct drm_buf_map *request = data;

	vms = DRM_CURPROC->td_proc->p_vmspace;

	DRM_SPINLOCK(&dev->dma_lock);
	dev->buf_use++;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	if (request->count < dma->buf_count)

	if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (drm_core_check_feature(dev, DRIVER_SG) &&
	    (dma->flags & _DRM_DMA_USE_SG))) {
		drm_local_map_t *map = dev->agp_buffer_map;

		size = round_page(map->size);
		size = round_page(dma->byte_count),

	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
#if __FreeBSD_version >= 600023
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC, OBJT_DEVICE,
	    dev->devnode, foff);
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
	    SLIST_FIRST(&dev->devnode->si_hlist), foff);

	request->virtual = (void *)vaddr;

	for (i = 0; i < dma->buf_count; i++) {
		if (DRM_COPY_TO_USER(&request->list[i].idx,
		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
		if (DRM_COPY_TO_USER(&request->list[i].total,
		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
		if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
		address = vaddr + dma->buflist[i]->offset; /* *** */
		if (DRM_COPY_TO_USER(&request->list[i].address, &address,

	request->count = dma->buf_count;

	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

 * Compute order. Can be made faster.
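 * For example, drm_order(PAGE_SIZE) is PAGE_SHIFT, and any size that is not
 * an exact power of two rounds up to the next order.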
int drm_order(unsigned long size)
	order = flsl(size) - 1;
	if (size & ~(1ul << order))