/* drm_bufs.h -- Generic buffer template -*- linux-c -*-
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 */

#include "dev/drm/drmP.h"
/*
 * Compute order (the base-2 logarithm, rounded up) of a size.
 * Can be made faster.
 */
int drm_order(unsigned long size)
{
        int order;
        unsigned long tmp;

        for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );

        if ( size & ~(1 << order) )
                ++order;

        return order;
}
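
/*
 * Look up the bus address at which the given PCI memory resource (BAR
 * index) starts.  The index is translated to its config-space register
 * (0x10 + 4 * resource); the resource is allocated just long enough to
 * query its start address with rman_get_start() and is then released.
 */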
unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
        resource = resource * 4 + 0x10;

        bsr = bus_alloc_resource_any(dev->device, SYS_RES_MEMORY, &resource,
            RF_ACTIVE | RF_SHAREABLE);
                DRM_ERROR("Couldn't find resource 0x%x\n", resource);

        offset = rman_get_start(bsr);

        bus_release_resource(dev->device, SYS_RES_MEMORY, resource, bsr);
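
/*
 * Companion to drm_get_resource_start(): return the length of the given
 * PCI memory resource, again by briefly allocating the BAR, querying it
 * with rman_get_size(), and releasing it.
 */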
unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
        resource = resource * 4 + 0x10;

        bsr = bus_alloc_resource_any(dev->device, SYS_RES_MEMORY, &resource,
            RF_ACTIVE | RF_SHAREABLE);
                DRM_ERROR("Couldn't find resource 0x%x\n", resource);

        len = rman_get_size(bsr);

        bus_release_resource(dev->device, SYS_RES_MEMORY, resource, bsr);
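
/*
 * Set up a kernel-owned mapping of a register or framebuffer aperture at
 * driver load time, before userland asks for it with an addmap ioctl.
 * The BAR is allocated as a bus resource, optionally covered by a
 * write-combining MTRR, and the resulting map is appended to dev->maplist
 * so a later addmap request for the same offset can reuse it.
 */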
int drm_initmap(drm_device_t *dev, unsigned long start, unsigned long len,
    unsigned int resource, int type, int flags)
        if (type != _DRM_REGISTERS && type != _DRM_FRAME_BUFFER)

        map = malloc(sizeof(*map), M_DRM, M_ZERO | M_NOWAIT);

        map->rid = resource * 4 + 0x10;
        bsr = bus_alloc_resource_any(dev->device, SYS_RES_MEMORY, &map->rid,
            RF_ACTIVE | RF_SHAREABLE);
                DRM_ERROR("Couldn't allocate %s resource\n",
                    ((type == _DRM_REGISTERS) ? "mmio" : "framebuffer"));

        map->kernel_owned = 1;
        map->bst = rman_get_bustag(bsr);
        map->bsh = rman_get_bushandle(bsr);

        if (type == _DRM_REGISTERS)
                map->handle = rman_get_virtual(bsr);

        DRM_DEBUG("initmap %d,0x%x@0x%lx/0x%lx\n", map->type, map->flags,
            map->offset, map->size);

        if (map->flags & _DRM_WRITE_COMBINING) {
                err = drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC);

        TAILQ_INSERT_TAIL(&dev->maplist, map, link);
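
/*
 * Ioctl handler for adding a map: create (or reuse) a mapping of
 * registers, framebuffer, shared memory, AGP, scatter/gather or
 * consistent memory, as requested by userland, and return the map
 * parameters to the caller.  Register and framebuffer requests that match
 * an existing kernel-owned map hand that map back instead of creating a
 * duplicate.
 */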
int drm_addmap(DRM_IOCTL_ARGS)
        drm_local_map_t *map;

        if (!(dev->flags & (FREAD|FWRITE)))
                return DRM_ERR(EACCES); /* Require read/write */

        DRM_COPY_FROM_USER_IOCTL( request, (drm_map_t *)data, sizeof(drm_map_t) );

        /* Only allow shared memory to be removable, since we only keep
         * enough bookkeeping information about shared memory to allow for
         * removal when processes fork.
         */
        if ((request.flags & _DRM_REMOVABLE) && request.type != _DRM_SHM)
        if ((request.offset & PAGE_MASK) || (request.size & PAGE_MASK))
        if (request.offset + request.size < request.offset)

        DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
            request.offset, request.size, request.type);

        /* Check if this is just another version of a kernel-allocated map,
         * and just hand that back if so.
         */
        if (request.type == _DRM_REGISTERS || request.type == _DRM_FRAME_BUFFER)
                TAILQ_FOREACH(map, &dev->maplist, link) {
                        if (map->kernel_owned && map->type == request.type &&
                            map->offset == request.offset) {
                                /* XXX: this size setting is questionable. */
                                map->size = request.size;
                                DRM_DEBUG("Found kernel map %d\n", request.type);

        /* Allocate a new map structure, fill it in, and do any type-specific
         * initialization necessary.
         */
        map = malloc(sizeof(*map), M_DRM, M_ZERO | M_NOWAIT);
                return DRM_ERR(ENOMEM);

        map->offset = request.offset;
        map->size = request.size;
        map->type = request.type;
        map->flags = request.flags;

        switch ( map->type ) {
                map->handle = drm_ioremap(dev, map);
                if (!(map->flags & _DRM_WRITE_COMBINING))
        case _DRM_FRAME_BUFFER:
                if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
                map->handle = malloc(map->size, M_DRM, M_NOWAIT);
                DRM_DEBUG( "%lu %d %p\n",
                    map->size, drm_order(map->size), map->handle );
                if ( !map->handle ) {
                        return DRM_ERR(ENOMEM);
                map->offset = (unsigned long)map->handle;
                if ( map->flags & _DRM_CONTAINS_LOCK ) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
                        if (dev->lock.hw_lock != NULL) {
                                free(map->handle, M_DRM);
                                return DRM_ERR(EBUSY);
                        dev->lock.hw_lock = map->handle; /* Pointer to lock */
                map->offset += dev->agp->base;
                map->mtrr = dev->agp->mtrr; /* for getmap */
        case _DRM_SCATTER_GATHER:
                return DRM_ERR(EINVAL);
                map->offset = map->offset + dev->sg->handle;
        case _DRM_CONSISTENT:
                map->handle = drm_pci_alloc(dev, map->size, map->size,
                    0xfffffffful, &bus_addr);
                if (map->handle == NULL) {
                map->offset = (unsigned long)bus_addr;
                return DRM_ERR(EINVAL);

        TAILQ_INSERT_TAIL(&dev->maplist, map, link);

        /* Jumped to, with lock held, when a kernel map is found. */
        request.offset = map->offset;
        request.size = map->size;
        request.type = map->type;
        request.flags = map->flags;
        request.mtrr = map->mtrr;
        request.handle = map->handle;

        DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", request.type, request.offset,
            request.size);

        if ( request.type != _DRM_SHM ) {
                request.handle = (void *)request.offset;

        DRM_COPY_TO_USER_IOCTL( (drm_map_t *)data, request, sizeof(drm_map_t) );
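
/*
 * Tear down a single map entry: unlink it from dev->maplist and release
 * whatever backs it (ioremap or bus resource for registers, the MTRR for
 * the framebuffer, malloc'ed memory for SHM, drm_pci_alloc memory for
 * consistent maps).  The caller must hold dev->dev_lock.
 */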
void drm_remove_map(drm_device_t *dev, drm_local_map_t *map)
        DRM_SPINLOCK_ASSERT(&dev->dev_lock);

        TAILQ_REMOVE(&dev->maplist, map, link);

                if (map->bsr == NULL)
                        drm_ioremapfree(map);
        case _DRM_FRAME_BUFFER:
                int __unused retcode;
                retcode = drm_mtrr_del(map->offset, map->size,
                DRM_DEBUG("mtrr_del = %d\n", retcode);
                free(map->handle, M_DRM);
        case _DRM_SCATTER_GATHER:
        case _DRM_CONSISTENT:
                drm_pci_free(dev, map->size, map->handle, map->offset);

        if (map->bsr != NULL) {
                bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid,

/* Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 */
int drm_rmmap(DRM_IOCTL_ARGS)
        drm_local_map_t *map;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_map_t *)data, sizeof(request) );

        TAILQ_FOREACH(map, &dev->maplist, link) {
                if (map->handle == request.handle &&
                    map->flags & _DRM_REMOVABLE)

        /* No match found. */
                return DRM_ERR(EINVAL);

        drm_remove_map(dev, map);
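
/*
 * Unwind a partially built drm_buf_entry_t after an allocation failure:
 * free any DMA segments along with the seglist/seglist_bus bookkeeping,
 * then free each buffer's dev_private data and the buflist array itself.
 */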
static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        drm_pci_free(dev, entry->buf_size,
                            (void *)entry->seglist[i],
                            entry->seglist_bus[i]);
                free(entry->seglist, M_DRM);
                free(entry->seglist_bus, M_DRM);

                entry->seg_count = 0;

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        free(entry->buflist[i].dev_private, M_DRM);
                free(entry->buflist, M_DRM);

                entry->buf_count = 0;
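
/*
 * Carve request->count DMA buffers of 2^order bytes out of the AGP
 * aperture region starting at request->agp_start, record them in
 * dma->bufs[order], and grow dma->buflist to include them.  The buffers
 * are addressed directly at their AGP offsets; no new memory is allocated
 * beyond the per-buffer dev_private blocks.
 */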
static int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        unsigned long offset;
        unsigned long agp_offset;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = drm_order(request->size);

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        agp_offset = dev->agp->base + request->agp_start;

        DRM_DEBUG( "count: %d\n", count );
        DRM_DEBUG( "order: %d\n", order );
        DRM_DEBUG( "size: %d\n", size );
        DRM_DEBUG( "agp_offset: 0x%lx\n", agp_offset );
        DRM_DEBUG( "alignment: %d\n", alignment );
        DRM_DEBUG( "page_order: %d\n", page_order );
        DRM_DEBUG( "total: %d\n", total );

        entry = &dma->bufs[order];

        entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
        if ( !entry->buflist ) {
                return DRM_ERR(ENOMEM);

        entry->buf_size = size;
        entry->page_order = page_order;

        while ( entry->buf_count < count ) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);

                buf->dev_priv_size = dev->dev_priv_size;
                buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
                if (buf->dev_private == NULL) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        return DRM_ERR(ENOMEM);

                byte_count += PAGE_SIZE << page_order;

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = realloc(dma->buflist,
            (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
        if (temp_buflist == NULL) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                return DRM_ERR(ENOMEM);
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_AGP;
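
/*
 * Allocate request->count DMA buffers out of newly allocated contiguous
 * PCI DMA segments (one 2^page_order-page segment per loop iteration),
 * tracking each page in dma->pagelist and each segment in the entry's
 * seglist/seglist_bus arrays so drm_cleanup_buf_error() can unwind on
 * failure.
 */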
static int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        unsigned long offset;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = drm_order(request->size);

        DRM_DEBUG( "count=%d, size=%d (%d), order=%d\n",
            request->count, request->size, size, order );

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        entry = &dma->bufs[order];

        entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
        entry->seglist = malloc(count * sizeof(*entry->seglist), M_DRM,
        entry->seglist_bus = malloc(count * sizeof(*entry->seglist_bus), M_DRM,

        /* Keep the original pagelist until we know all the allocations
         * have succeeded.
         */
        temp_pagelist = malloc((dma->page_count + (count << page_order)) *
            sizeof(*dma->pagelist), M_DRM, M_NOWAIT);

        if (entry->buflist == NULL || entry->seglist == NULL ||
            entry->seglist_bus == NULL || temp_pagelist == NULL) {
                free(entry->buflist, M_DRM);
                free(entry->seglist, M_DRM);
                free(entry->seglist_bus, M_DRM);
                return DRM_ERR(ENOMEM);

        memcpy(temp_pagelist, dma->pagelist, dma->page_count *
            sizeof(*dma->pagelist));

        DRM_DEBUG( "pagelist: %d entries\n",
            dma->page_count + (count << page_order) );

        entry->buf_size = size;
        entry->page_order = page_order;

        while ( entry->buf_count < count ) {
                vaddr = (vm_offset_t)drm_pci_alloc(dev, size, alignment,
                    0xfffffffful, &bus_addr);
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        free(temp_pagelist, M_DRM);
                        return DRM_ERR(ENOMEM);

                entry->seglist_bus[entry->seg_count] = bus_addr;
                entry->seglist[entry->seg_count++] = vaddr;
                for ( i = 0 ; i < (1 << page_order) ; i++ ) {
                        DRM_DEBUG( "page %d @ 0x%08lx\n",
                            dma->page_count + page_count,
                            (long)vaddr + PAGE_SIZE * i );
                        temp_pagelist[dma->page_count + page_count++] =
                            vaddr + PAGE_SIZE * i;

                    offset + size <= total && entry->buf_count < count ;
                    offset += alignment, ++entry->buf_count ) {
                        buf = &entry->buflist[entry->buf_count];
                        buf->idx = dma->buf_count + entry->buf_count;
                        buf->total = alignment;

                        buf->offset = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(vaddr + offset);
                        buf->bus_address = bus_addr + offset;

                        buf->dev_priv_size = dev->dev_priv_size;
                        buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
                        if (buf->dev_private == NULL) {
                                /* Set count correctly so we free the proper amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                drm_cleanup_buf_error(dev, entry);
                                free(temp_pagelist, M_DRM);
                                return DRM_ERR(ENOMEM);

                        DRM_DEBUG( "buffer %d @ %p\n",
                            entry->buf_count, buf->address );

                byte_count += PAGE_SIZE << page_order;

        temp_buflist = realloc(dma->buflist,
            (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
        if (temp_buflist == NULL) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                free(temp_pagelist, M_DRM);
                return DRM_ERR(ENOMEM);
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];

        /* No allocations failed, so now we can replace the original pagelist
         * with the new one.
         */
        free(dma->pagelist, M_DRM);
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        request->count = entry->buf_count;
        request->size = size;
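
/*
 * Like drm_addbufs_agp(), but for buffers that live in the scatter/gather
 * area: offsets are relative to request->agp_start within the SG region,
 * and the kernel virtual address is derived from dev->sg->handle rather
 * than from the AGP aperture.
 */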
static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        unsigned long offset;
        unsigned long agp_offset;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = drm_order(request->size);

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        agp_offset = request->agp_start;

        DRM_DEBUG( "count: %d\n", count );
        DRM_DEBUG( "order: %d\n", order );
        DRM_DEBUG( "size: %d\n", size );
        DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
        DRM_DEBUG( "alignment: %d\n", alignment );
        DRM_DEBUG( "page_order: %d\n", page_order );
        DRM_DEBUG( "total: %d\n", total );

        entry = &dma->bufs[order];

        entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
        if (entry->buflist == NULL)
                return DRM_ERR(ENOMEM);

        entry->buf_size = size;
        entry->page_order = page_order;

        while ( entry->buf_count < count ) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset + dev->sg->handle);

                buf->dev_priv_size = dev->dev_priv_size;
                buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
                if (buf->dev_private == NULL) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        return DRM_ERR(ENOMEM);

                DRM_DEBUG( "buffer %d @ %p\n",
                    entry->buf_count, buf->address );

                byte_count += PAGE_SIZE << page_order;

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = realloc(dma->buflist,
            (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
        if (temp_buflist == NULL) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                return DRM_ERR(ENOMEM);
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_SG;
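
/*
 * Ioctl handler for adding DMA buffers: validate the requested count and
 * order, make sure no buffers are in use yet and that this order hasn't
 * been populated already, then dispatch to the AGP, scatter/gather or PCI
 * back end depending on request.flags.
 */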
int drm_addbufs(DRM_IOCTL_ARGS)
        drm_buf_desc_t request;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );

        if (request.count < 0 || request.count > 4096)
                return DRM_ERR(EINVAL);

        order = drm_order(request.size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return DRM_ERR(EINVAL);

        DRM_SPINLOCK(&dev->dma_lock);
        /* No more allocations after first buffer-using ioctl. */
        if (dev->buf_use != 0) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return DRM_ERR(EBUSY);
        /* No more than one allocation per order */
        if (dev->dma->bufs[order].buf_count != 0) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return DRM_ERR(ENOMEM);

        if ( request.flags & _DRM_AGP_BUFFER )
                err = drm_addbufs_agp(dev, &request);
        if ( request.flags & _DRM_SG_BUFFER )
                err = drm_addbufs_sg(dev, &request);
                err = drm_addbufs_pci(dev, &request);
        DRM_SPINUNLOCK(&dev->dma_lock);

        DRM_COPY_TO_USER_IOCTL((drm_buf_desc_t *)data, request, sizeof(request));
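
/*
 * Ioctl handler for buffer info: report, for every order that has buffers
 * allocated, the buffer count, size and freelist low/high marks.  Also
 * bumps dev->buf_use so no further allocations are allowed.
 */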
int drm_infobufs(DRM_IOCTL_ARGS)
        drm_device_dma_t *dma = dev->dma;
        drm_buf_info_t request;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_info_t *)data, sizeof(request) );

        DRM_SPINLOCK(&dev->dma_lock);
        ++dev->buf_use;         /* Can't allocate more after this call */
        DRM_SPINUNLOCK(&dev->dma_lock);

        for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                if ( dma->bufs[i].buf_count ) ++count;

        DRM_DEBUG( "count = %d\n", count );

        if ( request.count >= count ) {
                for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                        if ( dma->bufs[i].buf_count ) {
                                from.count = dma->bufs[i].buf_count;
                                from.size = dma->bufs[i].buf_size;
                                from.low_mark = dma->bufs[i].freelist.low_mark;
                                from.high_mark = dma->bufs[i].freelist.high_mark;

                                if (DRM_COPY_TO_USER(&request.list[count], &from,
                                    sizeof(drm_buf_desc_t)) != 0) {
                                        retcode = DRM_ERR(EFAULT);

                                DRM_DEBUG( "%d %d %d %d %d\n",
                                    i,
                                    dma->bufs[i].buf_count,
                                    dma->bufs[i].buf_size,
                                    dma->bufs[i].freelist.low_mark,
                                    dma->bufs[i].freelist.high_mark );

        request.count = count;

        DRM_COPY_TO_USER_IOCTL( (drm_buf_info_t *)data, request, sizeof(request) );
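
/*
 * Ioctl handler for marking buffers: set the freelist low and high water
 * marks for the order corresponding to the given buffer size, after
 * bounds checking them against the number of buffers actually allocated.
 */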
int drm_markbufs(DRM_IOCTL_ARGS)
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );

        DRM_DEBUG( "%d, %d, %d\n",
            request.size, request.low_mark, request.high_mark );

        order = drm_order(request.size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
            request.low_mark < 0 || request.high_mark < 0) {
                return DRM_ERR(EINVAL);

        DRM_SPINLOCK(&dev->dma_lock);
        if (request.low_mark > dma->bufs[order].buf_count ||
            request.high_mark > dma->bufs[order].buf_count) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return DRM_ERR(EINVAL);

        dma->bufs[order].freelist.low_mark = request.low_mark;
        dma->bufs[order].freelist.high_mark = request.high_mark;
        DRM_SPINUNLOCK(&dev->dma_lock);
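
/*
 * Ioctl handler for freeing buffers: return a list of buffers to the free
 * pool.  Each index is validated and must refer to a buffer owned by the
 * calling file handle before drm_free_buffer() is invoked on it.
 */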
int drm_freebufs(DRM_IOCTL_ARGS)
        drm_device_dma_t *dma = dev->dma;
        drm_buf_free_t request;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_free_t *)data, sizeof(request) );

        DRM_DEBUG( "%d\n", request.count );

        DRM_SPINLOCK(&dev->dma_lock);
        for ( i = 0 ; i < request.count ; i++ ) {
                if (DRM_COPY_FROM_USER(&idx, &request.list[i], sizeof(idx))) {
                        retcode = DRM_ERR(EFAULT);

                if ( idx < 0 || idx >= dma->buf_count ) {
                        DRM_ERROR( "Index %d (of %d max)\n",
                            idx, dma->buf_count - 1 );
                        retcode = DRM_ERR(EINVAL);

                buf = dma->buflist[idx];
                if ( buf->filp != filp ) {
                        DRM_ERROR("Process %d freeing buffer not owned\n",
                        retcode = DRM_ERR(EINVAL);

                drm_free_buffer(dev, buf);
        DRM_SPINUNLOCK(&dev->dma_lock);
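
/*
 * Ioctl handler for mapping buffers: map the whole DMA buffer pool
 * (either the AGP/SG buffer map or the PCI buffers) into the calling
 * process's address space in one go, then copy out the index, size and
 * user address of each buffer.  Marks dev->buf_use so no further
 * allocations can occur.
 */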
int drm_mapbufs(DRM_IOCTL_ARGS)
        drm_device_dma_t *dma = dev->dma;
#elif defined(__NetBSD__) || defined(__OpenBSD__)
#endif /* __NetBSD__ || __OpenBSD__ */
        drm_buf_map_t request;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_map_t *)data, sizeof(request) );

#if defined(__NetBSD__) || defined(__OpenBSD__)
        if (!vfinddev(kdev, VCHR, &vn))
                return 0; /* FIXME: Shouldn't this be EINVAL or something? */
#endif /* __NetBSD__ || __OpenBSD */

#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
        vms = p->td_proc->p_vmspace;

        DRM_SPINLOCK(&dev->dma_lock);
        dev->buf_use++;         /* Can't allocate more after this call */
        DRM_SPINUNLOCK(&dev->dma_lock);

        if (request.count < dma->buf_count)

        if ((dev->use_agp && (dma->flags & _DRM_DMA_USE_AGP)) ||
            (dev->use_sg && (dma->flags & _DRM_DMA_USE_SG))) {
                drm_local_map_t *map = dev->agp_buffer_map;

                size = round_page(map->size);
                size = round_page(dma->byte_count);

        vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
#if __FreeBSD_version >= 600023
        retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
            VM_PROT_ALL, MAP_SHARED, OBJT_DEVICE, kdev, foff );
        retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
            VM_PROT_ALL, MAP_SHARED, SLIST_FIRST(&kdev->si_hlist), foff );
#elif defined(__NetBSD__) || defined(__OpenBSD__)
        vaddr = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
        retcode = uvm_mmap(&vms->vm_map, &vaddr, size,
            UVM_PROT_READ | UVM_PROT_WRITE, UVM_PROT_ALL, MAP_SHARED,
            &vn->v_uobj, foff, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
#endif /* __NetBSD__ || __OpenBSD */

        request.virtual = (void *)vaddr;

        for ( i = 0 ; i < dma->buf_count ; i++ ) {
                if (DRM_COPY_TO_USER(&request.list[i].idx,
                    &dma->buflist[i]->idx, sizeof(request.list[0].idx))) {

                if (DRM_COPY_TO_USER(&request.list[i].total,
                    &dma->buflist[i]->total, sizeof(request.list[0].total))) {

                if (DRM_COPY_TO_USER(&request.list[i].used, &zero,

                address = vaddr + dma->buflist[i]->offset; /* *** */
                if (DRM_COPY_TO_USER(&request.list[i].address, &address,

        request.count = dma->buf_count;

        DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

        DRM_COPY_TO_USER_IOCTL((drm_buf_map_t *)data, request, sizeof(request));

        return DRM_ERR(retcode);