1 /* drm_bufs.h -- Generic buffer template -*- linux-c -*-
2 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
4 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
34 #define __NO_VERSION__
36 #include <linux/vmalloc.h>
37 #endif /* __linux__ */
39 #include <machine/param.h>
43 #include <vm/vm_extern.h>
44 #include <vm/vm_map.h>
45 #include <vm/vm_param.h>
46 #endif /* __FreeBSD__ */
47 #include "dev/drm/drmP.h"
49 #ifndef __HAVE_PCI_DMA
50 #define __HAVE_PCI_DMA 0
57 #ifndef DRIVER_BUF_PRIV_T
58 #define DRIVER_BUF_PRIV_T u32
60 #ifndef DRIVER_AGP_BUFFERS_MAP
61 #if __HAVE_AGP && __HAVE_DMA
62 #error "You must define DRIVER_AGP_BUFFERS_MAP()"
64 #define DRIVER_AGP_BUFFERS_MAP( dev ) NULL
/* Forward declarations for the three buffer-allocation ioctl backends
 * defined later in this file; DRM(addbufs) dispatches to one of them
 * based on the request flags. */
69 int DRM(addbufs_agp)( DRM_OS_IOCTL );
72 int DRM(addbufs_pci)( DRM_OS_IOCTL );
75 int DRM(addbufs_sg)( DRM_OS_IOCTL );
/* NOTE(review): this listing is a partial extract -- the embedded original
 * line numbers (79, 81, 86, 88) skip, so statements (including the final
 * return) are missing from view.  Code is left byte-identical. */
79 * Compute order. Can be made faster.
81 int DRM(order)( unsigned long size )
/* Count how many times size can be right-shifted: order = floor(log2(size)). */
86 	for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );
/* If size is not an exact power of two, presumably round order up --
 * the incrementing branch is among the missing lines; TODO confirm
 * against the full source. */
88 	if ( size & ~(1 << order) )
/* NOTE(review): partial extract -- embedded line numbers skip, so braces,
 * declarations and several statements are missing from view.  Code below
 * is kept byte-identical. */
/* ioctl handler: validate a user-supplied drm_map_t, perform the per-type
 * setup (register/frame-buffer ioremap + optional MTRR, SHM allocation,
 * AGP or scatter/gather offset rebasing), link the map onto dev->maplist,
 * and copy the resulting map back to user space. */
94 int DRM(addmap)( DRM_OS_IOCTL )
100 #endif /* __linux__ */
102 	drm_map_list_entry_t *list;
103 #endif /* __FreeBSD__ */
/* Require the device open for both read and write (Linux f_mode bits vs
 * FreeBSD FREAD|FWRITE -- same check, per-OS spelling). */
106 	if ( !(filp->f_mode & 3) )
107 #endif /* __linux__ */
109 	if (!(dev->flags & (FREAD|FWRITE)))
110 #endif /* __FreeBSD__ */
111 		return DRM_OS_ERR(EACCES); /* Require read/write */
/* Kernel-side copy of the caller's map description. */
113 	map = (drm_map_t *) DRM(alloc)( sizeof(*map), DRM_MEM_MAPS );
115 		return DRM_OS_ERR(ENOMEM);
118 	if ( copy_from_user( map, (drm_map_t *)data, sizeof(*map) ) ) {
119 		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
120 		return DRM_OS_ERR(EFAULT);
122 #endif /* __linux__ */
124 	*map = *(drm_map_t *)data;
125 #endif /* __FreeBSD__ */
127 	/* Only allow shared memory to be removable since we only keep enough
128 	 * book keeping information about shared memory to allow for removal
129 	 * when processes fork.
131 	if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
132 		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
133 		return DRM_OS_ERR(EINVAL);
135 	DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
136 		   map->offset, map->size, map->type );
/* Both offset and size must be page aligned.  Linux PAGE_MASK has the
 * high bits set (hence the ~); FreeBSD's PAGE_MASK is the low-bit mask
 * directly -- the two tests are equivalent. */
138 	if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
139 #endif /* __linux__ */
141 	if ( (map->offset & PAGE_MASK) || (map->size & PAGE_MASK) ) {
142 #endif /* __FreeBSD__ */
143 		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
144 		return DRM_OS_ERR(EINVAL);
/* Per-type validation and resource setup. */
149 	switch ( map->type ) {
151 	case _DRM_FRAME_BUFFER:
152 #if !defined(__sparc__) && !defined(__alpha__)
/* Reject a range that wraps around or overlaps kernel RAM (Linux-only
 * check; sparc/alpha are exempted). */
153 		if ( map->offset + map->size < map->offset
155 		     || map->offset < virt_to_phys(high_memory)
156 #endif /* __linux__ */
158 			DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
159 			return DRM_OS_ERR(EINVAL);
163 		map->offset += dev->hose->mem_space->start;
165 #if __REALLY_HAVE_MTRR
/* Mark frame buffers (or explicitly requested ranges) write-combining. */
166 		if ( map->type == _DRM_FRAME_BUFFER ||
167 		     (map->flags & _DRM_WRITE_COMBINING) ) {
168 			map->mtrr = mtrr_add( map->offset, map->size,
169 					      MTRR_TYPE_WRCOMB, 1 );
172 		map->handle = DRM(ioremap)( map->offset, map->size );
/* _DRM_SHM: back the map with kernel memory instead of device memory. */
177 		map->handle = vmalloc_32(map->size);
178 #endif /* __linux__ */
180 		map->handle = (void *)DRM(alloc_pages)
181 		    (DRM(order)(map->size) - PAGE_SHIFT, DRM_MEM_SAREA);
182 #endif /* __FreeBSD__ */
183 		DRM_DEBUG( "%ld %d %p\n",
184 			   map->size, DRM(order)( map->size ), map->handle );
185 		if ( !map->handle ) {
186 			DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
187 			return DRM_OS_ERR(ENOMEM);
189 		map->offset = (unsigned long)map->handle;
190 		if ( map->flags & _DRM_CONTAINS_LOCK ) {
191 			dev->lock.hw_lock = map->handle; /* Pointer to lock */
194 #if __REALLY_HAVE_AGP
/* AGP maps: rebase the caller-relative offset onto the aperture base. */
197 		map->offset += dev->hose->mem_space->start;
199 		map->offset += dev->agp->base;
200 		map->mtrr = dev->agp->agp_mtrr; /* for getmap */
203 	case _DRM_SCATTER_GATHER:
/* Presumably rejects SG maps when dev->sg is absent -- the guard line is
 * among the missing lines; TODO confirm. */
205 		DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
206 		return DRM_OS_ERR(EINVAL);
208 		map->offset = map->offset + dev->sg->handle;
/* default: unknown map type. */
212 		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
213 		return DRM_OS_ERR(EINVAL);
/* Link the new map onto the device map list. */
216 	list = DRM(alloc)(sizeof(*list), DRM_MEM_MAPS);
218 		DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
219 		return DRM_OS_ERR(EINVAL);
221 	memset(list, 0, sizeof(*list));
226 	list_add(&list->head, &dev->maplist->head);
227 #endif /* __linux__ */
229 	TAILQ_INSERT_TAIL(dev->maplist, list, link);
230 #endif /* __FreeBSD__ */
/* Return the (possibly rebased) map to the caller; for non-SHM maps the
 * user-visible handle is the bus/physical offset rather than the kernel
 * virtual address. */
234 	if ( copy_to_user( (drm_map_t *)data, map, sizeof(*map) ) )
235 		return DRM_OS_ERR(EFAULT);
236 #endif /* __linux__ */
238 	*(drm_map_t *)data = *map;
239 #endif /* __FreeBSD__ */
241 	if ( map->type != _DRM_SHM ) {
243 		if ( copy_to_user( &((drm_map_t *)data)->handle,
245 				   sizeof(map->offset) ) )
246 			return DRM_OS_ERR(EFAULT);
247 #endif /* __linux__ */
249 		((drm_map_t *)data)->handle = (void *)map->offset;
250 #endif /* __FreeBSD__ */
/* NOTE(review): partial extract -- embedded line numbers skip; several
 * statements and braces are missing from view.  Code kept byte-identical. */
256 /* Remove a map private from list and deallocate resources if the mapping
/* ioctl handler: find the _DRM_REMOVABLE map whose handle matches the
 * request, unlink it from dev->maplist, and release its per-type
 * resources (MTRR entry, ioremap, SHM pages). */
260 int DRM(rmmap)( DRM_OS_IOCTL )
264 	struct list_head *list;
265 	drm_map_list_t *r_list = NULL;
266 	drm_vma_entry_t *pt, *prev;
267 #endif /* __linux__ */
269 	drm_map_list_entry_t *list;
270 #endif /* __FreeBSD__ */
275 	DRM_OS_KRNFROMUSR( request, (drm_map_t *)data, sizeof(request) );
/* Linux: walk the map list looking for a matching removable handle. */
279 	list = &dev->maplist->head;
280 	list_for_each(list, &dev->maplist->head) {
281 		r_list = (drm_map_list_t *) list;
284 		    r_list->map->handle == request.handle &&
285 		    r_list->map->flags & _DRM_REMOVABLE) break;
288 	/* List has wrapped around to the head pointer, or its empty we didn't
291 	if(list == (&dev->maplist->head)) {
293 		return DRM_OS_ERR(EINVAL);
297 #endif /* __linux__ */
/* FreeBSD: same search via TAILQ. */
299 	TAILQ_FOREACH(list, dev->maplist, link) {
301 		if(map->handle == request.handle &&
302 		   map->flags & _DRM_REMOVABLE) break;
305 	/* List has wrapped around to the head pointer, or its empty we didn't
310 		return DRM_OS_ERR(EINVAL);
312 	TAILQ_REMOVE(dev->maplist, list, link);
313 #endif /* __FreeBSD__ */
314 	DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);
/* Linux: count live VMAs still referencing this map -- presumably the
 * teardown below is skipped while found_maps != 0; the guard is among
 * the missing lines, TODO confirm. */
317 	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
318 		if (pt->vma->vm_private_data == map) found_maps++;
320 #endif /* __linux__ */
325 	case _DRM_FRAME_BUFFER:
326 #if __REALLY_HAVE_MTRR
/* Undo the write-combining MTRR registered by DRM(addmap). */
327 		if (map->mtrr >= 0) {
329 			retcode = mtrr_del(map->mtrr,
332 			DRM_DEBUG("mtrr_del = %d\n", retcode);
335 		DRM(ioremapfree)(map->handle, map->size);
340 #endif /* __linux__ */
342 		DRM(free_pages)( (unsigned long)map->handle, DRM(order)(map->size), DRM_MEM_SAREA );
343 #endif /* __FreeBSD__ */
346 	case _DRM_SCATTER_GATHER:
349 	DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
/* NOTE(review): partial extract -- embedded line numbers skip; code kept
 * byte-identical. */
/* Unwind a partially-constructed drm_buf_entry_t after an allocation
 * failure: free every DMA segment page, each buffer's dev_private blob,
 * the buflist array itself, and (when enabled) the freelist.  Counts are
 * reset so the entry can be reused. */
358 static void DRM(cleanup_buf_error)(drm_buf_entry_t *entry)
362 	if (entry->seg_count) {
363 		for (i = 0; i < entry->seg_count; i++) {
364 			DRM(free_pages)(entry->seglist[i],
368 		DRM(free)(entry->seglist,
370 			  sizeof(*entry->seglist),
373 		entry->seg_count = 0;
376 	if(entry->buf_count) {
377 	        for(i = 0; i < entry->buf_count; i++) {
378 			if(entry->buflist[i].dev_private) {
379 				DRM(free)(entry->buflist[i].dev_private,
380 					  entry->buflist[i].dev_priv_size,
384 		DRM(free)(entry->buflist,
386 			  sizeof(*entry->buflist),
389 #if __HAVE_DMA_FREELIST
390 		DRM(freelist_destroy)(&entry->freelist);
393 		entry->buf_count = 0;
397 #if __REALLY_HAVE_AGP
/* NOTE(review): partial extract -- embedded line numbers skip; braces and
 * some statements are missing from view.  Code kept byte-identical. */
/* ioctl backend: carve `count` DMA buffers of 2^order bytes out of AGP
 * aperture space starting at request.agp_start, append them to
 * dma->bufs[order] and dma->buflist, and report the actual count back.
 * May be called only once per order (entry->buf_count must be 0). */
398 int DRM(addbufs_agp)( DRM_OS_IOCTL )
401 	drm_device_dma_t *dma = dev->dma;
402 	drm_buf_desc_t request;
403 	drm_buf_entry_t *entry;
405 	unsigned long offset;
406 	unsigned long agp_offset;
415 	drm_buf_t **temp_buflist;
417 	if ( !dma ) return DRM_OS_ERR(EINVAL);
419 	DRM_OS_KRNFROMUSR( request, (drm_buf_desc_t *)data, sizeof(request) );
421 	count = request.count;
422 	order = DRM(order)( request.size );
/* Per-buffer stride: page aligned when requested (per-OS spelling). */
425 	alignment  = (request.flags & _DRM_PAGE_ALIGN)
427 		? PAGE_ALIGN(size) : size;
428 #endif /* __linux__ */
430 		? round_page(size) : size;
431 #endif /* __FreeBSD__ */
432 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
433 	total = PAGE_SIZE << page_order;
436 	agp_offset = dev->agp->base + request.agp_start;
438 	DRM_DEBUG( "count:      %d\n",  count );
439 	DRM_DEBUG( "order:      %d\n",  order );
440 	DRM_DEBUG( "size:       %d\n",  size );
441 	DRM_DEBUG( "agp_offset: 0x%lx\n", agp_offset );
442 	DRM_DEBUG( "alignment:  %d\n",  alignment );
443 	DRM_DEBUG( "page_order: %d\n",  page_order );
444 	DRM_DEBUG( "total:      %d\n",  total );
446 	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
447 		return DRM_OS_ERR(EINVAL);
448 	if ( dev->queue_count )
449 		return DRM_OS_ERR(EBUSY); /* Not while in use */
/* buf_use set means someone already mapped the buffers; buf_alloc marks
 * an allocation in progress (decremented on every exit path below). */
451 	DRM_OS_SPINLOCK( &dev->count_lock );
452 	if ( dev->buf_use ) {
453 		DRM_OS_SPINUNLOCK( &dev->count_lock );
454 		return DRM_OS_ERR(EBUSY);
456 	atomic_inc( &dev->buf_alloc );
457 	DRM_OS_SPINUNLOCK( &dev->count_lock );
460 	entry = &dma->bufs[order];
461 	if ( entry->buf_count ) {
463 		atomic_dec( &dev->buf_alloc );
464 		return DRM_OS_ERR(ENOMEM); /* May only call once for each order */
467 	if (count < 0 || count > 4096) {
469 		atomic_dec( &dev->buf_alloc );
470 		return DRM_OS_ERR(EINVAL);
473 	entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
475 	if ( !entry->buflist ) {
477 		atomic_dec( &dev->buf_alloc );
478 		return DRM_OS_ERR(ENOMEM);
480 	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
482 	entry->buf_size = size;
483 	entry->page_order = page_order;
/* Populate each buffer descriptor; addresses are offsets into the AGP
 * aperture, no pages are allocated here. */
487 	while ( entry->buf_count < count ) {
488 		buf          = &entry->buflist[entry->buf_count];
489 		buf->idx     = dma->buf_count + entry->buf_count;
490 		buf->total   = alignment;
494 		buf->offset  = (dma->byte_count + offset);
495 		buf->bus_address = agp_offset + offset;
496 		buf->address = (void *)(agp_offset + offset);
501 		init_waitqueue_head( &buf->dma_wait );
502 #endif /* __linux__ */
505 #endif /* __FreeBSD__ */
508 		buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
509 		buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
511 		if(!buf->dev_private) {
512 			/* Set count correctly so we free the proper amount. */
513 			entry->buf_count = count;
514 			DRM(cleanup_buf_error)(entry);
516 		memset( buf->dev_private, 0, buf->dev_priv_size );
518 #if __HAVE_DMA_HISTOGRAM
519 		buf->time_queued = 0;
520 		buf->time_dispatched = 0;
521 		buf->time_completed = 0;
527 	byte_count += PAGE_SIZE << page_order;
530 	DRM_DEBUG( "byte_count: %d\n", byte_count );
/* Grow the device-wide buflist; on failure the whole entry is unwound. */
532 	temp_buflist = DRM(realloc)( dma->buflist,
533 				     dma->buf_count * sizeof(*dma->buflist),
534 				     (dma->buf_count + entry->buf_count)
535 				     * sizeof(*dma->buflist),
538 		/* Free the entry because it isn't valid */
539 		DRM(cleanup_buf_error)(entry);
541 		atomic_dec( &dev->buf_alloc );
542 		return DRM_OS_ERR(ENOMEM);
544 	dma->buflist = temp_buflist;
546 	for ( i = 0 ; i < entry->buf_count ; i++ ) {
547 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
550 	dma->buf_count += entry->buf_count;
551 	dma->byte_count += byte_count;
553 	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
554 	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
556 #if __HAVE_DMA_FREELIST
557 	DRM(freelist_create)( &entry->freelist, entry->buf_count );
558 	for ( i = 0 ; i < entry->buf_count ; i++ ) {
559 		DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
/* Report the number of buffers actually created back to user space. */
564 	request.count = entry->buf_count;
567 	DRM_OS_KRNTOUSR( (drm_buf_desc_t *)data, request, sizeof(request) );
569 	dma->flags = _DRM_DMA_USE_AGP;
571 	atomic_dec( &dev->buf_alloc );
574 #endif /* __REALLY_HAVE_AGP */
/* NOTE(review): partial extract -- embedded line numbers skip; braces and
 * some statements are missing from view.  Code kept byte-identical. */
/* ioctl backend: allocate `count` DMA buffers backed by real system pages
 * (DRM(alloc_pages)), tracking each segment in entry->seglist and every
 * page in dma->pagelist.  Unlike the AGP/SG variants this one allocates
 * memory per buffer and can pack several sub-page buffers per segment. */
577 int DRM(addbufs_pci)( DRM_OS_IOCTL )
580 	drm_device_dma_t *dma = dev->dma;
581 	drm_buf_desc_t request;
587 	drm_buf_entry_t *entry;
591 	unsigned long offset;
595 	unsigned long *temp_pagelist;
596 	drm_buf_t **temp_buflist;
598 	if ( !dma ) return DRM_OS_ERR(EINVAL);
600 	DRM_OS_KRNFROMUSR( request, (drm_buf_desc_t *)data, sizeof(request) );
602 	count = request.count;
603 	order = DRM(order)( request.size );
606 	DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
607 		   request.count, request.size, size,
608 		   order, dev->queue_count );
610 	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
611 		return DRM_OS_ERR(EINVAL);
612 	if ( dev->queue_count )
613 		return DRM_OS_ERR(EBUSY); /* Not while in use */
615 	alignment = (request.flags & _DRM_PAGE_ALIGN)
617 		? PAGE_ALIGN(size) : size;
618 #endif /* __linux__ */
620 		? round_page(size) : size;
621 #endif /* __FreeBSD__ */
622 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
623 	total = PAGE_SIZE << page_order;
/* Same buf_use / buf_alloc gating as the AGP path. */
625 	DRM_OS_SPINLOCK( &dev->count_lock );
626 	if ( dev->buf_use ) {
627 		DRM_OS_SPINUNLOCK( &dev->count_lock );
628 		return DRM_OS_ERR(EBUSY);
630 	atomic_inc( &dev->buf_alloc );
631 	DRM_OS_SPINUNLOCK( &dev->count_lock );
634 	entry = &dma->bufs[order];
635 	if ( entry->buf_count ) {
637 		atomic_dec( &dev->buf_alloc );
638 		return DRM_OS_ERR(ENOMEM); /* May only call once for each order */
641 	if (count < 0 || count > 4096) {
643 		atomic_dec( &dev->buf_alloc );
644 		return DRM_OS_ERR(EINVAL);
647 	entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
649 	if ( !entry->buflist ) {
651 		atomic_dec( &dev->buf_alloc );
652 		return DRM_OS_ERR(ENOMEM);
654 	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
/* One seglist slot per potential page-group allocation. */
656 	entry->seglist = DRM(alloc)( count * sizeof(*entry->seglist),
658 	if ( !entry->seglist ) {
659 		DRM(free)( entry->buflist,
660 			   count * sizeof(*entry->buflist),
663 		atomic_dec( &dev->buf_alloc );
664 		return DRM_OS_ERR(ENOMEM);
666 	memset( entry->seglist, 0, count * sizeof(*entry->seglist) );
/* Grow the device-wide pagelist up-front for the worst case. */
668 	temp_pagelist = DRM(realloc)( dma->pagelist,
669 				      dma->page_count * sizeof(*dma->pagelist),
670 				      (dma->page_count + (count << page_order))
671 				      * sizeof(*dma->pagelist),
674 		DRM(free)( entry->buflist,
675 			   count * sizeof(*entry->buflist),
677 		DRM(free)( entry->seglist,
678 			   count * sizeof(*entry->seglist),
681 		atomic_dec( &dev->buf_alloc );
682 		return DRM_OS_ERR(ENOMEM);
685 	dma->pagelist = temp_pagelist;
686 	DRM_DEBUG( "pagelist: %d entries\n",
687 		   dma->page_count + (count << page_order) );
689 	entry->buf_size	= size;
690 	entry->page_order = page_order;
/* Allocate a 2^page_order page group per iteration, record every page,
 * then slice it into alignment-strided buffers until full. */
694 	while ( entry->buf_count < count ) {
695 		page = DRM(alloc_pages)( page_order, DRM_MEM_DMA );
697 		entry->seglist[entry->seg_count++] = page;
698 		for ( i = 0 ; i < (1 << page_order) ; i++ ) {
699 			DRM_DEBUG( "page %d @ 0x%08lx\n",
700 				   dma->page_count + page_count,
701 				   page + PAGE_SIZE * i );
702 			dma->pagelist[dma->page_count + page_count++]
703 				= page + PAGE_SIZE * i;
706 		      offset + size <= total && entry->buf_count < count ;
707 		      offset += alignment, ++entry->buf_count ) {
708 			buf	     = &entry->buflist[entry->buf_count];
709 			buf->idx     = dma->buf_count + entry->buf_count;
710 			buf->total   = alignment;
713 			buf->offset  = (dma->byte_count + byte_count + offset);
714 			buf->address = (void *)(page + offset);
719 			init_waitqueue_head( &buf->dma_wait );
720 #endif /* __linux__ */
723 #endif /* __FreeBSD__ */
725 #if __HAVE_DMA_HISTOGRAM
726 			buf->time_queued     = 0;
727 			buf->time_dispatched = 0;
728 			buf->time_completed  = 0;
731 			DRM_DEBUG( "buffer %d @ %p\n",
732 				   entry->buf_count, buf->address );
734 		byte_count += PAGE_SIZE << page_order;
737 	temp_buflist = DRM(realloc)( dma->buflist,
738 				     dma->buf_count * sizeof(*dma->buflist),
739 				     (dma->buf_count + entry->buf_count)
740 				     * sizeof(*dma->buflist),
743 		/* Free the entry because it isn't valid */
744 		DRM(cleanup_buf_error)(entry);
746 		atomic_dec( &dev->buf_alloc );
747 		return DRM_OS_ERR(ENOMEM);
749 	dma->buflist = temp_buflist;
751 	for ( i = 0 ; i < entry->buf_count ; i++ ) {
752 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
/* Fold the new entry's totals into the device-wide accounting. */
755 	dma->buf_count += entry->buf_count;
756 	dma->seg_count += entry->seg_count;
757 	dma->page_count += entry->seg_count << page_order;
758 	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
760 #if __HAVE_DMA_FREELIST
761 	DRM(freelist_create)( &entry->freelist, entry->buf_count );
762 	for ( i = 0 ; i < entry->buf_count ; i++ ) {
763 		DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
768 	request.count = entry->buf_count;
771 	DRM_OS_KRNTOUSR( (drm_buf_desc_t *)data, request, sizeof(request) );
773 	atomic_dec( &dev->buf_alloc );
777 #endif /* __HAVE_PCI_DMA */
/* NOTE(review): partial extract -- embedded line numbers skip; braces and
 * some statements are missing from view.  Code kept byte-identical. */
/* ioctl backend: create buffers that live inside a previously-created
 * scatter/gather region.  Mirrors DRM(addbufs_agp) except the kernel
 * address is rebased by dev->sg->handle and agp_offset is the raw
 * request.agp_start (no aperture base added). */
780 int DRM(addbufs_sg)( DRM_OS_IOCTL )
783 	drm_device_dma_t *dma = dev->dma;
784 	drm_buf_desc_t request;
785 	drm_buf_entry_t *entry;
787 	unsigned long offset;
788 	unsigned long agp_offset;
797 	drm_buf_t **temp_buflist;
799 	if ( !dma ) return DRM_OS_ERR(EINVAL);
801 	DRM_OS_KRNFROMUSR( request, (drm_buf_desc_t *)data, sizeof(request) );
803 	count = request.count;
804 	order = DRM(order)( request.size );
807 	alignment  = (request.flags & _DRM_PAGE_ALIGN)
809 		? PAGE_ALIGN(size) : size;
810 #endif /* __linux__ */
812 		? round_page(size) : size;
813 #endif /* __FreeBSD__ */
814 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
815 	total = PAGE_SIZE << page_order;
818 	agp_offset = request.agp_start;
820 	DRM_DEBUG( "count:      %d\n",  count );
821 	DRM_DEBUG( "order:      %d\n",  order );
822 	DRM_DEBUG( "size:       %d\n",  size );
823 	DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
824 	DRM_DEBUG( "alignment:  %d\n",  alignment );
825 	DRM_DEBUG( "page_order: %d\n",  page_order );
826 	DRM_DEBUG( "total:      %d\n",  total );
828 	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
829 		return DRM_OS_ERR(EINVAL);
830 	if ( dev->queue_count ) return DRM_OS_ERR(EBUSY); /* Not while in use */
/* Same buf_use / buf_alloc gating as the other backends. */
832 	DRM_OS_SPINLOCK( &dev->count_lock );
833 	if ( dev->buf_use ) {
834 		DRM_OS_SPINUNLOCK( &dev->count_lock );
835 		return DRM_OS_ERR(EBUSY);
837 	atomic_inc( &dev->buf_alloc );
838 	DRM_OS_SPINUNLOCK( &dev->count_lock );
841 	entry = &dma->bufs[order];
842 	if ( entry->buf_count ) {
844 		atomic_dec( &dev->buf_alloc );
845 		return DRM_OS_ERR(ENOMEM); /* May only call once for each order */
848 	if (count < 0 || count > 4096) {
850 		atomic_dec( &dev->buf_alloc );
851 		return DRM_OS_ERR(EINVAL);
854 	entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
856 	if ( !entry->buflist ) {
858 		atomic_dec( &dev->buf_alloc );
859 		return DRM_OS_ERR(ENOMEM);
861 	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
863 	entry->buf_size = size;
864 	entry->page_order = page_order;
/* Populate descriptors; buf->address points into the SG mapping. */
868 	while ( entry->buf_count < count ) {
869 		buf          = &entry->buflist[entry->buf_count];
870 		buf->idx     = dma->buf_count + entry->buf_count;
871 		buf->total   = alignment;
875 		buf->offset  = (dma->byte_count + offset);
876 		buf->bus_address = agp_offset + offset;
877 		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
882 		init_waitqueue_head( &buf->dma_wait );
883 #endif /* __linux__ */
886 #endif /* __FreeBSD__ */
889 		buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
890 		buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
892 		if(!buf->dev_private) {
893 			/* Set count correctly so we free the proper amount. */
894 			entry->buf_count = count;
895 			DRM(cleanup_buf_error)(entry);
897 			atomic_dec( &dev->buf_alloc );
898 			return DRM_OS_ERR(ENOMEM);
901 		memset( buf->dev_private, 0, buf->dev_priv_size );
903 # if __HAVE_DMA_HISTOGRAM
904 		buf->time_queued = 0;
905 		buf->time_dispatched = 0;
906 		buf->time_completed = 0;
909 		DRM_DEBUG( "buffer %d @ %p\n",
910 			   entry->buf_count, buf->address );
914 	byte_count += PAGE_SIZE << page_order;
917 	DRM_DEBUG( "byte_count: %d\n", byte_count );
919 	temp_buflist = DRM(realloc)( dma->buflist,
920 				     dma->buf_count * sizeof(*dma->buflist),
921 				     (dma->buf_count + entry->buf_count)
922 				     * sizeof(*dma->buflist),
925 		/* Free the entry because it isn't valid */
926 		DRM(cleanup_buf_error)(entry);
928 		atomic_dec( &dev->buf_alloc );
929 		return DRM_OS_ERR(ENOMEM);
931 	dma->buflist = temp_buflist;
933 	for ( i = 0 ; i < entry->buf_count ; i++ ) {
934 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
937 	dma->buf_count += entry->buf_count;
938 	dma->byte_count += byte_count;
940 	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
941 	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
943 #if __HAVE_DMA_FREELIST
944 	DRM(freelist_create)( &entry->freelist, entry->buf_count );
945 	for ( i = 0 ; i < entry->buf_count ; i++ ) {
946 		DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
951 	request.count = entry->buf_count;
954 	DRM_OS_KRNTOUSR( (drm_buf_desc_t *)data, request, sizeof(request) );
956 	dma->flags = _DRM_DMA_USE_SG;
958 	atomic_dec( &dev->buf_alloc );
961 #endif /* __REALLY_HAVE_SG */
/* NOTE(review): partial extract -- embedded line numbers skip; the
 * intervening #if/#endif lines for the SG/PCI branches are missing from
 * view.  Code kept byte-identical. */
/* Front-end ioctl handler: read the request flags once and dispatch to
 * the AGP, scatter/gather, or PCI allocation backend; EINVAL when no
 * backend is compiled in for the requested flavor. */
963 int DRM(addbufs)( DRM_OS_IOCTL )
965 	drm_buf_desc_t request;
967 	DRM_OS_KRNFROMUSR( request, (drm_buf_desc_t *)data, sizeof(request) );
969 #if __REALLY_HAVE_AGP
970 	if ( request.flags & _DRM_AGP_BUFFER )
971 		return DRM(addbufs_agp)( IOCTL_ARGS_PASS );
975 	if ( request.flags & _DRM_SG_BUFFER )
976 		return DRM(addbufs_sg)( IOCTL_ARGS_PASS );
980 		return DRM(addbufs_pci)( IOCTL_ARGS_PASS );
982 		return DRM_OS_ERR(EINVAL);
/* NOTE(review): partial extract -- embedded line numbers skip; code kept
 * byte-identical. */
/* ioctl handler: report the per-order buffer pools to user space.  First
 * pass counts non-empty pools; if the caller supplied enough room, a
 * second pass copies count/size/low_mark/high_mark per pool.  Marks
 * dev->buf_use so no further allocation can happen after this call. */
986 int DRM(infobufs)( DRM_OS_IOCTL )
989 	drm_device_dma_t *dma = dev->dma;
990 	drm_buf_info_t request;
994 	if ( !dma ) return DRM_OS_ERR(EINVAL);
996 	DRM_OS_SPINLOCK( &dev->count_lock );
997 	if ( atomic_read( &dev->buf_alloc ) ) {
998 		DRM_OS_SPINUNLOCK( &dev->count_lock );
999 		return DRM_OS_ERR(EBUSY);
1001 	++dev->buf_use;		/* Can't allocate more after this call */
1002 	DRM_OS_SPINUNLOCK( &dev->count_lock );
1004 	DRM_OS_KRNFROMUSR( request, (drm_buf_info_t *)data, sizeof(request) );
/* Pass 1: count non-empty pools. */
1006 	for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
1007 		if ( dma->bufs[i].buf_count ) ++count;
1010 	DRM_DEBUG( "count = %d\n", count );
/* Pass 2: copy the descriptors field-by-field into the caller's array. */
1012 	if ( request.count >= count ) {
1013 		for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
1014 			if ( dma->bufs[i].buf_count ) {
1015 				drm_buf_desc_t *to = &request.list[count];
1016 				drm_buf_entry_t *from = &dma->bufs[i];
1017 				drm_freelist_t *list = &dma->bufs[i].freelist;
1018 				if ( DRM_OS_COPYTOUSR( &to->count,
1020 						  sizeof(from->buf_count) ) ||
1021 				     DRM_OS_COPYTOUSR( &to->size,
1023 						  sizeof(from->buf_size) ) ||
1024 				     DRM_OS_COPYTOUSR( &to->low_mark,
1026 						  sizeof(list->low_mark) ) ||
1027 				     DRM_OS_COPYTOUSR( &to->high_mark,
1029 						  sizeof(list->high_mark) ) )
1030 					return DRM_OS_ERR(EFAULT);
1032 				DRM_DEBUG( "%d %d %d %d %d\n",
1034 					   dma->bufs[i].buf_count,
1035 					   dma->bufs[i].buf_size,
1036 					   dma->bufs[i].freelist.low_mark,
1037 					   dma->bufs[i].freelist.high_mark );
/* Always report how many pools exist, even if the caller's array was
 * too small (caller retries with a bigger list). */
1042 	request.count = count;
1044 	DRM_OS_KRNTOUSR( (drm_buf_info_t *)data, request, sizeof(request) );
/* NOTE(review): partial extract -- embedded line numbers skip; code kept
 * byte-identical. */
/* ioctl handler: set the freelist low/high water marks for the buffer
 * pool whose size maps to DRM(order)(request.size).  Both marks must lie
 * within [0, buf_count] for that pool. */
1049 int DRM(markbufs)( DRM_OS_IOCTL )
1052 	drm_device_dma_t *dma = dev->dma;
1053 	drm_buf_desc_t request;
1055 	drm_buf_entry_t *entry;
1057 	if ( !dma ) return DRM_OS_ERR(EINVAL);
1059 	DRM_OS_KRNFROMUSR( request, (drm_buf_desc_t *)data, sizeof(request) );
1061 	DRM_DEBUG( "%d, %d, %d\n",
1062 		   request.size, request.low_mark, request.high_mark );
1063 	order = DRM(order)( request.size );
1064 	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
1065 		return DRM_OS_ERR(EINVAL);
1066 	entry = &dma->bufs[order];
1068 	if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
1069 		return DRM_OS_ERR(EINVAL);
1070 	if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
1071 		return DRM_OS_ERR(EINVAL);
1073 	entry->freelist.low_mark  = request.low_mark;
1074 	entry->freelist.high_mark = request.high_mark;
/* NOTE(review): partial extract -- embedded line numbers skip; code kept
 * byte-identical. */
/* ioctl handler: return a caller-supplied list of buffer indices to the
 * free pool.  Each index is range-checked and must be owned by the
 * calling process (pid check) before DRM(free_buffer) releases it. */
1079 int DRM(freebufs)( DRM_OS_IOCTL )
1082 	drm_device_dma_t *dma = dev->dma;
1083 	drm_buf_free_t request;
1088 	if ( !dma ) return DRM_OS_ERR(EINVAL);
1090 	DRM_OS_KRNFROMUSR( request, (drm_buf_free_t *)data, sizeof(request) );
1092 	DRM_DEBUG( "%d\n", request.count );
1093 	for ( i = 0 ; i < request.count ; i++ ) {
/* Fetch the i-th index from the user's list (source operand is among
 * the missing lines). */
1094 		if ( DRM_OS_COPYFROMUSR( &idx,
1097 			return DRM_OS_ERR(EFAULT);
1098 		if ( idx < 0 || idx >= dma->buf_count ) {
1099 			DRM_ERROR( "Index %d (of %d max)\n",
1100 				   idx, dma->buf_count - 1 );
1101 			return DRM_OS_ERR(EINVAL);
1103 		buf = dma->buflist[idx];
1104 		if ( buf->pid != DRM_OS_CURRENTPID ) {
1105 			DRM_ERROR( "Process %d freeing buffer owned by %d\n",
1106 				   DRM_OS_CURRENTPID, buf->pid );
1107 			return DRM_OS_ERR(EINVAL);
1109 		DRM(free_buffer)( dev, buf );
/* NOTE(review): partial extract -- embedded line numbers skip; braces and
 * some statements are missing from view.  Apart from the fix noted below,
 * code is kept byte-identical. */
/* FIX: every occurrence of the kernel symbol `&current` had been mangled
 * to the mojibake `¤t` (the text "&curren" was swallowed as the HTML
 * entity for the currency sign).  Restored to `&current->mm->mmap_sem`
 * in all eight down/up calls below. */
/* ioctl handler: mmap all DMA buffers into the calling process in one
 * contiguous region (AGP/SG buffer map or the PCI byte range), then copy
 * per-buffer idx/total/used/address records out to the caller's list.
 * Marks dev->buf_use so no further allocation can occur afterwards. */
1115 int DRM(mapbufs)( DRM_OS_IOCTL )
1118 	drm_device_dma_t *dma = dev->dma;
1122 	unsigned long virtual, address;
1123 #endif /* __linux__ */
1125 	vm_offset_t virtual, address;
1126 #if __FreeBSD_version >= 500000
1127 	struct vmspace *vms = p->td_proc->p_vmspace;
1129 	struct vmspace *vms = p->p_vmspace;
1131 #endif /* __FreeBSD__ */
1132 	drm_buf_map_t request;
1135 	if ( !dma ) return DRM_OS_ERR(EINVAL);
1137 	DRM_OS_SPINLOCK( &dev->count_lock );
1138 	if ( atomic_read( &dev->buf_alloc ) ) {
1139 		DRM_OS_SPINUNLOCK( &dev->count_lock );
1140 		return DRM_OS_ERR(EBUSY);
1142 	dev->buf_use++;		/* Can't allocate more after this call */
1143 	DRM_OS_SPINUNLOCK( &dev->count_lock );
1145 	DRM_OS_KRNFROMUSR( request, (drm_buf_map_t *)data, sizeof(request) );
1147 	if ( request.count >= dma->buf_count ) {
/* AGP/SG buffers are mapped through the driver's buffer map... */
1148 		if ( (__HAVE_AGP && (dma->flags & _DRM_DMA_USE_AGP)) ||
1149 		     (__HAVE_SG && (dma->flags & _DRM_DMA_USE_SG)) ) {
1150 			drm_map_t *map = DRIVER_AGP_BUFFERS_MAP( dev );
1153 				retcode = DRM_OS_ERR(EINVAL);
/* Older kernels used a plain semaphore for mmap_sem; 2.4.3+ use an rwsem. */
1158 #if LINUX_VERSION_CODE <= 0x020402
1159 			down( &current->mm->mmap_sem );
1161 			down_write( &current->mm->mmap_sem );
1164 			virtual = do_mmap( filp, 0, map->size,
1165 					   PROT_READ | PROT_WRITE,
1167 					   (unsigned long)map->offset );
1168 #if LINUX_VERSION_CODE <= 0x020402
1169 			up( &current->mm->mmap_sem );
1171 			up_write( &current->mm->mmap_sem );
1173 #endif /* __linux__ */
1175 			virtual = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
1176 			retcode = vm_mmap(&vms->vm_map,
1178 					  round_page(map->size),
1179 					  PROT_READ|PROT_WRITE, VM_PROT_ALL,
1181 					  SLIST_FIRST(&kdev->si_hlist),
1182 					  (unsigned long)map->offset );
1183 #endif /* __FreeBSD__ */
/* ...while plain PCI buffers map the whole dma->byte_count range at
 * device offset 0. */
1186 #if LINUX_VERSION_CODE <= 0x020402
1187 			down( &current->mm->mmap_sem );
1189 			down_write( &current->mm->mmap_sem );
1192 			virtual = do_mmap( filp, 0, dma->byte_count,
1193 					   PROT_READ | PROT_WRITE,
1195 #if LINUX_VERSION_CODE <= 0x020402
1196 			up( &current->mm->mmap_sem );
1198 			up_write( &current->mm->mmap_sem );
1200 #endif /* __linux__ */
1202 			virtual = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
1203 			retcode = vm_mmap(&vms->vm_map,
1205 					  round_page(dma->byte_count),
1206 					  PROT_READ|PROT_WRITE, VM_PROT_ALL,
1208 					  SLIST_FIRST(&kdev->si_hlist),
1210 #endif /* __FreeBSD__ */
/* do_mmap returns errors as small negative values cast to unsigned. */
1213 		if ( virtual > -1024UL ) {
1215 			retcode = (signed long)virtual;
1218 #endif /* __linux__ */
1222 #endif /* __FreeBSD__ */
1223 		request.virtual = (void *)virtual;
/* Copy one record per buffer back to the user's list. */
1225 		for ( i = 0 ; i < dma->buf_count ; i++ ) {
1226 			if ( DRM_OS_COPYTOUSR( &request.list[i].idx,
1227 					   &dma->buflist[i]->idx,
1228 					   sizeof(request.list[0].idx) ) ) {
1229 				retcode = DRM_OS_ERR(EFAULT);
1232 			if ( DRM_OS_COPYTOUSR( &request.list[i].total,
1233 					   &dma->buflist[i]->total,
1234 					   sizeof(request.list[0].total) ) ) {
1235 				retcode = DRM_OS_ERR(EFAULT);
1238 			if ( DRM_OS_COPYTOUSR( &request.list[i].used,
1241 				retcode = DRM_OS_ERR(EFAULT);
1244 			address = virtual + dma->buflist[i]->offset; /* *** */
1245 			if ( DRM_OS_COPYTOUSR( &request.list[i].address,
1247 					   sizeof(address) ) ) {
1248 				retcode = DRM_OS_ERR(EFAULT);
1254 	request.count = dma->buf_count;
1256 	DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );
1258 	DRM_OS_KRNTOUSR( (drm_buf_map_t *)data, request, sizeof(request) );
1263 #endif /* __HAVE_DMA */