/* drm_bufs.c -- Generic buffer template -*- linux-c -*-
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 */
/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "dev/pci/pcireg.h"

#include "dev/drm/drmP.h"

/*
 * Compute order.  Can be made faster.
 */
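/* For example, drm_order(4096) == 12 and drm_order(4100) == 13: the
 * result is ceil(log2(size)), the smallest n for which (1UL << n)
 * covers the requested size.
 */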
int drm_order(unsigned long size)
{
        int order;
        unsigned long tmp;

        for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );

        if ( size & ~(1UL << order) )
                ++order;

        return order;
}

/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
 * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no virtual
 * address for accessing them.  Cleaned up at unload.
 */
static int drm_alloc_resource(drm_device_t *dev, int resource)
{
        if (resource >= DRM_MAX_PCI_RESOURCE) {
                DRM_ERROR("Resource %d too large\n", resource);
                return 1;
        }

        DRM_UNLOCK();
        if (dev->pcir[resource] != NULL) {
                DRM_LOCK();
                return 0;
        }

        dev->pcirid[resource] = PCIR_BAR(resource);
        dev->pcir[resource] = bus_alloc_resource_any(dev->device,
            SYS_RES_MEMORY, &dev->pcirid[resource], RF_SHAREABLE);
        DRM_LOCK();

        if (dev->pcir[resource] == NULL) {
                DRM_ERROR("Couldn't find resource 0x%x\n", resource);
                return 1;
        }

        return 0;
}

unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
{
        if (drm_alloc_resource(dev, resource) != 0)
                return 0;

        return rman_get_start(dev->pcir[resource]);
}

unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
{
        if (drm_alloc_resource(dev, resource) != 0)
                return 0;

        return rman_get_size(dev->pcir[resource]);
}
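
/* A minimal driver-side sketch of the two helpers above (the dev_priv
 * fields, and the use of BAR 1, are hypothetical and chip-specific):
 *
 *      dev_priv->mmio_base = drm_get_resource_start(dev, 1);
 *      dev_priv->mmio_size = drm_get_resource_len(dev, 1);
 */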

/* drm_addmap is called with the DRM lock held and returns with it held,
 * on success and error alike (see drm_addmap_ioctl).  The early error
 * returns below therefore re-acquire the lock after the temporary
 * DRM_UNLOCK(), so the caller can drop it unconditionally.
 */
int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size,
    drm_map_type_t type, drm_map_flags_t flags, drm_local_map_t **map_ptr)
{
        drm_local_map_t *map;
        int align;
        /*drm_agp_mem_t *entry;
        int valid;*/

        /* Only allow shared memory to be removable since we only keep enough
         * book keeping information about shared memory to allow for removal
         * when processes fork.
         */
        if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
                DRM_ERROR("Requested removable map for non-DRM_SHM\n");
                return EINVAL;
        }
        if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
                DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n",
                    offset, size);
                return EINVAL;
        }
        if (offset + size < offset) {
                DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n",
                    offset, size);
                return EINVAL;
        }

        DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset,
            size, type);

        /* Check if this is just another version of a kernel-allocated map, and
         * just hand that back if so.
         */
        if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
            type == _DRM_SHM) {
                TAILQ_FOREACH(map, &dev->maplist, link) {
                        if (map->type == type && (map->offset == offset ||
                            (map->type == _DRM_SHM &&
                            map->flags == _DRM_CONTAINS_LOCK))) {
                                map->size = size;
                                DRM_DEBUG("Found kernel map %d\n", type);
                                goto done;
                        }
                }
        }
        DRM_UNLOCK();

        /* Allocate a new map structure, fill it in, and do any type-specific
         * initialization necessary.
         */
        map = malloc(sizeof(*map), M_DRM, M_ZERO | M_NOWAIT);
        if ( !map ) {
                DRM_LOCK();
                return DRM_ERR(ENOMEM);
        }

        map->offset = offset;
        map->size = size;
        map->type = type;
        map->flags = flags;

        switch ( map->type ) {
        case _DRM_REGISTERS:
                map->handle = drm_ioremap(dev, map);
                if (!(map->flags & _DRM_WRITE_COMBINING))
                        break;
                /* FALLTHROUGH */
        case _DRM_FRAME_BUFFER:
                if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
                        map->mtrr = 1;
                break;
        case _DRM_SHM:
                map->handle = malloc(map->size, M_DRM, M_NOWAIT);
                DRM_DEBUG( "%lu %d %p\n",
                           map->size, drm_order(map->size), map->handle );
                if ( !map->handle ) {
                        free(map, M_DRM);
                        DRM_LOCK();
                        return DRM_ERR(ENOMEM);
                }
                map->offset = (unsigned long)map->handle;
                if ( map->flags & _DRM_CONTAINS_LOCK ) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
                        DRM_LOCK();
                        if (dev->lock.hw_lock != NULL) {
                                /* Return with the lock held, per the note
                                 * above. */
                                free(map->handle, M_DRM);
                                free(map, M_DRM);
                                return DRM_ERR(EBUSY);
                        }
                        dev->lock.hw_lock = map->handle; /* Pointer to lock */
                        DRM_UNLOCK();
                }
                break;
        case _DRM_AGP:
                /*valid = 0;*/
                map->offset += dev->agp->base;
                map->mtrr   = dev->agp->mtrr; /* for getmap */
                /*for (entry = dev->agp->memory; entry; entry = entry->next) {
                        if ((map->offset >= entry->bound) &&
                            (map->offset + map->size <=
                            entry->bound + entry->pages * PAGE_SIZE)) {
                                valid = 1;
                                break;
                        }
                }
                if (!valid) {
                        free(map, M_DRM);
                        return DRM_ERR(EACCES);
                }*/
                break;
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        free(map, M_DRM);
                        DRM_LOCK();
                        return DRM_ERR(EINVAL);
                }
                map->offset = map->offset + dev->sg->handle;
                break;
        case _DRM_CONSISTENT:
                /* Unfortunately, we don't get any alignment specification from
                 * the caller, so we have to guess.  drm_pci_alloc requires
                 * a power-of-two alignment, so try to align the bus address of
                 * the map to its size if possible, otherwise just assume
                 * PAGE_SIZE alignment.
                 */
                align = map->size;
                if ((align & (align - 1)) != 0)
                        align = PAGE_SIZE;
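                /* For example, a 64KB (0x10000) map is a power of two, so
                 * align == map->size; a 68KB (0x11000) map is not, and falls
                 * back to PAGE_SIZE alignment.
                 */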
                map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
                if (map->dmah == NULL) {
                        free(map, M_DRM);
                        DRM_LOCK();
                        return DRM_ERR(ENOMEM);
                }
                map->handle = map->dmah->vaddr;
                map->offset = map->dmah->busaddr;
                break;
        default:
                DRM_ERROR("Bad map type %d\n", map->type);
                free(map, M_DRM);
                DRM_LOCK();
                return DRM_ERR(EINVAL);
        }

        DRM_LOCK();
        TAILQ_INSERT_TAIL(&dev->maplist, map, link);

done:
        /* Jumped to, with lock held, when a kernel map is found. */

        DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
            map->size);

        *map_ptr = map;

        return 0;
}

int drm_addmap_ioctl(DRM_IOCTL_ARGS)
{
        drm_map_t request;
        drm_local_map_t *map;
        int err;
        DRM_DEVICE;

        if (!(dev->flags & (FREAD|FWRITE)))
                return DRM_ERR(EACCES); /* Require read/write */

        DRM_COPY_FROM_USER_IOCTL(request, (drm_map_t *)data, sizeof(drm_map_t));

        if (!DRM_SUSER(p) && request.type != _DRM_AGP)
                return DRM_ERR(EACCES);

        DRM_LOCK();
        err = drm_addmap(dev, request.offset, request.size, request.type,
            request.flags, &map);
        DRM_UNLOCK();
        if (err != 0)
                return err;

        request.offset = map->offset;
        request.size = map->size;
        request.type = map->type;
        request.flags = map->flags;
        request.mtrr   = map->mtrr;
        request.handle = map->handle;

        if (request.type != _DRM_SHM) {
                request.handle = (void *)request.offset;
        }
        DRM_COPY_TO_USER_IOCTL((drm_map_t *)data, request, sizeof(drm_map_t));

        return 0;
}

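/* Remove a map from the device's map list and release its resources.
 * The caller must hold the DRM lock, as asserted below, and must ensure
 * the mapping is no longer referenced.
 */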
void drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
{
        DRM_SPINLOCK_ASSERT(&dev->dev_lock);

        TAILQ_REMOVE(&dev->maplist, map, link);

        switch (map->type) {
        case _DRM_REGISTERS:
                if (map->bsr == NULL)
                        drm_ioremapfree(map);
                /* FALLTHROUGH */
        case _DRM_FRAME_BUFFER:
                if (map->mtrr) {
                        int __unused retcode;

                        retcode = drm_mtrr_del(0, map->offset, map->size,
                            DRM_MTRR_WC);
                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                }
                break;
        case _DRM_SHM:
                free(map->handle, M_DRM);
                break;
        case _DRM_AGP:
        case _DRM_SCATTER_GATHER:
                break;
        case _DRM_CONSISTENT:
                drm_pci_free(dev, map->dmah);
                break;
        }

        if (map->bsr != NULL) {
                bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid,
                    map->bsr);
        }

        free(map, M_DRM);
}

/* Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 */

int drm_rmmap_ioctl(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_local_map_t *map;
        drm_map_t request;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_map_t *)data, sizeof(request) );

        DRM_LOCK();
        TAILQ_FOREACH(map, &dev->maplist, link) {
                if (map->handle == request.handle &&
                    map->flags & _DRM_REMOVABLE)
                        break;
        }

        /* No match found. */
        if (map == NULL) {
                DRM_UNLOCK();
                return DRM_ERR(EINVAL);
        }

        drm_rmmap(dev, map);

        DRM_UNLOCK();

        return 0;
}


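/* Tear down a partially constructed buffer entry after an allocation
 * failure: free any DMA segments and per-buffer private data already
 * allocated, and reset the counts.
 */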
static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        drm_pci_free(dev, entry->seglist[i]);
                }
                free(entry->seglist, M_DRM);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        free(entry->buflist[i].dev_private, M_DRM);
                }
                free(entry->buflist, M_DRM);

                entry->buf_count = 0;
        }
}

static int drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        /*drm_agp_mem_t *agp_entry;
        int valid*/
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment  = (request->flags & _DRM_PAGE_ALIGN)
                ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;
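        /* Worked example: request->size == 65536 gives order == 16 and
         * size == 65536; with 4KB pages (PAGE_SHIFT == 12), page_order == 4
         * and total == PAGE_SIZE << 4 == 64KB.
         */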

        byte_count = 0;
        agp_offset = dev->agp->base + request->agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: 0x%lx\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        /* Make sure buffers are located in AGP memory that we own */
        /* Breaks MGA due to drm_alloc_agp not setting up entries for the
         * memory.  Safe to ignore for now because these ioctls are still
         * root-only.
         */
        /*valid = 0;
        for (agp_entry = dev->agp->memory; agp_entry;
            agp_entry = agp_entry->next) {
                if ((agp_offset >= agp_entry->bound) &&
                    (agp_offset + total * count <=
                    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
                        valid = 1;
                        break;
                }
        }
        if (!valid) {
                DRM_DEBUG("zone invalid\n");
                return DRM_ERR(EINVAL);
        }*/

        entry = &dma->bufs[order];

        entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
            M_NOWAIT | M_ZERO);
        if ( !entry->buflist ) {
                return DRM_ERR(ENOMEM);
        }

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next    = NULL;
                buf->pending = 0;
                buf->filp    = NULL;

                buf->dev_priv_size = dev->driver.buf_priv_size;
                buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
                    M_NOWAIT | M_ZERO);
                if (buf->dev_private == NULL) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        return DRM_ERR(ENOMEM);
                }

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = realloc(dma->buflist,
            (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
            M_NOWAIT);
        if (temp_buflist == NULL) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                return DRM_ERR(ENOMEM);
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_AGP;

        return 0;
}

static int drm_do_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        DRM_DEBUG( "count=%d, size=%d (%d), order=%d\n",
                   request->count, request->size, size, order );

        alignment = (request->flags & _DRM_PAGE_ALIGN)
                ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        entry = &dma->bufs[order];

        entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
            M_NOWAIT | M_ZERO);
        entry->seglist = malloc(count * sizeof(*entry->seglist), M_DRM,
            M_NOWAIT | M_ZERO);

        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
        temp_pagelist = malloc((dma->page_count + (count << page_order)) *
            sizeof(*dma->pagelist), M_DRM, M_NOWAIT);

        if (entry->buflist == NULL || entry->seglist == NULL ||
            temp_pagelist == NULL) {
                /* free(9) ignores NULL, so whichever allocations did
                 * succeed are released here. */
                free(entry->buflist, M_DRM);
                free(entry->seglist, M_DRM);
                free(temp_pagelist, M_DRM);
                return DRM_ERR(ENOMEM);
        }

        memcpy(temp_pagelist, dma->pagelist, dma->page_count *
            sizeof(*dma->pagelist));

        DRM_DEBUG( "pagelist: %d entries\n",
                   dma->page_count + (count << page_order) );

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;

        while ( entry->buf_count < count ) {
                drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
                    0xfffffffful);
                if (dmah == NULL) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        free(temp_pagelist, M_DRM);
                        return DRM_ERR(ENOMEM);
                }

                entry->seglist[entry->seg_count++] = dmah;
                for ( i = 0 ; i < (1 << page_order) ; i++ ) {
                        DRM_DEBUG( "page %d @ %p\n",
                                   dma->page_count + page_count,
                                   (char *)dmah->vaddr + PAGE_SIZE * i );
                        temp_pagelist[dma->page_count + page_count++] =
                            (long)dmah->vaddr + PAGE_SIZE * i;
                }
                for ( offset = 0 ;
                      offset + size <= total && entry->buf_count < count ;
                      offset += alignment, ++entry->buf_count ) {
                        buf          = &entry->buflist[entry->buf_count];
                        buf->idx     = dma->buf_count + entry->buf_count;
                        buf->total   = alignment;
                        buf->order   = order;
                        buf->used    = 0;
                        buf->offset  = (dma->byte_count + byte_count + offset);
                        buf->address = ((char *)dmah->vaddr + offset);
                        buf->bus_address = dmah->busaddr + offset;
                        buf->next    = NULL;
                        buf->pending = 0;
                        buf->filp    = NULL;

                        buf->dev_priv_size = dev->driver.buf_priv_size;
                        buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
                            M_NOWAIT | M_ZERO);
                        if (buf->dev_private == NULL) {
                                /* Set count correctly so we free the proper amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                drm_cleanup_buf_error(dev, entry);
                                free(temp_pagelist, M_DRM);
                                return DRM_ERR(ENOMEM);
                        }

                        DRM_DEBUG( "buffer %d @ %p\n",
                                   entry->buf_count, buf->address );
                }
                byte_count += PAGE_SIZE << page_order;
        }

        temp_buflist = realloc(dma->buflist,
            (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
            M_NOWAIT);
        if (temp_buflist == NULL) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                free(temp_pagelist, M_DRM);
                return DRM_ERR(ENOMEM);
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        /* No allocations failed, so now we can replace the original pagelist
         * with the new one.
         */
        free(dma->pagelist, M_DRM);
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        request->count = entry->buf_count;
        request->size = size;

        return 0;
}

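/* Scatter/gather variant of the AGP path above: the buffers live in the
 * driver's SG area, so the addresses handed back to userland are biased
 * by dev->sg->handle rather than by the AGP aperture base.
 */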
static int drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment  = (request->flags & _DRM_PAGE_ALIGN)
                ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        entry = &dma->bufs[order];

        entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
            M_NOWAIT | M_ZERO);
        if (entry->buflist == NULL)
                return DRM_ERR(ENOMEM);

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset + dev->sg->handle);
                buf->next    = NULL;
                buf->pending = 0;
                buf->filp    = NULL;

                buf->dev_priv_size = dev->driver.buf_priv_size;
                buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
                    M_NOWAIT | M_ZERO);
                if (buf->dev_private == NULL) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        return DRM_ERR(ENOMEM);
                }

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = realloc(dma->buflist,
            (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
            M_NOWAIT);
        if (temp_buflist == NULL) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                return DRM_ERR(ENOMEM);
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_SG;

        return 0;
}

int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
        int order, ret;

        /* Validate before taking the lock, so the error returns don't
         * leave dma_lock held.
         */
        if (request->count < 0 || request->count > 4096)
                return DRM_ERR(EINVAL);

        order = drm_order(request->size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return DRM_ERR(EINVAL);

        DRM_SPINLOCK(&dev->dma_lock);

        /* No more allocations after first buffer-using ioctl. */
        if (dev->buf_use != 0) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return DRM_ERR(EBUSY);
        }
        /* No more than one allocation per order */
        if (dev->dma->bufs[order].buf_count != 0) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return DRM_ERR(ENOMEM);
        }

        ret = drm_do_addbufs_agp(dev, request);

        DRM_SPINUNLOCK(&dev->dma_lock);

        return ret;
}

int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
        int order, ret;

        /* As above, validate before taking the lock. */
        if (!DRM_SUSER(DRM_CURPROC))
                return DRM_ERR(EACCES);

        if (request->count < 0 || request->count > 4096)
                return DRM_ERR(EINVAL);

        order = drm_order(request->size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return DRM_ERR(EINVAL);

        DRM_SPINLOCK(&dev->dma_lock);

        /* No more allocations after first buffer-using ioctl. */
        if (dev->buf_use != 0) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return DRM_ERR(EBUSY);
        }
        /* No more than one allocation per order */
        if (dev->dma->bufs[order].buf_count != 0) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return DRM_ERR(ENOMEM);
        }

        ret = drm_do_addbufs_sg(dev, request);

        DRM_SPINUNLOCK(&dev->dma_lock);

        return ret;
}

int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
{
        int order, ret;

        /* As above, validate before taking the lock. */
        if (!DRM_SUSER(DRM_CURPROC))
                return DRM_ERR(EACCES);

        if (request->count < 0 || request->count > 4096)
                return DRM_ERR(EINVAL);

        order = drm_order(request->size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return DRM_ERR(EINVAL);

        DRM_SPINLOCK(&dev->dma_lock);

        /* No more allocations after first buffer-using ioctl. */
        if (dev->buf_use != 0) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return DRM_ERR(EBUSY);
        }
        /* No more than one allocation per order */
        if (dev->dma->bufs[order].buf_count != 0) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return DRM_ERR(ENOMEM);
        }

        ret = drm_do_addbufs_pci(dev, request);

        DRM_SPINUNLOCK(&dev->dma_lock);

        return ret;
}

int drm_addbufs_ioctl(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_buf_desc_t request;
        int err;

        DRM_COPY_FROM_USER_IOCTL(request, (drm_buf_desc_t *)data,
            sizeof(request));

        if (request.flags & _DRM_AGP_BUFFER)
                err = drm_addbufs_agp(dev, &request);
        else if (request.flags & _DRM_SG_BUFFER)
                err = drm_addbufs_sg(dev, &request);
        else
                err = drm_addbufs_pci(dev, &request);

        DRM_COPY_TO_USER_IOCTL((drm_buf_desc_t *)data, request,
            sizeof(request));

        return err;
}
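
/* Userspace reaches the dispatcher above through the DRM_IOCTL_ADD_BUFS
 * request.  A rough sketch of such a call, assuming an AGP-capable
 * device (the values are illustrative only):
 *
 *      drm_buf_desc_t req;
 *
 *      memset(&req, 0, sizeof(req));
 *      req.count = 32;
 *      req.size  = 65536;
 *      req.flags = _DRM_AGP_BUFFER;
 *      ioctl(fd, DRM_IOCTL_ADD_BUFS, &req);
 */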

int drm_infobufs(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_info_t request;
        int i;
        int count;
        int retcode = 0;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_info_t *)data, sizeof(request) );

        DRM_SPINLOCK(&dev->dma_lock);
        ++dev->buf_use;         /* Can't allocate more after this call */
        DRM_SPINUNLOCK(&dev->dma_lock);

        for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                if ( dma->bufs[i].buf_count ) ++count;
        }

        DRM_DEBUG( "count = %d\n", count );

        if ( request.count >= count ) {
                for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                        if ( dma->bufs[i].buf_count ) {
                                drm_buf_desc_t from;

                                from.count = dma->bufs[i].buf_count;
                                from.size = dma->bufs[i].buf_size;
                                from.low_mark = dma->bufs[i].freelist.low_mark;
                                from.high_mark = dma->bufs[i].freelist.high_mark;

                                if (DRM_COPY_TO_USER(&request.list[count], &from,
                                    sizeof(drm_buf_desc_t)) != 0) {
                                        retcode = DRM_ERR(EFAULT);
                                        break;
                                }

                                DRM_DEBUG( "%d %d %d %d %d\n",
                                           i,
                                           dma->bufs[i].buf_count,
                                           dma->bufs[i].buf_size,
                                           dma->bufs[i].freelist.low_mark,
                                           dma->bufs[i].freelist.high_mark );
                                ++count;
                        }
                }
        }
        request.count = count;

        DRM_COPY_TO_USER_IOCTL( (drm_buf_info_t *)data, request, sizeof(request) );

        return retcode;
}

int drm_markbufs(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int order;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );

        DRM_DEBUG( "%d, %d, %d\n",
                   request.size, request.low_mark, request.high_mark );

        order = drm_order(request.size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
            request.low_mark < 0 || request.high_mark < 0) {
                return DRM_ERR(EINVAL);
        }

        DRM_SPINLOCK(&dev->dma_lock);
        if (request.low_mark > dma->bufs[order].buf_count ||
            request.high_mark > dma->bufs[order].buf_count) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return DRM_ERR(EINVAL);
        }

        dma->bufs[order].freelist.low_mark  = request.low_mark;
        dma->bufs[order].freelist.high_mark = request.high_mark;
        DRM_SPINUNLOCK(&dev->dma_lock);

        return 0;
}

int drm_freebufs(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_free_t request;
        int i;
        int idx;
        drm_buf_t *buf;
        int retcode = 0;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_free_t *)data, sizeof(request) );

        DRM_DEBUG( "%d\n", request.count );

        DRM_SPINLOCK(&dev->dma_lock);
        for ( i = 0 ; i < request.count ; i++ ) {
                if (DRM_COPY_FROM_USER(&idx, &request.list[i], sizeof(idx))) {
                        retcode = DRM_ERR(EFAULT);
                        break;
                }
                if ( idx < 0 || idx >= dma->buf_count ) {
                        DRM_ERROR( "Index %d (of %d max)\n",
                                   idx, dma->buf_count - 1 );
                        retcode = DRM_ERR(EINVAL);
                        break;
                }
                buf = dma->buflist[idx];
                if ( buf->filp != filp ) {
                        DRM_ERROR("Process %d freeing buffer not owned\n",
                                   DRM_CURRENTPID);
                        retcode = DRM_ERR(EINVAL);
                        break;
                }
                drm_free_buffer(dev, buf);
        }
        DRM_SPINUNLOCK(&dev->dma_lock);

        return retcode;
}

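/* Map all DMA buffers into the client's address space as one contiguous
 * region (the AGP/SG buffer map, or the whole PCI DMA area), then copy
 * the per-buffer index/total/address table out to the caller.
 */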
int drm_mapbufs(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        int retcode = 0;
        const int zero = 0;
        vm_offset_t address;
        struct vmspace *vms;
#ifdef __FreeBSD__
        vm_ooffset_t foff;
        vm_size_t size;
        vm_offset_t vaddr;
#elif defined(__NetBSD__) || defined(__OpenBSD__)
        struct vnode *vn;
        voff_t foff;
        vsize_t size;
        vaddr_t vaddr;
#endif /* __NetBSD__ || __OpenBSD__ */

        drm_buf_map_t request;
        int i;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_map_t *)data, sizeof(request) );

#if defined(__NetBSD__) || defined(__OpenBSD__)
        if (!vfinddev(kdev, VCHR, &vn))
                return 0;       /* FIXME: Shouldn't this be EINVAL or something? */
#endif /* __NetBSD__ || __OpenBSD */

#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
        vms = p->td_proc->p_vmspace;
#else
        vms = p->p_vmspace;
#endif

        DRM_SPINLOCK(&dev->dma_lock);
        dev->buf_use++;         /* Can't allocate more after this call */
        DRM_SPINUNLOCK(&dev->dma_lock);

        if (request.count < dma->buf_count)
                goto done;

        if ((dev->driver.use_agp && (dma->flags & _DRM_DMA_USE_AGP)) ||
            (dev->driver.use_sg && (dma->flags & _DRM_DMA_USE_SG))) {
                drm_local_map_t *map = dev->agp_buffer_map;

                if (map == NULL) {
                        retcode = EINVAL;
                        goto done;
                }
                size = round_page(map->size);
                foff = map->offset;
        } else {
                size = round_page(dma->byte_count);
                foff = 0;
        }

#ifdef __FreeBSD__
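        /* Hint the mapping to start at the first page above the maximum
         * data segment, keeping it clear of sbrk()-style heap growth.
         */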
        vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
#if __FreeBSD_version >= 600023
        retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
            VM_PROT_ALL, MAP_SHARED, OBJT_DEVICE, kdev, foff );
#else
        retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
            VM_PROT_ALL, MAP_SHARED, SLIST_FIRST(&kdev->si_hlist), foff );
#endif
#elif defined(__NetBSD__) || defined(__OpenBSD__)
        vaddr = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
        retcode = uvm_mmap(&vms->vm_map, &vaddr, size,
            UVM_PROT_READ | UVM_PROT_WRITE, UVM_PROT_ALL, MAP_SHARED,
            &vn->v_uobj, foff, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
#endif /* __NetBSD__ || __OpenBSD */
        if (retcode)
                goto done;

        request.virtual = (void *)vaddr;

        for ( i = 0 ; i < dma->buf_count ; i++ ) {
                if (DRM_COPY_TO_USER(&request.list[i].idx,
                    &dma->buflist[i]->idx, sizeof(request.list[0].idx))) {
                        retcode = EFAULT;
                        goto done;
                }
                if (DRM_COPY_TO_USER(&request.list[i].total,
                    &dma->buflist[i]->total, sizeof(request.list[0].total))) {
                        retcode = EFAULT;
                        goto done;
                }
                if (DRM_COPY_TO_USER(&request.list[i].used, &zero,
                    sizeof(zero))) {
                        retcode = EFAULT;
                        goto done;
                }
                address = vaddr + dma->buflist[i]->offset; /* *** */
                if (DRM_COPY_TO_USER(&request.list[i].address, &address,
                    sizeof(address))) {
                        retcode = EFAULT;
                        goto done;
                }
        }

 done:
        request.count = dma->buf_count;

        DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

        DRM_COPY_TO_USER_IOCTL((drm_buf_map_t *)data, request, sizeof(request));

        return DRM_ERR(retcode);
}