/* drm_dma.c -- DMA IOCTL and function support -*- linux-c -*-
 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 */

#ifdef __FreeBSD__
#include <machine/bus.h>
#include <machine/resource.h>
#endif /* __FreeBSD__ */

#ifdef __linux__
#define __NO_VERSION__
#include <linux/interrupt.h>	/* For task queue support */
#endif /* __linux__ */

#include "dev/drm/drmP.h"

#ifndef __HAVE_DMA_WAITQUEUE
#define __HAVE_DMA_WAITQUEUE	0
#endif
#ifndef __HAVE_DMA_RECLAIM
#define __HAVE_DMA_RECLAIM	0
#endif
#ifndef __HAVE_SHARED_IRQ
#define __HAVE_SHARED_IRQ	0
#endif

#if __HAVE_SHARED_IRQ
#define DRM_IRQ_TYPE		SA_SHIRQ
#else
#define DRM_IRQ_TYPE		0
#endif

#if __HAVE_DMA

int DRM(dma_setup)( drm_device_t *dev )
{
	int i;

	dev->dma = DRM(alloc)( sizeof(*dev->dma), DRM_MEM_DRIVER );
	if ( !dev->dma )
		return DRM_OS_ERR(ENOMEM);

	memset( dev->dma, 0, sizeof(*dev->dma) );

	for ( i = 0 ; i <= DRM_MAX_ORDER ; i++ )
		memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));

	return 0;
}

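/*
 * Buffers live in per-order pools: dma->bufs[order] holds the buffers
 * whose size is 2^order bytes.  A minimal sketch of the size-to-order
 * rounding, assuming the usual "round up to a power of two" semantics
 * (cf. DRM(order) in drm_bufs.c; this is an illustration, not the
 * canonical implementation):
 *
 *	int order;
 *	unsigned long tmp;
 *	for (order = 0, tmp = size; tmp >>= 1; ++order)
 *		;
 *	if (size & (size - 1))		// not already a power of two
 *		++order;
 */
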
void DRM(dma_takedown)(drm_device_t *dev)
{
	drm_device_dma_t  *dma = dev->dma;
	int		  i, j;

	if (!dma) return;

				/* Clear dma buffers */
	for (i = 0; i <= DRM_MAX_ORDER; i++) {
		if (dma->bufs[i].seg_count) {
			DRM_DEBUG("order %d: buf_count = %d,"
				  " seg_count = %d\n",
				  i,
				  dma->bufs[i].buf_count,
				  dma->bufs[i].seg_count);
			for (j = 0; j < dma->bufs[i].seg_count; j++) {
				DRM(free_pages)(dma->bufs[i].seglist[j],
						dma->bufs[i].page_order,
						DRM_MEM_DMA);
			}
			DRM(free)(dma->bufs[i].seglist,
				  dma->bufs[i].seg_count
				  * sizeof(*dma->bufs[0].seglist),
				  DRM_MEM_SEGS);
		}
		if (dma->bufs[i].buf_count) {
			for (j = 0; j < dma->bufs[i].buf_count; j++) {
				if (dma->bufs[i].buflist[j].dev_private) {
					DRM(free)(dma->bufs[i].buflist[j].dev_private,
						  dma->bufs[i].buflist[j].dev_priv_size,
						  DRM_MEM_BUFS);
				}
			}
			DRM(free)(dma->bufs[i].buflist,
				  dma->bufs[i].buf_count *
				  sizeof(*dma->bufs[0].buflist),
				  DRM_MEM_BUFS);
#if __HAVE_DMA_FREELIST
			DRM(freelist_destroy)(&dma->bufs[i].freelist);
#endif
		}
	}

	if (dma->buflist) {
		DRM(free)(dma->buflist,
			  dma->buf_count * sizeof(*dma->buflist),
			  DRM_MEM_BUFS);
	}

	if (dma->pagelist) {
		DRM(free)(dma->pagelist,
			  dma->page_count * sizeof(*dma->pagelist),
			  DRM_MEM_PAGES);
	}

	DRM(free)(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
	dev->dma = NULL;
}

#if __HAVE_DMA_HISTOGRAM
/* This is slow, but is useful for debugging. */
int DRM(histogram_slot)(unsigned long count)
{
	int value = DRM_DMA_HISTOGRAM_INITIAL;
	int slot;

	for (slot = 0;
	     slot < DRM_DMA_HISTOGRAM_SLOTS;
	     ++slot, value = DRM_DMA_HISTOGRAM_NEXT(value)) {
		if (count < value) return slot;
	}
	return DRM_DMA_HISTOGRAM_SLOTS - 1;
}

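/*
 * Example: if DRM_DMA_HISTOGRAM_INITIAL were 10 and
 * DRM_DMA_HISTOGRAM_NEXT(v) were v*10 (illustrative values only; see
 * drmP.h for the real bucketing), counts would map geometrically:
 *
 *	count <  10  -> slot 0
 *	count <  100 -> slot 1
 *	count < 1000 -> slot 2
 *
 * with anything past the last boundary landing in the final slot.
 */
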
void DRM(histogram_compute)(drm_device_t *dev, drm_buf_t *buf)
{
	cycles_t queued_to_dispatched;
	cycles_t dispatched_to_completed;
	cycles_t completed_to_freed;
	int	 q2d, d2c, c2f, q2c, q2f;

	if (buf->time_queued) {
		queued_to_dispatched	= (buf->time_dispatched
					   - buf->time_queued);
		dispatched_to_completed = (buf->time_completed
					   - buf->time_dispatched);
		completed_to_freed	= (buf->time_freed
					   - buf->time_completed);

		q2d = DRM(histogram_slot)(queued_to_dispatched);
		d2c = DRM(histogram_slot)(dispatched_to_completed);
		c2f = DRM(histogram_slot)(completed_to_freed);

		q2c = DRM(histogram_slot)(queued_to_dispatched
					  + dispatched_to_completed);
		q2f = DRM(histogram_slot)(queued_to_dispatched
					  + dispatched_to_completed
					  + completed_to_freed);

		atomic_inc(&dev->histo.total);
		atomic_inc(&dev->histo.queued_to_dispatched[q2d]);
		atomic_inc(&dev->histo.dispatched_to_completed[d2c]);
		atomic_inc(&dev->histo.completed_to_freed[c2f]);

		atomic_inc(&dev->histo.queued_to_completed[q2c]);
		atomic_inc(&dev->histo.queued_to_freed[q2f]);
	}
	buf->time_queued     = 0;
	buf->time_dispatched = 0;
	buf->time_completed  = 0;
	buf->time_freed	     = 0;
}
#endif /* __HAVE_DMA_HISTOGRAM */

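/*
 * A buffer's timestamps trace its lifecycle through the DMA engine:
 * queued (client submits) -> dispatched (handed to hardware) ->
 * completed (hardware done) -> freed (returned to the freelist).
 * The histogram above accumulates cycle counts for each transition.
 */
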
void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf)
{
	if (!buf) return;

	buf->waiting  = 0;
	buf->pending  = 0;
	buf->pid      = 0;
	buf->used     = 0;
#if __HAVE_DMA_HISTOGRAM
	buf->time_completed = get_cycles();
#endif

#ifdef __linux__
	if ( __HAVE_DMA_WAITQUEUE && waitqueue_active(&buf->dma_wait)) {
		wake_up_interruptible(&buf->dma_wait);
	}
#endif /* __linux__ */
#ifdef __FreeBSD__
	if ( buf->dma_wait ) {
		wakeup( &buf->dma_wait );
		buf->dma_wait = 0;
	}
#endif /* __FreeBSD__ */
#if __HAVE_DMA_FREELIST
	else {
		drm_device_dma_t *dma = dev->dma;
				/* If processes are waiting, the last one
				   to wake will put the buffer on the free
				   list.  If no processes are waiting, we
				   put the buffer on the freelist here. */
		DRM(freelist_put)(dev, &dma->bufs[buf->order].freelist, buf);
	}
#endif
}

#if !__HAVE_DMA_RECLAIM
void DRM(reclaim_buffers)(drm_device_t *dev, pid_t pid)
{
	drm_device_dma_t *dma = dev->dma;
	int		 i;

	if (!dma) return;
	for (i = 0; i < dma->buf_count; i++) {
		if (dma->buflist[i]->pid == pid) {
			switch (dma->buflist[i]->list) {
			case DRM_LIST_NONE:
				DRM(free_buffer)(dev, dma->buflist[i]);
				break;
			case DRM_LIST_WAIT:
				dma->buflist[i]->list = DRM_LIST_RECLAIM;
				break;
			default:
				/* Buffer already on hardware. */
				break;
			}
		}
	}
}
#endif

/* GH: This is a big hack for now...
 */
#if __HAVE_OLD_DMA

void DRM(clear_next_buffer)(drm_device_t *dev)
{
	drm_device_dma_t *dma = dev->dma;

	dma->next_buffer = NULL;
	if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
		DRM_OS_WAKEUP_INT(&dma->next_queue->flush_queue);
	}
	dma->next_queue	 = NULL;
}

int DRM(select_queue)(drm_device_t *dev, void (*wrapper)(unsigned long))
{
	int i;
	int candidate = -1;
	int j	      = jiffies;

	if (!dev) {
		DRM_ERROR("No device\n");
		return -1;
	}
	if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
				/* This only happens between the time the
				   interrupt is initialized and the time
				   the queues are initialized. */
		return -1;
	}

				/* Doing "while locked" DMA? */
	if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
		return DRM_KERNEL_CONTEXT;
	}

				/* If there are buffers on the last_context
				   queue, and we have not been executing
				   this context very long, continue to
				   execute this context. */
	if (dev->last_switch <= j
	    && dev->last_switch + DRM_TIME_SLICE > j
	    && DRM_WAITCOUNT(dev, dev->last_context)) {
		return dev->last_context;
	}

				/* Otherwise, find a candidate */
	for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
		if (DRM_WAITCOUNT(dev, i)) {
			candidate = dev->last_checked = i;
			break;
		}
	}

	if (candidate < 0) {
		for (i = 0; i < dev->queue_count; i++) {
			if (DRM_WAITCOUNT(dev, i)) {
				candidate = dev->last_checked = i;
				break;
			}
		}
	}

	if (wrapper
	    && candidate >= 0
	    && candidate != dev->last_context
	    && dev->last_switch <= j
	    && dev->last_switch + DRM_TIME_SLICE > j) {
#ifdef __linux__
		if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) {
			del_timer(&dev->timer);
			dev->timer.function = wrapper;
			dev->timer.data	    = (unsigned long)dev;
			dev->timer.expires  = dev->last_switch+DRM_TIME_SLICE;
			add_timer(&dev->timer);
		}
#endif /* __linux__ */
#ifdef __FreeBSD__
		if (dev->timer.c_time != dev->last_switch + DRM_TIME_SLICE) {
			callout_reset(&dev->timer,
				      dev->last_switch + DRM_TIME_SLICE - j,
				      (void (*)(void *))wrapper,
				      dev);
		}
#endif /* __FreeBSD__ */
		return -1;
	}

	return candidate;
}

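/*
 * Queue selection order, as implemented above: (1) the kernel context
 * always wins if it has buffers waiting ("while locked" DMA); (2) the
 * last context keeps running while buffers remain and its
 * DRM_TIME_SLICE has not expired; (3) otherwise contexts are scanned
 * round-robin starting after last_checked, wrapping once.  If a new
 * candidate is found while the current slice is still live, the switch
 * is deferred: a timer is armed to re-run the scheduler at the end of
 * the slice and -1 is returned.
 */
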
int DRM(dma_enqueue)(drm_device_t *dev, drm_dma_t *d)
{
	int		  i;
	drm_queue_t	  *q;
	drm_buf_t	  *buf;
	int		  idx;
	int		  while_locked = 0;
	drm_device_dma_t  *dma = dev->dma;
#ifdef __linux__
	DECLARE_WAITQUEUE(entry, current);
#endif /* __linux__ */
#ifdef __FreeBSD__
	int		  error;
#endif /* __FreeBSD__ */

	DRM_DEBUG("%d\n", d->send_count);

	if (d->flags & _DRM_DMA_WHILE_LOCKED) {
		int context = dev->lock.hw_lock->lock;

		if (!_DRM_LOCK_IS_HELD(context)) {
			DRM_ERROR("No lock held during \"while locked\""
				  " request\n");
			return DRM_OS_ERR(EINVAL);
		}
		if (d->context != _DRM_LOCKING_CONTEXT(context)
		    && _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
			DRM_ERROR("Lock held by %d while %d makes"
				  " \"while locked\" request\n",
				  _DRM_LOCKING_CONTEXT(context),
				  d->context);
			return DRM_OS_ERR(EINVAL);
		}
		q = dev->queuelist[DRM_KERNEL_CONTEXT];
		while_locked = 1;
	} else {
		q = dev->queuelist[d->context];
	}

	atomic_inc(&q->use_count);
	if (atomic_read(&q->block_write)) {
#ifdef __linux__
		add_wait_queue(&q->write_queue, &entry);
		atomic_inc(&q->block_count);
		for (;;) {
			current->state = TASK_INTERRUPTIBLE;
			if (!atomic_read(&q->block_write)) break;
			schedule();
			if (signal_pending(current)) {
				atomic_dec(&q->use_count);
				remove_wait_queue(&q->write_queue, &entry);
				return DRM_OS_ERR(EINTR);
			}
		}
		atomic_dec(&q->block_count);
		current->state = TASK_RUNNING;
		remove_wait_queue(&q->write_queue, &entry);
#endif /* __linux__ */
#ifdef __FreeBSD__
		atomic_inc(&q->block_count);
		for (;;) {
			if (!atomic_read(&q->block_write)) break;
			error = tsleep(&q->block_write, PZERO|PCATCH,
				       "dmawr", 0);
			if (error) {
				atomic_dec(&q->use_count);
				return error;
			}
		}
		atomic_dec(&q->block_count);
#endif /* __FreeBSD__ */
	}

	for (i = 0; i < d->send_count; i++) {
		idx = d->send_indices[i];
		if (idx < 0 || idx >= dma->buf_count) {
			atomic_dec(&q->use_count);
			DRM_ERROR("Index %d (of %d max)\n",
				  d->send_indices[i], dma->buf_count - 1);
			return DRM_OS_ERR(EINVAL);
		}
		buf = dma->buflist[ idx ];
		if (buf->pid != DRM_OS_CURRENTPID) {
			atomic_dec(&q->use_count);
			DRM_ERROR("Process %d using buffer owned by %d\n",
				  DRM_OS_CURRENTPID, buf->pid);
			return DRM_OS_ERR(EINVAL);
		}
		if (buf->list != DRM_LIST_NONE) {
			atomic_dec(&q->use_count);
			DRM_ERROR("Process %d using buffer %d on list %d\n",
				  DRM_OS_CURRENTPID, buf->idx, buf->list);
			return DRM_OS_ERR(EINVAL);
		}
		buf->used	  = d->send_sizes[i];
		buf->while_locked = while_locked;
		buf->context	  = d->context;
		if (!buf->used) {
			DRM_ERROR("Queueing 0 length buffer\n");
		}
		if (buf->pending) {
			atomic_dec(&q->use_count);
			DRM_ERROR("Queueing pending buffer:"
				  " buffer %d, offset %d\n",
				  d->send_indices[i], i);
			return DRM_OS_ERR(EINVAL);
		}
		if (buf->waiting) {
			atomic_dec(&q->use_count);
			DRM_ERROR("Queueing waiting buffer:"
				  " buffer %d, offset %d\n",
				  d->send_indices[i], i);
			return DRM_OS_ERR(EINVAL);
		}
		buf->waiting = 1;
		if (atomic_read(&q->use_count) == 1
		    || atomic_read(&q->finalization)) {
			DRM(free_buffer)(dev, buf);
		} else {
			DRM(waitlist_put)(&q->waitlist, buf);
			atomic_inc(&q->total_queued);
		}
	}
	atomic_dec(&q->use_count);

	return 0;
}

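/*
 * For reference, a minimal sketch of the drm_dma_t a client would pass
 * down through the DRM_IOCTL_DMA path to reach this function (field
 * names per drm.h; the surrounding ioctl plumbing and variable names
 * are illustrative):
 *
 *	drm_dma_t d;
 *	memset(&d, 0, sizeof(d));
 *	d.context      = ctx;		// context acquired earlier
 *	d.send_count   = 1;
 *	d.send_indices = &idx;		// index of a previously granted buffer
 *	d.send_sizes   = &used;		// bytes actually filled in
 *	ioctl(fd, DRM_IOCTL_DMA, &d);
 */
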
static int DRM(dma_get_buffers_of_order)(drm_device_t *dev, drm_dma_t *d,
					 int order)
{
	int		  i;
	drm_buf_t	  *buf;
	drm_device_dma_t  *dma = dev->dma;

	for (i = d->granted_count; i < d->request_count; i++) {
		buf = DRM(freelist_get)(&dma->bufs[order].freelist,
					d->flags & _DRM_DMA_WAIT);
		if (!buf) break;
		if (buf->pending || buf->waiting) {
			DRM_ERROR("Free buffer %d in use by %d (w%d, p%d)\n",
				  buf->idx,
				  buf->pid,
				  buf->waiting,
				  buf->pending);
		}
		buf->pid = DRM_OS_CURRENTPID;
		if (DRM_OS_COPYTOUSR(&d->request_indices[i],
				     &buf->idx,
				     sizeof(buf->idx)))
			return DRM_OS_ERR(EFAULT);

		if (DRM_OS_COPYTOUSR(&d->request_sizes[i],
				     &buf->total,
				     sizeof(buf->total)))
			return DRM_OS_ERR(EFAULT);

		++d->granted_count;
	}
	return 0;
}

int DRM(dma_get_buffers)(drm_device_t *dev, drm_dma_t *dma)
{
	int order;
	int retcode = 0;
	int tmp_order;

	order = DRM(order)(dma->request_size);

	dma->granted_count = 0;
	retcode		   = DRM(dma_get_buffers_of_order)(dev, dma, order);

	if (dma->granted_count < dma->request_count
	    && (dma->flags & _DRM_DMA_SMALLER_OK)) {
		for (tmp_order = order - 1;
		     !retcode
			     && dma->granted_count < dma->request_count
			     && tmp_order >= DRM_MIN_ORDER;
		     --tmp_order) {
			retcode = DRM(dma_get_buffers_of_order)(dev, dma,
								tmp_order);
		}
	}

	if (dma->granted_count < dma->request_count
	    && (dma->flags & _DRM_DMA_LARGER_OK)) {
		for (tmp_order = order + 1;
		     !retcode
			     && dma->granted_count < dma->request_count
			     && tmp_order <= DRM_MAX_ORDER;
		     ++tmp_order) {
			retcode = DRM(dma_get_buffers_of_order)(dev, dma,
								tmp_order);
		}
	}
	return 0;
}

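/*
 * Example request showing the fallback above: ask for four buffers of
 * roughly 16KB, allowing smaller or larger orders when the exact pool
 * is exhausted (a sketch; field names per drm.h, values illustrative):
 *
 *	drm_dma_t d = { 0 };
 *	d.request_count   = 4;
 *	d.request_size    = 16384;	// DRM(order) rounds this to an order
 *	d.request_indices = idx;	// int idx[4], filled in by the kernel
 *	d.request_sizes   = sizes;	// int sizes[4], actual buffer totals
 *	d.flags           = _DRM_DMA_SMALLER_OK | _DRM_DMA_LARGER_OK;
 */
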
#endif /* __HAVE_OLD_DMA */

#if __HAVE_DMA_IRQ

int DRM(irq_install)( drm_device_t *dev, int irq )
{
	int retcode;
#ifdef __FreeBSD__
	int rid;
#endif /* __FreeBSD__ */

	if ( !irq )
		return DRM_OS_ERR(EINVAL);

	DRM_OS_LOCK;
	if ( dev->irq ) {
		DRM_OS_UNLOCK;
		return DRM_OS_ERR(EBUSY);
	}
	dev->irq = irq;
	DRM_OS_UNLOCK;

	DRM_DEBUG( "%s: irq=%d\n", __func__, irq );

	dev->context_flag = 0;
	dev->interrupt_flag = 0;
	dev->dma_flag = 0;

	dev->dma->next_buffer = NULL;
	dev->dma->next_queue = NULL;
	dev->dma->this_buffer = NULL;

#if __HAVE_DMA_IRQ_BH
#ifdef __linux__
	INIT_LIST_HEAD( &dev->tq.list );
	dev->tq.sync = 0;
	dev->tq.routine = DRM(dma_immediate_bh);
	dev->tq.data = dev;
#endif /* __linux__ */
#ifdef __FreeBSD__
	TASK_INIT(&dev->task, 0, DRM(dma_immediate_bh), dev);
#endif /* __FreeBSD__ */
#endif

				/* Before installing handler */
	DRIVER_PREINSTALL();

				/* Install handler */
#ifdef __linux__
	retcode = request_irq( dev->irq, DRM(dma_service),
			       DRM_IRQ_TYPE, dev->devname, dev );
#endif /* __linux__ */
#ifdef __FreeBSD__
	rid = 0;
	dev->irqr = bus_alloc_resource(dev->device, SYS_RES_IRQ, &rid,
				       0, ~0, 1, RF_SHAREABLE);
	if (!dev->irqr)
		return DRM_OS_ERR(ENOENT);

	retcode = bus_setup_intr(dev->device, dev->irqr, INTR_TYPE_TTY,
				 DRM(dma_service), dev, &dev->irqh);
#endif /* __FreeBSD__ */
	if ( retcode ) {
		DRM_OS_LOCK;
#ifdef __FreeBSD__
		bus_release_resource(dev->device, SYS_RES_IRQ, 0, dev->irqr);
#endif /* __FreeBSD__ */
		dev->irq = 0;
		DRM_OS_UNLOCK;
		return retcode;
	}

				/* After installing handler */
	DRIVER_POSTINSTALL();

	return 0;
}

int DRM(irq_uninstall)( drm_device_t *dev )
{
	int irq;

	DRM_OS_LOCK;
	irq = dev->irq;
	dev->irq = 0;
	DRM_OS_UNLOCK;

	if ( !irq )
		return DRM_OS_ERR(EINVAL);

	DRM_DEBUG( "%s: irq=%d\n", __func__, irq );

	DRIVER_PREUNINSTALL();

#ifdef __linux__
	free_irq( irq, dev );
#endif /* __linux__ */
#ifdef __FreeBSD__
	bus_teardown_intr(dev->device, dev->irqr, dev->irqh);
	bus_release_resource(dev->device, SYS_RES_IRQ, 0, dev->irqr);
#endif /* __FreeBSD__ */

	return 0;
}

int DRM(control)( DRM_OS_IOCTL )
{
	DRM_OS_DEVICE;
	drm_control_t ctl;

	DRM_OS_KRNFROMUSR( ctl, (drm_control_t *) data, sizeof(ctl) );

	switch ( ctl.func ) {
	case DRM_INST_HANDLER:
		return DRM(irq_install)( dev, ctl.irq );
	case DRM_UNINST_HANDLER:
		return DRM(irq_uninstall)( dev );
	default:
		return DRM_OS_ERR(EINVAL);
	}
}

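/*
 * Corresponding userspace call, sketched (DRM_IOCTL_CONTROL and
 * drm_control_t are from drm.h; error handling omitted):
 *
 *	drm_control_t ctl;
 *	ctl.func = DRM_INST_HANDLER;
 *	ctl.irq  = irq;
 *	ioctl(fd, DRM_IOCTL_CONTROL, &ctl);
 */
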
#endif /* __HAVE_DMA_IRQ */

#endif /* __HAVE_DMA */