1 /* gamma_dma.c -- DMA support for GMX 2000 -*- linux-c -*-
2 * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
28 * Rickard E. (Rik) Faith <faith@valinux.com>
35 #define __NO_VERSION__
36 #include <linux/interrupt.h> /* For task queue support */
37 #include <linux/delay.h>
38 #endif /* __linux__ */
40 #include "dev/drm/gamma.h"
41 #include "dev/drm/drmP.h"
42 #include "dev/drm/gamma_drv.h"
/*
 * gamma_dma_dispatch -- kick off one DMA transfer on the Gamma engine.
 *
 * Writes the buffer's physical address (virtual->physical via
 * DRM_OS_VTOPHYS) into GAMMA_DMAADDRESS, busy-waits until
 * GAMMA_GCOMMANDSTATUS reads 4 (presumably "command unit idle" -- confirm
 * against the GLINT/Gamma register manual), then writes the transfer
 * length in 32-bit words to GAMMA_DMACOUNT, which starts the transfer.
 *
 * NOTE(review): this is a truncated listing -- original line numbers jump
 * (45->48, 49->51, 52->54); the second parameter line (the `length'
 * argument), the braces, and the spin-loop body are missing here.
 */
45 static __inline__ void gamma_dma_dispatch(drm_device_t *dev, unsigned long address,
48 drm_gamma_private_t *dev_priv =
49 (drm_gamma_private_t *)dev->dev_private;
51 GAMMA_WRITE(GAMMA_DMAADDRESS, DRM_OS_VTOPHYS((void *)address));
/* Busy-wait for the command unit; loop body dropped from this listing. */
52 while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4)
/* length is in bytes; the engine counts 32-bit words, hence /4. */
54 GAMMA_WRITE(GAMMA_DMACOUNT, length / 4);
/*
 * gamma_dma_quiescent_single -- wait until a single-MX board is idle.
 *
 * Drains the DMA engine (spin on GAMMA_DMACOUNT), waits for input FIFO
 * space, then writes a sync command (FILTERMODE bit 10 + SYNC register)
 * and reads the output FIFO until the sync tag comes back, proving all
 * prior commands have retired.
 *
 * NOTE(review): truncated listing -- braces, spin-loop bodies and the
 * `do {' opener for the final loop (line 73's `} while') are missing.
 */
57 void gamma_dma_quiescent_single(drm_device_t *dev)
59 drm_gamma_private_t *dev_priv =
60 (drm_gamma_private_t *)dev->dev_private;
/* Wait for any in-flight DMA transfer to complete. */
62 while (GAMMA_READ(GAMMA_DMACOUNT))
/* Need room for the two register writes below (3 looks conservative --
 * confirm FIFO-space units against the hardware manual). */
64 while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
67 GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
68 GAMMA_WRITE(GAMMA_SYNC, 0);
/* Pop output-FIFO words until the sync tag appears. */
71 while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
73 } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
/*
 * gamma_dma_quiescent_dual -- wait until a dual-MX board is idle.
 *
 * Same scheme as gamma_dma_quiescent_single(), but first broadcasts the
 * sync command to both MX rasterizers (BROADCASTMASK = 3), then waits
 * for the sync tag from each MX's output FIFO in turn; the second MX's
 * registers sit at a +0x10000 offset.
 *
 * NOTE(review): truncated listing -- braces, spin bodies and the `do {'
 * openers for the two tag-wait loops are missing.
 */
76 void gamma_dma_quiescent_dual(drm_device_t *dev)
78 drm_gamma_private_t *dev_priv =
79 (drm_gamma_private_t *)dev->dev_private;
/* Drain any in-flight DMA first. */
81 while (GAMMA_READ(GAMMA_DMACOUNT))
83 while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
/* Send the following writes to both MX units. */
86 GAMMA_WRITE(GAMMA_BROADCASTMASK, 3);
88 GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
89 GAMMA_WRITE(GAMMA_SYNC, 0);
91 /* Read from first MX */
93 while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
95 } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
97 /* Read from second MX */
99 while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000))
101 } while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG);
/*
 * gamma_dma_ready -- block (busy-wait) until the DMA engine is idle,
 * i.e. GAMMA_DMACOUNT has drained to zero.
 *
 * NOTE(review): truncated listing -- braces and the spin-loop body are
 * missing from this excerpt.
 */
104 void gamma_dma_ready(drm_device_t *dev)
106 drm_gamma_private_t *dev_priv =
107 (drm_gamma_private_t *)dev->dev_private;
109 while (GAMMA_READ(GAMMA_DMACOUNT))
/*
 * gamma_dma_is_ready -- non-blocking idle test.
 *
 * Returns non-zero when GAMMA_DMACOUNT is zero, i.e. no DMA transfer is
 * in flight. Used by the IRQ handler and dispatch paths before touching
 * the engine.
 *
 * NOTE(review): truncated listing -- braces missing from this excerpt.
 */
113 static __inline__ int gamma_dma_is_ready(drm_device_t *dev)
115 drm_gamma_private_t *dev_priv =
116 (drm_gamma_private_t *)dev->dev_private;
118 return !GAMMA_READ(GAMMA_DMACOUNT);
/*
 * gamma_dma_service -- interrupt service routine for the Gamma DMA engine.
 *
 * Bumps the IRQ statistics counter, re-arms the delay timer, acknowledges
 * the interrupt in GCOMMANDINTFLAGS/GINTFLAGS, frees the just-completed
 * buffer if the engine is idle, and (on Linux) queues the immediate
 * bottom half to dispatch the next buffer.
 *
 * NOTE(review): truncated listing -- the `#ifdef __linux__' lines that
 * pair with the visible `#endif', several closing braces, and the
 * early-out path around line 138 are missing from this excerpt.
 */
121 void gamma_dma_service( DRM_OS_IRQ_ARGS)
123 drm_device_t *dev = (drm_device_t *)device;
124 drm_device_dma_t *dma = dev->dma;
125 drm_gamma_private_t *dev_priv =
126 (drm_gamma_private_t *)dev->dev_private;
128 atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */
/* 0xc350/2 = 25000 ticks; the "0x05S" comment presumably means ~0.05s --
 * confirm timer tick rate against the hardware manual. */
129 GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0x05S */
/* Acknowledge/clear the DMA-complete interrupt sources. */
130 GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8);
131 GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001);
132 if (gamma_dma_is_ready(dev)) {
133 /* Free previous buffer */
/* dma_flag bit 0 is the dispatch mutual-exclusion bit; if someone else
 * holds it, just return and let them finish. */
134 if (test_and_set_bit(0, &dev->dma_flag)) return;
135 if (dma->this_buffer) {
136 gamma_free_buffer(dev, dma->this_buffer);
137 dma->this_buffer = NULL;
139 clear_bit(0, &dev->dma_flag);
142 /* XXX: Does FreeBSD need something here?*/
143 /* Dispatch new buffer */
/* Linux 2.4-era bottom-half scheduling: run gamma_dma_immediate_bh soon. */
144 queue_task(&dev->tq, &tq_immediate);
145 mark_bh(IMMEDIATE_BH);
146 #endif /* __linux__ */
150 /* Only called by gamma_dma_schedule. */
/*
 * gamma_do_dma -- try to dispatch dma->next_buffer to the hardware.
 *
 * Serializes on dev->dma_flag bit 0, validates the pending buffer
 * (rejects a missing buffer, a DRM_LIST_RECLAIM buffer, and -- per the
 * error message around line 189 -- a zero-length buffer), checks the
 * engine is idle, takes the hardware lock unless `locked' or the buffer
 * is marked while_locked, performs a context switch when the buffer's
 * context differs from dev->last_context, then programs the transfer via
 * gamma_dma_dispatch() and updates DMA statistics.
 *
 * Returns 0 on successful dispatch, or DRM_OS_ERR(EBUSY)/(EINVAL) when
 * the engine/lock is busy or the buffer is unusable.
 *
 * NOTE(review): truncated listing -- declarations of `buf'/`retcode',
 * the assignment of `length', several braces, `#endif's for the
 * DRM_DMA_HISTOGRAM sections, and the final `return retcode;' are
 * missing from this excerpt.
 */
151 static int gamma_do_dma(drm_device_t *dev, int locked)
153 unsigned long address;
154 unsigned long length;
157 drm_device_dma_t *dma = dev->dma;
158 #if DRM_DMA_HISTOGRAM
159 cycles_t dma_start, dma_stop;
/* Single-dispatcher guard: only one gamma_do_dma in flight at a time. */
162 if (test_and_set_bit(0, &dev->dma_flag)) return DRM_OS_ERR(EBUSY);
164 #if DRM_DMA_HISTOGRAM
165 dma_start = get_cycles();
168 if (!dma->next_buffer) {
169 DRM_ERROR("No next_buffer\n");
170 clear_bit(0, &dev->dma_flag);
171 return DRM_OS_ERR(EINVAL);
174 buf = dma->next_buffer;
175 address = (unsigned long)buf->address;
178 DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
179 buf->context, buf->idx, length);
/* Reclaimed buffers (owner exited) are freed, never dispatched. */
181 if (buf->list == DRM_LIST_RECLAIM) {
182 gamma_clear_next_buffer(dev);
183 gamma_free_buffer(dev, buf);
184 clear_bit(0, &dev->dma_flag);
185 return DRM_OS_ERR(EINVAL);
189 DRM_ERROR("0 length buffer\n");
190 gamma_clear_next_buffer(dev);
191 gamma_free_buffer(dev, buf);
192 clear_bit(0, &dev->dma_flag);
/* Leave the buffer queued and retry later if the engine is busy. */
196 if (!gamma_dma_is_ready(dev)) {
197 clear_bit(0, &dev->dma_flag);
198 return DRM_OS_ERR(EBUSY);
/* A while_locked buffer requires the caller to already hold the HW lock. */
201 if (buf->while_locked) {
202 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
203 DRM_ERROR("Dispatching buffer %d from pid %d"
204 " \"while locked\", but no lock held\n",
/* Otherwise take the lock on behalf of the kernel context. */
208 if (!locked && !gamma_lock_take(&dev->lock.hw_lock->lock,
209 DRM_KERNEL_CONTEXT)) {
210 clear_bit(0, &dev->dma_flag);
211 return DRM_OS_ERR(EBUSY);
/* Hardware context switch needed unless the queue preserves context. */
215 if (dev->last_context != buf->context
216 && !(dev->queuelist[buf->context]->flags
217 & _DRM_CONTEXT_PRESERVED)) {
218 /* PRE: dev->last_context != buf->context */
219 if (DRM(context_switch)(dev, dev->last_context,
221 DRM(clear_next_buffer)(dev);
222 DRM(free_buffer)(dev, buf);
224 retcode = DRM_OS_ERR(EBUSY);
227 /* POST: we will wait for the context
228 switch and will dispatch on a later call
229 when dev->last_context == buf->context.
230 NOTE WE HOLD THE LOCK THROUGHOUT THIS
234 gamma_clear_next_buffer(dev);
237 buf->list = DRM_LIST_PEND;
238 #if DRM_DMA_HISTOGRAM
239 buf->time_dispatched = get_cycles();
242 gamma_dma_dispatch(dev, address, length);
/* Free the previously dispatched buffer; the new one becomes current. */
243 gamma_free_buffer(dev, dma->this_buffer);
244 dma->this_buffer = buf;
246 atomic_inc(&dev->counts[7]); /* _DRM_STAT_DMA */
247 atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */
/* Drop the lock if we took it ourselves above. */
249 if (!buf->while_locked && !dev->context_flag && !locked) {
250 if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
251 DRM_KERNEL_CONTEXT)) {
257 clear_bit(0, &dev->dma_flag);
259 #if DRM_DMA_HISTOGRAM
260 dma_stop = get_cycles();
261 atomic_inc(&dev->histo.dma[gamma_histogram_slot(dma_stop - dma_start)]);
/*
 * gamma_dma_timer_bh -- timer bottom-half trampoline: the timer API
 * passes the device as an unsigned long, so cast back and reschedule
 * DMA (not holding the lock, hence locked = 0).
 *
 * NOTE(review): truncated listing -- braces missing from this excerpt.
 */
267 static void gamma_dma_timer_bh(unsigned long dev)
269 gamma_dma_schedule((drm_device_t *)dev, 0);
/*
 * gamma_dma_immediate_bh -- immediate bottom half queued by
 * gamma_dma_service(); simply reschedules DMA with locked = 0.
 *
 * NOTE(review): truncated listing -- braces missing from this excerpt.
 */
272 void gamma_dma_immediate_bh(DRM_OS_TASKQUEUE_ARGS)
274 gamma_dma_schedule(dev, 0);
/*
 * gamma_dma_schedule -- top-level DMA scheduler.
 *
 * Serializes on dev->interrupt_flag bit 0 (a concurrent caller bumps the
 * _DRM_STAT_MISSED counter and returns EBUSY). First retries any
 * previously selected-but-unsent dma->next_buffer, then repeatedly picks
 * a queue via gamma_select_queue(), pulls a buffer off its waitlist, and
 * dispatches through gamma_do_dma(). Loops back (`again', per the goto
 * at line 337) when new interrupts were missed while the engine is ready.
 *
 * NOTE(review): truncated listing -- declarations of `next'/`q'/`buf'/
 * `retcode'/`processed'/`missed', the `again:' label, the `do {' opener
 * for the selection loop, several braces and the final return are
 * missing from this excerpt.
 */
277 int gamma_dma_schedule(drm_device_t *dev, int locked)
286 drm_device_dma_t *dma = dev->dma;
287 #if DRM_DMA_HISTOGRAM
288 cycles_t schedule_start;
/* Only one scheduler instance at a time; count contention as "missed". */
291 if (test_and_set_bit(0, &dev->interrupt_flag)) {
293 atomic_inc(&dev->counts[10]); /* _DRM_STAT_MISSED */
294 return DRM_OS_ERR(EBUSY);
/* Snapshot the missed count so we can detect IRQs arriving mid-schedule. */
296 missed = atomic_read(&dev->counts[10]);
298 #if DRM_DMA_HISTOGRAM
299 schedule_start = get_cycles();
/* A pending context switch owns the engine; bail out. */
303 if (dev->context_flag) {
304 clear_bit(0, &dev->interrupt_flag);
305 return DRM_OS_ERR(EBUSY);
307 if (dma->next_buffer) {
308 /* Unsent buffer that was previously
309 selected, but that couldn't be sent
310 because the lock could not be obtained
311 or the DMA engine wasn't ready. Try
313 if (!(retcode = gamma_do_dma(dev, locked))) ++processed;
/* Pull buffers from the highest-priority ready queue until one sticks. */
316 next = gamma_select_queue(dev, gamma_dma_timer_bh);
318 q = dev->queuelist[next];
319 buf = gamma_waitlist_get(&q->waitlist);
320 dma->next_buffer = buf;
/* Discard reclaimed buffers and keep looking. */
322 if (buf && buf->list == DRM_LIST_RECLAIM) {
323 gamma_clear_next_buffer(dev);
324 gamma_free_buffer(dev, buf);
327 } while (next >= 0 && !dma->next_buffer);
328 if (dma->next_buffer) {
329 if (!(retcode = gamma_do_dma(dev, locked))) {
/* If interrupts were missed while we ran and the engine is idle,
 * go around again rather than dropping work on the floor. */
336 if (missed != atomic_read(&dev->counts[10])) {
337 if (gamma_dma_is_ready(dev)) goto again;
339 if (processed && gamma_dma_is_ready(dev)) {
345 clear_bit(0, &dev->interrupt_flag);
347 #if DRM_DMA_HISTOGRAM
348 atomic_inc(&dev->histo.schedule[gamma_histogram_slot(get_cycles()
/*
 * gamma_dma_priority -- synchronously dispatch user buffers, bypassing
 * the scheduler (the _DRM_DMA_PRIORITY ioctl path).
 *
 * Disables the interrupt-driven scheduler (spins on dev->interrupt_flag),
 * takes the hardware lock unless _DRM_DMA_WHILE_LOCKED, then for each
 * buffer index in d->send_indices: validates the index, the owning pid
 * and the buffer's list state, stamps used/context/while_locked, performs
 * a context switch if needed, and dispatches directly via
 * gamma_dma_dispatch(). The previous buffer (`last_buf') is freed after
 * each dispatch; the lock and interrupt_flag are released on exit.
 *
 * Per-OS blocking differs: Linux uses wait queues + signal_pending(),
 * FreeBSD uses tsleep() with PCATCH.
 *
 * NOTE(review): truncated listing -- declarations of `i'/`idx'/`buf'/
 * `retcode'/`must_free'/`never', the paired `#ifdef __linux__'/
 * `#ifdef __FreeBSD__' openers for the visible `#endif's, schedule()
 * calls, several braces, the zero-length/pending/waiting condition lines,
 * `goto cleanup' targets and the final return are all missing from this
 * excerpt.
 */
354 static int gamma_dma_priority(drm_device_t *dev, drm_dma_t *d)
356 unsigned long address;
357 unsigned long length;
363 drm_buf_t *last_buf = NULL;
364 drm_device_dma_t *dma = dev->dma;
366 DECLARE_WAITQUEUE(entry, current);
367 #endif /* __linux__ */
370 #endif /* __FreeBSD__ */
372 /* Turn off interrupt handling */
373 while (test_and_set_bit(0, &dev->interrupt_flag)) {
376 if (signal_pending(current)) return DRM_OS_ERR(EINTR);
377 #endif /* __linux__ */
379 retcode = tsleep(&never, PZERO|PCATCH, "gamp1", 1);
382 #endif /* __FreeBSD__ */
/* Unless the caller holds the lock for the whole operation, take it. */
384 if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
385 while (!gamma_lock_take(&dev->lock.hw_lock->lock,
386 DRM_KERNEL_CONTEXT)) {
389 if (signal_pending(current)) {
390 clear_bit(0, &dev->interrupt_flag);
391 return DRM_OS_ERR(EINTR);
393 #endif /* __linux__ */
395 retcode = tsleep(&never, PZERO|PCATCH, "gamp2", 1);
398 #endif /* __FreeBSD__ */
/* Validate and dispatch each requested buffer in order. */
403 for (i = 0; i < d->send_count; i++) {
404 idx = d->send_indices[i];
405 if (idx < 0 || idx >= dma->buf_count) {
406 DRM_ERROR("Index %d (of %d max)\n",
407 d->send_indices[i], dma->buf_count - 1);
410 buf = dma->buflist[ idx ];
/* Only the owning process may send its buffers. */
411 if (buf->pid != DRM_OS_CURRENTPID) {
412 DRM_ERROR("Process %d using buffer owned by %d\n",
413 DRM_OS_CURRENTPID, buf->pid);
414 retcode = DRM_OS_ERR(EINVAL);
/* Buffer must not already be on any list (queued/pending/etc.). */
417 if (buf->list != DRM_LIST_NONE) {
418 DRM_ERROR("Process %d using %d's buffer on list %d\n",
419 DRM_OS_CURRENTPID, buf->pid, buf->list);
420 retcode = DRM_OS_ERR(EINVAL);
423 /* This isn't a race condition on
424 buf->list, since our concern is the
425 buffer reclaim during the time the
426 process closes the /dev/drm? handle, so
427 it can't also be doing DMA. */
428 buf->list = DRM_LIST_PRIO;
429 buf->used = d->send_sizes[i];
430 buf->context = d->context;
431 buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
432 address = (unsigned long)buf->address;
435 DRM_ERROR("0 length buffer\n");
438 DRM_ERROR("Sending pending buffer:"
439 " buffer %d, offset %d\n",
440 d->send_indices[i], i);
441 retcode = DRM_OS_ERR(EINVAL);
445 DRM_ERROR("Sending waiting buffer:"
446 " buffer %d, offset %d\n",
447 d->send_indices[i], i);
448 retcode = DRM_OS_ERR(EINVAL);
/* Switch hardware contexts when needed (same rule as gamma_do_dma). */
453 if (dev->last_context != buf->context
454 && !(dev->queuelist[buf->context]->flags
455 & _DRM_CONTEXT_PRESERVED)) {
457 add_wait_queue(&dev->context_wait, &entry);
458 current->state = TASK_INTERRUPTIBLE;
459 #endif /* __linux__ */
460 /* PRE: dev->last_context != buf->context */
461 DRM(context_switch)(dev, dev->last_context,
463 /* POST: we will wait for the context
464 switch and will dispatch on a later call
465 when dev->last_context == buf->context.
466 NOTE WE HOLD THE LOCK THROUGHOUT THIS
470 current->state = TASK_RUNNING;
471 remove_wait_queue(&dev->context_wait, &entry);
472 if (signal_pending(current)) {
473 retcode = DRM_OS_ERR(EINTR);
476 #endif /* __linux__ */
478 retcode = tsleep(&dev->context_wait, PZERO|PCATCH,
482 #endif /* __FreeBSD__ */
483 if (dev->last_context != buf->context) {
484 DRM_ERROR("Context mismatch: %d %d\n",
490 #if DRM_DMA_HISTOGRAM
491 buf->time_queued = get_cycles();
492 buf->time_dispatched = buf->time_queued;
494 gamma_dma_dispatch(dev, address, length);
495 atomic_inc(&dev->counts[9]); /* _DRM_STAT_SPECIAL */
496 atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */
/* Free the previously dispatched buffer before tracking this one. */
499 gamma_free_buffer(dev, last_buf);
/* Cleanup path: drain the engine, free the last buffer, drop the lock. */
507 gamma_dma_ready(dev);
508 gamma_free_buffer(dev, last_buf);
511 if (must_free && !dev->context_flag) {
512 if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
513 DRM_KERNEL_CONTEXT)) {
517 clear_bit(0, &dev->interrupt_flag);
/*
 * gamma_dma_send_buffers -- enqueue user buffers through the scheduler
 * (the normal, non-priority ioctl path).
 *
 * If _DRM_DMA_BLOCK is set, registers interest in the LAST buffer of the
 * batch before enqueueing (Linux: wait-queue entry; FreeBSD: dma_wait
 * refcount), so completion of that buffer means the whole batch is done.
 * Then enqueues via gamma_dma_enqueue(), kicks gamma_dma_schedule(), and
 * -- when blocking -- sleeps until last_buf is neither waiting nor
 * pending, finally freeing it if no other waiter remains.
 *
 * NOTE(review): truncated listing -- `retcode' declaration, the paired
 * `#ifdef __linux__'/`#ifdef __FreeBSD__' openers for the visible
 * `#endif's, the `for (;;)'/schedule() wait-loop scaffolding, several
 * braces, parts of the timeout/diagnostic block near line 594 and the
 * final return are missing from this excerpt.
 */
521 static int gamma_dma_send_buffers(drm_device_t *dev, drm_dma_t *d)
523 drm_buf_t *last_buf = NULL;
525 drm_device_dma_t *dma = dev->dma;
527 DECLARE_WAITQUEUE(entry, current);
528 #endif /* __linux__ */
/* Register as a waiter on the batch's final buffer BEFORE enqueueing,
 * to avoid missing a completion that races with the schedule below. */
530 if (d->flags & _DRM_DMA_BLOCK) {
531 last_buf = dma->buflist[d->send_indices[d->send_count-1]];
533 add_wait_queue(&last_buf->dma_wait, &entry);
534 #endif /* __linux__ */
536 atomic_inc(&last_buf->dma_wait);
537 #endif /* __FreeBSD__ */
/* Enqueue failed: undo the waiter registration and propagate the error. */
540 if ((retcode = gamma_dma_enqueue(dev, d))) {
541 if (d->flags & _DRM_DMA_BLOCK)
543 remove_wait_queue(&last_buf->dma_wait, &entry);
544 #endif /* __linux__ */
546 atomic_dec(&last_buf->dma_wait);
547 #endif /* __FreeBSD__ */
551 gamma_dma_schedule(dev, 0);
553 if (d->flags & _DRM_DMA_BLOCK) {
554 DRM_DEBUG("%d waiting\n", DRM_OS_CURRENTPID);
/* Linux wait loop: sleep interruptibly until the buffer completes. */
557 current->state = TASK_INTERRUPTIBLE;
558 if (!last_buf->waiting && !last_buf->pending)
559 break; /* finished */
561 if (signal_pending(current)) {
562 retcode = DRM_OS_ERR(EINTR); /* Can't restart */
566 current->state = TASK_RUNNING;
567 remove_wait_queue(&last_buf->dma_wait, &entry);
568 #endif /* __linux__ */
/* FreeBSD wait loop: tsleep on the buffer's dma_wait channel. */
571 retcode = tsleep(&last_buf->dma_wait, PZERO|PCATCH,
573 if (!last_buf->waiting
574 && !last_buf->pending)
575 break; /* finished */
579 atomic_dec(&last_buf->dma_wait);
580 #endif /* __FreeBSD__ */
581 DRM_DEBUG("%d running\n", DRM_OS_CURRENTPID);
583 || (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) {
/* Only free the buffer if no one else is still waiting on it. */
585 if (!waitqueue_active(&last_buf->dma_wait)) {
586 #endif /* __linux__ */
588 if (!last_buf->dma_wait) {
589 #endif /* __FreeBSD__ */
590 gamma_free_buffer(dev, last_buf);
/* Diagnostic dump of the buffer/queue state on an abnormal outcome. */
594 DRM_ERROR("ctx%d w%d p%d c%d i%d l%d %d/%d\n",
598 DRM_WAITCOUNT(dev, d->context),
/*
 * gamma_dma -- the DRM_IOCTL_DMA entry point for the Gamma driver.
 *
 * Copies a drm_dma_t request in from userland, bounds-checks send_count
 * and request_count against dma->buf_count, dispatches buffers either
 * through gamma_dma_priority() (_DRM_DMA_PRIORITY) or
 * gamma_dma_send_buffers(), optionally acquires fresh buffers for the
 * caller via gamma_dma_get_buffers(), and copies the (updated) request
 * -- including granted_count -- back out to userland.
 *
 * NOTE(review): truncated listing -- declarations of `d'/`retcode', the
 * opening brace, the `else' for the priority/send split, several braces,
 * and the trailing `return retcode;' are not visible; the function also
 * appears to continue past the end of this excerpt.
 */
608 int gamma_dma( DRM_OS_IOCTL )
611 drm_device_dma_t *dma = dev->dma;
/* Copy the request structure in from user space. */
615 DRM_OS_KRNFROMUSR(d, (drm_dma_t *) data, sizeof(d));
617 if (d.send_count < 0 || d.send_count > dma->buf_count) {
618 DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
619 DRM_OS_CURRENTPID, d.send_count, dma->buf_count);
620 return DRM_OS_ERR(EINVAL);
623 if (d.request_count < 0 || d.request_count > dma->buf_count) {
624 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
625 DRM_OS_CURRENTPID, d.request_count, dma->buf_count);
626 return DRM_OS_ERR(EINVAL);
/* Priority requests bypass the scheduler; everything else enqueues. */
630 if (d.flags & _DRM_DMA_PRIORITY)
631 retcode = gamma_dma_priority(dev, &d);
633 retcode = gamma_dma_send_buffers(dev, &d);
/* On success, also hand fresh buffers back to the caller if asked. */
638 if (!retcode && d.request_count) {
639 retcode = gamma_dma_get_buffers(dev, &d);
642 DRM_DEBUG("%d returning, granted = %d\n",
643 DRM_OS_CURRENTPID, d.granted_count);
644 DRM_OS_KRNTOUSR((drm_dma_t *) data, d, sizeof(d));