/* Source: FreeBSD/FreeBSD.git — sys/dev/drm/gamma_dma.c
 * (web-viewer header removed; the commit was generated by cvs2svn to
 * compensate for changes in r103423) */
1 /* gamma_dma.c -- DMA support for GMX 2000 -*- linux-c -*-
2  * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
3  *
4  * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6  * All Rights Reserved.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the "Software"),
10  * to deal in the Software without restriction, including without limitation
11  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12  * and/or sell copies of the Software, and to permit persons to whom the
13  * Software is furnished to do so, subject to the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the next
16  * paragraph) shall be included in all copies or substantial portions of the
17  * Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25  * DEALINGS IN THE SOFTWARE.
26  *
27  * Authors:
28  *    Rickard E. (Rik) Faith <faith@valinux.com>
29  *
30  * $FreeBSD$
31  */
32
33
34 #ifdef __linux__
35 #define __NO_VERSION__
36 #include <linux/interrupt.h>    /* For task queue support */
37 #include <linux/delay.h>
38 #endif /* __linux__ */
39
40 #include "dev/drm/gamma.h"
41 #include "dev/drm/drmP.h"
42 #include "dev/drm/gamma_drv.h"
43
44
/* Kick off a DMA transfer to the Gamma engine.
 *
 * Programs the physical address of the buffer, busy-waits until the
 * command status register reads 4 (presumably "ready to accept a new
 * transfer" — hardware-defined, TODO confirm against GMX docs), then
 * writes the word count, which starts the transfer.  `length` is in
 * bytes; the DMACOUNT register takes 32-bit words. */
static __inline__ void gamma_dma_dispatch(drm_device_t *dev, unsigned long address,
                                      unsigned long length)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;

	GAMMA_WRITE(GAMMA_DMAADDRESS, DRM_OS_VTOPHYS((void *)address));
	/* Spin until the engine signals readiness for a new command. */
	while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4)
		;
	/* Writing the count (in 32-bit words) starts the DMA. */
	GAMMA_WRITE(GAMMA_DMACOUNT, length / 4);
}
56
/* Wait until a single-MX Gamma pipeline is completely idle.
 *
 * First drains any in-flight DMA, then pushes a Sync command through
 * the pipe with output filtering enabled; when the sync tag emerges
 * from the output FIFO, everything queued ahead of it has completed. */
void gamma_dma_quiescent_single(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;

	/* Drain the DMA engine. */
	while (GAMMA_READ(GAMMA_DMACOUNT))
		;
	/* Make sure the input FIFO has room for the writes below. */
	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
		;

	/* Enable sync-tag output filtering and issue a Sync command. */
	GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
	GAMMA_WRITE(GAMMA_SYNC, 0);

	/* Spin until the sync tag appears on the output FIFO. */
	do {
		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
			;
	} while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
}
75
/* Wait until a dual-MX Gamma pipeline is completely idle.
 *
 * Same sync-tag protocol as gamma_dma_quiescent_single(), but the Sync
 * is broadcast to both MX chips (broadcast mask 3) and the tag must be
 * collected from each chip's output FIFO.  The second MX's registers
 * live at a fixed +0x10000 offset from the first's. */
void gamma_dma_quiescent_dual(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;

	/* Drain the DMA engine. */
	while (GAMMA_READ(GAMMA_DMACOUNT))
		;
	/* Make sure the input FIFO has room for the writes below. */
	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
		;

	/* Address both MX chips. */
	GAMMA_WRITE(GAMMA_BROADCASTMASK, 3);

	GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
	GAMMA_WRITE(GAMMA_SYNC, 0);

				/* Read from first MX */
	do {
		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
			;
	} while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);

				/* Read from second MX */
	do {
		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000))
			;
	} while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG);
}
103
104 void gamma_dma_ready(drm_device_t *dev)
105 {
106         drm_gamma_private_t *dev_priv =
107                 (drm_gamma_private_t *)dev->dev_private;
108
109         while (GAMMA_READ(GAMMA_DMACOUNT))
110                 ;
111 }
112
113 static __inline__ int gamma_dma_is_ready(drm_device_t *dev)
114 {
115         drm_gamma_private_t *dev_priv =
116                 (drm_gamma_private_t *)dev->dev_private;
117
118         return !GAMMA_READ(GAMMA_DMACOUNT);
119 }
120
/* Interrupt service routine for the Gamma DMA engine.
 *
 * Acknowledges/clears the interrupt, frees the buffer whose transfer
 * just completed, and (Linux only) queues a bottom half to dispatch
 * the next buffer.  The FreeBSD dispatch path is marked XXX below. */
void gamma_dma_service( DRM_OS_IRQ_ARGS)
{
	drm_device_t        *dev      = (drm_device_t *)device;
	drm_device_dma_t    *dma      = dev->dma;
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;

	atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */
	/* Re-arm the delay timer and clear the interrupt sources
	 * (register values are hardware-defined). */
	GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0x05S */
	GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8);
	GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001);
	if (gamma_dma_is_ready(dev)) {
				/* Free previous buffer */
		/* dma_flag bit 0 serializes against gamma_do_dma(); if
		 * it is already held, just bail — the holder cleans up. */
		if (test_and_set_bit(0, &dev->dma_flag)) return;
		if (dma->this_buffer) {
			gamma_free_buffer(dev, dma->this_buffer);
			dma->this_buffer = NULL;
		}
		clear_bit(0, &dev->dma_flag);

#ifdef __linux__
		/* XXX: Does FreeBSD need something here?*/
		/* Dispatch new buffer */
		queue_task(&dev->tq, &tq_immediate);
		mark_bh(IMMEDIATE_BH);
#endif /* __linux__ */
	}
}
149
/* Only called by gamma_dma_schedule. */
/* Dispatch dma->next_buffer to the hardware.
 *
 * `locked` is nonzero when the caller already holds the hardware lock.
 * Serializes with the ISR and other dispatchers via bit 0 of
 * dev->dma_flag.  Returns 0 on success; EBUSY when the engine or lock
 * is busy or a context switch was initiated; EINVAL when there is no
 * next buffer or it was marked for reclaim. */
static int gamma_do_dma(drm_device_t *dev, int locked)
{
	unsigned long    address;
	unsigned long    length;
	drm_buf_t        *buf;
	int              retcode = 0;
	drm_device_dma_t *dma = dev->dma;
#if DRM_DMA_HISTOGRAM
	cycles_t         dma_start, dma_stop;
#endif

	/* Take the dispatch lock; refuse to nest. */
	if (test_and_set_bit(0, &dev->dma_flag)) return DRM_OS_ERR(EBUSY);

#if DRM_DMA_HISTOGRAM
	dma_start = get_cycles();
#endif

	if (!dma->next_buffer) {
		DRM_ERROR("No next_buffer\n");
		clear_bit(0, &dev->dma_flag);
		return DRM_OS_ERR(EINVAL);
	}

	buf     = dma->next_buffer;
	address = (unsigned long)buf->address;
	length  = buf->used;

	DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
		  buf->context, buf->idx, length);

	/* Buffer owner went away (handle closed); drop it. */
	if (buf->list == DRM_LIST_RECLAIM) {
		gamma_clear_next_buffer(dev);
		gamma_free_buffer(dev, buf);
		clear_bit(0, &dev->dma_flag);
		return DRM_OS_ERR(EINVAL);
	}

	/* Empty buffers are discarded but do not count as an error. */
	if (!length) {
		DRM_ERROR("0 length buffer\n");
		gamma_clear_next_buffer(dev);
		gamma_free_buffer(dev, buf);
		clear_bit(0, &dev->dma_flag);
		return 0;
	}

	if (!gamma_dma_is_ready(dev)) {
		clear_bit(0, &dev->dma_flag);
		return DRM_OS_ERR(EBUSY);
	}

	if (buf->while_locked) {
		/* Caller promised the lock is held; warn if it isn't. */
		if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
			DRM_ERROR("Dispatching buffer %d from pid %d"
				  " \"while locked\", but no lock held\n",
				  buf->idx, buf->pid);
		}
	} else {
		if (!locked && !gamma_lock_take(&dev->lock.hw_lock->lock,
					      DRM_KERNEL_CONTEXT)) {
			clear_bit(0, &dev->dma_flag);
			return DRM_OS_ERR(EBUSY);
		}
	}

	if (dev->last_context != buf->context
	    && !(dev->queuelist[buf->context]->flags
		 & _DRM_CONTEXT_PRESERVED)) {
				/* PRE: dev->last_context != buf->context */
		if (DRM(context_switch)(dev, dev->last_context,
					buf->context)) {
			DRM(clear_next_buffer)(dev);
			DRM(free_buffer)(dev, buf);
		}
		/* EBUSY is returned unconditionally here: dispatch is
		 * deferred until the context switch completes. */
		retcode = DRM_OS_ERR(EBUSY);
		goto cleanup;

				/* POST: we will wait for the context
				   switch and will dispatch on a later call
				   when dev->last_context == buf->context.
				   NOTE WE HOLD THE LOCK THROUGHOUT THIS
				   TIME! */
	}

	gamma_clear_next_buffer(dev);
	buf->pending     = 1;
	buf->waiting     = 0;
	buf->list        = DRM_LIST_PEND;
#if DRM_DMA_HISTOGRAM
	buf->time_dispatched = get_cycles();
#endif

	gamma_dma_dispatch(dev, address, length);
	/* Free the buffer dispatched last time; remember this one so the
	 * ISR (or the next call) can free it once it completes. */
	gamma_free_buffer(dev, dma->this_buffer);
	dma->this_buffer = buf;

	atomic_inc(&dev->counts[7]); /* _DRM_STAT_DMA */
	atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */

	if (!buf->while_locked && !dev->context_flag && !locked) {
		if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
				  DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}
cleanup:

	clear_bit(0, &dev->dma_flag);

#if DRM_DMA_HISTOGRAM
	dma_stop = get_cycles();
	atomic_inc(&dev->histo.dma[gamma_histogram_slot(dma_stop - dma_start)]);
#endif

	return retcode;
}
266
267 static void gamma_dma_timer_bh(unsigned long dev)
268 {
269         gamma_dma_schedule((drm_device_t *)dev, 0);
270 }
271
272 void gamma_dma_immediate_bh(DRM_OS_TASKQUEUE_ARGS)
273 {
274         gamma_dma_schedule(dev, 0);
275 }
276
/* Select and dispatch queued DMA buffers.
 *
 * Non-reentrant (guarded by bit 0 of dev->interrupt_flag).  Retries a
 * previously selected-but-unsent buffer first, otherwise pulls the
 * next buffer from the highest-priority queue.  Loops (bounded by
 * `expire`) while progress is being made and the engine stays ready.
 * `locked` is passed through to gamma_do_dma().  Returns the last
 * gamma_do_dma() result, or EBUSY if already running / a context
 * switch is pending. */
int gamma_dma_schedule(drm_device_t *dev, int locked)
{
	int              next;
	drm_queue_t      *q;
	drm_buf_t        *buf;
	int              retcode   = 0;
	int              processed = 0;
	int              missed;
	int              expire    = 20;	/* bounds the retry loop */
	drm_device_dma_t *dma      = dev->dma;
#if DRM_DMA_HISTOGRAM
	cycles_t         schedule_start;
#endif

	if (test_and_set_bit(0, &dev->interrupt_flag)) {
				/* Not reentrant */
		atomic_inc(&dev->counts[10]); /* _DRM_STAT_MISSED */
		return DRM_OS_ERR(EBUSY);
	}
	/* Snapshot the missed count so we can detect schedule attempts
	 * that raced with us while we were running. */
	missed = atomic_read(&dev->counts[10]);

#if DRM_DMA_HISTOGRAM
	schedule_start = get_cycles();
#endif

again:
	if (dev->context_flag) {
		clear_bit(0, &dev->interrupt_flag);
		return DRM_OS_ERR(EBUSY);
	}
	if (dma->next_buffer) {
				/* Unsent buffer that was previously
				   selected, but that couldn't be sent
				   because the lock could not be obtained
				   or the DMA engine wasn't ready.  Try
				   again. */
		if (!(retcode = gamma_do_dma(dev, locked))) ++processed;
	} else {
		/* Pull buffers off the queues, discarding reclaimed
		 * ones, until we find a live buffer or run dry. */
		do {
			next = gamma_select_queue(dev, gamma_dma_timer_bh);
			if (next >= 0) {
				q   = dev->queuelist[next];
				buf = gamma_waitlist_get(&q->waitlist);
				dma->next_buffer = buf;
				dma->next_queue  = q;
				if (buf && buf->list == DRM_LIST_RECLAIM) {
					gamma_clear_next_buffer(dev);
					gamma_free_buffer(dev, buf);
				}
			}
		} while (next >= 0 && !dma->next_buffer);
		if (dma->next_buffer) {
			if (!(retcode = gamma_do_dma(dev, locked))) {
				++processed;
			}
		}
	}

	/* Keep going while the budget lasts: either another caller was
	 * turned away (missed count changed) or we made progress, and
	 * in both cases the engine must be ready again. */
	if (--expire) {
		if (missed != atomic_read(&dev->counts[10])) {
			if (gamma_dma_is_ready(dev)) goto again;
		}
		if (processed && gamma_dma_is_ready(dev)) {
			processed = 0;
			goto again;
		}
	}

	clear_bit(0, &dev->interrupt_flag);

#if DRM_DMA_HISTOGRAM
	atomic_inc(&dev->histo.schedule[gamma_histogram_slot(get_cycles()
							   - schedule_start)]);
#endif
	return retcode;
}
353
354 static int gamma_dma_priority(drm_device_t *dev, drm_dma_t *d)
355 {
356         unsigned long     address;
357         unsigned long     length;
358         int               must_free = 0;
359         int               retcode   = 0;
360         int               i;
361         int               idx;
362         drm_buf_t         *buf;
363         drm_buf_t         *last_buf = NULL;
364         drm_device_dma_t  *dma      = dev->dma;
365 #ifdef __linux__
366         DECLARE_WAITQUEUE(entry, current);
367 #endif /* __linux__ */
368 #ifdef __FreeBSD__
369         static int never;
370 #endif /* __FreeBSD__ */
371
372                                 /* Turn off interrupt handling */
373         while (test_and_set_bit(0, &dev->interrupt_flag)) {
374 #ifdef __linux__
375                 schedule();
376                 if (signal_pending(current)) return DRM_OS_ERR(EINTR);
377 #endif /* __linux__ */
378 #ifdef __FreeBSD__
379                 retcode = tsleep(&never, PZERO|PCATCH, "gamp1", 1);
380                 if (retcode)
381                         return retcode;
382 #endif /* __FreeBSD__ */
383         }
384         if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
385                 while (!gamma_lock_take(&dev->lock.hw_lock->lock,
386                                       DRM_KERNEL_CONTEXT)) {
387 #ifdef __linux__
388                         schedule();
389                         if (signal_pending(current)) {
390                                 clear_bit(0, &dev->interrupt_flag);
391                                 return DRM_OS_ERR(EINTR);
392                         }
393 #endif /* __linux__ */
394 #ifdef __FreeBSD__
395                         retcode = tsleep(&never, PZERO|PCATCH, "gamp2", 1);
396                         if (retcode)
397                                 return retcode;
398 #endif /* __FreeBSD__ */
399                 }
400                 ++must_free;
401         }
402
403         for (i = 0; i < d->send_count; i++) {
404                 idx = d->send_indices[i];
405                 if (idx < 0 || idx >= dma->buf_count) {
406                         DRM_ERROR("Index %d (of %d max)\n",
407                                   d->send_indices[i], dma->buf_count - 1);
408                         continue;
409                 }
410                 buf = dma->buflist[ idx ];
411                 if (buf->pid != DRM_OS_CURRENTPID) {
412                         DRM_ERROR("Process %d using buffer owned by %d\n",
413                                   DRM_OS_CURRENTPID, buf->pid);
414                         retcode = DRM_OS_ERR(EINVAL);
415                         goto cleanup;
416                 }
417                 if (buf->list != DRM_LIST_NONE) {
418                         DRM_ERROR("Process %d using %d's buffer on list %d\n",
419                                   DRM_OS_CURRENTPID, buf->pid, buf->list);
420                         retcode = DRM_OS_ERR(EINVAL);
421                         goto cleanup;
422                 }
423                                 /* This isn't a race condition on
424                                    buf->list, since our concern is the
425                                    buffer reclaim during the time the
426                                    process closes the /dev/drm? handle, so
427                                    it can't also be doing DMA. */
428                 buf->list         = DRM_LIST_PRIO;
429                 buf->used         = d->send_sizes[i];
430                 buf->context      = d->context;
431                 buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
432                 address           = (unsigned long)buf->address;
433                 length            = buf->used;
434                 if (!length) {
435                         DRM_ERROR("0 length buffer\n");
436                 }
437                 if (buf->pending) {
438                         DRM_ERROR("Sending pending buffer:"
439                                   " buffer %d, offset %d\n",
440                                   d->send_indices[i], i);
441                         retcode = DRM_OS_ERR(EINVAL);
442                         goto cleanup;
443                 }
444                 if (buf->waiting) {
445                         DRM_ERROR("Sending waiting buffer:"
446                                   " buffer %d, offset %d\n",
447                                   d->send_indices[i], i);
448                         retcode = DRM_OS_ERR(EINVAL);
449                         goto cleanup;
450                 }
451                 buf->pending = 1;
452
453                 if (dev->last_context != buf->context
454                     && !(dev->queuelist[buf->context]->flags
455                          & _DRM_CONTEXT_PRESERVED)) {
456 #ifdef __linux__
457                         add_wait_queue(&dev->context_wait, &entry);
458                         current->state = TASK_INTERRUPTIBLE;
459 #endif /* __linux__ */
460                         /* PRE: dev->last_context != buf->context */
461                         DRM(context_switch)(dev, dev->last_context,
462                                             buf->context);
463                                 /* POST: we will wait for the context
464                                    switch and will dispatch on a later call
465                                    when dev->last_context == buf->context.
466                                    NOTE WE HOLD THE LOCK THROUGHOUT THIS
467                                    TIME! */
468 #ifdef __linux__
469                         schedule();
470                         current->state = TASK_RUNNING;
471                         remove_wait_queue(&dev->context_wait, &entry);
472                         if (signal_pending(current)) {
473                                 retcode = DRM_OS_ERR(EINTR);
474                                 goto cleanup;
475                         }
476 #endif /* __linux__ */
477 #ifdef __FreeBSD__
478                         retcode = tsleep(&dev->context_wait,  PZERO|PCATCH,
479                                        "gamctx", 0);
480                         if (retcode)
481                                 goto cleanup;
482 #endif /* __FreeBSD__ */
483                         if (dev->last_context != buf->context) {
484                                 DRM_ERROR("Context mismatch: %d %d\n",
485                                           dev->last_context,
486                                           buf->context);
487                         }
488                 }
489
490 #if DRM_DMA_HISTOGRAM
491                 buf->time_queued     = get_cycles();
492                 buf->time_dispatched = buf->time_queued;
493 #endif
494                 gamma_dma_dispatch(dev, address, length);
495                 atomic_inc(&dev->counts[9]); /* _DRM_STAT_SPECIAL */
496                 atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */
497
498                 if (last_buf) {
499                         gamma_free_buffer(dev, last_buf);
500                 }
501                 last_buf = buf;
502         }
503
504
505 cleanup:
506         if (last_buf) {
507                 gamma_dma_ready(dev);
508                 gamma_free_buffer(dev, last_buf);
509         }
510
511         if (must_free && !dev->context_flag) {
512                 if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
513                                   DRM_KERNEL_CONTEXT)) {
514                         DRM_ERROR("\n");
515                 }
516         }
517         clear_bit(0, &dev->interrupt_flag);
518         return retcode;
519 }
520
/* Enqueue buffers for scheduled DMA, optionally blocking until the
 * last one completes (_DRM_DMA_BLOCK).
 *
 * On FreeBSD, last_buf->dma_wait acts as a waiter count (atomic) and a
 * tsleep channel; on Linux it is a wait queue.  The final free of
 * last_buf only happens when no other waiter remains.  Returns 0 on
 * success, or an enqueue / sleep error code. */
static int gamma_dma_send_buffers(drm_device_t *dev, drm_dma_t *d)
{
	drm_buf_t         *last_buf = NULL;
	int               retcode   = 0;
	drm_device_dma_t  *dma      = dev->dma;
#ifdef __linux__
	DECLARE_WAITQUEUE(entry, current);
#endif /* __linux__ */

	if (d->flags & _DRM_DMA_BLOCK) {
		/* Register as a waiter on the last buffer before
		 * enqueueing, so its completion cannot be missed. */
		last_buf = dma->buflist[d->send_indices[d->send_count-1]];
#ifdef __linux__
		add_wait_queue(&last_buf->dma_wait, &entry);
#endif /* __linux__ */
#ifdef __FreeBSD__
		atomic_inc(&last_buf->dma_wait);
#endif /* __FreeBSD__ */
	}

	if ((retcode = gamma_dma_enqueue(dev, d))) {
		/* NOTE(review): this brace-less `if` governs exactly one
		 * statement per platform because the other is compiled
		 * out — fragile but correct as written. */
		if (d->flags & _DRM_DMA_BLOCK)
#ifdef __linux__
			remove_wait_queue(&last_buf->dma_wait, &entry);
#endif /* __linux__ */
#ifdef __FreeBSD__
			atomic_dec(&last_buf->dma_wait);
#endif /* __FreeBSD__ */
		return retcode;
	}

	gamma_dma_schedule(dev, 0);

	if (d->flags & _DRM_DMA_BLOCK) {
		DRM_DEBUG("%d waiting\n", DRM_OS_CURRENTPID);
#ifdef __linux__
		for (;;) {
			current->state = TASK_INTERRUPTIBLE;
			if (!last_buf->waiting && !last_buf->pending)
				break; /* finished */
			schedule();
			if (signal_pending(current)) {
				retcode = DRM_OS_ERR(EINTR); /* Can't restart */
				break;
			}
		}
		current->state = TASK_RUNNING;
		remove_wait_queue(&last_buf->dma_wait, &entry);
#endif /* __linux__ */
#ifdef __FreeBSD__
		/* Sleep until the buffer is neither waiting nor pending,
		 * or until the sleep is interrupted. */
		for (;;) {
			retcode = tsleep(&last_buf->dma_wait, PZERO|PCATCH,
					 "gamdw", 0);
			if (!last_buf->waiting
			    && !last_buf->pending)
				break; /* finished */
			if (retcode)
				break;
		}
		atomic_dec(&last_buf->dma_wait);
#endif /* __FreeBSD__ */
		DRM_DEBUG("%d running\n", DRM_OS_CURRENTPID);
		if (!retcode
		    || (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) {
			/* Free only if no one else still waits on it. */
#ifdef __linux__
			if (!waitqueue_active(&last_buf->dma_wait)) {
#endif /* __linux__ */
#ifdef __FreeBSD__
			if (!last_buf->dma_wait) {
#endif /* __FreeBSD__ */
				gamma_free_buffer(dev, last_buf);
			}
		}
		if (retcode) {
			DRM_ERROR("ctx%d w%d p%d c%d i%d l%d %d/%d\n",
				  d->context,
				  last_buf->waiting,
				  last_buf->pending,
				  DRM_WAITCOUNT(dev, d->context),
				  last_buf->idx,
				  last_buf->list,
				  last_buf->pid,
				  DRM_OS_CURRENTPID);
		}
	}
	return retcode;
}
607
608 int gamma_dma( DRM_OS_IOCTL )
609 {
610         DRM_OS_DEVICE;
611         drm_device_dma_t  *dma      = dev->dma;
612         int               retcode   = 0;
613         drm_dma_t         d;
614
615         DRM_OS_KRNFROMUSR(d, (drm_dma_t *) data, sizeof(d));
616
617         if (d.send_count < 0 || d.send_count > dma->buf_count) {
618                 DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
619                           DRM_OS_CURRENTPID, d.send_count, dma->buf_count);
620                 return DRM_OS_ERR(EINVAL);
621         }
622
623         if (d.request_count < 0 || d.request_count > dma->buf_count) {
624                 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
625                           DRM_OS_CURRENTPID, d.request_count, dma->buf_count);
626                 return DRM_OS_ERR(EINVAL);
627         }
628
629         if (d.send_count) {
630                 if (d.flags & _DRM_DMA_PRIORITY)
631                         retcode = gamma_dma_priority(dev, &d);
632                 else
633                         retcode = gamma_dma_send_buffers(dev, &d);
634         }
635
636         d.granted_count = 0;
637
638         if (!retcode && d.request_count) {
639                 retcode = gamma_dma_get_buffers(dev, &d);
640         }
641
642         DRM_DEBUG("%d returning, granted = %d\n",
643                   DRM_OS_CURRENTPID, d.granted_count);
644         DRM_OS_KRNTOUSR((drm_dma_t *) data, d, sizeof(d));
645
646         return retcode;
647 }