1 /* mach64_dma.c -- DMA support for mach64 (Rage Pro) driver -*- linux-c -*- */
2 /**
3  * \file mach64_dma.c
4  * DMA support for mach64 (Rage Pro) driver
5  *
6  * \author Gareth Hughes <gareth@valinux.com>
7  * \author Frank C. Earl <fearl@airmail.net>
8  * \author Leif Delgass <ldelgass@retinalburn.net>
9  * \author José Fonseca <j_r_fonseca@yahoo.co.uk>
10  */
11
12 /*-
13  * Copyright 2000 Gareth Hughes
14  * Copyright 2002 Frank C. Earl
15  * Copyright 2002-2003 Leif Delgass
16  * All Rights Reserved.
17  *
18  * Permission is hereby granted, free of charge, to any person obtaining a
19  * copy of this software and associated documentation files (the "Software"),
20  * to deal in the Software without restriction, including without limitation
21  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
22  * and/or sell copies of the Software, and to permit persons to whom the
23  * Software is furnished to do so, subject to the following conditions:
24  *
25  * The above copyright notice and this permission notice (including the next
26  * paragraph) shall be included in all copies or substantial portions of the
27  * Software.
28  *
29  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
30  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
32  * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
33  * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
34  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
35  */
36
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39
40 #include "dev/drm/drmP.h"
41 #include "dev/drm/drm.h"
42 #include "dev/drm/mach64_drm.h"
43 #include "dev/drm/mach64_drv.h"
44
45 /*******************************************************************/
46 /** \name Engine, FIFO control */
47 /*@{*/
48
49 /**
50  * Waits for free entries in the FIFO.
51  *
52  * \note Most writes to Mach64 registers are automatically routed through the
53  * command FIFO, which is 16 entries deep. Prior to writing to any draw engine
54  * register, one has to ensure that enough FIFO entries are available by calling
55  * this function.  Failure to do so may cause the engine to lock up.
56  *
57  * \param dev_priv pointer to device private data structure.
58  * \param entries number of free entries in the FIFO to wait for.
59  *
60  * \returns zero on success, or -EBUSY if the timeout (specified by
61  * drm_mach64_private::usec_timeout) occurs.
62  */
63 int mach64_do_wait_for_fifo(drm_mach64_private_t *dev_priv, int entries)
64 {
65         int slots = 0, i;
66
67         for (i = 0; i < dev_priv->usec_timeout; i++) {
68                 slots = (MACH64_READ(MACH64_FIFO_STAT) & MACH64_FIFO_SLOT_MASK);
69                 if (slots <= (0x8000 >> entries))
70                         return 0;
71                 DRM_UDELAY(1);
72         }
73
74         DRM_INFO("failed! slots=%d entries=%d\n", slots, entries);
75         return -EBUSY;
76 }
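/*
 * Hypothetical usage sketch (not part of the original driver): before
 * programming two draw-engine registers directly via MMIO, a caller would
 * reserve two FIFO entries first, e.g.
 *
 *      if (mach64_do_wait_for_fifo(dev_priv, 2) < 0)
 *              return -EBUSY;
 *      MACH64_WRITE(MACH64_DST_OFF_PITCH, dev_priv->front_offset_pitch);
 *      MACH64_WRITE(MACH64_DST_CNTL, 0);
 *
 * The register choice and values here are illustrative only; the pattern
 * mirrors the wait_for_fifo/MACH64_WRITE pairs used elsewhere in this file.
 */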
77
78 /**
79  * Wait for the draw engine to be idle.
80  */
81 int mach64_do_wait_for_idle(drm_mach64_private_t *dev_priv)
82 {
83         int i, ret;
84
85         ret = mach64_do_wait_for_fifo(dev_priv, 16);
86         if (ret < 0)
87                 return ret;
88
89         for (i = 0; i < dev_priv->usec_timeout; i++) {
90                 if (!(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE))
91                         return 0;
92                 DRM_UDELAY(1);
93         }
94
95         DRM_INFO("failed! GUI_STAT=0x%08x\n", MACH64_READ(MACH64_GUI_STAT));
96         mach64_dump_ring_info(dev_priv);
97         return -EBUSY;
98 }
99
100 /**
101  * Wait for free entries in the ring buffer.
102  *
103  * The Mach64 bus master can be configured to act as a virtual FIFO, using a
104  * circular buffer (commonly referred to as a "ring buffer" in other drivers) with
105  * pointers to engine commands. This allows the CPU to do other things while
106  * the graphics engine is busy, i.e., DMA mode.
107  *
108  * This function should be called before writing new entries to the ring
109  * buffer.
110  *
111  * \param dev_priv pointer to device private data structure.
112  * \param n number of free entries in the ring buffer to wait for.
113  *
114  * \returns zero on success, or -EBUSY if the timeout (specified by
115  * drm_mach64_private_t::usec_timeout) occurs.
116  *
117  * \sa mach64_dump_ring_info()
118  */
119 int mach64_wait_ring(drm_mach64_private_t *dev_priv, int n)
120 {
121         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
122         int i;
123
124         for (i = 0; i < dev_priv->usec_timeout; i++) {
125                 mach64_update_ring_snapshot(dev_priv);
126                 if (ring->space >= n) {
127                         if (i > 0)
128                                 DRM_DEBUG("%d usecs\n", i);
129                         return 0;
130                 }
131                 DRM_UDELAY(1);
132         }
133
134         /* FIXME: This is being ignored... */
135         DRM_ERROR("failed!\n");
136         mach64_dump_ring_info(dev_priv);
137         return -EBUSY;
138 }
139
140 /**
141  * Wait until all DMA requests have been processed...
142  *
143  * \sa mach64_wait_ring()
144  */
145 static int mach64_ring_idle(drm_mach64_private_t *dev_priv)
146 {
147         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
148         u32 head;
149         int i;
150
151         head = ring->head;
152         i = 0;
153         while (i < dev_priv->usec_timeout) {
154                 mach64_update_ring_snapshot(dev_priv);
155                 if (ring->head == ring->tail &&
156                     !(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE)) {
157                         if (i > 0)
158                                 DRM_DEBUG("%d usecs\n", i);
159                         return 0;
160                 }
161                 if (ring->head == head) {
162                         ++i;
163                 } else {
164                         head = ring->head;
165                         i = 0;
166                 }
167                 DRM_UDELAY(1);
168         }
169
170         DRM_INFO("failed! GUI_STAT=0x%08x\n", MACH64_READ(MACH64_GUI_STAT));
171         mach64_dump_ring_info(dev_priv);
172         return -EBUSY;
173 }
174
175 /**
176  * Reset the ring buffer descriptors.
177  *
178  * \sa mach64_do_engine_reset()
179  */
180 static void mach64_ring_reset(drm_mach64_private_t *dev_priv)
181 {
182         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
183
184         mach64_do_release_used_buffers(dev_priv);
185         ring->head_addr = ring->start_addr;
186         ring->head = ring->tail = 0;
187         ring->space = ring->size;
188
189         MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
190                      ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
191
192         dev_priv->ring_running = 0;
193 }
194
195 /**
196  * Ensure that all the queued commands will be processed.
197  */
198 int mach64_do_dma_flush(drm_mach64_private_t *dev_priv)
199 {
200         /* FIXME: It's not necessary to wait for idle when flushing;
201          * we just need to ensure the ring will be completely processed
202          * in finite time without another ioctl
203          */
204         return mach64_ring_idle(dev_priv);
205 }
206
207 /**
208  * Stop all DMA activity.
209  */
210 int mach64_do_dma_idle(drm_mach64_private_t *dev_priv)
211 {
212         int ret;
213
214         /* wait for completion */
215         if ((ret = mach64_ring_idle(dev_priv)) < 0) {
216                 DRM_ERROR("failed BM_GUI_TABLE=0x%08x tail: %u\n",
217                           MACH64_READ(MACH64_BM_GUI_TABLE),
218                           dev_priv->ring.tail);
219                 return ret;
220         }
221
222         mach64_ring_stop(dev_priv);
223
224         /* clean up after pass */
225         mach64_do_release_used_buffers(dev_priv);
226         return 0;
227 }
228
229 /**
230  * Reset the engine.  This will stop the DMA if it is running.
231  */
232 int mach64_do_engine_reset(drm_mach64_private_t *dev_priv)
233 {
234         u32 tmp;
235
236         DRM_DEBUG("\n");
237
238         /* Kill off any outstanding DMA transfers.
239          */
240         tmp = MACH64_READ(MACH64_BUS_CNTL);
241         MACH64_WRITE(MACH64_BUS_CNTL, tmp | MACH64_BUS_MASTER_DIS);
242
243         /* Reset the GUI engine (high to low transition).
244          */
245         tmp = MACH64_READ(MACH64_GEN_TEST_CNTL);
246         MACH64_WRITE(MACH64_GEN_TEST_CNTL, tmp & ~MACH64_GUI_ENGINE_ENABLE);
247         /* Enable the GUI engine
248          */
249         tmp = MACH64_READ(MACH64_GEN_TEST_CNTL);
250         MACH64_WRITE(MACH64_GEN_TEST_CNTL, tmp | MACH64_GUI_ENGINE_ENABLE);
251
252         /* ensure engine is not locked up by clearing any FIFO or HOST errors
253          */
254         tmp = MACH64_READ(MACH64_BUS_CNTL);
255         MACH64_WRITE(MACH64_BUS_CNTL, tmp | 0x00a00000);
256
257         /* Once the GUI engine is restored, disable bus mastering */
258         MACH64_WRITE(MACH64_SRC_CNTL, 0);
259
260         /* Reset descriptor ring */
261         mach64_ring_reset(dev_priv);
262
263         return 0;
264 }
265
266 /*@}*/
267
268
269 /*******************************************************************/
270 /** \name Debugging output */
271 /*@{*/
272
273 /**
274  * Dump engine registers values.
275  */
276 void mach64_dump_engine_info(drm_mach64_private_t *dev_priv)
277 {
278         DRM_INFO("\n");
279         if (!dev_priv->is_pci) {
280                 DRM_INFO("           AGP_BASE = 0x%08x\n",
281                          MACH64_READ(MACH64_AGP_BASE));
282                 DRM_INFO("           AGP_CNTL = 0x%08x\n",
283                          MACH64_READ(MACH64_AGP_CNTL));
284         }
285         DRM_INFO("     ALPHA_TST_CNTL = 0x%08x\n",
286                  MACH64_READ(MACH64_ALPHA_TST_CNTL));
287         DRM_INFO("\n");
288         DRM_INFO("         BM_COMMAND = 0x%08x\n",
289                  MACH64_READ(MACH64_BM_COMMAND));
290         DRM_INFO("BM_FRAME_BUF_OFFSET = 0x%08x\n",
291                  MACH64_READ(MACH64_BM_FRAME_BUF_OFFSET));
292         DRM_INFO("       BM_GUI_TABLE = 0x%08x\n",
293                  MACH64_READ(MACH64_BM_GUI_TABLE));
294         DRM_INFO("          BM_STATUS = 0x%08x\n",
295                  MACH64_READ(MACH64_BM_STATUS));
296         DRM_INFO(" BM_SYSTEM_MEM_ADDR = 0x%08x\n",
297                  MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR));
298         DRM_INFO("    BM_SYSTEM_TABLE = 0x%08x\n",
299                  MACH64_READ(MACH64_BM_SYSTEM_TABLE));
300         DRM_INFO("           BUS_CNTL = 0x%08x\n",
301                  MACH64_READ(MACH64_BUS_CNTL));
302         DRM_INFO("\n");
303         /* DRM_INFO( "         CLOCK_CNTL = 0x%08x\n", MACH64_READ( MACH64_CLOCK_CNTL ) ); */
304         DRM_INFO("        CLR_CMP_CLR = 0x%08x\n",
305                  MACH64_READ(MACH64_CLR_CMP_CLR));
306         DRM_INFO("       CLR_CMP_CNTL = 0x%08x\n",
307                  MACH64_READ(MACH64_CLR_CMP_CNTL));
308         /* DRM_INFO( "        CLR_CMP_MSK = 0x%08x\n", MACH64_READ( MACH64_CLR_CMP_MSK ) ); */
309         DRM_INFO("     CONFIG_CHIP_ID = 0x%08x\n",
310                  MACH64_READ(MACH64_CONFIG_CHIP_ID));
311         DRM_INFO("        CONFIG_CNTL = 0x%08x\n",
312                  MACH64_READ(MACH64_CONFIG_CNTL));
313         DRM_INFO("       CONFIG_STAT0 = 0x%08x\n",
314                  MACH64_READ(MACH64_CONFIG_STAT0));
315         DRM_INFO("       CONFIG_STAT1 = 0x%08x\n",
316                  MACH64_READ(MACH64_CONFIG_STAT1));
317         DRM_INFO("       CONFIG_STAT2 = 0x%08x\n",
318                  MACH64_READ(MACH64_CONFIG_STAT2));
319         DRM_INFO("            CRC_SIG = 0x%08x\n", MACH64_READ(MACH64_CRC_SIG));
320         DRM_INFO("  CUSTOM_MACRO_CNTL = 0x%08x\n",
321                  MACH64_READ(MACH64_CUSTOM_MACRO_CNTL));
322         DRM_INFO("\n");
323         /* DRM_INFO( "           DAC_CNTL = 0x%08x\n", MACH64_READ( MACH64_DAC_CNTL ) ); */
324         /* DRM_INFO( "           DAC_REGS = 0x%08x\n", MACH64_READ( MACH64_DAC_REGS ) ); */
325         DRM_INFO("        DP_BKGD_CLR = 0x%08x\n",
326                  MACH64_READ(MACH64_DP_BKGD_CLR));
327         DRM_INFO("        DP_FRGD_CLR = 0x%08x\n",
328                  MACH64_READ(MACH64_DP_FRGD_CLR));
329         DRM_INFO("             DP_MIX = 0x%08x\n", MACH64_READ(MACH64_DP_MIX));
330         DRM_INFO("       DP_PIX_WIDTH = 0x%08x\n",
331                  MACH64_READ(MACH64_DP_PIX_WIDTH));
332         DRM_INFO("             DP_SRC = 0x%08x\n", MACH64_READ(MACH64_DP_SRC));
333         DRM_INFO("      DP_WRITE_MASK = 0x%08x\n",
334                  MACH64_READ(MACH64_DP_WRITE_MASK));
335         DRM_INFO("         DSP_CONFIG = 0x%08x\n",
336                  MACH64_READ(MACH64_DSP_CONFIG));
337         DRM_INFO("         DSP_ON_OFF = 0x%08x\n",
338                  MACH64_READ(MACH64_DSP_ON_OFF));
339         DRM_INFO("           DST_CNTL = 0x%08x\n",
340                  MACH64_READ(MACH64_DST_CNTL));
341         DRM_INFO("      DST_OFF_PITCH = 0x%08x\n",
342                  MACH64_READ(MACH64_DST_OFF_PITCH));
343         DRM_INFO("\n");
344         /* DRM_INFO( "       EXT_DAC_REGS = 0x%08x\n", MACH64_READ( MACH64_EXT_DAC_REGS ) ); */
345         DRM_INFO("       EXT_MEM_CNTL = 0x%08x\n",
346                  MACH64_READ(MACH64_EXT_MEM_CNTL));
347         DRM_INFO("\n");
348         DRM_INFO("          FIFO_STAT = 0x%08x\n",
349                  MACH64_READ(MACH64_FIFO_STAT));
350         DRM_INFO("\n");
351         DRM_INFO("      GEN_TEST_CNTL = 0x%08x\n",
352                  MACH64_READ(MACH64_GEN_TEST_CNTL));
353         /* DRM_INFO( "              GP_IO = 0x%08x\n", MACH64_READ( MACH64_GP_IO ) ); */
354         DRM_INFO("   GUI_CMDFIFO_DATA = 0x%08x\n",
355                  MACH64_READ(MACH64_GUI_CMDFIFO_DATA));
356         DRM_INFO("  GUI_CMDFIFO_DEBUG = 0x%08x\n",
357                  MACH64_READ(MACH64_GUI_CMDFIFO_DEBUG));
358         DRM_INFO("           GUI_CNTL = 0x%08x\n",
359                  MACH64_READ(MACH64_GUI_CNTL));
360         DRM_INFO("           GUI_STAT = 0x%08x\n",
361                  MACH64_READ(MACH64_GUI_STAT));
362         DRM_INFO("      GUI_TRAJ_CNTL = 0x%08x\n",
363                  MACH64_READ(MACH64_GUI_TRAJ_CNTL));
364         DRM_INFO("\n");
365         DRM_INFO("          HOST_CNTL = 0x%08x\n",
366                  MACH64_READ(MACH64_HOST_CNTL));
367         DRM_INFO("           HW_DEBUG = 0x%08x\n",
368                  MACH64_READ(MACH64_HW_DEBUG));
369         DRM_INFO("\n");
370         DRM_INFO("    MEM_ADDR_CONFIG = 0x%08x\n",
371                  MACH64_READ(MACH64_MEM_ADDR_CONFIG));
372         DRM_INFO("       MEM_BUF_CNTL = 0x%08x\n",
373                  MACH64_READ(MACH64_MEM_BUF_CNTL));
374         DRM_INFO("\n");
375         DRM_INFO("           PAT_REG0 = 0x%08x\n",
376                  MACH64_READ(MACH64_PAT_REG0));
377         DRM_INFO("           PAT_REG1 = 0x%08x\n",
378                  MACH64_READ(MACH64_PAT_REG1));
379         DRM_INFO("\n");
380         DRM_INFO("            SC_LEFT = 0x%08x\n", MACH64_READ(MACH64_SC_LEFT));
381         DRM_INFO("           SC_RIGHT = 0x%08x\n",
382                  MACH64_READ(MACH64_SC_RIGHT));
383         DRM_INFO("             SC_TOP = 0x%08x\n", MACH64_READ(MACH64_SC_TOP));
384         DRM_INFO("          SC_BOTTOM = 0x%08x\n",
385                  MACH64_READ(MACH64_SC_BOTTOM));
386         DRM_INFO("\n");
387         DRM_INFO("      SCALE_3D_CNTL = 0x%08x\n",
388                  MACH64_READ(MACH64_SCALE_3D_CNTL));
389         DRM_INFO("       SCRATCH_REG0 = 0x%08x\n",
390                  MACH64_READ(MACH64_SCRATCH_REG0));
391         DRM_INFO("       SCRATCH_REG1 = 0x%08x\n",
392                  MACH64_READ(MACH64_SCRATCH_REG1));
393         DRM_INFO("         SETUP_CNTL = 0x%08x\n",
394                  MACH64_READ(MACH64_SETUP_CNTL));
395         DRM_INFO("           SRC_CNTL = 0x%08x\n",
396                  MACH64_READ(MACH64_SRC_CNTL));
397         DRM_INFO("\n");
398         DRM_INFO("           TEX_CNTL = 0x%08x\n",
399                  MACH64_READ(MACH64_TEX_CNTL));
400         DRM_INFO("     TEX_SIZE_PITCH = 0x%08x\n",
401                  MACH64_READ(MACH64_TEX_SIZE_PITCH));
402         DRM_INFO("       TIMER_CONFIG = 0x%08x\n",
403                  MACH64_READ(MACH64_TIMER_CONFIG));
404         DRM_INFO("\n");
405         DRM_INFO("             Z_CNTL = 0x%08x\n", MACH64_READ(MACH64_Z_CNTL));
406         DRM_INFO("        Z_OFF_PITCH = 0x%08x\n",
407                  MACH64_READ(MACH64_Z_OFF_PITCH));
408         DRM_INFO("\n");
409 }
410
411 #define MACH64_DUMP_CONTEXT     3
412
413 /**
414  * Used by mach64_dump_ring_info() to dump the contents of the current buffer
415  * pointed to by the ring head.
416  */
417 static void mach64_dump_buf_info(drm_mach64_private_t *dev_priv,
418                                  struct drm_buf *buf)
419 {
420         u32 addr = GETBUFADDR(buf);
421         u32 used = buf->used >> 2;
422         u32 sys_addr = MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR);
423         u32 *p = GETBUFPTR(buf);
424         int skipped = 0;
425
426         DRM_INFO("buffer contents:\n");
427
428         while (used) {
429                 u32 reg, count;
430
431                 reg = le32_to_cpu(*p++);
432                 if (addr <= GETBUFADDR(buf) + MACH64_DUMP_CONTEXT * 4 ||
433                     (addr >= sys_addr - MACH64_DUMP_CONTEXT * 4 &&
434                      addr <= sys_addr + MACH64_DUMP_CONTEXT * 4) ||
435                     addr >=
436                     GETBUFADDR(buf) + buf->used - MACH64_DUMP_CONTEXT * 4) {
437                         DRM_INFO("%08x:  0x%08x\n", addr, reg);
438                 }
439                 addr += 4;
440                 used--;
441
442                 count = (reg >> 16) + 1;
443                 reg = reg & 0xffff;
444                 reg = MMSELECT(reg);
445                 while (count && used) {
446                         if (addr <= GETBUFADDR(buf) + MACH64_DUMP_CONTEXT * 4 ||
447                             (addr >= sys_addr - MACH64_DUMP_CONTEXT * 4 &&
448                              addr <= sys_addr + MACH64_DUMP_CONTEXT * 4) ||
449                             addr >=
450                             GETBUFADDR(buf) + buf->used -
451                             MACH64_DUMP_CONTEXT * 4) {
452                                 DRM_INFO("%08x:    0x%04x = 0x%08x\n", addr,
453                                          reg, le32_to_cpu(*p));
454                                 skipped = 0;
455                         } else {
456                                 if (!skipped) {
457                                         DRM_INFO("  ...\n");
458                                         skipped = 1;
459                                 }
460                         }
461                         p++;
462                         addr += 4;
463                         used--;
464
465                         reg += 4;
466                         count--;
467                 }
468         }
469
470         DRM_INFO("\n");
471 }
472
473 /**
474  * Dump the ring state and contents, including the contents of the buffer being
475  * processed by the graphics engine.
476  */
477 void mach64_dump_ring_info(drm_mach64_private_t *dev_priv)
478 {
479         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
480         int i, skipped;
481
482         DRM_INFO("\n");
483
484         DRM_INFO("ring contents:\n");
485         DRM_INFO("  head_addr: 0x%08x head: %u tail: %u\n\n",
486                  ring->head_addr, ring->head, ring->tail);
487
488         skipped = 0;
489         for (i = 0; i < ring->size / sizeof(u32); i += 4) {
490                 if (i <= MACH64_DUMP_CONTEXT * 4 ||
491                     i >= ring->size / sizeof(u32) - MACH64_DUMP_CONTEXT * 4 ||
492                     (i >= ring->tail - MACH64_DUMP_CONTEXT * 4 &&
493                      i <= ring->tail + MACH64_DUMP_CONTEXT * 4) ||
494                     (i >= ring->head - MACH64_DUMP_CONTEXT * 4 &&
495                      i <= ring->head + MACH64_DUMP_CONTEXT * 4)) {
496                         DRM_INFO("  0x%08x:  0x%08x 0x%08x 0x%08x 0x%08x%s%s\n",
497                                  (u32)(ring->start_addr + i * sizeof(u32)),
498                                  le32_to_cpu(((u32 *) ring->start)[i + 0]),
499                                  le32_to_cpu(((u32 *) ring->start)[i + 1]),
500                                  le32_to_cpu(((u32 *) ring->start)[i + 2]),
501                                  le32_to_cpu(((u32 *) ring->start)[i + 3]),
502                                  i == ring->head ? " (head)" : "",
503                                  i == ring->tail ? " (tail)" : "");
504                         skipped = 0;
505                 } else {
506                         if (!skipped) {
507                                 DRM_INFO("  ...\n");
508                                 skipped = 1;
509                         }
510                 }
511         }
512
513         DRM_INFO("\n");
514
515         if (ring->head < ring->size / sizeof(u32)) {
516                 struct list_head *ptr;
517                 u32 addr = le32_to_cpu(((u32 *) ring->start)[ring->head + 1]);
518
519                 list_for_each(ptr, &dev_priv->pending) {
520                         drm_mach64_freelist_t *entry =
521                             list_entry(ptr, drm_mach64_freelist_t, list);
522                         struct drm_buf *buf = entry->buf;
523
524                         u32 buf_addr = GETBUFADDR(buf);
525
526                         if (buf_addr <= addr && addr < buf_addr + buf->used)
527                                 mach64_dump_buf_info(dev_priv, buf);
528                 }
529         }
530
531         DRM_INFO("\n");
532         DRM_INFO("       BM_GUI_TABLE = 0x%08x\n",
533                  MACH64_READ(MACH64_BM_GUI_TABLE));
534         DRM_INFO("\n");
535         DRM_INFO("BM_FRAME_BUF_OFFSET = 0x%08x\n",
536                  MACH64_READ(MACH64_BM_FRAME_BUF_OFFSET));
537         DRM_INFO(" BM_SYSTEM_MEM_ADDR = 0x%08x\n",
538                  MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR));
539         DRM_INFO("         BM_COMMAND = 0x%08x\n",
540                  MACH64_READ(MACH64_BM_COMMAND));
541         DRM_INFO("\n");
542         DRM_INFO("          BM_STATUS = 0x%08x\n",
543                  MACH64_READ(MACH64_BM_STATUS));
544         DRM_INFO("           BUS_CNTL = 0x%08x\n",
545                  MACH64_READ(MACH64_BUS_CNTL));
546         DRM_INFO("          FIFO_STAT = 0x%08x\n",
547                  MACH64_READ(MACH64_FIFO_STAT));
548         DRM_INFO("           GUI_STAT = 0x%08x\n",
549                  MACH64_READ(MACH64_GUI_STAT));
550         DRM_INFO("           SRC_CNTL = 0x%08x\n",
551                  MACH64_READ(MACH64_SRC_CNTL));
552 }
553
554 /*@}*/
555
556
557 /*******************************************************************/
558 /** \name DMA descriptor ring macros */
559 /*@{*/
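/*
 * Layout sketch of one bus-master descriptor as written by the code below
 * (mach64_add_buf_to_ring(), mach64_add_hostdata_buf_to_ring() and the
 * table set up in mach64_bm_dma_test()); each descriptor is four
 * little-endian dwords:
 *
 *      [0] target aperture offset (MACH64_APERTURE_OFFSET + MACH64_BM_ADDR
 *          or MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA)
 *      [1] bus address of the data in system memory
 *      [2] byte count, ORed with MACH64_DMA_HOLD_OFFSET and, for the last
 *          descriptor of a pass, MACH64_DMA_EOL
 *      [3] reserved, written as zero
 */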
560
561 /**
562  * Add the end mark to the ring's new tail position.
563  *
564  * The bus master engine keeps processing the DMA buffers listed in the ring
565  * until it finds this mark, at which point it stops.
566  *
567  * \sa mach64_clear_dma_eol
568  */ 
569 static __inline__ void mach64_set_dma_eol(volatile u32 *addr)
570 {
571 #if defined(__i386__)
572         int nr = 31;
573
574         /* Taken from include/asm-i386/bitops.h linux header */
575         __asm__ __volatile__("lock;" "btsl %1,%0":"=m"(*addr)
576                              :"Ir"(nr));
577 #elif defined(__powerpc__)
578         u32 old;
579         u32 mask = cpu_to_le32(MACH64_DMA_EOL);
580
581         /* Taken from the include/asm-ppc/bitops.h linux header */
582         __asm__ __volatile__("\n\
583 1:      lwarx   %0,0,%3 \n\
584         or      %0,%0,%2 \n\
585         stwcx.  %0,0,%3 \n\
586         bne-    1b":"=&r"(old), "=m"(*addr)
587                              :"r"(mask), "r"(addr), "m"(*addr)
588                              :"cc");
589 #elif defined(__alpha__)
590         u32 temp;
591         u32 mask = MACH64_DMA_EOL;
592
593         /* Taken from the include/asm-alpha/bitops.h linux header */
594         __asm__ __volatile__("1:        ldl_l %0,%3\n"
595                              "  bis %0,%2,%0\n"
596                              "  stl_c %0,%1\n"
597                              "  beq %0,2f\n"
598                              ".subsection 2\n"
599                              "2:        br 1b\n"
600                              ".previous":"=&r"(temp), "=m"(*addr)
601                              :"Ir"(mask), "m"(*addr));
602 #else
603         u32 mask = cpu_to_le32(MACH64_DMA_EOL);
604
605         *addr |= mask;
606 #endif
607 }
608
609 /**
610  * Remove the end mark from the ring's old tail position.
611  *
612  * It should be called after calling mach64_set_dma_eol to mark the ring's new
613  * tail position.
614  *
615  * We update the end marks while the bus master engine is in operation. Since
616  * the bus master engine may potentially be reading from the same position
617  * that we write, the change must be made atomically to avoid exposing
618  * intermediate bad data.
619  */
620 static __inline__ void mach64_clear_dma_eol(volatile u32 *addr)
621 {
622 #if defined(__i386__)
623         int nr = 31;
624
625         /* Taken from include/asm-i386/bitops.h linux header */
626         __asm__ __volatile__("lock;" "btrl %1,%0":"=m"(*addr)
627                              :"Ir"(nr));
628 #elif defined(__powerpc__)
629         u32 old;
630         u32 mask = cpu_to_le32(MACH64_DMA_EOL);
631
632         /* Taken from the include/asm-ppc/bitops.h linux header */
633         __asm__ __volatile__("\n\
634 1:      lwarx   %0,0,%3 \n\
635         andc    %0,%0,%2 \n\
636         stwcx.  %0,0,%3 \n\
637         bne-    1b":"=&r"(old), "=m"(*addr)
638                              :"r"(mask), "r"(addr), "m"(*addr)
639                              :"cc");
640 #elif defined(__alpha__)
641         u32 temp;
642         u32 mask = ~MACH64_DMA_EOL;
643
644         /* Taken from the include/asm-alpha/bitops.h linux header */
645         __asm__ __volatile__("1:        ldl_l %0,%3\n"
646                              "  and %0,%2,%0\n"
647                              "  stl_c %0,%1\n"
648                              "  beq %0,2f\n"
649                              ".subsection 2\n"
650                              "2:        br 1b\n"
651                              ".previous":"=&r"(temp), "=m"(*addr)
652                              :"Ir"(mask), "m"(*addr));
653 #else
654         u32 mask = cpu_to_le32(~MACH64_DMA_EOL);
655
656         *addr &= mask;
657 #endif
658 }
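/*
 * Note that the architecture-specific variants above set and clear the EOL
 * bit with atomic sequences (lock btsl/btrl on i386, lwarx/stwcx. on
 * PowerPC, ldl_l/stl_c on Alpha), while the generic fallback is a plain
 * read-modify-write of the descriptor dword.
 */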
659
660 #define RING_LOCALS                                                     \
661         int _ring_tail, _ring_write; unsigned int _ring_mask; volatile u32 *_ring
662
663 #define RING_WRITE_OFS  _ring_write
664
665 #define BEGIN_RING(n)                                                   \
666         do {                                                            \
667                 if (MACH64_VERBOSE) {                                   \
668                         DRM_INFO( "BEGIN_RING( %d ) \n",                \
669                                   (n) );                                \
670                 }                                                       \
671                 if (dev_priv->ring.space <= (n) * sizeof(u32)) {        \
672                         int ret;                                        \
673                         if ((ret = mach64_wait_ring( dev_priv, (n) * sizeof(u32))) < 0 ) { \
674                                 DRM_ERROR( "wait_ring failed, resetting engine\n"); \
675                                 mach64_dump_engine_info( dev_priv );    \
676                                 mach64_do_engine_reset( dev_priv );     \
677                                 return ret;                             \
678                         }                                               \
679                 }                                                       \
680                 dev_priv->ring.space -= (n) * sizeof(u32);              \
681                 _ring = (u32 *) dev_priv->ring.start;                   \
682                 _ring_tail = _ring_write = dev_priv->ring.tail;         \
683                 _ring_mask = dev_priv->ring.tail_mask;                  \
684         } while (0)
685
686 #define OUT_RING( x )                                           \
687 do {                                                            \
688         if (MACH64_VERBOSE) {                                   \
689                 DRM_INFO( "   OUT_RING( 0x%08x ) at 0x%x\n",    \
690                            (unsigned int)(x), _ring_write );    \
691         }                                                       \
692         _ring[_ring_write++] = cpu_to_le32( x );                \
693         _ring_write &= _ring_mask;                              \
694 } while (0)
695
696 #define ADVANCE_RING()                                                  \
697 do {                                                                    \
698         if (MACH64_VERBOSE) {                                           \
699                 DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n",     \
700                           _ring_write, _ring_tail );                    \
701         }                                                               \
702         DRM_MEMORYBARRIER();                                            \
703         mach64_clear_dma_eol( &_ring[(_ring_tail - 2) & _ring_mask] );  \
704         DRM_MEMORYBARRIER();                                            \
705         dev_priv->ring.tail = _ring_write;                              \
706         mach64_ring_tick( dev_priv, &(dev_priv)->ring );                \
707 } while (0)
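/*
 * Hypothetical sketch (assumed, not taken from the original driver) of how
 * the ring macros combine; the real usage is mach64_add_buf_to_ring()
 * below.  Emitting a single four-dword descriptor would look roughly like:
 *
 *      RING_LOCALS;
 *
 *      BEGIN_RING(4);
 *      OUT_RING(MACH64_APERTURE_OFFSET + MACH64_BM_ADDR);
 *      OUT_RING(buf_bus_addr);
 *      OUT_RING(nbytes | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL);
 *      OUT_RING(0);
 *      ADVANCE_RING();
 *
 * where buf_bus_addr and nbytes are hypothetical locals for a buffer's bus
 * address and used byte count.  Note that BEGIN_RING() may return from the
 * calling function if waiting for ring space fails.
 */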
708
709 /**
710  * Queue a DMA buffer of register writes into the ring buffer.
711  */ 
712 int mach64_add_buf_to_ring(drm_mach64_private_t *dev_priv,
713                            drm_mach64_freelist_t *entry)
714 {
715         int bytes, pages, remainder;
716         u32 address, page;
717         int i;
718         struct drm_buf *buf = entry->buf;
719         RING_LOCALS;
720
721         bytes = buf->used;
722         address = GETBUFADDR( buf );
723         pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE;
724
725         BEGIN_RING( pages * 4 );
726
727         for ( i = 0 ; i < pages-1 ; i++ ) {
728                 page = address + i * MACH64_DMA_CHUNKSIZE;
729                 OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );
730                 OUT_RING( page );
731                 OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET );
732                 OUT_RING( 0 );
733         }
734
735         /* generate the final descriptor for any remaining commands in this buffer */
736         page = address + i * MACH64_DMA_CHUNKSIZE;
737         remainder = bytes - i * MACH64_DMA_CHUNKSIZE;
738
739         /* Save dword offset of last descriptor for this buffer.
740          * This is needed to check for completion of the buffer in freelist_get
741          */
742         entry->ring_ofs = RING_WRITE_OFS;
743
744         OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );
745         OUT_RING( page );
746         OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL );
747         OUT_RING( 0 );
748
749         ADVANCE_RING();
750         
751         return 0;
752 }
753
754 /**
755  * Queue a DMA buffer controlling host data transfers (e.g., blit).
756  * 
757  * Almost identical to mach64_add_buf_to_ring.
758  */
759 int mach64_add_hostdata_buf_to_ring(drm_mach64_private_t *dev_priv,
760                                     drm_mach64_freelist_t *entry)
761 {
762         int bytes, pages, remainder;
763         u32 address, page;
764         int i;
765         struct drm_buf *buf = entry->buf;
766         RING_LOCALS;
767         
768         bytes = buf->used - MACH64_HOSTDATA_BLIT_OFFSET;
769         pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE;
770         address = GETBUFADDR( buf );
771         
772         BEGIN_RING( 4 + pages * 4 );
773         
774         OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );
775         OUT_RING( address );
776         OUT_RING( MACH64_HOSTDATA_BLIT_OFFSET | MACH64_DMA_HOLD_OFFSET );
777         OUT_RING( 0 );
778         address += MACH64_HOSTDATA_BLIT_OFFSET;
779         
780         for ( i = 0 ; i < pages-1 ; i++ ) {
781                 page = address + i * MACH64_DMA_CHUNKSIZE;
782                 OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA );
783                 OUT_RING( page );
784                 OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET );
785                 OUT_RING( 0 );
786         }
787         
788         /* generate the final descriptor for any remaining commands in this buffer */
789         page = address + i * MACH64_DMA_CHUNKSIZE;
790         remainder = bytes - i * MACH64_DMA_CHUNKSIZE;
791         
792         /* Save dword offset of last descriptor for this buffer.
793          * This is needed to check for completion of the buffer in freelist_get
794          */
795         entry->ring_ofs = RING_WRITE_OFS;
796         
797         OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA );
798         OUT_RING( page );
799         OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL );
800         OUT_RING( 0 );
801         
802         ADVANCE_RING();
803         
804         return 0;
805 }
806
807 /*@}*/
808
809
810 /*******************************************************************/
811 /** \name DMA test and initialization */
812 /*@{*/
813
814 /**
815  * Perform a simple DMA operation using the pattern registers to test whether
816  * DMA works.
817  *
818  * \return zero if successful.
819  *
820  * \note This function was the testbed for many experiments regarding Mach64
821  * DMA operation. It is left here since it is so tricky to get DMA operating
822  * properly on some architectures and hardware.
823  */
824 static int mach64_bm_dma_test(struct drm_device * dev)
825 {
826         drm_mach64_private_t *dev_priv = dev->dev_private;
827         drm_dma_handle_t *cpu_addr_dmah;
828         u32 data_addr;
829         u32 *table, *data;
830         u32 expected[2];
831         u32 src_cntl, pat_reg0, pat_reg1;
832         int i, count, failed;
833
834         DRM_DEBUG("\n");
835
836         table = (u32 *) dev_priv->ring.start;
837
838         /* FIXME: get a dma buffer from the freelist here */
839         DRM_DEBUG("Allocating data memory ...\n");
840 #ifdef __FreeBSD__
841         DRM_UNLOCK();
842 #endif
843         cpu_addr_dmah =
844             drm_pci_alloc(dev, 0x1000, 0x1000, 0xfffffffful);
845 #ifdef __FreeBSD__
846         DRM_LOCK();
847 #endif
848         if (!cpu_addr_dmah) {
849                 DRM_INFO("data-memory allocation failed!\n");
850                 return -ENOMEM;
851         } else {
852                 data = (u32 *) cpu_addr_dmah->vaddr;
853                 data_addr = (u32) cpu_addr_dmah->busaddr;
854         }
855
856         /* Save the X server's value for SRC_CNTL and restore it
857          * in case our test fails.  This prevents the X server
858          * from disabling its cache for this register
859          */
860         src_cntl = MACH64_READ(MACH64_SRC_CNTL);
861         pat_reg0 = MACH64_READ(MACH64_PAT_REG0);
862         pat_reg1 = MACH64_READ(MACH64_PAT_REG1);
863
864         mach64_do_wait_for_fifo(dev_priv, 3);
865
866         MACH64_WRITE(MACH64_SRC_CNTL, 0);
867         MACH64_WRITE(MACH64_PAT_REG0, 0x11111111);
868         MACH64_WRITE(MACH64_PAT_REG1, 0x11111111);
869
870         mach64_do_wait_for_idle(dev_priv);
871
872         for (i = 0; i < 2; i++) {
873                 u32 reg;
874                 reg = MACH64_READ((MACH64_PAT_REG0 + i * 4));
875                 DRM_DEBUG("(Before DMA Transfer) reg %d = 0x%08x\n", i, reg);
876                 if (reg != 0x11111111) {
877                         DRM_INFO("Error initializing test registers\n");
878                         DRM_INFO("resetting engine ...\n");
879                         mach64_do_engine_reset(dev_priv);
880                         DRM_INFO("freeing data buffer memory.\n");
881                         drm_pci_free(dev, cpu_addr_dmah);
882                         return -EIO;
883                 }
884         }
885
886         /* fill up a buffer with sets of 2 consecutive writes starting with PAT_REG0 */
887         count = 0;
888
889         data[count++] = cpu_to_le32(DMAREG(MACH64_PAT_REG0) | (1 << 16));
890         data[count++] = expected[0] = 0x22222222;
891         data[count++] = expected[1] = 0xaaaaaaaa;
892
893         while (count < 1020) {
894                 data[count++] =
895                     cpu_to_le32(DMAREG(MACH64_PAT_REG0) | (1 << 16));
896                 data[count++] = 0x22222222;
897                 data[count++] = 0xaaaaaaaa;
898         }
899         data[count++] = cpu_to_le32(DMAREG(MACH64_SRC_CNTL) | (0 << 16));
900         data[count++] = 0;
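        /*
         * Each GUI-master command above is a header dword followed by data
         * dwords: the low 16 bits of the header select the register (via
         * DMAREG()) and the high 16 bits hold the number of data dwords
         * minus one, so the (1 << 16) headers request two consecutive
         * register writes (PAT_REG0 then PAT_REG1) and the (0 << 16) header
         * a single write to SRC_CNTL.  mach64_dump_buf_info() above decodes
         * the same format.
         */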
901
902         DRM_DEBUG("Preparing table ...\n");
903         table[MACH64_DMA_FRAME_BUF_OFFSET] = cpu_to_le32(MACH64_BM_ADDR +
904                                                          MACH64_APERTURE_OFFSET);
905         table[MACH64_DMA_SYS_MEM_ADDR] = cpu_to_le32(data_addr);
906         table[MACH64_DMA_COMMAND] = cpu_to_le32(count * sizeof(u32)
907                                                 | MACH64_DMA_HOLD_OFFSET
908                                                 | MACH64_DMA_EOL);
909         table[MACH64_DMA_RESERVED] = 0;
910
911         DRM_DEBUG("table[0] = 0x%08x\n", table[0]);
912         DRM_DEBUG("table[1] = 0x%08x\n", table[1]);
913         DRM_DEBUG("table[2] = 0x%08x\n", table[2]);
914         DRM_DEBUG("table[3] = 0x%08x\n", table[3]);
915
916         for (i = 0; i < 6; i++) {
917                 DRM_DEBUG(" data[%d] = 0x%08x\n", i, data[i]);
918         }
919         DRM_DEBUG(" ...\n");
920         for (i = count - 5; i < count; i++) {
921                 DRM_DEBUG(" data[%d] = 0x%08x\n", i, data[i]);
922         }
923
924         DRM_MEMORYBARRIER();
925
926         DRM_DEBUG("waiting for idle...\n");
927         if ((i = mach64_do_wait_for_idle(dev_priv))) {
928                 DRM_INFO("mach64_do_wait_for_idle failed (result=%d)\n", i);
929                 DRM_INFO("resetting engine ...\n");
930                 mach64_do_engine_reset(dev_priv);
931                 mach64_do_wait_for_fifo(dev_priv, 3);
932                 MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);
933                 MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
934                 MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
935                 DRM_INFO("freeing data buffer memory.\n");
936                 drm_pci_free(dev, cpu_addr_dmah);
937                 return i;
938         }
939         DRM_DEBUG("waiting for idle...done\n");
940
941         DRM_DEBUG("BUS_CNTL = 0x%08x\n", MACH64_READ(MACH64_BUS_CNTL));
942         DRM_DEBUG("SRC_CNTL = 0x%08x\n", MACH64_READ(MACH64_SRC_CNTL));
943         DRM_DEBUG("\n");
944         DRM_DEBUG("data bus addr = 0x%08x\n", data_addr);
945         DRM_DEBUG("table bus addr = 0x%08x\n", dev_priv->ring.start_addr);
946
947         DRM_DEBUG("starting DMA transfer...\n");
948         MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
949                      dev_priv->ring.start_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
950
951         MACH64_WRITE(MACH64_SRC_CNTL,
952                      MACH64_SRC_BM_ENABLE | MACH64_SRC_BM_SYNC |
953                      MACH64_SRC_BM_OP_SYSTEM_TO_REG);
954
955         /* Kick off the transfer */
956         DRM_DEBUG("starting DMA transfer... done.\n");
957         MACH64_WRITE(MACH64_DST_HEIGHT_WIDTH, 0);
958
959         DRM_DEBUG("waiting for idle...\n");
960
961         if ((i = mach64_do_wait_for_idle(dev_priv))) {
962                 /* engine locked up, dump register state and reset */
963                 DRM_INFO("mach64_do_wait_for_idle failed (result=%d)\n", i);
964                 mach64_dump_engine_info(dev_priv);
965                 DRM_INFO("resetting engine ...\n");
966                 mach64_do_engine_reset(dev_priv);
967                 mach64_do_wait_for_fifo(dev_priv, 3);
968                 MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);
969                 MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
970                 MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
971                 DRM_INFO("freeing data buffer memory.\n");
972                 drm_pci_free(dev, cpu_addr_dmah);
973                 return i;
974         }
975
976         DRM_DEBUG("waiting for idle...done\n");
977
978         /* restore SRC_CNTL */
979         mach64_do_wait_for_fifo(dev_priv, 1);
980         MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);
981
982         failed = 0;
983
984         /* Check register values to see if the GUI master operation succeeded */
985         for (i = 0; i < 2; i++) {
986                 u32 reg;
987                 reg = MACH64_READ((MACH64_PAT_REG0 + i * 4));
988                 DRM_DEBUG("(After DMA Transfer) reg %d = 0x%08x\n", i, reg);
989                 if (reg != expected[i]) {
990                         failed = -1;
991                 }
992         }
993
994         /* restore pattern registers */
995         mach64_do_wait_for_fifo(dev_priv, 2);
996         MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
997         MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
998
999         DRM_DEBUG("freeing data buffer memory.\n");
1000         drm_pci_free(dev, cpu_addr_dmah);
1001         DRM_DEBUG("returning ...\n");
1002
1003         return failed;
1004 }
1005
1006 /**
1007  * Called during the DMA initialization ioctl to initialize all the necessary
1008  * software and hardware state for DMA operation.
1009  */
1010 static int mach64_do_dma_init(struct drm_device * dev, drm_mach64_init_t * init)
1011 {
1012         drm_mach64_private_t *dev_priv;
1013         u32 tmp;
1014         int i, ret;
1015
1016         DRM_DEBUG("\n");
1017
1018         dev_priv = drm_alloc(sizeof(drm_mach64_private_t), DRM_MEM_DRIVER);
1019         if (dev_priv == NULL)
1020                 return -ENOMEM;
1021
1022         memset(dev_priv, 0, sizeof(drm_mach64_private_t));
1023
1024         dev_priv->is_pci = init->is_pci;
1025
1026         dev_priv->fb_bpp = init->fb_bpp;
1027         dev_priv->front_offset = init->front_offset;
1028         dev_priv->front_pitch = init->front_pitch;
1029         dev_priv->back_offset = init->back_offset;
1030         dev_priv->back_pitch = init->back_pitch;
1031
1032         dev_priv->depth_bpp = init->depth_bpp;
1033         dev_priv->depth_offset = init->depth_offset;
1034         dev_priv->depth_pitch = init->depth_pitch;
1035
1036         dev_priv->front_offset_pitch = (((dev_priv->front_pitch / 8) << 22) |
1037                                         (dev_priv->front_offset >> 3));
1038         dev_priv->back_offset_pitch = (((dev_priv->back_pitch / 8) << 22) |
1039                                        (dev_priv->back_offset >> 3));
1040         dev_priv->depth_offset_pitch = (((dev_priv->depth_pitch / 8) << 22) |
1041                                         (dev_priv->depth_offset >> 3));
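        /*
         * The *_offset_pitch values appear to pack the pitch (in units of
         * 8 pixels) into the upper bits and the offset (in units of 8 bytes)
         * into the lower bits, matching what DST_OFF_PITCH-style registers
         * expect; this is an interpretation of the shifts above, not taken
         * from the hardware documentation.
         */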
1042
1043         dev_priv->usec_timeout = 1000000;
1044
1045         /* Set up the freelist, placeholder list and pending list */
1046         INIT_LIST_HEAD(&dev_priv->free_list);
1047         INIT_LIST_HEAD(&dev_priv->placeholders);
1048         INIT_LIST_HEAD(&dev_priv->pending);
1049
1050         dev_priv->sarea = drm_getsarea(dev);
1051         if (!dev_priv->sarea) {
1052                 DRM_ERROR("can not find sarea!\n");
1053                 dev->dev_private = (void *)dev_priv;
1054                 mach64_do_cleanup_dma(dev);
1055                 return -EINVAL;
1056         }
1057         dev_priv->fb = drm_core_findmap(dev, init->fb_offset);
1058         if (!dev_priv->fb) {
1059                 DRM_ERROR("can not find frame buffer map!\n");
1060                 dev->dev_private = (void *)dev_priv;
1061                 mach64_do_cleanup_dma(dev);
1062                 return -EINVAL;
1063         }
1064         dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
1065         if (!dev_priv->mmio) {
1066                 DRM_ERROR("can not find mmio map!\n");
1067                 dev->dev_private = (void *)dev_priv;
1068                 mach64_do_cleanup_dma(dev);
1069                 return -EINVAL;
1070         }
1071
1072         dev_priv->ring_map = drm_core_findmap(dev, init->ring_offset);
1073         if (!dev_priv->ring_map) {
1074                 DRM_ERROR("can not find ring map!\n");
1075                 dev->dev_private = (void *)dev_priv;
1076                 mach64_do_cleanup_dma(dev);
1077                 return -EINVAL;
1078         }
1079
1080         dev_priv->sarea_priv = (drm_mach64_sarea_t *)
1081             ((u8 *) dev_priv->sarea->virtual + init->sarea_priv_offset);
1082
1083         if (!dev_priv->is_pci) {
1084                 drm_core_ioremap(dev_priv->ring_map, dev);
1085                 if (!dev_priv->ring_map->virtual) {
1086                         DRM_ERROR("can not ioremap virtual address for"
1087                                   " descriptor ring\n");
1088                         dev->dev_private = (void *)dev_priv;
1089                         mach64_do_cleanup_dma(dev);
1090                         return -ENOMEM;
1091                 }
1092                 dev->agp_buffer_token = init->buffers_offset;
1093                 dev->agp_buffer_map =
1094                     drm_core_findmap(dev, init->buffers_offset);
1095                 if (!dev->agp_buffer_map) {
1096                         DRM_ERROR("can not find dma buffer map!\n");
1097                         dev->dev_private = (void *)dev_priv;
1098                         mach64_do_cleanup_dma(dev);
1099                         return -EINVAL;
1100                 }
1101                 /* there might be a nicer way to do this -
1102                    dev isn't passed all the way through the mach64 - DA */
1103                 dev_priv->dev_buffers = dev->agp_buffer_map;
1104
1105                 drm_core_ioremap(dev->agp_buffer_map, dev);
1106                 if (!dev->agp_buffer_map->virtual) {
1107                         DRM_ERROR("can not ioremap virtual address for"
1108                                   " dma buffer\n");
1109                         dev->dev_private = (void *)dev_priv;
1110                         mach64_do_cleanup_dma(dev);
1111                         return -ENOMEM;
1112                 }
1113                 dev_priv->agp_textures =
1114                     drm_core_findmap(dev, init->agp_textures_offset);
1115                 if (!dev_priv->agp_textures) {
1116                         DRM_ERROR("can not find agp texture region!\n");
1117                         dev->dev_private = (void *)dev_priv;
1118                         mach64_do_cleanup_dma(dev);
1119                         return -EINVAL;
1120                 }
1121         }
1122
1123         dev->dev_private = (void *)dev_priv;
1124
1125         dev_priv->driver_mode = init->dma_mode;
1126
1127         /* changing the FIFO size from the default causes problems with DMA */
1128         tmp = MACH64_READ(MACH64_GUI_CNTL);
1129         if ((tmp & MACH64_CMDFIFO_SIZE_MASK) != MACH64_CMDFIFO_SIZE_128) {
1130                 DRM_INFO("Setting FIFO size to 128 entries\n");
1131                 /* FIFO must be empty to change the FIFO depth */
1132                 if ((ret = mach64_do_wait_for_idle(dev_priv))) {
1133                         DRM_ERROR
1134                             ("wait for idle failed before changing FIFO depth!\n");
1135                         mach64_do_cleanup_dma(dev);
1136                         return ret;
1137                 }
1138                 MACH64_WRITE(MACH64_GUI_CNTL, ((tmp & ~MACH64_CMDFIFO_SIZE_MASK)
1139                                                | MACH64_CMDFIFO_SIZE_128));
1140                 /* need to read GUI_STAT for proper sync according to docs */
1141                 if ((ret = mach64_do_wait_for_idle(dev_priv))) {
1142                         DRM_ERROR
1143                             ("wait for idle failed when changing FIFO depth!\n");
1144                         mach64_do_cleanup_dma(dev);
1145                         return ret;
1146                 }
1147         }
1148
1149         dev_priv->ring.size = 0x4000;   /* 16KB */
1150         dev_priv->ring.start = dev_priv->ring_map->virtual;
1151         dev_priv->ring.start_addr = (u32) dev_priv->ring_map->offset;
1152
1153         memset(dev_priv->ring.start, 0, dev_priv->ring.size);
1154         DRM_INFO("descriptor ring: cpu addr %p, bus addr: 0x%08x\n",
1155                  dev_priv->ring.start, dev_priv->ring.start_addr);
1156
1157         ret = 0;
1158         if (dev_priv->driver_mode != MACH64_MODE_MMIO) {
1159
1160                 /* enable block 1 registers and bus mastering */
1161                 MACH64_WRITE(MACH64_BUS_CNTL, ((MACH64_READ(MACH64_BUS_CNTL)
1162                                                 | MACH64_BUS_EXT_REG_EN)
1163                                                & ~MACH64_BUS_MASTER_DIS));
1164
1165                 /* try a DMA GUI-mastering pass and fall back to MMIO if it fails */
1166                 DRM_DEBUG("Starting DMA test...\n");
1167                 if ((ret = mach64_bm_dma_test(dev))) {
1168                         dev_priv->driver_mode = MACH64_MODE_MMIO;
1169                 }
1170         }
1171
1172         switch (dev_priv->driver_mode) {
1173         case MACH64_MODE_MMIO:
1174                 MACH64_WRITE(MACH64_BUS_CNTL, (MACH64_READ(MACH64_BUS_CNTL)
1175                                                | MACH64_BUS_EXT_REG_EN
1176                                                | MACH64_BUS_MASTER_DIS));
1177                 if (init->dma_mode == MACH64_MODE_MMIO)
1178                         DRM_INFO("Forcing pseudo-DMA mode\n");
1179                 else
1180                         DRM_INFO
1181                             ("DMA test failed (ret=%d), using pseudo-DMA mode\n",
1182                              ret);
1183                 break;
1184         case MACH64_MODE_DMA_SYNC:
1185                 DRM_INFO("DMA test succeeded, using synchronous DMA mode\n");
1186                 break;
1187         case MACH64_MODE_DMA_ASYNC:
1188         default:
1189                 DRM_INFO("DMA test succeeded, using asynchronous DMA mode\n");
1190         }
1191
1192         dev_priv->ring_running = 0;
1193
1194         /* setup offsets for physical address of table start and end */
1195         dev_priv->ring.head_addr = dev_priv->ring.start_addr;
1196         dev_priv->ring.head = dev_priv->ring.tail = 0;
1197         dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
1198         dev_priv->ring.space = dev_priv->ring.size;
1199
1200         /* setup physical address and size of descriptor table */
1201         mach64_do_wait_for_fifo(dev_priv, 1);
1202         MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
1203                      (dev_priv->ring.
1204                       head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB));
1205
1206         /* init frame counter */
1207         dev_priv->sarea_priv->frames_queued = 0;
1208         for (i = 0; i < MACH64_MAX_QUEUED_FRAMES; i++) {
1209                 dev_priv->frame_ofs[i] = ~0;    /* All ones indicates placeholder */
1210         }
1211
1212         /* Allocate the DMA buffer freelist */
1213         if ((ret = mach64_init_freelist(dev))) {
1214                 DRM_ERROR("Freelist allocation failed\n");
1215                 mach64_do_cleanup_dma(dev);
1216                 return ret;
1217         }
1218
1219         return 0;
1220 }
1221
1222 /*******************************************************************/
1223 /** MMIO Pseudo-DMA (intended primarily for debugging, not performance)
1224  */
1225
1226 int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t *dev_priv)
1227 {
1228         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
1229         volatile u32 *ring_read;
1230         struct list_head *ptr;
1231         drm_mach64_freelist_t *entry;
1232         struct drm_buf *buf = NULL;
1233         u32 *buf_ptr;
1234         u32 used, reg, target;
1235         int fifo, count, found, ret, no_idle_wait;
1236
1237         fifo = count = reg = no_idle_wait = 0;
1238         target = MACH64_BM_ADDR;
1239
1240         if ((ret = mach64_do_wait_for_idle(dev_priv)) < 0) {
1241                 DRM_INFO("idle failed before pseudo-dma dispatch, resetting engine\n");
1242                 mach64_dump_engine_info(dev_priv);
1243                 mach64_do_engine_reset(dev_priv);
1244                 return ret;
1245         }
1246
1247         ring_read = (u32 *) ring->start;
1248
1249         while (ring->tail != ring->head) {
1250                 u32 buf_addr, new_target, offset;
1251                 u32 bytes, remaining, head, eol;
1252
1253                 head = ring->head;
1254
1255                 new_target =
1256                     le32_to_cpu(ring_read[head++]) - MACH64_APERTURE_OFFSET;
1257                 buf_addr = le32_to_cpu(ring_read[head++]);
1258                 eol = le32_to_cpu(ring_read[head]) & MACH64_DMA_EOL;
1259                 bytes = le32_to_cpu(ring_read[head++])
1260                     & ~(MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL);
1261                 head++;
1262                 head &= ring->tail_mask;
1263
1264                 /* can't wait for idle between a blit setup descriptor
1265                  * and a HOSTDATA descriptor or the engine will lock
1266                  */
1267                 if (new_target == MACH64_BM_HOSTDATA
1268                     && target == MACH64_BM_ADDR)
1269                         no_idle_wait = 1;
1270
1271                 target = new_target;
1272
1273                 found = 0;
1274                 offset = 0;
1275                 list_for_each(ptr, &dev_priv->pending) {
1276                         entry = list_entry(ptr, drm_mach64_freelist_t, list);
1277                         buf = entry->buf;
1278                         offset = buf_addr - GETBUFADDR(buf);
1279                         if (offset < MACH64_BUFFER_SIZE) {
1280                                 found = 1;
1281                                 break;
1282                         }
1283                 }
1284
1285                 if (!found || buf == NULL) {
1286                         DRM_ERROR
1287                             ("Couldn't find pending buffer: head: %u tail: %u buf_addr: 0x%08x %s\n",
1288                              head, ring->tail, buf_addr, (eol ? "eol" : ""));
1289                         mach64_dump_ring_info(dev_priv);
1290                         mach64_do_engine_reset(dev_priv);
1291                         return -EINVAL;
1292                 }
1293
1294                 /* Hand feed the buffer to the card via MMIO, waiting for the fifo
1295                  * every 16 writes
1296                  */
1297                 DRM_DEBUG("target: (0x%08x) %s\n", target,
1298                           (target ==
1299                            MACH64_BM_HOSTDATA ? "BM_HOSTDATA" : "BM_ADDR"));
1300                 DRM_DEBUG("offset: %u bytes: %u used: %u\n", offset, bytes,
1301                           buf->used);
1302
1303                 remaining = (buf->used - offset) >> 2;  /* dwords remaining in buffer */
1304                 used = bytes >> 2;      /* dwords in buffer for this descriptor */
1305                 buf_ptr = (u32 *) ((char *)GETBUFPTR(buf) + offset);
1306
1307                 while (used) {
1308
1309                         if (count == 0) {
1310                                 if (target == MACH64_BM_HOSTDATA) {
1311                                         reg = DMAREG(MACH64_HOST_DATA0);
1312                                         count =
1313                                             (remaining > 16) ? 16 : remaining;
1314                                         fifo = 0;
1315                                 } else {
1316                                         reg = le32_to_cpu(*buf_ptr++);
1317                                         used--;
1318                                         count = (reg >> 16) + 1;
1319                                 }
1320
1321                                 reg = reg & 0xffff;
1322                                 reg = MMSELECT(reg);
1323                         }
1324                         while (count && used) {
1325                                 if (!fifo) {
1326                                         if (no_idle_wait) {
1327                                                 if ((ret =
1328                                                      mach64_do_wait_for_fifo
1329                                                      (dev_priv, 16)) < 0) {
1330                                                         no_idle_wait = 0;
1331                                                         return ret;
1332                                                 }
1333                                         } else {
1334                                                 if ((ret =
1335                                                      mach64_do_wait_for_idle
1336                                                      (dev_priv)) < 0) {
1337                                                         return ret;
1338                                                 }
1339                                         }
1340                                         fifo = 16;
1341                                 }
1342                                 --fifo;
1343                                 MACH64_WRITE(reg, le32_to_cpu(*buf_ptr++));
1344                                 used--;
1345                                 remaining--;
1346
1347                                 reg += 4;
1348                                 count--;
1349                         }
1350                 }
1351                 ring->head = head;
1352                 ring->head_addr = ring->start_addr + (ring->head * sizeof(u32));
1353                 ring->space += (4 * sizeof(u32));
1354         }
1355
1356         if ((ret = mach64_do_wait_for_idle(dev_priv)) < 0) {
1357                 return ret;
1358         }
1359         MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
1360                      ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
1361
1362         DRM_DEBUG("completed\n");
1363         return 0;
1364 }
1365
1366 /*@}*/
1367
1368
1369 /*******************************************************************/
1370 /** \name DMA cleanup */
1371 /*@{*/
1372
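/**
 * Releases all DMA state for the device.
 *
 * Uninstalls the interrupt handler if one is present.  If a private data
 * structure exists, the descriptor ring and buffer aperture mappings are
 * released for non-PCI (AGP) cards, the buffer freelist is destroyed and the
 * private data structure is freed.
 *
 * \param dev DRM device.
 *
 * \returns always zero.
 */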
1373 int mach64_do_cleanup_dma(struct drm_device * dev)
1374 {
1375         DRM_DEBUG("\n");
1376
1377         /* Make sure interrupts are disabled here, because the uninstall ioctl
1378          * may not have been called from userspace; once dev_private has been
1379          * freed, it is too late to do so.
1380          */
1381         if (dev->irq)
1382                 drm_irq_uninstall(dev);
1383
1384         if (dev->dev_private) {
1385                 drm_mach64_private_t *dev_priv = dev->dev_private;
1386
1387                 if (!dev_priv->is_pci) {
1388                         if (dev_priv->ring_map)
1389                                 drm_core_ioremapfree(dev_priv->ring_map, dev);
1390
1391                         if (dev->agp_buffer_map) {
1392                                 drm_core_ioremapfree(dev->agp_buffer_map, dev);
1393                                 dev->agp_buffer_map = NULL;
1394                         }
1395                 }
1396
1397                 mach64_destroy_freelist(dev);
1398
1399                 drm_free(dev_priv, sizeof(drm_mach64_private_t),
1400                          DRM_MEM_DRIVER);
1401                 dev->dev_private = NULL;
1402         }
1403
1404         return 0;
1405 }
1406
1407 /*@}*/
1408
1409
1410 /*******************************************************************/
1411 /** \name IOCTL handlers */
1412 /*@{*/
1413
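/**
 * DMA initialization ioctl handler.
 *
 * Dispatches to mach64_do_dma_init() or mach64_do_cleanup_dma() according to
 * the \p func member of the drm_mach64_init_t argument.
 *
 * \returns zero on success, a negative error code from the called helper, or
 * -EINVAL for an unknown \p func value.
 */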
1414 int mach64_dma_init(struct drm_device *dev, void *data,
1415                     struct drm_file *file_priv)
1416 {
1417         drm_mach64_init_t *init = data;
1418
1419         DRM_DEBUG("\n");
1420
1421         LOCK_TEST_WITH_RETURN(dev, file_priv);
1422
1423         switch (init->func) {
1424         case DRM_MACH64_INIT_DMA:
1425                 return mach64_do_dma_init(dev, init);
1426         case DRM_MACH64_CLEANUP_DMA:
1427                 return mach64_do_cleanup_dma(dev);
1428         }
1429
1430         return -EINVAL;
1431 }
1432
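/**
 * Ioctl handler that waits for the engine and DMA to go idle via
 * mach64_do_dma_idle().
 */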
1433 int mach64_dma_idle(struct drm_device *dev, void *data,
1434                     struct drm_file *file_priv)
1435 {
1436         drm_mach64_private_t *dev_priv = dev->dev_private;
1437
1438         DRM_DEBUG("\n");
1439
1440         LOCK_TEST_WITH_RETURN(dev, file_priv);
1441
1442         return mach64_do_dma_idle(dev_priv);
1443 }
1444
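/**
 * Ioctl handler that flushes queued DMA via mach64_do_dma_flush().
 */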
1445 int mach64_dma_flush(struct drm_device *dev, void *data,
1446                      struct drm_file *file_priv)
1447 {
1448         drm_mach64_private_t *dev_priv = dev->dev_private;
1449
1450         DRM_DEBUG("\n");
1451
1452         LOCK_TEST_WITH_RETURN(dev, file_priv);
1453
1454         return mach64_do_dma_flush(dev_priv);
1455 }
1456
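/**
 * Ioctl handler that resets the drawing engine via mach64_do_engine_reset().
 */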
1457 int mach64_engine_reset(struct drm_device *dev, void *data,
1458                         struct drm_file *file_priv)
1459 {
1460         drm_mach64_private_t *dev_priv = dev->dev_private;
1461
1462         DRM_DEBUG("\n");
1463
1464         LOCK_TEST_WITH_RETURN(dev, file_priv);
1465
1466         return mach64_do_engine_reset(dev_priv);
1467 }
1468
1469 /*@}*/
1470
1471
1472 /*******************************************************************/
1473 /** \name Freelist management */
1474 /*@{*/
1475
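/**
 * Builds the buffer freelist.
 *
 * Allocates a freelist entry for each DMA buffer owned by the device and
 * appends it to drm_mach64_private::free_list.
 *
 * \returns zero on success, or -ENOMEM if an entry cannot be allocated.
 */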
1476 int mach64_init_freelist(struct drm_device * dev)
1477 {
1478         struct drm_device_dma *dma = dev->dma;
1479         drm_mach64_private_t *dev_priv = dev->dev_private;
1480         drm_mach64_freelist_t *entry;
1481         struct list_head *ptr;
1482         int i;
1483
1484         DRM_DEBUG("adding %d buffers to freelist\n", dma->buf_count);
1485
1486         for (i = 0; i < dma->buf_count; i++) {
1487                 if ((entry =
1488                      (drm_mach64_freelist_t *)
1489                      drm_alloc(sizeof(drm_mach64_freelist_t),
1490                                DRM_MEM_BUFLISTS)) == NULL)
1491                         return -ENOMEM;
1492                 memset(entry, 0, sizeof(drm_mach64_freelist_t));
1493                 entry->buf = dma->buflist[i];
1494                 ptr = &entry->list;
1495                 list_add_tail(ptr, &dev_priv->free_list);
1496         }
1497
1498         return 0;
1499 }
1500
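/**
 * Frees every freelist entry on the pending, placeholder and free lists.
 */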
1501 void mach64_destroy_freelist(struct drm_device * dev)
1502 {
1503         drm_mach64_private_t *dev_priv = dev->dev_private;
1504         drm_mach64_freelist_t *entry;
1505         struct list_head *ptr;
1506         struct list_head *tmp;
1507
1508         DRM_DEBUG("\n");
1509
1510         list_for_each_safe(ptr, tmp, &dev_priv->pending) {
1511                 list_del(ptr);
1512                 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1513                 drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
1514         }
1515         list_for_each_safe(ptr, tmp, &dev_priv->placeholders) {
1516                 list_del(ptr);
1517                 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1518                 drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
1519         }
1520
1521         list_for_each_safe(ptr, tmp, &dev_priv->free_list) {
1522                 list_del(ptr);
1523                 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1524                 drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
1525         }
1526 }
1527
1528 /* IMPORTANT: This function should only be called when the engine is idle or locked up,
1529  * as it assumes all buffers in the pending list have been completed by the hardware.
1530  */
1531 int mach64_do_release_used_buffers(drm_mach64_private_t *dev_priv)
1532 {
1533         struct list_head *ptr;
1534         struct list_head *tmp;
1535         drm_mach64_freelist_t *entry;
1536         int i;
1537
1538         if (list_empty(&dev_priv->pending))
1539                 return 0;
1540
1541         /* Move every pending buffer marked for discard back onto the freelist... */
1542         i = 0;
1543         list_for_each_safe(ptr, tmp, &dev_priv->pending) {
1544                 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1545                 if (entry->discard) {
1546                         entry->buf->pending = 0;
1547                         list_del(ptr);
1548                         list_add_tail(ptr, &dev_priv->free_list);
1549                         i++;
1550                 }
1551         }
1552
1553         DRM_DEBUG("released %d buffers from pending list\n", i);
1554
1555         return 0;
1556 }
1557
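/**
 * Reclaims at most one completed buffer from the pending list.
 *
 * If the ring is empty the engine is idle and the whole pending list is
 * released.  Otherwise, the first pending buffer that is marked for discard
 * and whose ring offset is no longer between head and tail is moved back to
 * the freelist.
 *
 * \returns zero if a buffer was reclaimed (or the pending list was released),
 * 1 if no completed buffer was found, or -1 on an inconsistent ring or
 * freelist state.
 */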
1558 static int mach64_do_reclaim_completed(drm_mach64_private_t *dev_priv)
1559 {
1560         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
1561         struct list_head *ptr;
1562         struct list_head *tmp;
1563         drm_mach64_freelist_t *entry;
1564         u32 head, tail, ofs;
1565
1566         mach64_ring_tick(dev_priv, ring);
1567         head = ring->head;
1568         tail = ring->tail;
1569
1570         if (head == tail) {
1571 #if MACH64_EXTRA_CHECKING
1572                 if (MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE) {
1573                         DRM_ERROR("Empty ring with non-idle engine!\n");
1574                         mach64_dump_ring_info(dev_priv);
1575                         return -1;
1576                 }
1577 #endif
1578                 /* last pass is complete, so release everything */
1579                 mach64_do_release_used_buffers(dev_priv);
1580                 DRM_DEBUG("idle engine, freed all buffers.\n");
1581                 if (list_empty(&dev_priv->free_list)) {
1582                         DRM_ERROR("Freelist empty with idle engine\n");
1583                         return -1;
1584                 }
1585                 return 0;
1586         }
1587         /* Look for a completed buffer and bail out of the loop
1588          * as soon as we find one -- don't waste time trying
1589          * to free extra bufs here, leave that to do_release_used_buffers
1590          */
1591         list_for_each_safe(ptr, tmp, &dev_priv->pending) {
1592                 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1593                 ofs = entry->ring_ofs;
1594                 if (entry->discard &&
1595                     ((head < tail && (ofs < head || ofs >= tail)) ||
1596                      (head > tail && (ofs < head && ofs >= tail)))) {
1597 #if MACH64_EXTRA_CHECKING
1598                         int i;
1599
1600                         for (i = head; i != tail;
1601                              i = (i + 4) & ring->tail_mask) {
1602                                 u32 o1 = le32_to_cpu(((u32 *)
1603                                                       ring->start)[i + 1]);
1604                                 u32 o2 = GETBUFADDR(entry->buf);
1605
1606                                 if (o1 == o2) {
1607                                         DRM_ERROR
1608                                             ("Attempting to free used buffer: "
1609                                              "i=%d  buf=0x%08x\n",
1610                                              i, o1);
1611                                         mach64_dump_ring_info(dev_priv);
1612                                         return -1;
1613                                 }
1614                         }
1615 #endif
1616                         /* found a processed buffer */
1617                         entry->buf->pending = 0;
1618                         list_del(ptr);
1619                         list_add_tail(ptr, &dev_priv->free_list);
1620                         DRM_DEBUG
1621                             ("freed processed buffer (head=%d tail=%d "
1622                              "buf ring ofs=%d).\n",
1623                              head, tail, ofs);
1624                         return 0;
1625                 }
1626         }
1627
1628         return 1;
1629 }
1630
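/**
 * Obtains a free DMA buffer.
 *
 * If the freelist is empty, completed buffers are reclaimed from the pending
 * list, polling for up to drm_mach64_private::usec_timeout microseconds.  The
 * entry backing the returned buffer is parked on the placeholder list until
 * the buffer is queued or handed back with mach64_freelist_put().
 *
 * A minimal usage sketch, assuming the caller already holds the hardware
 * lock:
 * \code
 *     struct drm_buf *buf = mach64_freelist_get(dev_priv);
 *     if (buf == NULL)
 *             return -EAGAIN;
 *     ... fill buf->used bytes and dispatch, or release it with
 *     mach64_freelist_put(dev_priv, buf) ...
 * \endcode
 *
 * \returns a buffer on success, or NULL on timeout or error.
 */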
1631 struct drm_buf *mach64_freelist_get(drm_mach64_private_t *dev_priv)
1632 {
1633         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
1634         drm_mach64_freelist_t *entry;
1635         struct list_head *ptr;
1636         int t;
1637
1638         if (list_empty(&dev_priv->free_list)) {
1639                 if (list_empty(&dev_priv->pending)) {
1640                         DRM_ERROR
1641                             ("Couldn't get buffer - pending and free lists empty\n");
1642                         t = 0;
1643                         list_for_each(ptr, &dev_priv->placeholders) {
1644                                 t++;
1645                         }
1646                         DRM_INFO("Placeholders: %d\n", t);
1647                         return NULL;
1648                 }
1649
1650                 for (t = 0; t < dev_priv->usec_timeout; t++) {
1651                         int ret;
1652
1653                         ret = mach64_do_reclaim_completed(dev_priv);
1654                         if (ret == 0)
1655                                 goto _freelist_entry_found;
1656                         if (ret < 0)
1657                                 return NULL;
1658
1659                         DRM_UDELAY(1);
1660                 }
1661                 mach64_dump_ring_info(dev_priv);
1662                 DRM_ERROR
1663                     ("timeout waiting for buffers: ring head_addr: 0x%08x head: %d tail: %d\n",
1664                      ring->head_addr, ring->head, ring->tail);
1665                 return NULL;
1666         }
1667
1668       _freelist_entry_found:
1669         ptr = dev_priv->free_list.next;
1670         list_del(ptr);
1671         entry = list_entry(ptr, drm_mach64_freelist_t, list);
1672         entry->buf->used = 0;
1673         list_add_tail(ptr, &dev_priv->placeholders);
1674         return entry->buf;
1675 }
1676
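/**
 * Returns an unused buffer to the freelist.
 *
 * Takes an entry from the placeholder list, attaches \p copy_buf to it and
 * moves the entry onto the freelist.
 *
 * \returns zero on success, or -EFAULT if the buffer is still on the pending
 * list (checked only with MACH64_EXTRA_CHECKING).
 */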
1677 int mach64_freelist_put(drm_mach64_private_t *dev_priv, struct drm_buf *copy_buf)
1678 {
1679         struct list_head *ptr;
1680         drm_mach64_freelist_t *entry;
1681
1682 #if MACH64_EXTRA_CHECKING
1683         list_for_each(ptr, &dev_priv->pending) {
1684                 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1685                 if (copy_buf == entry->buf) {
1686                         DRM_ERROR("Trying to release a pending buf\n");
1687                         return -EFAULT;
1688                 }
1689         }
1690 #endif
1691         ptr = dev_priv->placeholders.next;
1692         entry = list_entry(ptr, drm_mach64_freelist_t, list);
1693         copy_buf->pending = 0;
1694         copy_buf->used = 0;
1695         entry->buf = copy_buf;
1696         entry->discard = 1;
1697         list_del(ptr);
1698         list_add_tail(ptr, &dev_priv->free_list);
1699
1700         return 0;
1701 }
1702
1703 /*@}*/
1704
1705
1706 /*******************************************************************/
1707 /** \name DMA buffer request and submission IOCTL handler */
1708 /*@{*/
1709
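/**
 * Grants DMA buffers to a client.
 *
 * Pulls buffers from the freelist and copies their indices and sizes to the
 * user-space arrays in \p d until \p d->request_count buffers have been
 * granted.
 *
 * \returns zero on success, -EAGAIN (or -EFAULT with MACH64_EXTRA_CHECKING)
 * when no buffer is available, or -EFAULT if the copy to user space fails.
 */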
1710 static int mach64_dma_get_buffers(struct drm_device *dev,
1711                                   struct drm_file *file_priv,
1712                                   struct drm_dma * d)
1713 {
1714         int i;
1715         struct drm_buf *buf;
1716         drm_mach64_private_t *dev_priv = dev->dev_private;
1717
1718         for (i = d->granted_count; i < d->request_count; i++) {
1719                 buf = mach64_freelist_get(dev_priv);
1720 #if MACH64_EXTRA_CHECKING
1721                 if (!buf)
1722                         return -EFAULT;
1723 #else
1724                 if (!buf)
1725                         return -EAGAIN;
1726 #endif
1727
1728                 buf->file_priv = file_priv;
1729
1730                 if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
1731                                      sizeof(buf->idx)))
1732                         return -EFAULT;
1733                 if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
1734                                      sizeof(buf->total)))
1735                         return -EFAULT;
1736
1737                 d->granted_count++;
1738         }
1739         return 0;
1740 }
1741
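/**
 * DMA buffer request ioctl handler.
 *
 * Rejects requests that try to send buffers to the kernel and grants up to
 * \p d->request_count buffers via mach64_dma_get_buffers().
 */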
1742 int mach64_dma_buffers(struct drm_device *dev, void *data,
1743                        struct drm_file *file_priv)
1744 {
1745         struct drm_device_dma *dma = dev->dma;
1746         struct drm_dma *d = data;
1747         int ret = 0;
1748
1749         LOCK_TEST_WITH_RETURN(dev, file_priv);
1750
1751         /* Please don't send us buffers.
1752          */
1753         if (d->send_count != 0) {
1754                 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
1755                           DRM_CURRENTPID, d->send_count);
1756                 return -EINVAL;
1757         }
1758
1759         /* We'll send you buffers.
1760          */
1761         if (d->request_count < 0 || d->request_count > dma->buf_count) {
1762                 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
1763                           DRM_CURRENTPID, d->request_count, dma->buf_count);
1764                 return -EINVAL;
1765         }
1766
1767         d->granted_count = 0;
1768
1769         if (d->request_count) {
1770                 ret = mach64_dma_get_buffers(dev, file_priv, d);
1771         }
1772
1773         return ret;
1774 }
1775
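/**
 * Called on last close of the device; tears down all DMA state via
 * mach64_do_cleanup_dma().
 */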
1776 void mach64_driver_lastclose(struct drm_device * dev)
1777 {
1778         mach64_do_cleanup_dma(dev);
1779 }
1780
1781 /*@}*/