1 /* mach64_dma.c -- DMA support for mach64 (Rage Pro) driver -*- linux-c -*- */
2 /**
3  * \file mach64_dma.c
4  * DMA support for mach64 (Rage Pro) driver
5  *
6  * \author Gareth Hughes <gareth@valinux.com>
7  * \author Frank C. Earl <fearl@airmail.net>
8  * \author Leif Delgass <ldelgass@retinalburn.net>
9  * \author Jose Fonseca <j_r_fonseca@yahoo.co.uk>
10  */
11
12 /*
13  * Copyright 2000 Gareth Hughes
14  * Copyright 2002 Frank C. Earl
15  * Copyright 2002-2003 Leif Delgass
16  * All Rights Reserved.
17  *
18  * Permission is hereby granted, free of charge, to any person obtaining a
19  * copy of this software and associated documentation files (the "Software"),
20  * to deal in the Software without restriction, including without limitation
21  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
22  * and/or sell copies of the Software, and to permit persons to whom the
23  * Software is furnished to do so, subject to the following conditions:
24  *
25  * The above copyright notice and this permission notice (including the next
26  * paragraph) shall be included in all copies or substantial portions of the
27  * Software.
28  *
29  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
30  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
32  * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
33  * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
34  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
35  *
36  * $FreeBSD$
37  */
38
39 #include "dev/drm/drmP.h"
40 #include "dev/drm/drm.h"
41 #include "dev/drm/mach64_drm.h"
42 #include "dev/drm/mach64_drv.h"
43
44 /*******************************************************************/
45 /** \name Engine, FIFO control */
46 /*@{*/
47
48 /**
49  * Waits for free entries in the FIFO.
50  *
51  * \note Most writes to Mach64 registers are automatically routed through the
52  * command FIFO, which is 16 entries deep. Prior to writing to any draw engine
53  * register, one has to ensure that enough FIFO entries are available by calling
54  * this function.  Failure to do so may cause the engine to lock up.
55  *
56  * \param dev_priv pointer to device private data structure.
57  * \param entries number of free entries in the FIFO to wait for.
58  * 
59  * \returns zero on success, or -EBUSY if the timeout (specified by
60  * drm_mach64_private::usec_timeout) occurs.
61  */
62 int mach64_do_wait_for_fifo(drm_mach64_private_t * dev_priv, int entries)
63 {
64         int slots = 0, i;
65
66         for (i = 0; i < dev_priv->usec_timeout; i++) {
67                 slots = (MACH64_READ(MACH64_FIFO_STAT) & MACH64_FIFO_SLOT_MASK);
68                 if (slots <= (0x8000 >> entries))
69                         return 0;
70                 DRM_UDELAY(1);
71         }
72
73         DRM_INFO("%s failed! slots=%d entries=%d\n", __FUNCTION__, slots,
74                  entries);
75         return DRM_ERR(EBUSY);
76 }
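/*
 * A minimal usage sketch (illustration only, not code used by this driver):
 * a caller reserves enough FIFO slots before touching draw engine registers,
 * e.g. two slots for two register writes:
 *
 *	mach64_do_wait_for_fifo(dev_priv, 2);
 *	MACH64_WRITE(MACH64_DST_OFF_PITCH, dev_priv->front_offset_pitch);
 *	MACH64_WRITE(MACH64_DST_CNTL, 0);
 */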
77
78 /**
79  * Wait for the draw engine to be idle.
80  */
81 int mach64_do_wait_for_idle(drm_mach64_private_t * dev_priv)
82 {
83         int i, ret;
84
85         ret = mach64_do_wait_for_fifo(dev_priv, 16);
86         if (ret < 0)
87                 return ret;
88
89         for (i = 0; i < dev_priv->usec_timeout; i++) {
90                 if (!(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE)) {
91                         return 0;
92                 }
93                 DRM_UDELAY(1);
94         }
95
96         DRM_INFO("%s failed! GUI_STAT=0x%08x\n", __FUNCTION__,
97                  MACH64_READ(MACH64_GUI_STAT));
98         mach64_dump_ring_info(dev_priv);
99         return DRM_ERR(EBUSY);
100 }
101
102 /**
103  * Wait for free entries in the ring buffer.
104  *
105  * The Mach64 bus master can be configured to act as a virtual FIFO, using a
106  * circular buffer (commonly referred to as a "ring buffer" in other drivers) with
107  * pointers to engine commands. This allows the CPU to do other things while
108  * the graphics engine is busy, i.e., DMA mode.
109  *
110  * This function should be called before writing new entries to the ring
111  * buffer.
112  * 
113  * \param dev_priv pointer to device private data structure.
114  * \param n number of free entries in the ring buffer to wait for.
115  * 
116  * \returns zero on success, or -EBUSY if the timeout (specified by
117  * drm_mach64_private_t::usec_timeout) occurs.
118  *
119  * \sa mach64_dump_ring_info()
120  */
121 int mach64_wait_ring(drm_mach64_private_t * dev_priv, int n)
122 {
123         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
124         int i;
125
126         for (i = 0; i < dev_priv->usec_timeout; i++) {
127                 mach64_update_ring_snapshot(dev_priv);
128                 if (ring->space >= n) {
129                         if (i > 0) {
130                                 DRM_DEBUG("%s: %d usecs\n", __FUNCTION__, i);
131                         }
132                         return 0;
133                 }
134                 DRM_UDELAY(1);
135         }
136
137         /* FIXME: This is being ignored... */
138         DRM_ERROR("failed!\n");
139         mach64_dump_ring_info(dev_priv);
140         return DRM_ERR(EBUSY);
141 }
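/*
 * A minimal usage sketch (illustration only): ring->space is kept in bytes
 * in this driver, so a caller about to queue a single four-dword descriptor
 * would reserve room for it with
 *
 *	mach64_wait_ring(dev_priv, 4 * sizeof(u32));
 *
 * and only then write the descriptor and advance ring->tail.
 */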
142
143 /**
144  * Wait until all DMA requests have been processed.
145  *
146  * \sa mach64_wait_ring()
147  */
148 static int mach64_ring_idle(drm_mach64_private_t * dev_priv)
149 {
150         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
151         u32 head;
152         int i;
153
154         head = ring->head;
155         i = 0;
156         while (i < dev_priv->usec_timeout) {
157                 mach64_update_ring_snapshot(dev_priv);
158                 if (ring->head == ring->tail &&
159                     !(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE)) {
160                         if (i > 0) {
161                                 DRM_DEBUG("%s: %d usecs\n", __FUNCTION__, i);
162                         }
163                         return 0;
164                 }
165                 if (ring->head == head) {
166                         ++i;
167                 } else {
168                         head = ring->head;
169                         i = 0;
170                 }
171                 DRM_UDELAY(1);
172         }
173
174         DRM_INFO("%s failed! GUI_STAT=0x%08x\n", __FUNCTION__,
175                  MACH64_READ(MACH64_GUI_STAT));
176         mach64_dump_ring_info(dev_priv);
177         return DRM_ERR(EBUSY);
178 }
179
180 /**
181  * Reset the ring buffer descriptors.
182  *
183  * \sa mach64_do_engine_reset()
184  */
185 static void mach64_ring_reset(drm_mach64_private_t * dev_priv)
186 {
187         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
188
189         mach64_do_release_used_buffers(dev_priv);
190         ring->head_addr = ring->start_addr;
191         ring->head = ring->tail = 0;
192         ring->space = ring->size;
193
194         MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
195                      ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
196
197         dev_priv->ring_running = 0;
198 }
199
200 /**
201  * Ensure that all queued commands will be processed.
202  */
203 int mach64_do_dma_flush(drm_mach64_private_t * dev_priv)
204 {
205         /* FIXME: It's not necessary to wait for idle when flushing;
206          * we just need to ensure the ring will be completely processed
207          * in finite time without another ioctl.
208          */
209         return mach64_ring_idle(dev_priv);
210 }
211
212 /**
213  * Stop all DMA activity.
214  */
215 int mach64_do_dma_idle(drm_mach64_private_t * dev_priv)
216 {
217         int ret;
218
219         /* wait for completion */
220         if ((ret = mach64_ring_idle(dev_priv)) < 0) {
221                 DRM_ERROR("%s failed BM_GUI_TABLE=0x%08x tail: %u\n",
222                           __FUNCTION__, MACH64_READ(MACH64_BM_GUI_TABLE),
223                           dev_priv->ring.tail);
224                 return ret;
225         }
226
227         mach64_ring_stop(dev_priv);
228
229         /* clean up after pass */
230         mach64_do_release_used_buffers(dev_priv);
231         return 0;
232 }
233
234 /**
235  * Reset the engine.  This will stop the DMA if it is running.
236  */
237 int mach64_do_engine_reset(drm_mach64_private_t * dev_priv)
238 {
239         u32 tmp;
240
241         DRM_DEBUG("%s\n", __FUNCTION__);
242
243         /* Kill off any outstanding DMA transfers.
244          */
245         tmp = MACH64_READ(MACH64_BUS_CNTL);
246         MACH64_WRITE(MACH64_BUS_CNTL, tmp | MACH64_BUS_MASTER_DIS);
247
248         /* Reset the GUI engine (high to low transition).
249          */
250         tmp = MACH64_READ(MACH64_GEN_TEST_CNTL);
251         MACH64_WRITE(MACH64_GEN_TEST_CNTL, tmp & ~MACH64_GUI_ENGINE_ENABLE);
252         /* Enable the GUI engine
253          */
254         tmp = MACH64_READ(MACH64_GEN_TEST_CNTL);
255         MACH64_WRITE(MACH64_GEN_TEST_CNTL, tmp | MACH64_GUI_ENGINE_ENABLE);
256
257         /* ensure engine is not locked up by clearing any FIFO or HOST errors
258          */
259         tmp = MACH64_READ(MACH64_BUS_CNTL);
260         MACH64_WRITE(MACH64_BUS_CNTL, tmp | 0x00a00000);
261
262         /* Once GUI engine is restored, disable bus mastering */
263         MACH64_WRITE(MACH64_SRC_CNTL, 0);
264
265         /* Reset descriptor ring */
266         mach64_ring_reset(dev_priv);
267
268         return 0;
269 }
270
271 /*@}*/
272
273
274 /*******************************************************************/
275 /** \name Debugging output */
276 /*@{*/
277
278 /**
279  * Dump engine registers values.
280  */
281 void mach64_dump_engine_info(drm_mach64_private_t * dev_priv)
282 {
283         DRM_INFO("\n");
284         if (!dev_priv->is_pci) {
285                 DRM_INFO("           AGP_BASE = 0x%08x\n",
286                          MACH64_READ(MACH64_AGP_BASE));
287                 DRM_INFO("           AGP_CNTL = 0x%08x\n",
288                          MACH64_READ(MACH64_AGP_CNTL));
289         }
290         DRM_INFO("     ALPHA_TST_CNTL = 0x%08x\n",
291                  MACH64_READ(MACH64_ALPHA_TST_CNTL));
292         DRM_INFO("\n");
293         DRM_INFO("         BM_COMMAND = 0x%08x\n",
294                  MACH64_READ(MACH64_BM_COMMAND));
295         DRM_INFO("BM_FRAME_BUF_OFFSET = 0x%08x\n",
296                  MACH64_READ(MACH64_BM_FRAME_BUF_OFFSET));
297         DRM_INFO("       BM_GUI_TABLE = 0x%08x\n",
298                  MACH64_READ(MACH64_BM_GUI_TABLE));
299         DRM_INFO("          BM_STATUS = 0x%08x\n",
300                  MACH64_READ(MACH64_BM_STATUS));
301         DRM_INFO(" BM_SYSTEM_MEM_ADDR = 0x%08x\n",
302                  MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR));
303         DRM_INFO("    BM_SYSTEM_TABLE = 0x%08x\n",
304                  MACH64_READ(MACH64_BM_SYSTEM_TABLE));
305         DRM_INFO("           BUS_CNTL = 0x%08x\n",
306                  MACH64_READ(MACH64_BUS_CNTL));
307         DRM_INFO("\n");
308         /* DRM_INFO( "         CLOCK_CNTL = 0x%08x\n", MACH64_READ( MACH64_CLOCK_CNTL ) ); */
309         DRM_INFO("        CLR_CMP_CLR = 0x%08x\n",
310                  MACH64_READ(MACH64_CLR_CMP_CLR));
311         DRM_INFO("       CLR_CMP_CNTL = 0x%08x\n",
312                  MACH64_READ(MACH64_CLR_CMP_CNTL));
313         /* DRM_INFO( "        CLR_CMP_MSK = 0x%08x\n", MACH64_READ( MACH64_CLR_CMP_MSK ) ); */
314         DRM_INFO("     CONFIG_CHIP_ID = 0x%08x\n",
315                  MACH64_READ(MACH64_CONFIG_CHIP_ID));
316         DRM_INFO("        CONFIG_CNTL = 0x%08x\n",
317                  MACH64_READ(MACH64_CONFIG_CNTL));
318         DRM_INFO("       CONFIG_STAT0 = 0x%08x\n",
319                  MACH64_READ(MACH64_CONFIG_STAT0));
320         DRM_INFO("       CONFIG_STAT1 = 0x%08x\n",
321                  MACH64_READ(MACH64_CONFIG_STAT1));
322         DRM_INFO("       CONFIG_STAT2 = 0x%08x\n",
323                  MACH64_READ(MACH64_CONFIG_STAT2));
324         DRM_INFO("            CRC_SIG = 0x%08x\n", MACH64_READ(MACH64_CRC_SIG));
325         DRM_INFO("  CUSTOM_MACRO_CNTL = 0x%08x\n",
326                  MACH64_READ(MACH64_CUSTOM_MACRO_CNTL));
327         DRM_INFO("\n");
328         /* DRM_INFO( "           DAC_CNTL = 0x%08x\n", MACH64_READ( MACH64_DAC_CNTL ) ); */
329         /* DRM_INFO( "           DAC_REGS = 0x%08x\n", MACH64_READ( MACH64_DAC_REGS ) ); */
330         DRM_INFO("        DP_BKGD_CLR = 0x%08x\n",
331                  MACH64_READ(MACH64_DP_BKGD_CLR));
332         DRM_INFO("        DP_FRGD_CLR = 0x%08x\n",
333                  MACH64_READ(MACH64_DP_FRGD_CLR));
334         DRM_INFO("             DP_MIX = 0x%08x\n", MACH64_READ(MACH64_DP_MIX));
335         DRM_INFO("       DP_PIX_WIDTH = 0x%08x\n",
336                  MACH64_READ(MACH64_DP_PIX_WIDTH));
337         DRM_INFO("             DP_SRC = 0x%08x\n", MACH64_READ(MACH64_DP_SRC));
338         DRM_INFO("      DP_WRITE_MASK = 0x%08x\n",
339                  MACH64_READ(MACH64_DP_WRITE_MASK));
340         DRM_INFO("         DSP_CONFIG = 0x%08x\n",
341                  MACH64_READ(MACH64_DSP_CONFIG));
342         DRM_INFO("         DSP_ON_OFF = 0x%08x\n",
343                  MACH64_READ(MACH64_DSP_ON_OFF));
344         DRM_INFO("           DST_CNTL = 0x%08x\n",
345                  MACH64_READ(MACH64_DST_CNTL));
346         DRM_INFO("      DST_OFF_PITCH = 0x%08x\n",
347                  MACH64_READ(MACH64_DST_OFF_PITCH));
348         DRM_INFO("\n");
349         /* DRM_INFO( "       EXT_DAC_REGS = 0x%08x\n", MACH64_READ( MACH64_EXT_DAC_REGS ) ); */
350         DRM_INFO("       EXT_MEM_CNTL = 0x%08x\n",
351                  MACH64_READ(MACH64_EXT_MEM_CNTL));
352         DRM_INFO("\n");
353         DRM_INFO("          FIFO_STAT = 0x%08x\n",
354                  MACH64_READ(MACH64_FIFO_STAT));
355         DRM_INFO("\n");
356         DRM_INFO("      GEN_TEST_CNTL = 0x%08x\n",
357                  MACH64_READ(MACH64_GEN_TEST_CNTL));
358         /* DRM_INFO( "              GP_IO = 0x%08x\n", MACH64_READ( MACH64_GP_IO ) ); */
359         DRM_INFO("   GUI_CMDFIFO_DATA = 0x%08x\n",
360                  MACH64_READ(MACH64_GUI_CMDFIFO_DATA));
361         DRM_INFO("  GUI_CMDFIFO_DEBUG = 0x%08x\n",
362                  MACH64_READ(MACH64_GUI_CMDFIFO_DEBUG));
363         DRM_INFO("           GUI_CNTL = 0x%08x\n",
364                  MACH64_READ(MACH64_GUI_CNTL));
365         DRM_INFO("           GUI_STAT = 0x%08x\n",
366                  MACH64_READ(MACH64_GUI_STAT));
367         DRM_INFO("      GUI_TRAJ_CNTL = 0x%08x\n",
368                  MACH64_READ(MACH64_GUI_TRAJ_CNTL));
369         DRM_INFO("\n");
370         DRM_INFO("          HOST_CNTL = 0x%08x\n",
371                  MACH64_READ(MACH64_HOST_CNTL));
372         DRM_INFO("           HW_DEBUG = 0x%08x\n",
373                  MACH64_READ(MACH64_HW_DEBUG));
374         DRM_INFO("\n");
375         DRM_INFO("    MEM_ADDR_CONFIG = 0x%08x\n",
376                  MACH64_READ(MACH64_MEM_ADDR_CONFIG));
377         DRM_INFO("       MEM_BUF_CNTL = 0x%08x\n",
378                  MACH64_READ(MACH64_MEM_BUF_CNTL));
379         DRM_INFO("\n");
380         DRM_INFO("           PAT_REG0 = 0x%08x\n",
381                  MACH64_READ(MACH64_PAT_REG0));
382         DRM_INFO("           PAT_REG1 = 0x%08x\n",
383                  MACH64_READ(MACH64_PAT_REG1));
384         DRM_INFO("\n");
385         DRM_INFO("            SC_LEFT = 0x%08x\n", MACH64_READ(MACH64_SC_LEFT));
386         DRM_INFO("           SC_RIGHT = 0x%08x\n",
387                  MACH64_READ(MACH64_SC_RIGHT));
388         DRM_INFO("             SC_TOP = 0x%08x\n", MACH64_READ(MACH64_SC_TOP));
389         DRM_INFO("          SC_BOTTOM = 0x%08x\n",
390                  MACH64_READ(MACH64_SC_BOTTOM));
391         DRM_INFO("\n");
392         DRM_INFO("      SCALE_3D_CNTL = 0x%08x\n",
393                  MACH64_READ(MACH64_SCALE_3D_CNTL));
394         DRM_INFO("       SCRATCH_REG0 = 0x%08x\n",
395                  MACH64_READ(MACH64_SCRATCH_REG0));
396         DRM_INFO("       SCRATCH_REG1 = 0x%08x\n",
397                  MACH64_READ(MACH64_SCRATCH_REG1));
398         DRM_INFO("         SETUP_CNTL = 0x%08x\n",
399                  MACH64_READ(MACH64_SETUP_CNTL));
400         DRM_INFO("           SRC_CNTL = 0x%08x\n",
401                  MACH64_READ(MACH64_SRC_CNTL));
402         DRM_INFO("\n");
403         DRM_INFO("           TEX_CNTL = 0x%08x\n",
404                  MACH64_READ(MACH64_TEX_CNTL));
405         DRM_INFO("     TEX_SIZE_PITCH = 0x%08x\n",
406                  MACH64_READ(MACH64_TEX_SIZE_PITCH));
407         DRM_INFO("       TIMER_CONFIG = 0x%08x\n",
408                  MACH64_READ(MACH64_TIMER_CONFIG));
409         DRM_INFO("\n");
410         DRM_INFO("             Z_CNTL = 0x%08x\n", MACH64_READ(MACH64_Z_CNTL));
411         DRM_INFO("        Z_OFF_PITCH = 0x%08x\n",
412                  MACH64_READ(MACH64_Z_OFF_PITCH));
413         DRM_INFO("\n");
414 }
415
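/* Amount of context shown around interesting addresses (ring head/tail,
 * buffer start/end, current system memory address) by the dump routines
 * below.
 */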
416 #define MACH64_DUMP_CONTEXT     3
417
418 /**
419  * Used by mach64_dump_ring_info() to dump the contents of the current buffer
420  * pointed to by the ring head.
421  */
422 static void mach64_dump_buf_info(drm_mach64_private_t * dev_priv,
423                                  drm_buf_t * buf)
424 {
425         u32 addr = GETBUFADDR(buf);
426         u32 used = buf->used >> 2;
427         u32 sys_addr = MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR);
428         u32 *p = GETBUFPTR(buf);
429         int skipped = 0;
430
431         DRM_INFO("buffer contents:\n");
432
433         while (used) {
434                 u32 reg, count;
435
436                 reg = le32_to_cpu(*p++);
437                 if (addr <= GETBUFADDR(buf) + MACH64_DUMP_CONTEXT * 4 ||
438                     (addr >= sys_addr - MACH64_DUMP_CONTEXT * 4 &&
439                      addr <= sys_addr + MACH64_DUMP_CONTEXT * 4) ||
440                     addr >=
441                     GETBUFADDR(buf) + buf->used - MACH64_DUMP_CONTEXT * 4) {
442                         DRM_INFO("%08x:  0x%08x\n", addr, reg);
443                 }
444                 addr += 4;
445                 used--;
446
447                 count = (reg >> 16) + 1;
448                 reg = reg & 0xffff;
449                 reg = MMSELECT(reg);
450                 while (count && used) {
451                         if (addr <= GETBUFADDR(buf) + MACH64_DUMP_CONTEXT * 4 ||
452                             (addr >= sys_addr - MACH64_DUMP_CONTEXT * 4 &&
453                              addr <= sys_addr + MACH64_DUMP_CONTEXT * 4) ||
454                             addr >=
455                             GETBUFADDR(buf) + buf->used -
456                             MACH64_DUMP_CONTEXT * 4) {
457                                 DRM_INFO("%08x:    0x%04x = 0x%08x\n", addr,
458                                          reg, le32_to_cpu(*p));
459                                 skipped = 0;
460                         } else {
461                                 if (!skipped) {
462                                         DRM_INFO("  ...\n");
463                                         skipped = 1;
464                                 }
465                         }
466                         p++;
467                         addr += 4;
468                         used--;
469
470                         reg += 4;
471                         count--;
472                 }
473         }
474
475         DRM_INFO("\n");
476 }
477
478 /**
479  * Dump the ring state and contents, including the contents of the buffer being
480  * processed by the graphics engine.
481  */
482 void mach64_dump_ring_info(drm_mach64_private_t * dev_priv)
483 {
484         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
485         int i, skipped;
486
487         DRM_INFO("\n");
488
489         DRM_INFO("ring contents:\n");
490         DRM_INFO("  head_addr: 0x%08x head: %u tail: %u\n\n",
491                  ring->head_addr, ring->head, ring->tail);
492
493         skipped = 0;
494         for (i = 0; i < ring->size / sizeof(u32); i += 4) {
495                 if (i <= MACH64_DUMP_CONTEXT * 4 ||
496                     i >= ring->size / sizeof(u32) - MACH64_DUMP_CONTEXT * 4 ||
497                     (i >= ring->tail - MACH64_DUMP_CONTEXT * 4 &&
498                      i <= ring->tail + MACH64_DUMP_CONTEXT * 4) ||
499                     (i >= ring->head - MACH64_DUMP_CONTEXT * 4 &&
500                      i <= ring->head + MACH64_DUMP_CONTEXT * 4)) {
501                         DRM_INFO("  0x%08x:  0x%08x 0x%08x 0x%08x 0x%08x%s%s\n",
502                                  (u32)(ring->start_addr + i * sizeof(u32)),
503                                  le32_to_cpu(((u32 *) ring->start)[i + 0]),
504                                  le32_to_cpu(((u32 *) ring->start)[i + 1]),
505                                  le32_to_cpu(((u32 *) ring->start)[i + 2]),
506                                  le32_to_cpu(((u32 *) ring->start)[i + 3]),
507                                  i == ring->head ? " (head)" : "",
508                                  i == ring->tail ? " (tail)" : "");
509                         skipped = 0;
510                 } else {
511                         if (!skipped) {
512                                 DRM_INFO("  ...\n");
513                                 skipped = 1;
514                         }
515                 }
516         }
517
518         DRM_INFO("\n");
519
520         if (ring->head >= 0 && ring->head < ring->size / sizeof(u32)) {
521                 struct list_head *ptr;
522                 u32 addr = le32_to_cpu(((u32 *) ring->start)[ring->head + 1]);
523
524                 list_for_each(ptr, &dev_priv->pending) {
525                         drm_mach64_freelist_t *entry =
526                             list_entry(ptr, drm_mach64_freelist_t, list);
527                         drm_buf_t *buf = entry->buf;
528
529                         u32 buf_addr = GETBUFADDR(buf);
530
531                         if (buf_addr <= addr && addr < buf_addr + buf->used) {
532                                 mach64_dump_buf_info(dev_priv, buf);
533                         }
534                 }
535         }
536
537         DRM_INFO("\n");
538         DRM_INFO("       BM_GUI_TABLE = 0x%08x\n",
539                  MACH64_READ(MACH64_BM_GUI_TABLE));
540         DRM_INFO("\n");
541         DRM_INFO("BM_FRAME_BUF_OFFSET = 0x%08x\n",
542                  MACH64_READ(MACH64_BM_FRAME_BUF_OFFSET));
543         DRM_INFO(" BM_SYSTEM_MEM_ADDR = 0x%08x\n",
544                  MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR));
545         DRM_INFO("         BM_COMMAND = 0x%08x\n",
546                  MACH64_READ(MACH64_BM_COMMAND));
547         DRM_INFO("\n");
548         DRM_INFO("          BM_STATUS = 0x%08x\n",
549                  MACH64_READ(MACH64_BM_STATUS));
550         DRM_INFO("           BUS_CNTL = 0x%08x\n",
551                  MACH64_READ(MACH64_BUS_CNTL));
552         DRM_INFO("          FIFO_STAT = 0x%08x\n",
553                  MACH64_READ(MACH64_FIFO_STAT));
554         DRM_INFO("           GUI_STAT = 0x%08x\n",
555                  MACH64_READ(MACH64_GUI_STAT));
556         DRM_INFO("           SRC_CNTL = 0x%08x\n",
557                  MACH64_READ(MACH64_SRC_CNTL));
558 }
559
560 /*@}*/
561
562
563 /*******************************************************************/
564 /** \name DMA test and initialization */
565 /*@{*/
566
567 /**
568  * Perform a simple DMA operation using the pattern registers to test whether
569  * DMA works.
570  *
571  * \return zero if successful.
572  *
573  * \note This function was the testbed for many experiments regarding Mach64
574  * DMA operation. It is left here since it is so tricky to get DMA operating
575  * properly on some architectures and hardware.
576  */
577 static int mach64_bm_dma_test(drm_device_t * dev)
578 {
579         drm_mach64_private_t *dev_priv = dev->dev_private;
580         dma_addr_t data_handle;
581         void *cpu_addr_data;
582         u32 data_addr;
583         u32 *table, *data;
584         u32 expected[2];
585         u32 src_cntl, pat_reg0, pat_reg1;
586         int i, count, failed;
587
588         DRM_DEBUG("%s\n", __FUNCTION__);
589
590         table = (u32 *) dev_priv->ring.start;
591
592         /* FIXME: get a dma buffer from the freelist here */
593         DRM_DEBUG("Allocating data memory ...\n");
594         cpu_addr_data =
595             drm_pci_alloc(dev, 0x1000, 0x1000, 0xfffffffful, &data_handle);
596         if (!cpu_addr_data || !data_handle) {
597                 DRM_INFO("data-memory allocation failed!\n");
598                 return DRM_ERR(ENOMEM);
599         } else {
600                 data = (u32 *) cpu_addr_data;
601                 data_addr = (u32) data_handle;
602         }
603
604         /* Save the X server's value for SRC_CNTL and restore it
605          * in case our test fails.  This prevents the X server
606          * from disabling its cache for this register.
607          */
608         src_cntl = MACH64_READ(MACH64_SRC_CNTL);
609         pat_reg0 = MACH64_READ(MACH64_PAT_REG0);
610         pat_reg1 = MACH64_READ(MACH64_PAT_REG1);
611
612         mach64_do_wait_for_fifo(dev_priv, 3);
613
614         MACH64_WRITE(MACH64_SRC_CNTL, 0);
615         MACH64_WRITE(MACH64_PAT_REG0, 0x11111111);
616         MACH64_WRITE(MACH64_PAT_REG1, 0x11111111);
617
618         mach64_do_wait_for_idle(dev_priv);
619
620         for (i = 0; i < 2; i++) {
621                 u32 reg;
622                 reg = MACH64_READ((MACH64_PAT_REG0 + i * 4));
623                 DRM_DEBUG("(Before DMA Transfer) reg %d = 0x%08x\n", i, reg);
624                 if (reg != 0x11111111) {
625                         DRM_INFO("Error initializing test registers\n");
626                         DRM_INFO("resetting engine ...\n");
627                         mach64_do_engine_reset(dev_priv);
628                         DRM_INFO("freeing data buffer memory.\n");
629                         drm_pci_free(dev, 0x1000, cpu_addr_data, data_handle);
630                         return DRM_ERR(EIO);
631                 }
632         }
633
634         /* fill up a buffer with sets of 2 consecutive writes starting with PAT_REG0 */
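        /* Each write group in a GUI-master buffer starts with a header dword
         * holding the DMAREG() register offset in the low 16 bits and the
         * number of data dwords minus one in the upper bits, followed by the
         * data itself; here DMAREG(MACH64_PAT_REG0) | (1 << 16) announces two
         * consecutive register writes starting at PAT_REG0.
         */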
635         count = 0;
636
637         data[count++] = cpu_to_le32(DMAREG(MACH64_PAT_REG0) | (1 << 16));
638         data[count++] = expected[0] = 0x22222222;
639         data[count++] = expected[1] = 0xaaaaaaaa;
640
641         while (count < 1020) {
642                 data[count++] =
643                     cpu_to_le32(DMAREG(MACH64_PAT_REG0) | (1 << 16));
644                 data[count++] = 0x22222222;
645                 data[count++] = 0xaaaaaaaa;
646         }
647         data[count++] = cpu_to_le32(DMAREG(MACH64_SRC_CNTL) | (0 << 16));
648         data[count++] = 0;
649
650         DRM_DEBUG("Preparing table ...\n");
651         table[MACH64_DMA_FRAME_BUF_OFFSET] = cpu_to_le32(MACH64_BM_ADDR +
652                                                          MACH64_APERTURE_OFFSET);
653         table[MACH64_DMA_SYS_MEM_ADDR] = cpu_to_le32(data_addr);
654         table[MACH64_DMA_COMMAND] = cpu_to_le32(count * sizeof(u32)
655                                                 | MACH64_DMA_HOLD_OFFSET
656                                                 | MACH64_DMA_EOL);
657         table[MACH64_DMA_RESERVED] = 0;
658
659         DRM_DEBUG("table[0] = 0x%08x\n", table[0]);
660         DRM_DEBUG("table[1] = 0x%08x\n", table[1]);
661         DRM_DEBUG("table[2] = 0x%08x\n", table[2]);
662         DRM_DEBUG("table[3] = 0x%08x\n", table[3]);
663
664         for (i = 0; i < 6; i++) {
665                 DRM_DEBUG(" data[%d] = 0x%08x\n", i, data[i]);
666         }
667         DRM_DEBUG(" ...\n");
668         for (i = count - 5; i < count; i++) {
669                 DRM_DEBUG(" data[%d] = 0x%08x\n", i, data[i]);
670         }
671
672         DRM_MEMORYBARRIER();
673
674         DRM_DEBUG("waiting for idle...\n");
675         if ((i = mach64_do_wait_for_idle(dev_priv))) {
676                 DRM_INFO("mach64_do_wait_for_idle failed (result=%d)\n", i);
677                 DRM_INFO("resetting engine ...\n");
678                 mach64_do_engine_reset(dev_priv);
679                 mach64_do_wait_for_fifo(dev_priv, 3);
680                 MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);
681                 MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
682                 MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
683                 DRM_INFO("freeing data buffer memory.\n");
684                 drm_pci_free(dev, 0x1000, cpu_addr_data, data_handle);
685                 return i;
686         }
687         DRM_DEBUG("waiting for idle...done\n");
688
689         DRM_DEBUG("BUS_CNTL = 0x%08x\n", MACH64_READ(MACH64_BUS_CNTL));
690         DRM_DEBUG("SRC_CNTL = 0x%08x\n", MACH64_READ(MACH64_SRC_CNTL));
691         DRM_DEBUG("\n");
692         DRM_DEBUG("data bus addr = 0x%08x\n", data_addr);
693         DRM_DEBUG("table bus addr = 0x%08x\n", dev_priv->ring.start_addr);
694
695         DRM_DEBUG("starting DMA transfer...\n");
696         MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
697                      dev_priv->ring.start_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
698
699         MACH64_WRITE(MACH64_SRC_CNTL,
700                      MACH64_SRC_BM_ENABLE | MACH64_SRC_BM_SYNC |
701                      MACH64_SRC_BM_OP_SYSTEM_TO_REG);
702
703         /* Kick off the transfer */
704         DRM_DEBUG("starting DMA transfer... done.\n");
705         MACH64_WRITE(MACH64_DST_HEIGHT_WIDTH, 0);
706
707         DRM_DEBUG("waiting for idle...\n");
708
709         if ((i = mach64_do_wait_for_idle(dev_priv))) {
710                 /* engine locked up, dump register state and reset */
711                 DRM_INFO("mach64_do_wait_for_idle failed (result=%d)\n", i);
712                 mach64_dump_engine_info(dev_priv);
713                 DRM_INFO("resetting engine ...\n");
714                 mach64_do_engine_reset(dev_priv);
715                 mach64_do_wait_for_fifo(dev_priv, 3);
716                 MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);
717                 MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
718                 MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
719                 DRM_INFO("freeing data buffer memory.\n");
720                 drm_pci_free(dev, 0x1000, cpu_addr_data, data_handle);
721                 return i;
722         }
723
724         DRM_DEBUG("waiting for idle...done\n");
725
726         /* restore SRC_CNTL */
727         mach64_do_wait_for_fifo(dev_priv, 1);
728         MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);
729
730         failed = 0;
731
732         /* Check register values to see if the GUI master operation succeeded */
733         for (i = 0; i < 2; i++) {
734                 u32 reg;
735                 reg = MACH64_READ((MACH64_PAT_REG0 + i * 4));
736                 DRM_DEBUG("(After DMA Transfer) reg %d = 0x%08x\n", i, reg);
737                 if (reg != expected[i]) {
738                         failed = -1;
739                 }
740         }
741
742         /* restore pattern registers */
743         mach64_do_wait_for_fifo(dev_priv, 2);
744         MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
745         MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
746
747         DRM_DEBUG("freeing data buffer memory.\n");
748         drm_pci_free(dev, 0x1000, cpu_addr_data, data_handle);
749         DRM_DEBUG("returning ...\n");
750
751         return failed;
752 }
753
754 /**
755  * Called during the DMA initialization ioctl to initialize all the necessary
756  * software and hardware state for DMA operation.
757  */
758 static int mach64_do_dma_init(drm_device_t * dev, drm_mach64_init_t * init)
759 {
760         drm_mach64_private_t *dev_priv;
761         u32 tmp;
762         int i, ret;
763
764         DRM_DEBUG("%s\n", __FUNCTION__);
765
766         dev_priv = drm_alloc(sizeof(drm_mach64_private_t), DRM_MEM_DRIVER);
767         if (dev_priv == NULL)
768                 return DRM_ERR(ENOMEM);
769
770         memset(dev_priv, 0, sizeof(drm_mach64_private_t));
771
772         dev_priv->is_pci = init->is_pci;
773
774         dev_priv->fb_bpp = init->fb_bpp;
775         dev_priv->front_offset = init->front_offset;
776         dev_priv->front_pitch = init->front_pitch;
777         dev_priv->back_offset = init->back_offset;
778         dev_priv->back_pitch = init->back_pitch;
779
780         dev_priv->depth_bpp = init->depth_bpp;
781         dev_priv->depth_offset = init->depth_offset;
782         dev_priv->depth_pitch = init->depth_pitch;
783
784         dev_priv->front_offset_pitch = (((dev_priv->front_pitch / 8) << 22) |
785                                         (dev_priv->front_offset >> 3));
786         dev_priv->back_offset_pitch = (((dev_priv->back_pitch / 8) << 22) |
787                                        (dev_priv->back_offset >> 3));
788         dev_priv->depth_offset_pitch = (((dev_priv->depth_pitch / 8) << 22) |
789                                         (dev_priv->depth_offset >> 3));
790
791         dev_priv->usec_timeout = 1000000;
792
793         /* Set up the freelist, placeholder list and pending list */
794         INIT_LIST_HEAD(&dev_priv->free_list);
795         INIT_LIST_HEAD(&dev_priv->placeholders);
796         INIT_LIST_HEAD(&dev_priv->pending);
797
798         DRM_GETSAREA();
799
800         if (!dev_priv->sarea) {
801                 DRM_ERROR("can not find sarea!\n");
802                 dev->dev_private = (void *)dev_priv;
803                 mach64_do_cleanup_dma(dev);
804                 return DRM_ERR(EINVAL);
805         }
806         dev_priv->fb = drm_core_findmap(dev, init->fb_offset);
807         if (!dev_priv->fb) {
808                 DRM_ERROR("can not find frame buffer map!\n");
809                 dev->dev_private = (void *)dev_priv;
810                 mach64_do_cleanup_dma(dev);
811                 return DRM_ERR(EINVAL);
812         }
813         dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
814         if (!dev_priv->mmio) {
815                 DRM_ERROR("can not find mmio map!\n");
816                 dev->dev_private = (void *)dev_priv;
817                 mach64_do_cleanup_dma(dev);
818                 return DRM_ERR(EINVAL);
819         }
820
821         dev_priv->sarea_priv = (drm_mach64_sarea_t *)
822             ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
823
824         if (!dev_priv->is_pci) {
825                 dev_priv->ring_map = drm_core_findmap(dev, init->ring_offset);
826                 if (!dev_priv->ring_map) {
827                         DRM_ERROR("can not find ring map!\n");
828                         dev->dev_private = (void *)dev_priv;
829                         mach64_do_cleanup_dma(dev);
830                         return DRM_ERR(EINVAL);
831                 }
832                 drm_core_ioremap(dev_priv->ring_map, dev);
833                 if (!dev_priv->ring_map->handle) {
834                         DRM_ERROR("can not ioremap virtual address for"
835                                   " descriptor ring\n");
836                         dev->dev_private = (void *)dev_priv;
837                         mach64_do_cleanup_dma(dev);
838                         return DRM_ERR(ENOMEM);
839                 }
840                 dev->agp_buffer_map =
841                     drm_core_findmap(dev, init->buffers_offset);
842                 if (!dev->agp_buffer_map) {
843                         DRM_ERROR("can not find dma buffer map!\n");
844                         dev->dev_private = (void *)dev_priv;
845                         mach64_do_cleanup_dma(dev);
846                         return DRM_ERR(EINVAL);
847                 }
848                 /* there might be a nicer way to do this -
849                    dev isn't passed all the way through the mach64 - DA */
850                 dev_priv->dev_buffers = dev->agp_buffer_map;
851
852                 drm_core_ioremap(dev->agp_buffer_map, dev);
853                 if (!dev->agp_buffer_map->handle) {
854                         DRM_ERROR("can not ioremap virtual address for"
855                                   " dma buffer\n");
856                         dev->dev_private = (void *)dev_priv;
857                         mach64_do_cleanup_dma(dev);
858                         return DRM_ERR(ENOMEM);
859                 }
860                 dev_priv->agp_textures =
861                     drm_core_findmap(dev, init->agp_textures_offset);
862                 if (!dev_priv->agp_textures) {
863                         DRM_ERROR("can not find agp texture region!\n");
864                         dev->dev_private = (void *)dev_priv;
865                         mach64_do_cleanup_dma(dev);
866                         return DRM_ERR(EINVAL);
867                 }
868         }
869
870         dev->dev_private = (void *)dev_priv;
871
872         dev_priv->driver_mode = init->dma_mode;
873
874         /* changing the FIFO size from the default causes problems with DMA */
875         tmp = MACH64_READ(MACH64_GUI_CNTL);
876         if ((tmp & MACH64_CMDFIFO_SIZE_MASK) != MACH64_CMDFIFO_SIZE_128) {
877                 DRM_INFO("Setting FIFO size to 128 entries\n");
878                 /* FIFO must be empty to change the FIFO depth */
879                 if ((ret = mach64_do_wait_for_idle(dev_priv))) {
880                         DRM_ERROR
881                             ("wait for idle failed before changing FIFO depth!\n");
882                         mach64_do_cleanup_dma(dev);
883                         return ret;
884                 }
885                 MACH64_WRITE(MACH64_GUI_CNTL, ((tmp & ~MACH64_CMDFIFO_SIZE_MASK)
886                                                | MACH64_CMDFIFO_SIZE_128));
887                 /* need to read GUI_STAT for proper sync according to docs */
888                 if ((ret = mach64_do_wait_for_idle(dev_priv))) {
889                         DRM_ERROR
890                             ("wait for idle failed when changing FIFO depth!\n");
891                         mach64_do_cleanup_dma(dev);
892                         return ret;
893                 }
894         }
895
896         /* allocate descriptor memory from pci pool */
897         DRM_DEBUG("Allocating dma descriptor ring\n");
898         dev_priv->ring.size = 0x4000;   /* 16KB */
899
900         if (dev_priv->is_pci) {
901                 dev_priv->ring.start = drm_pci_alloc(dev, dev_priv->ring.size,
902                                                      dev_priv->ring.size,
903                                                      0xfffffffful,
904                                                      &dev_priv->ring.handle);
905
906                 if (!dev_priv->ring.start || !dev_priv->ring.handle) {
907                         DRM_ERROR("Allocating dma descriptor ring failed\n");
908                         return DRM_ERR(ENOMEM);
909                 } else {
910                         dev_priv->ring.start_addr = (u32) dev_priv->ring.handle;
911                 }
912         } else {
913                 dev_priv->ring.start = dev_priv->ring_map->handle;
914                 dev_priv->ring.start_addr = (u32) dev_priv->ring_map->offset;
915         }
916
917         memset(dev_priv->ring.start, 0, dev_priv->ring.size);
918         DRM_INFO("descriptor ring: cpu addr %p, bus addr: 0x%08x\n",
919                  dev_priv->ring.start, dev_priv->ring.start_addr);
920
921         ret = 0;
922         if (dev_priv->driver_mode != MACH64_MODE_MMIO) {
923
924                 /* enable block 1 registers and bus mastering */
925                 MACH64_WRITE(MACH64_BUS_CNTL, ((MACH64_READ(MACH64_BUS_CNTL)
926                                                 | MACH64_BUS_EXT_REG_EN)
927                                                & ~MACH64_BUS_MASTER_DIS));
928
929                 /* try a DMA GUI-mastering pass and fall back to MMIO if it fails */
930                 DRM_DEBUG("Starting DMA test...\n");
931                 if ((ret = mach64_bm_dma_test(dev))) {
932                         dev_priv->driver_mode = MACH64_MODE_MMIO;
933                 }
934         }
935
936         switch (dev_priv->driver_mode) {
937         case MACH64_MODE_MMIO:
938                 MACH64_WRITE(MACH64_BUS_CNTL, (MACH64_READ(MACH64_BUS_CNTL)
939                                                | MACH64_BUS_EXT_REG_EN
940                                                | MACH64_BUS_MASTER_DIS));
941                 if (init->dma_mode == MACH64_MODE_MMIO)
942                         DRM_INFO("Forcing pseudo-DMA mode\n");
943                 else
944                         DRM_INFO
945                             ("DMA test failed (ret=%d), using pseudo-DMA mode\n",
946                              ret);
947                 break;
948         case MACH64_MODE_DMA_SYNC:
949                 DRM_INFO("DMA test succeeded, using synchronous DMA mode\n");
950                 break;
951         case MACH64_MODE_DMA_ASYNC:
952         default:
953                 DRM_INFO("DMA test succeeded, using asynchronous DMA mode\n");
954         }
955
956         dev_priv->ring_running = 0;
957
958         /* setup offsets for physical address of table start and end */
959         dev_priv->ring.head_addr = dev_priv->ring.start_addr;
960         dev_priv->ring.head = dev_priv->ring.tail = 0;
961         dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
962         dev_priv->ring.space = dev_priv->ring.size;
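        /* Ring bookkeeping conventions used throughout this file: head and
         * tail are dword indices into the descriptor table, head_addr is the
         * bus address of the head entry, and space is tracked in bytes.
         */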
963
964         /* setup physical address and size of descriptor table */
965         mach64_do_wait_for_fifo(dev_priv, 1);
966         MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
967                      (dev_priv->ring.
968                       head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB));
969
970         /* init frame counter */
971         dev_priv->sarea_priv->frames_queued = 0;
972         for (i = 0; i < MACH64_MAX_QUEUED_FRAMES; i++) {
973                 dev_priv->frame_ofs[i] = ~0;    /* All ones indicates placeholder */
974         }
975
976         /* Allocate the DMA buffer freelist */
977         if ((ret = mach64_init_freelist(dev))) {
978                 DRM_ERROR("Freelist allocation failed\n");
979                 mach64_do_cleanup_dma(dev);
980                 return ret;
981         }
982
983         return 0;
984 }
985
986 /*******************************************************************/
987 /** MMIO Pseudo-DMA (intended primarily for debugging, not performance)
988  */
989
990 int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t * dev_priv)
991 {
992         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
993         volatile u32 *ring_read;
994         struct list_head *ptr;
995         drm_mach64_freelist_t *entry;
996         drm_buf_t *buf = NULL;
997         u32 *buf_ptr;
998         u32 used, reg, target;
999         int fifo, count, found, ret, no_idle_wait;
1000
1001         fifo = count = reg = no_idle_wait = 0;
1002         target = MACH64_BM_ADDR;
1003
1004         if ((ret = mach64_do_wait_for_idle(dev_priv)) < 0) {
1005                 DRM_INFO
1006                     ("%s: idle failed before pseudo-dma dispatch, resetting engine\n",
1007                      __FUNCTION__);
1008                 mach64_dump_engine_info(dev_priv);
1009                 mach64_do_engine_reset(dev_priv);
1010                 return ret;
1011         }
1012
1013         ring_read = (u32 *) ring->start;
1014
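        /* Walk the descriptor table between head and tail.  Each descriptor
         * is four dwords: the target aperture address, the bus address of
         * the buffer data, the byte count with the HOLD_OFFSET/EOL flags
         * or'd in, and a reserved dword.
         */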
1015         while (ring->tail != ring->head) {
1016                 u32 buf_addr, new_target, offset;
1017                 u32 bytes, remaining, head, eol;
1018
1019                 head = ring->head;
1020
1021                 new_target =
1022                     le32_to_cpu(ring_read[head++]) - MACH64_APERTURE_OFFSET;
1023                 buf_addr = le32_to_cpu(ring_read[head++]);
1024                 eol = le32_to_cpu(ring_read[head]) & MACH64_DMA_EOL;
1025                 bytes = le32_to_cpu(ring_read[head++])
1026                     & ~(MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL);
1027                 head++;
1028                 head &= ring->tail_mask;
1029
1030                 /* can't wait for idle between a blit setup descriptor
1031                  * and a HOSTDATA descriptor or the engine will lock
1032                  */
1033                 if (new_target == MACH64_BM_HOSTDATA
1034                     && target == MACH64_BM_ADDR)
1035                         no_idle_wait = 1;
1036
1037                 target = new_target;
1038
1039                 found = 0;
1040                 offset = 0;
1041                 list_for_each(ptr, &dev_priv->pending) {
1042                         entry = list_entry(ptr, drm_mach64_freelist_t, list);
1043                         buf = entry->buf;
1044                         offset = buf_addr - GETBUFADDR(buf);
1045                         if (offset >= 0 && offset < MACH64_BUFFER_SIZE) {
1046                                 found = 1;
1047                                 break;
1048                         }
1049                 }
1050
1051                 if (!found || buf == NULL) {
1052                         DRM_ERROR
1053                             ("Couldn't find pending buffer: head: %u tail: %u buf_addr: 0x%08x %s\n",
1054                              head, ring->tail, buf_addr, (eol ? "eol" : ""));
1055                         mach64_dump_ring_info(dev_priv);
1056                         mach64_do_engine_reset(dev_priv);
1057                         return DRM_ERR(EINVAL);
1058                 }
1059
1060                 /* Hand feed the buffer to the card via MMIO, waiting for the fifo
1061                  * every 16 writes
1062                  */
1063                 DRM_DEBUG("target: (0x%08x) %s\n", target,
1064                           (target ==
1065                            MACH64_BM_HOSTDATA ? "BM_HOSTDATA" : "BM_ADDR"));
1066                 DRM_DEBUG("offset: %u bytes: %u used: %u\n", offset, bytes,
1067                           buf->used);
1068
1069                 remaining = (buf->used - offset) >> 2;  /* dwords remaining in buffer */
1070                 used = bytes >> 2;      /* dwords in buffer for this descriptor */
1071                 buf_ptr = (u32 *) ((char *)GETBUFPTR(buf) + offset);
1072
1073                 while (used) {
1074
1075                         if (count == 0) {
1076                                 if (target == MACH64_BM_HOSTDATA) {
1077                                         reg = DMAREG(MACH64_HOST_DATA0);
1078                                         count =
1079                                             (remaining > 16) ? 16 : remaining;
1080                                         fifo = 0;
1081                                 } else {
1082                                         reg = le32_to_cpu(*buf_ptr++);
1083                                         used--;
1084                                         count = (reg >> 16) + 1;
1085                                 }
1086
1087                                 reg = reg & 0xffff;
1088                                 reg = MMSELECT(reg);
1089                         }
1090                         while (count && used) {
1091                                 if (!fifo) {
1092                                         if (no_idle_wait) {
1093                                                 if ((ret =
1094                                                      mach64_do_wait_for_fifo
1095                                                      (dev_priv, 16)) < 0) {
1096                                                         no_idle_wait = 0;
1097                                                         return ret;
1098                                                 }
1099                                         } else {
1100                                                 if ((ret =
1101                                                      mach64_do_wait_for_idle
1102                                                      (dev_priv)) < 0) {
1103                                                         return ret;
1104                                                 }
1105                                         }
1106                                         fifo = 16;
1107                                 }
1108                                 --fifo;
1109                                 MACH64_WRITE(reg, le32_to_cpu(*buf_ptr++));
1110                                 used--;
1111                                 remaining--;
1112
1113                                 reg += 4;
1114                                 count--;
1115                         }
1116                 }
1117                 ring->head = head;
1118                 ring->head_addr = ring->start_addr + (ring->head * sizeof(u32));
1119                 ring->space += (4 * sizeof(u32));
1120         }
1121
1122         if ((ret = mach64_do_wait_for_idle(dev_priv)) < 0) {
1123                 return ret;
1124         }
1125         MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
1126                      ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
1127
1128         DRM_DEBUG("%s completed\n", __FUNCTION__);
1129         return 0;
1130 }
1131
1132 /*@}*/
1133
1134
1135 /*******************************************************************/
1136 /** \name DMA cleanup */
1137 /*@{*/
1138
1139 int mach64_do_cleanup_dma(drm_device_t * dev)
1140 {
1141         DRM_DEBUG("%s\n", __FUNCTION__);
1142
1143         /* Make sure interrupts are disabled here because the uninstall ioctl
1144          * may not have been called from userspace and after dev_private
1145          * is freed, it's too late.
1146          */
1147         if (dev->irq)
1148                 drm_irq_uninstall(dev);
1149
1150         if (dev->dev_private) {
1151                 drm_mach64_private_t *dev_priv = dev->dev_private;
1152
1153                 if (dev_priv->is_pci) {
1154                         if ((dev_priv->ring.start != NULL)
1155                             && dev_priv->ring.handle) {
1156                                 drm_pci_free(dev, dev_priv->ring.size,
1157                                              dev_priv->ring.start,
1158                                              dev_priv->ring.handle);
1159                         }
1160                 } else {
1161                         if (dev_priv->ring_map)
1162                                 drm_core_ioremapfree(dev_priv->ring_map, dev);
1163                 }
1164
1165                 if (dev->agp_buffer_map) {
1166                         drm_core_ioremapfree(dev->agp_buffer_map, dev);
1167                         dev->agp_buffer_map = NULL;
1168                 }
1169
1170                 mach64_destroy_freelist(dev);
1171
1172                 drm_free(dev_priv, sizeof(drm_mach64_private_t),
1173                          DRM_MEM_DRIVER);
1174                 dev->dev_private = NULL;
1175         }
1176
1177         return 0;
1178 }
1179
1180 /*@}*/
1181
1182
1183 /*******************************************************************/
1184 /** \name IOCTL handlers */
1185 /*@{*/
1186
1187 int mach64_dma_init(DRM_IOCTL_ARGS)
1188 {
1189         DRM_DEVICE;
1190         drm_mach64_init_t init;
1191
1192         DRM_DEBUG("%s\n", __FUNCTION__);
1193
1194         LOCK_TEST_WITH_RETURN(dev, filp);
1195
1196         DRM_COPY_FROM_USER_IOCTL(init, (drm_mach64_init_t *) data,
1197                                  sizeof(init));
1198
1199         switch (init.func) {
1200         case DRM_MACH64_INIT_DMA:
1201                 return mach64_do_dma_init(dev, &init);
1202         case DRM_MACH64_CLEANUP_DMA:
1203                 return mach64_do_cleanup_dma(dev);
1204         }
1205
1206         return DRM_ERR(EINVAL);
1207 }
1208
1209 int mach64_dma_idle(DRM_IOCTL_ARGS)
1210 {
1211         DRM_DEVICE;
1212         drm_mach64_private_t *dev_priv = dev->dev_private;
1213
1214         DRM_DEBUG("%s\n", __FUNCTION__);
1215
1216         LOCK_TEST_WITH_RETURN(dev, filp);
1217
1218         return mach64_do_dma_idle(dev_priv);
1219 }
1220
1221 int mach64_dma_flush(DRM_IOCTL_ARGS)
1222 {
1223         DRM_DEVICE;
1224         drm_mach64_private_t *dev_priv = dev->dev_private;
1225
1226         DRM_DEBUG("%s\n", __FUNCTION__);
1227
1228         LOCK_TEST_WITH_RETURN(dev, filp);
1229
1230         return mach64_do_dma_flush(dev_priv);
1231 }
1232
1233 int mach64_engine_reset(DRM_IOCTL_ARGS)
1234 {
1235         DRM_DEVICE;
1236         drm_mach64_private_t *dev_priv = dev->dev_private;
1237
1238         DRM_DEBUG("%s\n", __FUNCTION__);
1239
1240         LOCK_TEST_WITH_RETURN(dev, filp);
1241
1242         return mach64_do_engine_reset(dev_priv);
1243 }
1244
1245 /*@}*/
1246
1247
1248 /*******************************************************************/
1249 /** \name Freelist management */
1250 /*@{*/
1251
1252 int mach64_init_freelist(drm_device_t * dev)
1253 {
1254         drm_device_dma_t *dma = dev->dma;
1255         drm_mach64_private_t *dev_priv = dev->dev_private;
1256         drm_mach64_freelist_t *entry;
1257         struct list_head *ptr;
1258         int i;
1259
1260         DRM_DEBUG("%s: adding %d buffers to freelist\n", __FUNCTION__,
1261                   dma->buf_count);
1262
1263         for (i = 0; i < dma->buf_count; i++) {
1264                 if ((entry =
1265                      (drm_mach64_freelist_t *)
1266                      drm_alloc(sizeof(drm_mach64_freelist_t),
1267                                DRM_MEM_BUFLISTS)) == NULL)
1268                         return DRM_ERR(ENOMEM);
1269                 memset(entry, 0, sizeof(drm_mach64_freelist_t));
1270                 entry->buf = dma->buflist[i];
1271                 ptr = &entry->list;
1272                 list_add_tail(ptr, &dev_priv->free_list);
1273         }
1274
1275         return 0;
1276 }
1277
1278 void mach64_destroy_freelist(drm_device_t * dev)
1279 {
1280         drm_mach64_private_t *dev_priv = dev->dev_private;
1281         drm_mach64_freelist_t *entry;
1282         struct list_head *ptr;
1283         struct list_head *tmp;
1284
1285         DRM_DEBUG("%s\n", __FUNCTION__);
1286
1287         list_for_each_safe(ptr, tmp, &dev_priv->pending) {
1288                 list_del(ptr);
1289                 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1290                 drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
1291         }
1292         list_for_each_safe(ptr, tmp, &dev_priv->placeholders) {
1293                 list_del(ptr);
1294                 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1295                 drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
1296         }
1297
1298         list_for_each_safe(ptr, tmp, &dev_priv->free_list) {
1299                 list_del(ptr);
1300                 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1301                 drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
1302         }
1303 }
1304
1305 /* IMPORTANT: This function should only be called when the engine is idle or locked up,
1306  * as it assumes all buffers in the pending list have been completed by the hardware.
1307  */
1308 int mach64_do_release_used_buffers(drm_mach64_private_t * dev_priv)
1309 {
1310         struct list_head *ptr;
1311         struct list_head *tmp;
1312         drm_mach64_freelist_t *entry;
1313         int i;
1314
1315         if (list_empty(&dev_priv->pending))
1316                 return 0;
1317
1318         /* Iterate the pending list and move all discarded buffers into the freelist... */
1319         i = 0;
1320         list_for_each_safe(ptr, tmp, &dev_priv->pending) {
1321                 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1322                 if (entry->discard) {
1323                         entry->buf->pending = 0;
1324                         list_del(ptr);
1325                         list_add_tail(ptr, &dev_priv->free_list);
1326                         i++;
1327                 }
1328         }
1329
1330         DRM_DEBUG("%s: released %d buffers from pending list\n", __FUNCTION__,
1331                   i);
1332
1333         return 0;
1334 }
1335
drm_buf_t *mach64_freelist_get(drm_mach64_private_t * dev_priv)
{
        drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
        drm_mach64_freelist_t *entry;
        struct list_head *ptr;
        struct list_head *tmp;
        int t;

        if (list_empty(&dev_priv->free_list)) {
                u32 head, tail, ofs;

                if (list_empty(&dev_priv->pending)) {
                        DRM_ERROR
                            ("Couldn't get buffer - pending and free lists empty\n");
                        t = 0;
                        list_for_each(ptr, &dev_priv->placeholders) {
                                t++;
                        }
                        DRM_INFO("Placeholders: %d\n", t);
                        return NULL;
                }

                tail = ring->tail;
                for (t = 0; t < dev_priv->usec_timeout; t++) {
                        mach64_ring_tick(dev_priv, ring);
                        head = ring->head;

                        if (head == tail) {
#if MACH64_EXTRA_CHECKING
                                if (MACH64_READ(MACH64_GUI_STAT) &
                                    MACH64_GUI_ACTIVE) {
                                        DRM_ERROR
                                            ("Empty ring with non-idle engine!\n");
                                        mach64_dump_ring_info(dev_priv);
                                        return NULL;
                                }
#endif
                                /* last pass is complete, so release everything */
                                mach64_do_release_used_buffers(dev_priv);
                                DRM_DEBUG
                                    ("%s: idle engine, freed all buffers.\n",
                                     __FUNCTION__);
                                if (list_empty(&dev_priv->free_list)) {
                                        DRM_ERROR
                                            ("Freelist empty with idle engine\n");
                                        return NULL;
                                }
                                goto _freelist_entry_found;
                        }
                        /* Look for a completed buffer and bail out of the
                         * loop as soon as we find one -- don't waste time
                         * trying to free extra buffers here, leave that to
                         * mach64_do_release_used_buffers().
                         */
                        list_for_each_safe(ptr, tmp, &dev_priv->pending) {
                                entry =
                                    list_entry(ptr, drm_mach64_freelist_t,
                                               list);
                                ofs = entry->ring_ofs;
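                                /* The buffer is complete once its descriptor
                                 * offset no longer lies in the busy span of
                                 * the ring between head and tail (taking
                                 * wrap-around into account).
                                 */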
                                if (entry->discard &&
                                    ((head < tail
                                      && (ofs < head || ofs >= tail))
                                     || (head > tail
                                         && (ofs < head && ofs >= tail)))) {
#if MACH64_EXTRA_CHECKING
                                        int i;

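                                        /* Paranoia: walk the busy portion of
                                         * the ring and make sure no
                                         * descriptor still references this
                                         * buffer before handing it back out.
                                         */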
                                        for (i = head; i != tail;
                                             i = (i + 4) & ring->tail_mask) {
                                                u32 o1 =
                                                    le32_to_cpu(((u32 *) ring->
                                                                 start)[i + 1]);
                                                u32 o2 = GETBUFADDR(entry->buf);

                                                if (o1 == o2) {
                                                        DRM_ERROR
                                                            ("Attempting to free used buffer: "
                                                             "i=%d  buf=0x%08x\n",
                                                             i, o1);
                                                        mach64_dump_ring_info
                                                            (dev_priv);
                                                        return NULL;
                                                }
                                        }
#endif
                                        /* found a processed buffer */
                                        entry->buf->pending = 0;
                                        list_del(ptr);
                                        entry->buf->used = 0;
                                        list_add_tail(ptr,
                                                      &dev_priv->placeholders);
                                        DRM_DEBUG
                                            ("%s: freed processed buffer (head=%d tail=%d "
                                             "buf ring ofs=%d).\n",
                                             __FUNCTION__, head, tail, ofs);
                                        return entry->buf;
                                }
                        }
                        DRM_UDELAY(1);
                }
                mach64_dump_ring_info(dev_priv);
                DRM_ERROR
                    ("timeout waiting for buffers: ring head_addr: 0x%08x head: %d tail: %d\n",
                     ring->head_addr, ring->head, ring->tail);
                return NULL;
        }

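        /* A free buffer is available: detach the first entry, reset the
         * buffer's used count, and park the list node on the placeholders
         * list before handing the buffer back.
         */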
      _freelist_entry_found:
        ptr = dev_priv->free_list.next;
        list_del(ptr);
        entry = list_entry(ptr, drm_mach64_freelist_t, list);
        entry->buf->used = 0;
        list_add_tail(ptr, &dev_priv->placeholders);
        return entry->buf;
}

/*@}*/


/*******************************************************************/
/** \name DMA buffer request and submission IOCTL handler */
/*@{*/

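/* Pull d->request_count buffers off the freelist on behalf of the caller,
 * copying each buffer's index and total size back to the user-supplied
 * request_indices/request_sizes arrays and bumping granted_count as we go.
 */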
static int mach64_dma_get_buffers(DRMFILE filp, drm_device_t * dev,
                                  drm_dma_t * d)
{
        int i;
        drm_buf_t *buf;
        drm_mach64_private_t *dev_priv = dev->dev_private;

        for (i = d->granted_count; i < d->request_count; i++) {
                buf = mach64_freelist_get(dev_priv);
#if MACH64_EXTRA_CHECKING
                if (!buf)
                        return DRM_ERR(EFAULT);
#else
                if (!buf)
                        return DRM_ERR(EAGAIN);
#endif

                buf->filp = filp;

                if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
                                     sizeof(buf->idx)))
                        return DRM_ERR(EFAULT);
                if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
                                     sizeof(buf->total)))
                        return DRM_ERR(EFAULT);

                d->granted_count++;
        }
        return 0;
}

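/* drmDMA ioctl handler.  The mach64 driver only hands buffers out to clients
 * (send_count must be zero); the updated request, including granted_count,
 * is copied back to userspace.
 */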
int mach64_dma_buffers(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        drm_dma_t d;
        int ret = 0;

        LOCK_TEST_WITH_RETURN(dev, filp);

        DRM_COPY_FROM_USER_IOCTL(d, (drm_dma_t *) data, sizeof(d));

        /* Please don't send us buffers - this ioctl only hands them out.
         */
        if (d.send_count != 0) {
                DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
                          DRM_CURRENTPID, d.send_count);
                return DRM_ERR(EINVAL);
        }

        /* We'll send you buffers, but only as many as actually exist.  Bail
         * out here rather than passing a bogus count to the freelist code.
         */
        if (d.request_count < 0 || d.request_count > dma->buf_count) {
                DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
                          DRM_CURRENTPID, d.request_count, dma->buf_count);
                return DRM_ERR(EINVAL);
        }

        d.granted_count = 0;

        if (d.request_count) {
                ret = mach64_dma_get_buffers(filp, dev, &d);
        }

        DRM_COPY_TO_USER_IOCTL((drm_dma_t *) data, d, sizeof(d));

        return ret;
}

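/* Driver hook invoked from the DRM takedown path; releasing the DMA state is
 * all the cleanup mach64 needs to do here.
 */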
void mach64_driver_pretakedown(drm_device_t * dev)
{
        mach64_do_cleanup_dma(dev);
}

/*@}*/