1 /* savage_state.c -- State and drawing support for Savage
3 * Copyright 2004 Felix Kuehling
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
22 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 #include "savage_drm.h"
30 #include "savage_drv.h"
/*
 * Emit the scissor (clip) rectangle to Savage3D-class hardware.
 *
 * Folds the clip rect *pbox into the client-supplied SCSTART/SCEND
 * values and, only if the merged values differ from what was last
 * written to the hardware, emits a 3D-engine wait followed by a
 * two-register SCSTART/SCEND update, then caches the new values.
 *
 * NOTE(review): this listing has gaps (original lines elided between
 * the numbered lines); braces and the DMA begin/commit calls are not
 * visible here.
 */
32 void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
33 drm_clip_rect_t *pbox)
35 uint32_t scstart = dev_priv->state.s3d.new_scstart;
36 uint32_t scend = dev_priv->state.s3d.new_scend;
/* S3D scissor packing: x in bits 0-10, y in bits 16-26 of each reg.
 * x1/y1 are inclusive start coordinates. */
37 scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) |
38 ((uint32_t)pbox->x1 & 0x000007ff) |
39 (((uint32_t)pbox->y1 << 16) & 0x07ff0000);
/* x2/y2 are exclusive, hence the -1 to get the inclusive end. */
40 scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) |
41 (((uint32_t)pbox->x2-1) & 0x000007ff) |
42 ((((uint32_t)pbox->y2-1) << 16) & 0x07ff0000);
/* Skip the register write when the scissor is unchanged. */
43 if (scstart != dev_priv->state.s3d.scstart ||
44 scend != dev_priv->state.s3d.scend) {
/* Wait for the 3D engine before changing scissor state. */
47 DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D);
48 DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
51 dev_priv->state.s3d.scstart = scstart;
52 dev_priv->state.s3d.scend = scend;
/* Remember that a wait was emitted (see the Savage3D workaround
 * in the drawing-dispatch functions below). */
53 dev_priv->waiting = 1;
/*
 * Emit the scissor (clip) rectangle to Savage4-class hardware.
 *
 * Same scheme as the S3D variant above, but the scissor lives in
 * DRAWCTRL0/DRAWCTRL1 and y is packed at bit 12 instead of 16.
 *
 * NOTE(review): this listing has gaps (original lines elided); the
 * DMA begin/commit calls and braces are not visible here.
 */
58 void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
59 drm_clip_rect_t *pbox)
61 uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0;
62 uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1;
/* S4 scissor packing: x in bits 0-10, y in bits 12-23. */
63 drawctrl0 = (drawctrl0 & ~SAVAGE_SCISSOR_MASK_S4) |
64 ((uint32_t)pbox->x1 & 0x000007ff) |
65 (((uint32_t)pbox->y1 << 12) & 0x00fff000);
/* x2/y2 are exclusive, hence the -1. */
66 drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) |
67 (((uint32_t)pbox->x2-1) & 0x000007ff) |
68 ((((uint32_t)pbox->y2-1) << 12) & 0x00fff000);
/* Only touch the hardware when the merged values changed. */
69 if (drawctrl0 != dev_priv->state.s4.drawctrl0 ||
70 drawctrl1 != dev_priv->state.s4.drawctrl1) {
/* Wait for the 3D engine before changing scissor state. */
73 DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D);
74 DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2);
77 dev_priv->state.s4.drawctrl0 = drawctrl0;
78 dev_priv->state.s4.drawctrl1 = drawctrl1;
/* Record the emitted wait for the Savage3D-series workaround. */
79 dev_priv->waiting = 1;
/*
 * Validate a client-supplied texture address for texture unit `unit`.
 *
 * Bit 0 of the address selects AGP (1) vs. local video memory (0);
 * bits 1-2 are reserved and must encode the value checked below.
 * Local addresses must fall inside [texture_offset,
 * texture_offset+texture_size); AGP addresses must fall inside the
 * AGP texture region, which must exist at all.
 *
 * Returns 0 on success, DRM_ERR(EINVAL) on any violation.
 *
 * NOTE(review): listing has gaps — the `addr` parameter declaration
 * and several closing braces are elided between the numbered lines.
 */
84 static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,
87 if ((addr & 6) != 2) { /* reserved bits */
88 DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr);
89 return DRM_ERR(EINVAL);
91 if (!(addr & 1)) { /* local */
/* Local (video memory) address: must be in the texture heap. */
93 if (addr < dev_priv->texture_offset ||
94 addr >= dev_priv->texture_offset+dev_priv->texture_size) {
95 DRM_ERROR("bad texAddr%d %08x (local addr out of range)\n",
97 return DRM_ERR(EINVAL);
/* AGP address: the AGP texture map must have been set up. */
100 if (!dev_priv->agp_textures) {
101 DRM_ERROR("bad texAddr%d %08x (AGP not available)\n",
103 return DRM_ERR(EINVAL);
106 if (addr < dev_priv->agp_textures->offset ||
107 addr >= (dev_priv->agp_textures->offset +
108 dev_priv->agp_textures->size)) {
109 DRM_ERROR("bad texAddr%d %08x (AGP addr out of range)\n",
111 return DRM_ERR(EINVAL);
/*
 * Helpers for the savage_verify_state_* functions below.  Both expect
 * `start`, `count`, `regs` and `dev_priv` to be in scope at the
 * expansion site.
 *
 * SAVE_STATE(reg, where): if register `reg` lies inside the range
 * [start, start+count) being uploaded, fetch its new value from the
 * (already access-verified) user buffer into dev_priv->state.where.
 *
 * SAVE_STATE_MASK(reg, where, mask): same, but only the bits selected
 * by `mask` are taken from user space; the remaining bits of
 * dev_priv->state.where are preserved (used to keep the kernel-owned
 * scissor bits out of client control).
 *
 * NOTE(review): the original listing had "&regs" mis-encoded as the
 * registered-trademark character ("(R)s"); fixed here.  The trailing
 * `} } while (0)` of SAVE_STATE_MASK was elided in the listing and is
 * restored so the macro is well-formed.
 */
#define SAVE_STATE(reg,where) \
	if(start <= reg && start+count > reg) \
		DRM_GET_USER_UNCHECKED(dev_priv->state.where, &regs[reg-start])
#define SAVE_STATE_MASK(reg,where,mask) do { \
	if(start <= reg && start+count > reg) { \
		uint32_t tmp; \
		DRM_GET_USER_UNCHECKED(tmp, &regs[reg-start]); \
		dev_priv->state.where = (tmp & (mask)) | \
			(dev_priv->state.where & ~(mask)); \
	} \
} while (0)
/*
 * Verify a Savage3D-class state-register upload from user space.
 *
 * Rejects register ranges outside the writable window
 * [TEXPALADDR_S3D, DESTTEXRWWATERMARK_S3D].  Shadows the scissor
 * registers (masked, so clients cannot set the kernel-owned scissor
 * bits) and, if the texture registers were touched, re-validates the
 * texture address via savage_verify_texaddr().
 *
 * Returns 0 on success or a DRM_ERR code.
 *
 * NOTE(review): listing has gaps — opening brace and final return are
 * elided between the numbered lines.
 */
128 static int savage_verify_state_s3d(drm_savage_private_t *dev_priv,
129 unsigned int start, unsigned int count,
130 const uint32_t __user *regs)
132 if (start < SAVAGE_TEXPALADDR_S3D ||
133 start+count-1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
134 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
135 start, start+count-1);
136 return DRM_ERR(EINVAL);
/* Shadow the scissor regs; the scissor bits themselves stay under
 * kernel control (mask is the complement of the scissor mask). */
139 SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart,
140 ~SAVAGE_SCISSOR_MASK_S3D);
141 SAVE_STATE_MASK(SAVAGE_SCEND_S3D, s3d.new_scend,
142 ~SAVAGE_SCISSOR_MASK_S3D);
144 /* if any texture regs were changed ... */
145 if (start <= SAVAGE_TEXCTRL_S3D &&
146 start+count > SAVAGE_TEXPALADDR_S3D) {
147 /* ... check texture state */
148 SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl);
149 SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
/* Only validate the address if texturing is actually enabled. */
150 if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK)
151 return savage_verify_texaddr(
152 dev_priv, 0, dev_priv->state.s3d.texaddr);
/*
 * Verify a Savage4-class state-register upload from user space.
 *
 * Counterpart of savage_verify_state_s3d() for the S4 register
 * layout: the writable window is [DRAWLOCALCTRL_S4,
 * TEXBLENDCOLOR_S4], the scissor lives in DRAWCTRL0/1, and there are
 * two texture units whose addresses are validated independently
 * (errors from both checks are OR-ed into `ret`).
 *
 * Returns 0 on success or a DRM_ERR code.
 *
 * NOTE(review): listing has gaps — `ret` declaration, opening brace
 * and final return are elided between the numbered lines.
 */
158 static int savage_verify_state_s4(drm_savage_private_t *dev_priv,
159 unsigned int start, unsigned int count,
160 const uint32_t __user *regs)
164 if (start < SAVAGE_DRAWLOCALCTRL_S4 ||
165 start+count-1 > SAVAGE_TEXBLENDCOLOR_S4) {
166 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
167 start, start+count-1);
168 return DRM_ERR(EINVAL);
/* Shadow DRAWCTRL0/1; keep the kernel-owned scissor bits masked. */
171 SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0,
172 ~SAVAGE_SCISSOR_MASK_S4);
173 SAVE_STATE_MASK(SAVAGE_DRAWCTRL1_S4, s4.new_drawctrl1,
174 ~SAVAGE_SCISSOR_MASK_S4);
176 /* if any texture regs were changed ... */
177 if (start <= SAVAGE_TEXDESCR_S4 &&
178 start+count > SAVAGE_TEXPALADDR_S4) {
179 /* ... check texture state */
180 SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr);
181 SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0);
182 SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1);
/* Validate each texture unit's address only if that unit is
 * enabled in the texture descriptor. */
183 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK)
184 ret |= savage_verify_texaddr(
185 dev_priv, 0, dev_priv->state.s4.texaddr0);
186 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK)
187 ret |= savage_verify_texaddr(
188 dev_priv, 1, dev_priv->state.s4.texaddr1);
194 #undef SAVE_STATE_MASK
/*
 * Dispatch a SAVAGE_CMD_STATE command: verify a client state-register
 * upload and emit it to the hardware via faked DMA.
 *
 * The scissor registers are carved out of the uploaded range (they
 * are emitted per clip rect in savage_dispatch_draw instead), which
 * may split the upload into two runs: [start, start+count) and a
 * second run of `count2` registers after the scissor regs.  Each run
 * is emitted in chunks of at most 255 registers per SET_REGISTERS
 * command.
 *
 * NOTE(review): listing has gaps — `ret` declaration, verify-failure
 * returns, the else branches, the emit loop tail and DMA_COMMIT are
 * elided between the numbered lines.
 */
196 static int savage_dispatch_state(drm_savage_private_t *dev_priv,
197 const drm_savage_cmd_header_t *cmd_header,
198 const uint32_t __user *regs)
201 unsigned int count = cmd_header->state.count;
202 unsigned int start = cmd_header->state.start;
203 unsigned int count2 = 0;
204 unsigned int bci_size;
/* Make sure the whole user buffer is readable before any
 * *_UNCHECKED accesses in the verify/emit paths. */
210 if (DRM_VERIFYAREA_READ(regs, count*4))
211 return DRM_ERR(EFAULT);
213 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
214 ret = savage_verify_state_s3d(dev_priv, start, count, regs);
217 /* scissor regs are emitted in savage_dispatch_draw */
/* Carve SCSTART..SCEND out of the range: count2 is the part
 * after the scissor regs, count the part before. */
218 if (start < SAVAGE_SCSTART_S3D) {
219 if (start+count > SAVAGE_SCEND_S3D+1)
220 count2 = count - (SAVAGE_SCEND_S3D+1 - start);
221 if (start+count > SAVAGE_SCSTART_S3D)
222 count = SAVAGE_SCSTART_S3D - start;
223 } else if (start <= SAVAGE_SCEND_S3D) {
224 if (start+count > SAVAGE_SCEND_S3D+1) {
225 count -= SAVAGE_SCEND_S3D+1 - start;
226 start = SAVAGE_SCEND_S3D+1;
231 ret = savage_verify_state_s4(dev_priv, start, count, regs);
234 /* scissor regs are emitted in savage_dispatch_draw */
/* Same carve-out for the S4 scissor in DRAWCTRL0..DRAWCTRL1. */
235 if (start < SAVAGE_DRAWCTRL0_S4) {
236 if (start+count > SAVAGE_DRAWCTRL1_S4+1)
237 count2 = count - (SAVAGE_DRAWCTRL1_S4+1 - start);
238 if (start+count > SAVAGE_DRAWCTRL0_S4)
239 count = SAVAGE_DRAWCTRL0_S4 - start;
240 } else if (start <= SAVAGE_DRAWCTRL1_S4) {
241 if (start+count > SAVAGE_DRAWCTRL1_S4+1) {
242 count -= SAVAGE_DRAWCTRL1_S4+1 - start;
243 start = SAVAGE_DRAWCTRL1_S4+1;
/* One SET_REGISTERS header per 255 registers, for both runs. */
249 bci_size = count + (count+254)/255 + count2 + (count2+254)/255;
/* Global state (e.g. fog) affects the X server too: wait for the
 * 3D engine to go idle before changing it. */
251 if (cmd_header->state.global) {
252 BEGIN_DMA(bci_size+1);
253 DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
254 dev_priv->waiting = 1;
/* Emit at most 255 registers per SET_REGISTERS command. */
261 unsigned int n = count < 255 ? count : 255;
262 DMA_SET_REGISTERS(start, n);
263 DMA_COPY_FROM_USER(regs, n);
/*
 * Dispatch a SAVAGE_CMD_DMA_PRIM: draw an unindexed primitive whose
 * vertices live in a kernel DMA buffer, by emitting sequential vertex
 * indices through the BCI.
 *
 * Validates the primitive type/vertex count combination, the skip
 * flags (which drop unused vertex components) and the index range
 * against the DMA buffer size, then emits the indices in chunks of at
 * most 255.  On Savage3D with TRILIST_201 the indices are reordered
 * per triangle to get correct flat shading without flipping the
 * winding.
 *
 * NOTE(review): listing has gaps — the switch/if braces, several
 * conditions (e.g. the n%3 / n<3 checks), the chunk loop header and
 * the function tail are elided between the numbered lines.
 */
279 static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
280 const drm_savage_cmd_header_t *cmd_header,
281 const drm_buf_t *dmabuf)
284 unsigned char reorder = 0;
285 unsigned int prim = cmd_header->prim.prim;
286 unsigned int skip = cmd_header->prim.skip;
287 unsigned int n = cmd_header->prim.count;
288 unsigned int start = cmd_header->prim.start;
292 DRM_ERROR("called without dma buffers!\n");
293 return DRM_ERR(EINVAL);
/* Validate primitive type against the vertex count. */
300 case SAVAGE_PRIM_TRILIST_201:
/* TRILIST_201 is handled as a trilist with reordering. */
302 prim = SAVAGE_PRIM_TRILIST;
303 case SAVAGE_PRIM_TRILIST:
305 DRM_ERROR("wrong number of vertices %u in TRILIST\n",
307 return DRM_ERR(EINVAL);
310 case SAVAGE_PRIM_TRISTRIP:
311 case SAVAGE_PRIM_TRIFAN:
313 DRM_ERROR("wrong number of vertices %u in TRIFAN/STRIP\n",
315 return DRM_ERR(EINVAL);
319 DRM_ERROR("invalid primitive type %u\n", prim);
320 return DRM_ERR(EINVAL);
/* Validate skip flags; vertex DMA requires full-size vertices. */
323 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
325 DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
327 return DRM_ERR(EINVAL);
/* S4: each set skip bit removes one component from the
 * 10-component full vertex; DMA needs exactly 8 remaining. */
330 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
331 (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
332 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
333 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
334 DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
336 return DRM_ERR(EINVAL);
/* The 201 reordering trick only exists on Savage3D-series. */
339 DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
340 return DRM_ERR(EINVAL);
/* Vertices are 32 bytes each in the DMA buffer. */
344 if (start + n > dmabuf->total/32) {
345 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
346 start, start + n - 1, dmabuf->total/32);
347 return DRM_ERR(EINVAL);
350 /* Vertex DMA doesn't work with command DMA at the same time,
351 * so we use BCI_... to submit commands here. Flush buffered
352 * faked DMA first. */
/* (Re)program the vertex buffer base address if it changed. */
355 if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
357 BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
358 BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
359 dev_priv->state.common.vbaddr = dmabuf->bus_address;
361 if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
362 /* Workaround for what looks like a hardware bug. If a
363 * WAIT_3D_IDLE was emitted some time before the
364 * indexed drawing command then the engine will lock
365 * up. There are two known workarounds:
366 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
368 for (i = 0; i < 63; ++i)
369 BCI_WRITE(BCI_CMD_WAIT);
370 dev_priv->waiting = 0;
375 /* Can emit up to 255 indices (85 triangles) at once. */
376 unsigned int count = n > 255 ? 255 : n;
378 /* Need to reorder indices for correct flat
379 * shading while preserving the clock sense
380 * for correct culling. Only on Savage3D. */
381 int reorder[3] = {-1, -1, -1};
/* Per-triangle rotation: one of every three indices is
 * bumped forward by 2, the other two back by 1. */
382 reorder[start%3] = 2;
/* Two 16-bit indices are packed per 32-bit BCI word. */
384 BEGIN_BCI((count+1+1)/2);
385 BCI_DRAW_INDICES_S3D(count, prim, start+2);
387 for (i = start+1; i+1 < start+count; i += 2)
388 BCI_WRITE((i + reorder[i % 3]) |
389 ((i+1 + reorder[(i+1) % 3]) << 16));
/* Odd trailing index, if any. */
391 BCI_WRITE(i + reorder[i%3]);
392 } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
393 BEGIN_BCI((count+1+1)/2);
394 BCI_DRAW_INDICES_S3D(count, prim, start);
396 for (i = start+1; i+1 < start+count; i += 2)
397 BCI_WRITE(i | ((i+1) << 16));
/* Savage4 path: first index goes in the command word's
 * payload stream, hence count+2 halves. */
401 BEGIN_BCI((count+2+1)/2);
402 BCI_DRAW_INDICES_S4(count, prim, skip);
404 for (i = start; i+1 < start+count; i += 2)
405 BCI_WRITE(i | ((i+1) << 16));
/* Subsequent chunks continue the same primitive. */
413 prim |= BCI_CMD_DRAW_CONT;
/*
 * Dispatch a SAVAGE_CMD_VB_PRIM: draw an unindexed primitive whose
 * vertices are copied inline from a user-space vertex buffer into the
 * faked command DMA stream.
 *
 * Validates primitive/count, skip flags (computing the effective
 * vertex size), that the vertex fits in the buffer stride, and the
 * vertex range against vb_size.  Vertices are then emitted in chunks
 * of at most 255, copied straight from user space (the buffer was
 * access-verified by the caller).
 *
 * NOTE(review): listing has gaps — several braces, conditions, the
 * chunk loop header, copy sizes and the function tail are elided
 * between the numbered lines.
 */
419 static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
420 const drm_savage_cmd_header_t *cmd_header,
421 const uint32_t __user *vtxbuf,
422 unsigned int vb_size,
423 unsigned int vb_stride)
426 unsigned char reorder = 0;
427 unsigned int prim = cmd_header->prim.prim;
428 unsigned int skip = cmd_header->prim.skip;
429 unsigned int n = cmd_header->prim.count;
430 unsigned int start = cmd_header->prim.start;
431 unsigned int vtx_size;
/* Validate primitive type against the vertex count. */
438 case SAVAGE_PRIM_TRILIST_201:
440 prim = SAVAGE_PRIM_TRILIST;
441 case SAVAGE_PRIM_TRILIST:
443 DRM_ERROR("wrong number of vertices %u in TRILIST\n",
445 return DRM_ERR(EINVAL);
448 case SAVAGE_PRIM_TRISTRIP:
449 case SAVAGE_PRIM_TRIFAN:
451 DRM_ERROR("wrong number of vertices %u in TRIFAN/STRIP\n",
453 return DRM_ERR(EINVAL);
457 DRM_ERROR("invalid primitive type %u\n", prim);
458 return DRM_ERR(EINVAL);
/* Full vertex is 8 dwords on Savage3D, 10 on Savage4; skip bits
 * may remove components since the data is copied inline. */
461 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
462 if (skip > SAVAGE_SKIP_ALL_S3D) {
463 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
464 return DRM_ERR(EINVAL);
466 vtx_size = 8; /* full vertex */
468 if (skip > SAVAGE_SKIP_ALL_S4) {
469 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
470 return DRM_ERR(EINVAL);
472 vtx_size = 10; /* full vertex */
/* Each set skip bit drops one dword from the vertex. */
475 vtx_size -= (skip & 1) + (skip >> 1 & 1) +
476 (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
477 (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
479 if (vtx_size > vb_stride) {
480 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
481 vtx_size, vb_stride);
482 return DRM_ERR(EINVAL);
/* vb_size is in bytes, stride in dwords, hence the *4. */
485 if (start + n > vb_size / (vb_stride*4)) {
486 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
487 start, start + n - 1, vb_size / (vb_stride*4));
488 return DRM_ERR(EINVAL);
493 /* Can emit up to 255 vertices (85 triangles) at once. */
494 unsigned int count = n > 255 ? 255 : n;
496 /* Need to reorder vertices for correct flat
497 * shading while preserving the clock sense
498 * for correct culling. Only on Savage3D. */
499 int reorder[3] = {-1, -1, -1};
500 reorder[start%3] = 2;
/* One command dword plus vtx_size dwords per vertex. */
502 BEGIN_DMA(count*vtx_size+1);
503 DMA_DRAW_PRIMITIVE(count, prim, skip);
/* Copy each vertex individually, rotated per triangle. */
505 for (i = start; i < start+count; ++i) {
506 unsigned int j = i + reorder[i % 3];
507 DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j],
513 BEGIN_DMA(count*vtx_size+1);
514 DMA_DRAW_PRIMITIVE(count, prim, skip);
/* Densely packed vertices can be copied in one go; strided
 * buffers are copied vertex by vertex. */
516 if (vb_stride == vtx_size) {
517 DMA_COPY_FROM_USER(&vtxbuf[vb_stride*start],
520 for (i = start; i < start+count; ++i) {
522 &vtxbuf[vb_stride*i],
/* Subsequent chunks continue the same primitive. */
533 prim |= BCI_CMD_DRAW_CONT;
/*
 * Dispatch a SAVAGE_CMD_DMA_IDX: draw an indexed primitive whose
 * vertices live in a kernel DMA buffer; the 16-bit indices follow the
 * command header in the user command stream (usr_idx).
 *
 * Same validation as savage_dispatch_dma_prim (primitive/count, skip
 * flags, buffer present), but each chunk of up to 255 indices is
 * first copied to a kernel stack array and range-checked against the
 * DMA buffer before being emitted through the BCI.
 *
 * NOTE(review): listing has gaps — braces, count checks, the chunk
 * loop header, the idx[] declaration and the function tail are elided
 * between the numbered lines.
 */
539 static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
540 const drm_savage_cmd_header_t *cmd_header,
541 const uint16_t __user *usr_idx,
542 const drm_buf_t *dmabuf)
545 unsigned char reorder = 0;
546 unsigned int prim = cmd_header->idx.prim;
547 unsigned int skip = cmd_header->idx.skip;
548 unsigned int n = cmd_header->idx.count;
552 DRM_ERROR("called without dma buffers!\n");
553 return DRM_ERR(EINVAL);
/* Validate primitive type against the index count. */
560 case SAVAGE_PRIM_TRILIST_201:
562 prim = SAVAGE_PRIM_TRILIST;
563 case SAVAGE_PRIM_TRILIST:
565 DRM_ERROR("wrong number of indices %u in TRILIST\n",
567 return DRM_ERR(EINVAL);
570 case SAVAGE_PRIM_TRISTRIP:
571 case SAVAGE_PRIM_TRIFAN:
573 DRM_ERROR("wrong number of indices %u in TRIFAN/STRIP\n",
575 return DRM_ERR(EINVAL);
579 DRM_ERROR("invalid primitive type %u\n", prim);
580 return DRM_ERR(EINVAL);
/* Validate skip flags; vertex DMA requires full-size vertices
 * (exactly 8 remaining components on Savage4). */
583 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
585 DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
587 return DRM_ERR(EINVAL);
590 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
591 (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
592 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
593 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
594 DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
596 return DRM_ERR(EINVAL);
599 DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
600 return DRM_ERR(EINVAL);
604 /* Vertex DMA doesn't work with command DMA at the same time,
605 * so we use BCI_... to submit commands here. Flush buffered
606 * faked DMA first. */
/* (Re)program the vertex buffer base address if it changed. */
609 if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
611 BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
612 BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
613 dev_priv->state.common.vbaddr = dmabuf->bus_address;
615 if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
616 /* Workaround for what looks like a hardware bug. If a
617 * WAIT_3D_IDLE was emitted some time before the
618 * indexed drawing command then the engine will lock
619 * up. There are two known workarounds:
620 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
622 for (i = 0; i < 63; ++i)
623 BCI_WRITE(BCI_CMD_WAIT);
624 dev_priv->waiting = 0;
629 /* Can emit up to 255 indices (85 triangles) at once. */
630 unsigned int count = n > 255 ? 255 : n;
631 /* Is it ok to allocate 510 bytes on the stack in an ioctl? */
634 /* Copy and check indices */
635 DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count*2);
/* Vertices are 32 bytes each in the DMA buffer. */
636 for (i = 0; i < count; ++i) {
637 if (idx[i] > dmabuf->total/32) {
638 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
639 i, idx[i], dmabuf->total/32);
640 return DRM_ERR(EINVAL);
645 /* Need to reorder indices for correct flat
646 * shading while preserving the clock sense
647 * for correct culling. Only on Savage3D. */
648 int reorder[3] = {2, -1, -1};
/* Two 16-bit indices packed per 32-bit BCI word. */
650 BEGIN_BCI((count+1+1)/2);
651 BCI_DRAW_INDICES_S3D(count, prim, idx[2]);
653 for (i = 1; i+1 < count; i += 2)
654 BCI_WRITE(idx[i + reorder[i % 3]] |
655 (idx[i+1 + reorder[(i+1) % 3]] << 16));
/* Odd trailing index, if any. */
657 BCI_WRITE(idx[i + reorder[i%3]]);
658 } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
659 BEGIN_BCI((count+1+1)/2);
660 BCI_DRAW_INDICES_S3D(count, prim, idx[0]);
662 for (i = 1; i+1 < count; i += 2)
663 BCI_WRITE(idx[i] | (idx[i+1] << 16));
/* Savage4 path: all indices go in the payload stream. */
667 BEGIN_BCI((count+2+1)/2);
668 BCI_DRAW_INDICES_S4(count, prim, skip);
670 for (i = 0; i+1 < count; i += 2)
671 BCI_WRITE(idx[i] | (idx[i+1] << 16));
/* Subsequent chunks continue the same primitive. */
679 prim |= BCI_CMD_DRAW_CONT;
/*
 * Dispatch a SAVAGE_CMD_VB_IDX: draw an indexed primitive whose
 * vertices are copied inline from a user-space vertex buffer; 16-bit
 * indices come from the user command stream (usr_idx).
 *
 * Combines the validation of savage_dispatch_vb_prim (skip flags /
 * vertex size / stride) with the per-chunk index copy-and-check of
 * savage_dispatch_dma_idx, then copies each referenced vertex from
 * user space into the faked DMA stream.
 *
 * NOTE(review): listing has gaps — braces, count checks, the chunk
 * loop header, the idx[] declaration, copy sizes and the function
 * tail are elided between the numbered lines.
 */
685 static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
686 const drm_savage_cmd_header_t *cmd_header,
687 const uint16_t __user *usr_idx,
688 const uint32_t __user *vtxbuf,
689 unsigned int vb_size,
690 unsigned int vb_stride)
693 unsigned char reorder = 0;
694 unsigned int prim = cmd_header->idx.prim;
695 unsigned int skip = cmd_header->idx.skip;
696 unsigned int n = cmd_header->idx.count;
697 unsigned int vtx_size;
/* Validate primitive type against the index count. */
704 case SAVAGE_PRIM_TRILIST_201:
706 prim = SAVAGE_PRIM_TRILIST;
707 case SAVAGE_PRIM_TRILIST:
709 DRM_ERROR("wrong number of indices %u in TRILIST\n",
711 return DRM_ERR(EINVAL);
714 case SAVAGE_PRIM_TRISTRIP:
715 case SAVAGE_PRIM_TRIFAN:
717 DRM_ERROR("wrong number of indices %u in TRIFAN/STRIP\n",
719 return DRM_ERR(EINVAL);
723 DRM_ERROR("invalid primitive type %u\n", prim);
724 return DRM_ERR(EINVAL);
/* Full vertex is 8 dwords on Savage3D, 10 on Savage4; skip bits
 * may drop components since vertices are copied inline. */
727 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
728 if (skip > SAVAGE_SKIP_ALL_S3D) {
729 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
730 return DRM_ERR(EINVAL);
732 vtx_size = 8; /* full vertex */
734 if (skip > SAVAGE_SKIP_ALL_S4) {
735 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
736 return DRM_ERR(EINVAL);
738 vtx_size = 10; /* full vertex */
/* Each set skip bit drops one dword from the vertex. */
741 vtx_size -= (skip & 1) + (skip >> 1 & 1) +
742 (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
743 (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
745 if (vtx_size > vb_stride) {
746 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
747 vtx_size, vb_stride);
748 return DRM_ERR(EINVAL);
753 /* Can emit up to 255 vertices (85 triangles) at once. */
754 unsigned int count = n > 255 ? 255 : n;
755 /* Is it ok to allocate 510 bytes on the stack in an ioctl? */
758 /* Copy and check indices */
759 DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count*2);
/* vb_size is in bytes, stride in dwords, hence the *4. */
760 for (i = 0; i < count; ++i) {
761 if (idx[i] > vb_size / (vb_stride*4)) {
762 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
763 i, idx[i], vb_size / (vb_stride*4));
764 return DRM_ERR(EINVAL);
769 /* Need to reorder vertices for correct flat
770 * shading while preserving the clock sense
771 * for correct culling. Only on Savage3D. */
772 int reorder[3] = {2, -1, -1};
/* One command dword plus vtx_size dwords per vertex. */
774 BEGIN_DMA(count*vtx_size+1);
775 DMA_DRAW_PRIMITIVE(count, prim, skip);
/* Copy each referenced vertex, rotated per triangle. */
777 for (i = 0; i < count; ++i) {
778 unsigned int j = idx[i + reorder[i % 3]];
779 DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j],
785 BEGIN_DMA(count*vtx_size+1);
786 DMA_DRAW_PRIMITIVE(count, prim, skip);
788 for (i = 0; i < count; ++i) {
789 unsigned int j = idx[i];
790 DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j],
/* Subsequent chunks continue the same primitive. */
800 prim |= BCI_CMD_DRAW_CONT;
/*
 * Dispatch a SAVAGE_CMD_CLEAR: clear the selected buffers (front /
 * back / depth per cmd_header->clear0.flags) in every clip rect.
 *
 * The clear mask and color are read from the following command-stream
 * entry (`data`).  A non-full mask is programmed into
 * BITPLANEWTMASK around the rectangle fills and restored to
 * 0xffffffff afterwards.  Each buffer is cleared with a solid
 * rectangle fill (ROP 0xCC = copy) through the per-buffer BD/offset
 * registers.
 *
 * NOTE(review): listing has gaps — flag/nbox early-outs, the depth
 * clear-value handling, BEGIN_DMA sizes and several braces are elided
 * between the numbered lines.
 */
806 static int savage_dispatch_clear(drm_savage_private_t *dev_priv,
807 const drm_savage_cmd_header_t *cmd_header,
808 const drm_savage_cmd_header_t __user *data,
810 const drm_clip_rect_t __user *usr_boxes)
813 unsigned int flags = cmd_header->clear0.flags, mask, value;
814 unsigned int clear_cmd;
815 unsigned int i, nbufs;
/* mask and value follow the CLEAR header in the command stream;
 * the area was verified by the caller. */
820 DRM_GET_USER_UNCHECKED(mask, &((drm_savage_cmd_header_t*)data)
822 DRM_GET_USER_UNCHECKED(value, &((drm_savage_cmd_header_t*)data)
/* Solid rectangle fill; ROP 0xCC is a plain copy. */
825 clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
826 BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW;
827 BCI_CMD_SET_ROP(clear_cmd,0xCC);
/* Number of buffers selected by the flags. */
829 nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) +
830 ((flags & SAVAGE_BACK) ? 1 : 0) +
831 ((flags & SAVAGE_DEPTH) ? 1 : 0);
/* Restrict writes to the requested bit planes, if partial. */
835 if (mask != 0xffffffff) {
838 DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
842 for (i = 0; i < nbox; ++i) {
844 unsigned int x, y, w, h;
/* boxes are in screen coordinates; area verified by caller */
846 DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
847 x = box.x1, y = box.y1;
/* Iterate the buffer flag bits: FRONT, BACK, DEPTH. */
851 for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
854 DMA_WRITE(clear_cmd);
/* Select destination offset/descriptor per buffer. */
857 DMA_WRITE(dev_priv->front_offset);
858 DMA_WRITE(dev_priv->front_bd);
861 DMA_WRITE(dev_priv->back_offset);
862 DMA_WRITE(dev_priv->back_bd);
865 DMA_WRITE(dev_priv->depth_offset);
866 DMA_WRITE(dev_priv->depth_bd);
870 DMA_WRITE(BCI_X_Y(x, y));
871 DMA_WRITE(BCI_W_H(w, h));
/* Restore full write mask after a partial clear. */
875 if (mask != 0xffffffff) {
878 DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
879 DMA_WRITE(0xffffffff);
/*
 * Dispatch a SAVAGE_CMD_SWAP: copy the back buffer to the front
 * buffer (the global bitmap descriptor destination), one blit per
 * clip rect.
 *
 * ROP 0xCC is a plain source copy.  Source and destination X/Y are
 * written with identical coordinates — the two BCI_X_Y writes are the
 * blit's src and dst positions, not a duplicate.
 *
 * NOTE(review): listing has gaps — BEGIN_DMA size, the swap_cmd
 * write and the function tail are elided between the numbered lines.
 */
886 static int savage_dispatch_swap(drm_savage_private_t *dev_priv,
888 const drm_clip_rect_t __user *usr_boxes)
891 unsigned int swap_cmd;
897 swap_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
898 BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD;
899 BCI_CMD_SET_ROP(swap_cmd,0xCC);
901 for (i = 0; i < nbox; ++i) {
/* boxes were access-verified by the caller */
903 DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
/* Source is the back buffer's offset/descriptor. */
907 DMA_WRITE(dev_priv->back_offset);
908 DMA_WRITE(dev_priv->back_bd);
/* src X/Y, then dst X/Y (same screen coordinates). */
909 DMA_WRITE(BCI_X_Y(box.x1, box.y1));
910 DMA_WRITE(BCI_X_Y(box.x1, box.y1));
911 DMA_WRITE(BCI_W_H(box.x2-box.x1, box.y2-box.y1));
/*
 * Replay a run of drawing commands ([start, end) in the user command
 * stream) once per clip rect: for each box, emit the scissor via the
 * chip-specific emit_clip_rect hook, then walk the command headers
 * and dispatch each DMA/VB PRIM/IDX command.  Indexed commands carry
 * their index payload inline; `j` skips over it (count was already
 * validated in savage_bci_cmdbuf).
 *
 * Only drawing commands may appear in this range; anything else is an
 * internal error in the grouping logic of savage_bci_cmdbuf.
 *
 * NOTE(review): listing has gaps — `ret`/`j` declarations, the
 * usr_cmdbuf initialization and advance, break statements and the
 * function tail are elided between the numbered lines.
 */
918 static int savage_dispatch_draw(drm_savage_private_t *dev_priv,
919 const drm_savage_cmd_header_t __user *start,
920 const drm_savage_cmd_header_t __user *end,
921 const drm_buf_t *dmabuf,
922 const unsigned int __user *usr_vtxbuf,
923 unsigned int vb_size, unsigned int vb_stride,
925 const drm_clip_rect_t __user *usr_boxes)
930 for (i = 0; i < nbox; ++i) {
932 const drm_savage_cmd_header_t __user *usr_cmdbuf;
/* boxes were access-verified by the caller */
933 DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
/* Chip-specific scissor emit (S3D or S4 variant above). */
934 dev_priv->emit_clip_rect(dev_priv, &box);
/* Replay the whole command run inside this clip rect. */
937 while (usr_cmdbuf < end) {
938 drm_savage_cmd_header_t cmd_header;
939 DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf,
942 switch (cmd_header.cmd.cmd) {
943 case SAVAGE_CMD_DMA_PRIM:
944 ret = savage_dispatch_dma_prim(
945 dev_priv, &cmd_header, dmabuf);
947 case SAVAGE_CMD_VB_PRIM:
948 ret = savage_dispatch_vb_prim(
949 dev_priv, &cmd_header,
950 (uint32_t __user *)usr_vtxbuf,
953 case SAVAGE_CMD_DMA_IDX:
/* Inline indices occupy j 64-bit command slots. */
954 j = (cmd_header.idx.count + 3) / 4;
955 /* j was checked in savage_bci_cmdbuf */
956 ret = savage_dispatch_dma_idx(
957 dev_priv, &cmd_header,
958 (uint16_t __user *)usr_cmdbuf,
962 case SAVAGE_CMD_VB_IDX:
963 j = (cmd_header.idx.count + 3) / 4;
964 /* j was checked in savage_bci_cmdbuf */
965 ret = savage_dispatch_vb_idx(
966 dev_priv, &cmd_header,
967 (uint16_t __user *)usr_cmdbuf,
968 (uint32_t __user *)usr_vtxbuf,
/* Non-drawing commands must not reach this loop; the
 * grouping in savage_bci_cmdbuf guarantees it. */
973 /* What's the best return code? EFAULT? */
974 DRM_ERROR("IMPLEMENTATION ERROR: "
975 "non-drawing-command %d\n",
977 return DRM_ERR(EINVAL);
988 int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
991 drm_savage_private_t *dev_priv = dev->dev_private;
992 drm_device_dma_t *dma = dev->dma;
994 drm_savage_cmdbuf_t cmdbuf;
995 drm_savage_cmd_header_t __user *usr_cmdbuf;
996 drm_savage_cmd_header_t __user *first_draw_cmd;
997 unsigned int __user *usr_vtxbuf;
998 drm_clip_rect_t __user *usr_boxes;
1004 LOCK_TEST_WITH_RETURN(dev, filp);
1006 DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_savage_cmdbuf_t __user *)data,
1009 if (dma && dma->buflist) {
1010 if (cmdbuf.dma_idx > dma->buf_count) {
1011 DRM_ERROR("vertex buffer index %u out of range (0-%u)\n",
1012 cmdbuf.dma_idx, dma->buf_count-1);
1013 return DRM_ERR(EINVAL);
1015 dmabuf = dma->buflist[cmdbuf.dma_idx];
1020 usr_cmdbuf = (drm_savage_cmd_header_t __user *)cmdbuf.cmd_addr;
1021 usr_vtxbuf = (unsigned int __user *)cmdbuf.vb_addr;
1022 usr_boxes = (drm_clip_rect_t __user *)cmdbuf.box_addr;
1023 if ((cmdbuf.size && DRM_VERIFYAREA_READ(usr_cmdbuf, cmdbuf.size*8)) ||
1024 (cmdbuf.vb_size && DRM_VERIFYAREA_READ(
1025 usr_vtxbuf, cmdbuf.vb_size)) ||
1026 (cmdbuf.nbox && DRM_VERIFYAREA_READ(
1027 usr_boxes, cmdbuf.nbox*sizeof(drm_clip_rect_t))))
1028 return DRM_ERR(EFAULT);
1030 /* Make sure writes to DMA buffers are finished before sending
1031 * DMA commands to the graphics hardware. */
1032 DRM_MEMORYBARRIER();
1034 /* Coming from user space. Don't know if the Xserver has
1035 * emitted wait commands. Assuming the worst. */
1036 dev_priv->waiting = 1;
1039 first_draw_cmd = NULL;
1040 while (i < cmdbuf.size) {
1041 drm_savage_cmd_header_t cmd_header;
1042 DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf,
1043 sizeof(cmd_header));
1047 /* Group drawing commands with same state to minimize
1048 * iterations over clip rects. */
1050 switch (cmd_header.cmd.cmd) {
1051 case SAVAGE_CMD_DMA_IDX:
1052 case SAVAGE_CMD_VB_IDX:
1053 j = (cmd_header.idx.count + 3) / 4;
1054 if (i + j > cmdbuf.size) {
1055 DRM_ERROR("indexed drawing command extends "
1056 "beyond end of command buffer\n");
1058 return DRM_ERR(EINVAL);
1061 case SAVAGE_CMD_DMA_PRIM:
1062 case SAVAGE_CMD_VB_PRIM:
1063 if (!first_draw_cmd)
1064 first_draw_cmd = usr_cmdbuf-1;
1069 if (first_draw_cmd) {
1070 ret = savage_dispatch_draw (
1071 dev_priv, first_draw_cmd, usr_cmdbuf-1,
1072 dmabuf, usr_vtxbuf, cmdbuf.vb_size,
1074 cmdbuf.nbox, usr_boxes);
1077 first_draw_cmd = NULL;
1083 switch (cmd_header.cmd.cmd) {
1084 case SAVAGE_CMD_STATE:
1085 j = (cmd_header.state.count + 1) / 2;
1086 if (i + j > cmdbuf.size) {
1087 DRM_ERROR("command SAVAGE_CMD_STATE extends "
1088 "beyond end of command buffer\n");
1090 return DRM_ERR(EINVAL);
1092 ret = savage_dispatch_state(
1093 dev_priv, &cmd_header,
1094 (uint32_t __user *)usr_cmdbuf);
1098 case SAVAGE_CMD_CLEAR:
1099 if (i + 1 > cmdbuf.size) {
1100 DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
1101 "beyond end of command buffer\n");
1103 return DRM_ERR(EINVAL);
1105 ret = savage_dispatch_clear(dev_priv, &cmd_header,
1107 cmdbuf.nbox, usr_boxes);
1111 case SAVAGE_CMD_SWAP:
1112 ret = savage_dispatch_swap(dev_priv,
1113 cmdbuf.nbox, usr_boxes);
1116 DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd);
1118 return DRM_ERR(EINVAL);
1127 if (first_draw_cmd) {
1128 ret = savage_dispatch_draw (
1129 dev_priv, first_draw_cmd, usr_cmdbuf, dmabuf,
1130 usr_vtxbuf, cmdbuf.vb_size, cmdbuf.vb_stride,
1131 cmdbuf.nbox, usr_boxes);
1140 if (dmabuf && cmdbuf.discard) {
1141 drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private;
1143 event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
1144 SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
1145 savage_freelist_put(dev, dmabuf);