1 /* r128_state.c -- State support for r128 -*- linux-c -*-
2 * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com */
4 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
22 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
23 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
27 * Gareth Hughes <gareth@valinux.com>
32 #include "dev/drm/drmP.h"
33 #include "dev/drm/drm.h"
34 #include "dev/drm/r128_drm.h"
35 #include "dev/drm/r128_drv.h"
37 /* ================================================================
38 * CCE hardware state programming functions
/* Program up to three hardware AUX scissor rectangles into the CCE ring.
 * NOTE(review): the embedded line numbers show gaps — the per-count guards,
 * RING_LOCALS and ADVANCE_RING are presumably present but not visible here.
 */
41 static void r128_emit_clip_rects(drm_r128_private_t * dev_priv,
42 drm_clip_rect_t * boxes, int count)
44 u32 aux_sc_cntl = 0x00000000;
46 DRM_DEBUG(" %s\n", __FUNCTION__);
/* 5 dwords per scissor (packet header + 4 edges), max 3 scissors, plus
 * 2 dwords for the final AUX_SC_CNTL write. */
48 BEGIN_RING((count < 3 ? count : 3) * 5 + 2);
/* First cliprect -> AUX1 scissor; hardware right/bottom edges are
 * inclusive, hence the "- 1" on x2/y2. */
51 OUT_RING(CCE_PACKET0(R128_AUX1_SC_LEFT, 3));
52 OUT_RING(boxes[0].x1);
53 OUT_RING(boxes[0].x2 - 1);
54 OUT_RING(boxes[0].y1);
55 OUT_RING(boxes[0].y2 - 1);
57 aux_sc_cntl |= (R128_AUX1_SC_EN | R128_AUX1_SC_MODE_OR);
/* Second cliprect -> AUX2 scissor. */
60 OUT_RING(CCE_PACKET0(R128_AUX2_SC_LEFT, 3));
61 OUT_RING(boxes[1].x1);
62 OUT_RING(boxes[1].x2 - 1);
63 OUT_RING(boxes[1].y1);
64 OUT_RING(boxes[1].y2 - 1);
66 aux_sc_cntl |= (R128_AUX2_SC_EN | R128_AUX2_SC_MODE_OR);
/* Third cliprect -> AUX3 scissor. */
69 OUT_RING(CCE_PACKET0(R128_AUX3_SC_LEFT, 3));
70 OUT_RING(boxes[2].x1);
71 OUT_RING(boxes[2].x2 - 1);
72 OUT_RING(boxes[2].y1);
73 OUT_RING(boxes[2].y2 - 1);
75 aux_sc_cntl |= (R128_AUX3_SC_EN | R128_AUX3_SC_MODE_OR);
/* Enable exactly the scissors that were accumulated above. */
78 OUT_RING(CCE_PACKET0(R128_AUX_SC_CNTL, 0));
79 OUT_RING(aux_sc_cntl);
/* Emit the "core" state: a single write of SCALE_3D_CNTL taken from the
 * shared SAREA context snapshot. */
84 static __inline__ void r128_emit_core(drm_r128_private_t * dev_priv)
86 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
87 drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
89 DRM_DEBUG(" %s\n", __FUNCTION__);
93 OUT_RING(CCE_PACKET0(R128_SCALE_3D_CNTL, 0));
94 OUT_RING(ctx->scale_3d_cntl);
/* Emit the main 3D context register block (12 consecutive registers
 * starting at DST_PITCH_OFFSET_C) from the SAREA context snapshot. */
99 static __inline__ void r128_emit_context(drm_r128_private_t * dev_priv)
101 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
102 drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
104 DRM_DEBUG(" %s\n", __FUNCTION__);
/* Packet0 with count 11 => header + 12 data dwords, one per register. */
108 OUT_RING(CCE_PACKET0(R128_DST_PITCH_OFFSET_C, 11));
109 OUT_RING(ctx->dst_pitch_offset_c);
110 OUT_RING(ctx->dp_gui_master_cntl_c);
111 OUT_RING(ctx->sc_top_left_c);
112 OUT_RING(ctx->sc_bottom_right_c);
113 OUT_RING(ctx->z_offset_c);
114 OUT_RING(ctx->z_pitch_c);
115 OUT_RING(ctx->z_sten_cntl_c);
116 OUT_RING(ctx->tex_cntl_c);
117 OUT_RING(ctx->misc_3d_state_cntl_reg);
118 OUT_RING(ctx->texture_clr_cmp_clr_c);
119 OUT_RING(ctx->texture_clr_cmp_msk_c);
120 OUT_RING(ctx->fog_color_c);
/* Emit triangle-setup state: SETUP_CNTL and PM4_VC_FPU_SETUP written in a
 * single Packet1 (two non-consecutive registers). */
125 static __inline__ void r128_emit_setup(drm_r128_private_t * dev_priv)
127 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
128 drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
130 DRM_DEBUG(" %s\n", __FUNCTION__);
134 OUT_RING(CCE_PACKET1(R128_SETUP_CNTL, R128_PM4_VC_FPU_SETUP));
135 OUT_RING(ctx->setup_cntl);
136 OUT_RING(ctx->pm4_vc_fpu_setup);
/* Emit write-mask state: the DP write mask plus the stencil-reference and
 * 3D plane masks (two consecutive registers in one Packet0). */
141 static __inline__ void r128_emit_masks(drm_r128_private_t * dev_priv)
143 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
144 drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
146 DRM_DEBUG(" %s\n", __FUNCTION__);
150 OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
151 OUT_RING(ctx->dp_write_mask);
153 OUT_RING(CCE_PACKET0(R128_STEN_REF_MASK_C, 1));
154 OUT_RING(ctx->sten_ref_mask_c);
155 OUT_RING(ctx->plane_3d_mask_c);
/* Emit window state: a single WINDOW_XY_OFFSET write from the SAREA
 * context snapshot. */
160 static __inline__ void r128_emit_window(drm_r128_private_t * dev_priv)
162 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
163 drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
165 DRM_DEBUG(" %s\n", __FUNCTION__);
169 OUT_RING(CCE_PACKET0(R128_WINDOW_XY_OFFSET, 0));
170 OUT_RING(ctx->window_xy_offset);
/* Emit texture-unit-0 state: primary texture control/combine registers,
 * the shared tex size/pitch, all mipmap level offsets, then the constant
 * color and unit-0 border color. */
175 static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv)
177 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
178 drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
179 drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0];
182 DRM_DEBUG(" %s\n", __FUNCTION__);
184 BEGIN_RING(7 + R128_MAX_TEXTURE_LEVELS);
/* Header + (3 + R128_MAX_TEXTURE_LEVELS) consecutive register writes. */
186 OUT_RING(CCE_PACKET0(R128_PRIM_TEX_CNTL_C,
187 2 + R128_MAX_TEXTURE_LEVELS));
188 OUT_RING(tex->tex_cntl);
189 OUT_RING(tex->tex_combine_cntl);
190 OUT_RING(ctx->tex_size_pitch_c);
191 for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) {
192 OUT_RING(tex->tex_offset[i]);
195 OUT_RING(CCE_PACKET0(R128_CONSTANT_COLOR_C, 1));
196 OUT_RING(ctx->constant_color_c);
197 OUT_RING(tex->tex_border_color);
/* Emit texture-unit-1 state: secondary texture control/combine registers,
 * all mipmap level offsets, then the unit-1 border color. */
202 static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv)
204 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
205 drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];
208 DRM_DEBUG(" %s\n", __FUNCTION__);
210 BEGIN_RING(5 + R128_MAX_TEXTURE_LEVELS);
212 OUT_RING(CCE_PACKET0(R128_SEC_TEX_CNTL_C, 1 + R128_MAX_TEXTURE_LEVELS));
213 OUT_RING(tex->tex_cntl);
214 OUT_RING(tex->tex_combine_cntl);
215 for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) {
216 OUT_RING(tex->tex_offset[i]);
219 OUT_RING(CCE_PACKET0(R128_SEC_TEXTURE_BORDER_COLOR_C, 0));
220 OUT_RING(tex->tex_border_color);
/* Flush all dirty state groups to the hardware: for each set bit in the
 * SAREA dirty mask, emit the corresponding register group and clear the
 * bit so it is not re-emitted until userspace dirties it again. */
225 static __inline__ void r128_emit_state(drm_r128_private_t * dev_priv)
227 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
228 unsigned int dirty = sarea_priv->dirty;
230 DRM_DEBUG("%s: dirty=0x%08x\n", __FUNCTION__, dirty);
232 if (dirty & R128_UPLOAD_CORE) {
233 r128_emit_core(dev_priv);
234 sarea_priv->dirty &= ~R128_UPLOAD_CORE;
237 if (dirty & R128_UPLOAD_CONTEXT) {
238 r128_emit_context(dev_priv);
239 sarea_priv->dirty &= ~R128_UPLOAD_CONTEXT;
242 if (dirty & R128_UPLOAD_SETUP) {
243 r128_emit_setup(dev_priv);
244 sarea_priv->dirty &= ~R128_UPLOAD_SETUP;
247 if (dirty & R128_UPLOAD_MASKS) {
248 r128_emit_masks(dev_priv);
249 sarea_priv->dirty &= ~R128_UPLOAD_MASKS;
252 if (dirty & R128_UPLOAD_WINDOW) {
253 r128_emit_window(dev_priv);
254 sarea_priv->dirty &= ~R128_UPLOAD_WINDOW;
257 if (dirty & R128_UPLOAD_TEX0) {
258 r128_emit_tex0(dev_priv);
259 sarea_priv->dirty &= ~R128_UPLOAD_TEX0;
262 if (dirty & R128_UPLOAD_TEX1) {
263 r128_emit_tex1(dev_priv);
264 sarea_priv->dirty &= ~R128_UPLOAD_TEX1;
267 /* Turn off the texture cache flushing */
268 sarea_priv->context_state.tex_cntl_c &= ~R128_TEX_CACHE_FLUSH;
270 sarea_priv->dirty &= ~R128_REQUIRE_QUIESCENCE;
273 #if R128_PERFORMANCE_BOXES
274 /* ================================================================
275 * Performance monitoring functions
/* Paint a small solid-color box into the back buffer — used for the
 * on-screen performance indicators. Converts 8-bit r/g/b into the
 * framebuffer's native pixel format before issuing the PAINT_MULTI blit.
 * NOTE(review): the switch case labels and ADVANCE_RING are among the
 * lines missing from this listing. */
278 static void r128_clear_box(drm_r128_private_t * dev_priv,
279 int x, int y, int w, int h, int r, int g, int b)
285 switch (dev_priv->fb_bpp) {
/* 16 bpp: pack into RGB565. */
287 fb_bpp = R128_GMC_DST_16BPP;
288 color = (((r & 0xf8) << 8) |
289 ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
/* 24 bpp: pack into RGB888. */
292 fb_bpp = R128_GMC_DST_24BPP;
293 color = ((r << 16) | (g << 8) | b);
/* 32 bpp: pack into ARGB8888 with opaque alpha. */
296 fb_bpp = R128_GMC_DST_32BPP;
297 color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
/* Target the back buffer; pitch is in units of 8 pixels. */
303 offset = dev_priv->back_offset;
304 pitch = dev_priv->back_pitch >> 3;
308 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
309 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
310 R128_GMC_BRUSH_SOLID_COLOR |
312 R128_GMC_SRC_DATATYPE_COLOR |
314 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_AUX_CLIP_DIS);
/* Pitch/offset packed as the hardware expects (offset in 32-byte units). */
316 OUT_RING((pitch << 21) | (offset >> 5));
319 OUT_RING((x << 16) | y);
320 OUT_RING((w << 16) | h);
/* Draw the performance-monitoring boxes: a green box when the engine was
 * never found idle since the last swap, then reset the idle counter. */
325 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
327 if (atomic_read(&dev_priv->idle_count) == 0) {
328 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
330 atomic_set(&dev_priv->idle_count, 0);
336 /* ================================================================
337 * CCE command dispatch functions
/* Log a human-readable breakdown of a state dirty mask, one label per
 * R128_UPLOAD_* / R128_REQUIRE_* bit that is set. */
340 static void r128_print_dirty(const char *msg, unsigned int flags)
342 DRM_INFO("%s: (0x%x) %s%s%s%s%s%s%s%s%s\n",
345 (flags & R128_UPLOAD_CORE) ? "core, " : "",
346 (flags & R128_UPLOAD_CONTEXT) ? "context, " : "",
347 (flags & R128_UPLOAD_SETUP) ? "setup, " : "",
348 (flags & R128_UPLOAD_TEX0) ? "tex0, " : "",
349 (flags & R128_UPLOAD_TEX1) ? "tex1, " : "",
350 (flags & R128_UPLOAD_MASKS) ? "masks, " : "",
351 (flags & R128_UPLOAD_WINDOW) ? "window, " : "",
352 (flags & R128_UPLOAD_CLIPRECTS) ? "cliprects, " : "",
353 (flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : "");
/* Clear the requested buffers (front/back color, depth) within every
 * cliprect in the SAREA. When page flipping has swapped front and back,
 * the FRONT/BACK flags are remapped so the physically correct surface is
 * cleared. Color clears temporarily install the caller's color mask. */
356 static void r128_cce_dispatch_clear(drm_device_t * dev,
357 drm_r128_clear_t * clear)
359 drm_r128_private_t *dev_priv = dev->dev_private;
360 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
361 int nbox = sarea_priv->nbox;
362 drm_clip_rect_t *pbox = sarea_priv->boxes;
363 unsigned int flags = clear->flags;
366 DRM_DEBUG("%s\n", __FUNCTION__);
/* Pages are flipped: swap the meaning of FRONT and BACK. The remapping
 * body is partially missing from this listing — verify against the
 * upstream file. */
368 if (dev_priv->page_flipping && dev_priv->current_page == 1) {
369 unsigned int tmp = flags;
371 flags &= ~(R128_FRONT | R128_BACK);
372 if (tmp & R128_FRONT)
/* One pass per cliprect. */
378 for (i = 0; i < nbox; i++) {
381 int w = pbox[i].x2 - x;
382 int h = pbox[i].y2 - y;
384 DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n",
385 pbox[i].x1, pbox[i].y1, pbox[i].x2,
/* Install the user's plane write mask for the color clears. */
388 if (flags & (R128_FRONT | R128_BACK)) {
391 OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
392 OUT_RING(clear->color_mask);
/* Solid-fill the front buffer region. */
397 if (flags & R128_FRONT) {
400 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
401 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
402 R128_GMC_BRUSH_SOLID_COLOR |
403 (dev_priv->color_fmt << 8) |
404 R128_GMC_SRC_DATATYPE_COLOR |
406 R128_GMC_CLR_CMP_CNTL_DIS |
407 R128_GMC_AUX_CLIP_DIS);
409 OUT_RING(dev_priv->front_pitch_offset_c);
410 OUT_RING(clear->clear_color);
412 OUT_RING((x << 16) | y);
413 OUT_RING((w << 16) | h);
/* Solid-fill the back buffer region. */
418 if (flags & R128_BACK) {
421 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
422 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
423 R128_GMC_BRUSH_SOLID_COLOR |
424 (dev_priv->color_fmt << 8) |
425 R128_GMC_SRC_DATATYPE_COLOR |
427 R128_GMC_CLR_CMP_CNTL_DIS |
428 R128_GMC_AUX_CLIP_DIS);
430 OUT_RING(dev_priv->back_pitch_offset_c);
431 OUT_RING(clear->clear_color);
433 OUT_RING((x << 16) | y);
434 OUT_RING((w << 16) | h);
/* Solid-fill the depth buffer region; write mask disabled since depth
 * has no plane mask here. */
439 if (flags & R128_DEPTH) {
442 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
443 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
444 R128_GMC_BRUSH_SOLID_COLOR |
445 (dev_priv->depth_fmt << 8) |
446 R128_GMC_SRC_DATATYPE_COLOR |
448 R128_GMC_CLR_CMP_CNTL_DIS |
449 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);
451 OUT_RING(dev_priv->depth_pitch_offset_c);
452 OUT_RING(clear->clear_depth);
454 OUT_RING((x << 16) | y);
455 OUT_RING((w << 16) | h);
/* Copy (blit) the back buffer to the front buffer for every cliprect,
 * honoring the current page-flip orientation, then bump the frame
 * counter that clients use for frame throttling. */
462 static void r128_cce_dispatch_swap(drm_device_t * dev)
464 drm_r128_private_t *dev_priv = dev->dev_private;
465 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
466 int nbox = sarea_priv->nbox;
467 drm_clip_rect_t *pbox = sarea_priv->boxes;
470 DRM_DEBUG("%s\n", __FUNCTION__);
472 #if R128_PERFORMANCE_BOXES
473 /* Do some trivial performance monitoring...
475 r128_cce_performance_boxes(dev_priv);
/* One screen-to-screen blit per cliprect. */
478 for (i = 0; i < nbox; i++) {
481 int w = pbox[i].x2 - x;
482 int h = pbox[i].y2 - y;
486 OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
487 OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
488 R128_GMC_DST_PITCH_OFFSET_CNTL |
489 R128_GMC_BRUSH_NONE |
490 (dev_priv->color_fmt << 8) |
491 R128_GMC_SRC_DATATYPE_COLOR |
493 R128_DP_SRC_SOURCE_MEMORY |
494 R128_GMC_CLR_CMP_CNTL_DIS |
495 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);
497 /* Make this work even if front & back are flipped:
499 if (dev_priv->current_page == 0) {
500 OUT_RING(dev_priv->back_pitch_offset_c);
501 OUT_RING(dev_priv->front_pitch_offset_c);
503 OUT_RING(dev_priv->front_pitch_offset_c);
504 OUT_RING(dev_priv->back_pitch_offset_c);
/* Source and destination share the same x/y; same size. */
507 OUT_RING((x << 16) | y);
508 OUT_RING((x << 16) | y);
509 OUT_RING((w << 16) | h);
514 /* Increment the frame counter. The client-side 3D driver must
515 * throttle the framerate by waiting for this value before
516 * performing the swapbuffer ioctl.
518 dev_priv->sarea_priv->last_frame++;
522 OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
523 OUT_RING(dev_priv->sarea_priv->last_frame);
/* Perform a page flip: point the CRTC at the other buffer, toggle the
 * driver's notion of the current page, and bump the frame counter used
 * for client-side throttling. */
528 static void r128_cce_dispatch_flip(drm_device_t * dev)
530 drm_r128_private_t *dev_priv = dev->dev_private;
532 DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
534 dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);
536 #if R128_PERFORMANCE_BOXES
537 /* Do some trivial performance monitoring...
539 r128_cce_performance_boxes(dev_priv);
/* Wait for any previous flip to land before reprogramming CRTC_OFFSET. */
544 R128_WAIT_UNTIL_PAGE_FLIPPED();
545 OUT_RING(CCE_PACKET0(R128_CRTC_OFFSET, 0));
/* Scan out whichever buffer is not currently being displayed. */
547 if (dev_priv->current_page == 0) {
548 OUT_RING(dev_priv->back_offset);
550 OUT_RING(dev_priv->front_offset);
555 /* Increment the frame counter. The client-side 3D driver must
556 * throttle the framerate by waiting for this value before
557 * performing the swapbuffer ioctl.
559 dev_priv->sarea_priv->last_frame++;
/* Toggle 0 <-> 1 and mirror the new page into the SAREA. */
560 dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
561 1 - dev_priv->current_page;
565 OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
566 OUT_RING(dev_priv->sarea_priv->last_frame);
/* Dispatch a vertex buffer: flush dirty state, then loop emitting the
 * rendering packet once per batch of (up to three) cliprects. If the
 * buffer is marked discard, emit its age so it can be reclaimed once the
 * hardware has passed it. */
571 static void r128_cce_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf)
573 drm_r128_private_t *dev_priv = dev->dev_private;
574 drm_r128_buf_priv_t *buf_priv = buf->dev_private;
575 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
576 int format = sarea_priv->vc_format;
577 int offset = buf->bus_address;
578 int size = buf->used;
579 int prim = buf_priv->prim;
582 DRM_DEBUG("buf=%d nbox=%d\n", buf->idx, sarea_priv->nbox);
588 buf_priv->dispatched = 1;
/* Emit any state except cliprects (those are handled per-batch below). */
590 if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) {
591 r128_emit_state(dev_priv);
595 /* Emit the next set of up to three cliprects */
596 if (i < sarea_priv->nbox) {
597 r128_emit_clip_rects(dev_priv,
598 &sarea_priv->boxes[i],
599 sarea_priv->nbox - i);
602 /* Emit the vertex buffer rendering commands */
605 OUT_RING(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM, 3));
609 OUT_RING(prim | R128_CCE_VC_CNTL_PRIM_WALK_LIST |
610 (size << R128_CCE_VC_CNTL_NUM_SHIFT));
615 } while (i < sarea_priv->nbox);
/* Discarded buffer: record its age for later reuse by the freelist. */
618 if (buf_priv->discard) {
619 buf_priv->age = dev_priv->sarea_priv->last_dispatch;
621 /* Emit the vertex buffer age */
624 OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
625 OUT_RING(buf_priv->age);
631 /* FIXME: Check dispatched field */
632 buf_priv->dispatched = 0;
635 dev_priv->sarea_priv->last_dispatch++;
/* Cliprects were consumed; reset for the next dispatch. */
637 sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
638 sarea_priv->nbox = 0;
/* Dispatch a user command buffer region [start, end) as an indirect
 * buffer: pad it to an even dword count with a Type-2 packet, point the
 * PM4 indirect window at it, and optionally emit its age if discarded. */
641 static void r128_cce_dispatch_indirect(drm_device_t * dev,
642 drm_buf_t * buf, int start, int end)
644 drm_r128_private_t *dev_priv = dev->dev_private;
645 drm_r128_buf_priv_t *buf_priv = buf->dev_private;
647 DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);
650 int offset = buf->bus_address + start;
651 int dwords = (end - start + 3) / sizeof(u32);
653 /* Indirect buffer data must be an even number of
654 * dwords, so if we've been given an odd number we must
655 * pad the data with a Type-2 CCE packet.
659 ((char *)dev->agp_buffer_map->handle
660 + buf->offset + start);
661 data[dwords++] = cpu_to_le32(R128_CCE_PACKET2);
664 buf_priv->dispatched = 1;
666 /* Fire off the indirect buffer */
669 OUT_RING(CCE_PACKET0(R128_PM4_IW_INDOFF, 1));
/* Discarded buffer: record its age for later reuse by the freelist. */
676 if (buf_priv->discard) {
677 buf_priv->age = dev_priv->sarea_priv->last_dispatch;
679 /* Emit the indirect buffer age */
682 OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
683 OUT_RING(buf_priv->age);
689 /* FIXME: Check dispatched field */
690 buf_priv->dispatched = 0;
693 dev_priv->sarea_priv->last_dispatch++;
/* Dispatch an indexed-primitive buffer: build the GEN_INDX_PRIM header
 * in place in the user buffer (little-endian), mask off the trailing
 * half-dword of the index list, flush dirty state, and fire the region
 * as an indirect buffer once per batch of cliprects. */
696 static void r128_cce_dispatch_indices(drm_device_t * dev,
698 int start, int end, int count)
700 drm_r128_private_t *dev_priv = dev->dev_private;
701 drm_r128_buf_priv_t *buf_priv = buf->dev_private;
702 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
703 int format = sarea_priv->vc_format;
/* Offset of the buffer area as seen by the CCE engine. */
704 int offset = dev->agp_buffer_map->offset - dev_priv->cce_buffers_offset;
705 int prim = buf_priv->prim;
710 DRM_DEBUG("indices: s=%d e=%d c=%d\n", start, end, count);
716 buf_priv->dispatched = 1;
/* Emit any state except cliprects (handled per-batch below). */
718 if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) {
719 r128_emit_state(dev_priv);
722 dwords = (end - start + 3) / sizeof(u32);
/* Write the rendering packet header directly into the mapped buffer. */
724 data = (u32 *) ((char *)dev->agp_buffer_map->handle
725 + buf->offset + start);
727 data[0] = cpu_to_le32(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM,
730 data[1] = cpu_to_le32(offset);
731 data[2] = cpu_to_le32(R128_MAX_VB_VERTS);
732 data[3] = cpu_to_le32(format);
733 data[4] = cpu_to_le32((prim | R128_CCE_VC_CNTL_PRIM_WALK_IND |
/* Zero the unused 16-bit half of the final dword of indices. */
737 #ifdef __LITTLE_ENDIAN
738 data[dwords - 1] &= 0x0000ffff;
740 data[dwords - 1] &= 0xffff0000;
745 /* Emit the next set of up to three cliprects */
746 if (i < sarea_priv->nbox) {
747 r128_emit_clip_rects(dev_priv,
748 &sarea_priv->boxes[i],
749 sarea_priv->nbox - i);
752 r128_cce_dispatch_indirect(dev, buf, start, end);
755 } while (i < sarea_priv->nbox);
/* Discarded buffer: record its age for later reuse by the freelist. */
758 if (buf_priv->discard) {
759 buf_priv->age = dev_priv->sarea_priv->last_dispatch;
761 /* Emit the vertex buffer age */
764 OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
765 OUT_RING(buf_priv->age);
770 /* FIXME: Check dispatched field */
771 buf_priv->dispatched = 0;
774 dev_priv->sarea_priv->last_dispatch++;
/* Cliprects were consumed; reset for the next dispatch. */
776 sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
777 sarea_priv->nbox = 0;
/* Upload texture data via a HOSTDATA_BLT: validate the pixel format,
 * flush/invalidate the pixel cache, build the blit packet in the user's
 * DMA buffer, and dispatch it indirectly. Returns 0 or a DRM_ERR code.
 * NOTE(review): the dword_shift assignments for each format case are
 * among the lines missing from this listing. */
780 static int r128_cce_dispatch_blit(DRMFILE filp,
781 drm_device_t * dev, drm_r128_blit_t * blit)
783 drm_r128_private_t *dev_priv = dev->dev_private;
784 drm_device_dma_t *dma = dev->dma;
786 drm_r128_buf_priv_t *buf_priv;
788 int dword_shift, dwords;
792 /* The compiler won't optimize away a division by a variable,
793 * even if the only legal values are powers of two. Thus, we'll
794 * use a shift instead.
796 switch (blit->format) {
797 case R128_DATATYPE_ARGB8888:
800 case R128_DATATYPE_ARGB1555:
801 case R128_DATATYPE_RGB565:
802 case R128_DATATYPE_ARGB4444:
803 case R128_DATATYPE_YVYU422:
804 case R128_DATATYPE_VYUY422:
807 case R128_DATATYPE_CI8:
808 case R128_DATATYPE_RGB8:
/* Unknown format: reject the ioctl. */
812 DRM_ERROR("invalid blit format %d\n", blit->format);
813 return DRM_ERR(EINVAL);
816 /* Flush the pixel cache, and mark the contents as Read Invalid.
817 * This ensures no pixel data gets mixed up with the texture
818 * data from the host data blit, otherwise part of the texture
819 * image may be corrupted.
823 OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
824 OUT_RING(R128_PC_RI_GUI | R128_PC_FLUSH_GUI);
828 /* Dispatch the indirect buffer.
830 buf = dma->buflist[blit->idx];
831 buf_priv = buf->dev_private;
/* Only the owner of the DMA buffer may use it. */
833 if (buf->filp != filp) {
834 DRM_ERROR("process %d using buffer owned by %p\n",
835 DRM_CURRENTPID, buf->filp);
836 return DRM_ERR(EINVAL);
839 DRM_ERROR("sending pending buffer %d\n", blit->idx);
840 return DRM_ERR(EINVAL);
843 buf_priv->discard = 1;
/* Convert pixel count to dwords using the format-dependent shift. */
845 dwords = (blit->width * blit->height) >> dword_shift;
847 data = (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
/* Build the HOSTDATA_BLT packet in place (always little-endian). */
849 data[0] = cpu_to_le32(CCE_PACKET3(R128_CNTL_HOSTDATA_BLT, dwords + 6));
850 data[1] = cpu_to_le32((R128_GMC_DST_PITCH_OFFSET_CNTL |
851 R128_GMC_BRUSH_NONE |
852 (blit->format << 8) |
853 R128_GMC_SRC_DATATYPE_COLOR |
855 R128_DP_SRC_SOURCE_HOST_DATA |
856 R128_GMC_CLR_CMP_CNTL_DIS |
857 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS));
859 data[2] = cpu_to_le32((blit->pitch << 21) | (blit->offset >> 5));
/* Foreground/background masks fully enabled. */
860 data[3] = cpu_to_le32(0xffffffff);
861 data[4] = cpu_to_le32(0xffffffff);
862 data[5] = cpu_to_le32((blit->y << 16) | blit->x);
863 data[6] = cpu_to_le32((blit->height << 16) | blit->width);
864 data[7] = cpu_to_le32(dwords);
866 buf->used = (dwords + 8) * sizeof(u32);
868 r128_cce_dispatch_indirect(dev, buf, 0, buf->used);
870 /* Flush the pixel cache after the blit completes. This ensures
871 * the texture data is written out to memory before rendering
876 OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
877 OUT_RING(R128_PC_FLUSH_GUI);
884 /* ================================================================
885 * Tiled depth buffer management
887 * FIXME: These should all set the destination write mask for when we
888 * have hardware stencil support.
/* Write a horizontal span of depth values (with optional per-pixel mask)
 * into the depth buffer, one 1x1 PAINT_MULTI fill per pixel. Copies the
 * x/y origin, depth values, and mask from userspace. Returns 0 or a
 * DRM_ERR code. NOTE(review): the "mask != NULL" branch selection and
 * per-pixel mask test are among the lines missing from this listing. */
891 static int r128_cce_dispatch_write_span(drm_device_t * dev,
892 drm_r128_depth_t * depth)
894 drm_r128_private_t *dev_priv = dev->dev_private;
898 int i, buffer_size, mask_size;
/* Bound the request to a sane span length. */
903 if (count > 4096 || count <= 0)
904 return DRM_ERR(EMSGSIZE);
906 if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
907 return DRM_ERR(EFAULT);
909 if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
910 return DRM_ERR(EFAULT);
/* Copy in the depth values. */
913 buffer_size = depth->n * sizeof(u32);
914 buffer = drm_alloc(buffer_size, DRM_MEM_BUFS);
916 return DRM_ERR(ENOMEM);
917 if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
918 drm_free(buffer, buffer_size, DRM_MEM_BUFS);
919 return DRM_ERR(EFAULT);
/* Copy in the optional per-pixel write mask. */
922 mask_size = depth->n * sizeof(u8);
924 mask = drm_alloc(mask_size, DRM_MEM_BUFS);
926 drm_free(buffer, buffer_size, DRM_MEM_BUFS);
927 return DRM_ERR(ENOMEM);
929 if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
930 drm_free(buffer, buffer_size, DRM_MEM_BUFS);
931 drm_free(mask, mask_size, DRM_MEM_BUFS);
932 return DRM_ERR(EFAULT);
/* Masked path: one 1x1 fill per (unmasked) pixel along the span. */
935 for (i = 0; i < count; i++, x++) {
939 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
940 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
941 R128_GMC_BRUSH_SOLID_COLOR |
942 (dev_priv->depth_fmt << 8) |
943 R128_GMC_SRC_DATATYPE_COLOR |
945 R128_GMC_CLR_CMP_CNTL_DIS |
946 R128_GMC_WR_MSK_DIS);
948 OUT_RING(dev_priv->depth_pitch_offset_c);
951 OUT_RING((x << 16) | y);
952 OUT_RING((1 << 16) | 1);
958 drm_free(mask, mask_size, DRM_MEM_BUFS);
/* Unmasked path: one 1x1 fill per pixel along the span. */
960 for (i = 0; i < count; i++, x++) {
963 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
964 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
965 R128_GMC_BRUSH_SOLID_COLOR |
966 (dev_priv->depth_fmt << 8) |
967 R128_GMC_SRC_DATATYPE_COLOR |
969 R128_GMC_CLR_CMP_CNTL_DIS |
970 R128_GMC_WR_MSK_DIS);
972 OUT_RING(dev_priv->depth_pitch_offset_c);
975 OUT_RING((x << 16) | y);
976 OUT_RING((1 << 16) | 1);
982 drm_free(buffer, buffer_size, DRM_MEM_BUFS);
/* Write scattered depth values (arbitrary x/y pairs, optional per-pixel
 * mask) into the depth buffer, one 1x1 PAINT_MULTI fill per pixel.
 * Copies x[], y[], buffer[], and mask[] from userspace, freeing all
 * temporaries on every error path. Returns 0 or a DRM_ERR code. */
988 static int r128_cce_dispatch_write_pixels(drm_device_t * dev,
989 drm_r128_depth_t * depth)
990 drm_r128_private_t *dev_priv = dev->dev_private;
994 int i, xbuf_size, ybuf_size, buffer_size, mask_size;
999 if (count > 4096 || count <= 0)
1000 return DRM_ERR(EMSGSIZE);
/* Copy in the coordinate arrays. */
1002 xbuf_size = count * sizeof(*x);
1003 ybuf_size = count * sizeof(*y);
1004 x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
1006 return DRM_ERR(ENOMEM);
1008 y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
1010 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1011 return DRM_ERR(ENOMEM);
1013 if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
1014 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1015 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1016 return DRM_ERR(EFAULT);
/* NOTE(review): copies xbuf_size bytes into y; looks like it should be
 * ybuf_size (harmless only because both sizes are equal) — confirm
 * against upstream. */
1018 if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) {
1019 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1020 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1021 return DRM_ERR(EFAULT);
/* Copy in the depth values. */
1024 buffer_size = depth->n * sizeof(u32);
1025 buffer = drm_alloc(buffer_size, DRM_MEM_BUFS);
1026 if (buffer == NULL) {
1027 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1028 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1029 return DRM_ERR(ENOMEM);
1031 if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
1032 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1033 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1034 drm_free(buffer, buffer_size, DRM_MEM_BUFS);
1035 return DRM_ERR(EFAULT);
/* Copy in the optional per-pixel write mask. */
1039 mask_size = depth->n * sizeof(u8);
1040 mask = drm_alloc(mask_size, DRM_MEM_BUFS);
1042 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1043 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1044 drm_free(buffer, buffer_size, DRM_MEM_BUFS);
1045 return DRM_ERR(ENOMEM);
1047 if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
1048 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1049 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1050 drm_free(buffer, buffer_size, DRM_MEM_BUFS);
1051 drm_free(mask, mask_size, DRM_MEM_BUFS);
1052 return DRM_ERR(EFAULT);
/* Masked path: 1x1 fill per (unmasked) pixel. */
1055 for (i = 0; i < count; i++) {
1059 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
1060 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
1061 R128_GMC_BRUSH_SOLID_COLOR |
1062 (dev_priv->depth_fmt << 8) |
1063 R128_GMC_SRC_DATATYPE_COLOR |
1065 R128_GMC_CLR_CMP_CNTL_DIS |
1066 R128_GMC_WR_MSK_DIS);
1068 OUT_RING(dev_priv->depth_pitch_offset_c);
1069 OUT_RING(buffer[i]);
1071 OUT_RING((x[i] << 16) | y[i]);
1072 OUT_RING((1 << 16) | 1);
1078 drm_free(mask, mask_size, DRM_MEM_BUFS);
/* Unmasked path: 1x1 fill per pixel. */
1080 for (i = 0; i < count; i++) {
1083 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
1084 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
1085 R128_GMC_BRUSH_SOLID_COLOR |
1086 (dev_priv->depth_fmt << 8) |
1087 R128_GMC_SRC_DATATYPE_COLOR |
1089 R128_GMC_CLR_CMP_CNTL_DIS |
1090 R128_GMC_WR_MSK_DIS);
1092 OUT_RING(dev_priv->depth_pitch_offset_c);
1093 OUT_RING(buffer[i]);
1095 OUT_RING((x[i] << 16) | y[i]);
1096 OUT_RING((1 << 16) | 1);
1102 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1103 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1104 drm_free(buffer, buffer_size, DRM_MEM_BUFS);
/* Read a horizontal span of the depth buffer into the span scratch area
 * with a single count x 1 BITBLT. Returns 0 or a DRM_ERR code. */
1109 static int r128_cce_dispatch_read_span(drm_device_t * dev,
1110 drm_r128_depth_t * depth)
1112 drm_r128_private_t *dev_priv = dev->dev_private;
1118 if (count > 4096 || count <= 0)
1119 return DRM_ERR(EMSGSIZE);
1121 if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
1122 return DRM_ERR(EFAULT);
1124 if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
1125 return DRM_ERR(EFAULT);
/* Blit (x,y)..(x+count-1,y) from the depth buffer to the span buffer. */
1130 OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
1131 OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
1132 R128_GMC_DST_PITCH_OFFSET_CNTL |
1133 R128_GMC_BRUSH_NONE |
1134 (dev_priv->depth_fmt << 8) |
1135 R128_GMC_SRC_DATATYPE_COLOR |
1137 R128_DP_SRC_SOURCE_MEMORY |
1138 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);
1140 OUT_RING(dev_priv->depth_pitch_offset_c);
1141 OUT_RING(dev_priv->span_pitch_offset_c);
1143 OUT_RING((x << 16) | y);
1144 OUT_RING((0 << 16) | 0);
1145 OUT_RING((count << 16) | 1);
/* Read scattered depth-buffer pixels into the span scratch area, one 1x1
 * BITBLT per pixel (pixel i lands at span column i). Copies the x[] and
 * y[] coordinate arrays from userspace. Returns 0 or a DRM_ERR code. */
1152 static int r128_cce_dispatch_read_pixels(drm_device_t * dev,
1153 drm_r128_depth_t * depth)
1155 drm_r128_private_t *dev_priv = dev->dev_private;
1157 int i, xbuf_size, ybuf_size;
1159 DRM_DEBUG("%s\n", __FUNCTION__);
1162 if (count > 4096 || count <= 0)
1163 return DRM_ERR(EMSGSIZE);
/* The span buffer is one row wide; clamp to what fits. */
1165 if (count > dev_priv->depth_pitch) {
1166 count = dev_priv->depth_pitch;
1169 xbuf_size = count * sizeof(*x);
1170 ybuf_size = count * sizeof(*y);
1171 x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
1173 return DRM_ERR(ENOMEM);
1175 y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
1177 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1178 return DRM_ERR(ENOMEM);
1180 if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
1181 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1182 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1183 return DRM_ERR(EFAULT);
1185 if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
1186 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1187 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1188 return DRM_ERR(EFAULT);
/* One 1x1 depth->span blit per requested pixel. */
1191 for (i = 0; i < count; i++) {
1194 OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
1195 OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
1196 R128_GMC_DST_PITCH_OFFSET_CNTL |
1197 R128_GMC_BRUSH_NONE |
1198 (dev_priv->depth_fmt << 8) |
1199 R128_GMC_SRC_DATATYPE_COLOR |
1201 R128_DP_SRC_SOURCE_MEMORY |
1202 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);
1204 OUT_RING(dev_priv->depth_pitch_offset_c);
1205 OUT_RING(dev_priv->span_pitch_offset_c);
1207 OUT_RING((x[i] << 16) | y[i]);
1208 OUT_RING((i << 16) | 0);
1209 OUT_RING((1 << 16) | 1);
1214 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1215 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1220 /* ================================================================
/* Upload a 32x32 polygon stipple pattern: 32 dwords written to the
 * consecutive BRUSH_DATA registers starting at BRUSH_DATA0. */
1224 static void r128_cce_dispatch_stipple(drm_device_t * dev, u32 * stipple)
1226 drm_r128_private_t *dev_priv = dev->dev_private;
1229 DRM_DEBUG("%s\n", __FUNCTION__);
/* Packet0 count 31 => header + 32 data dwords. */
1233 OUT_RING(CCE_PACKET0(R128_BRUSH_DATA0, 31));
1234 for (i = 0; i < 32; i++) {
1235 OUT_RING(stipple[i]);
1241 /* ================================================================
/* Ioctl entry point for buffer clears: validates the lock, copies the
 * clear request from userspace, clamps the cliprect count, dispatches
 * the clear, and marks context/mask state dirty for the next dispatch. */
1245 static int r128_cce_clear(DRM_IOCTL_ARGS)
1248 drm_r128_private_t *dev_priv = dev->dev_private;
1249 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
1250 drm_r128_clear_t clear;
1253 LOCK_TEST_WITH_RETURN(dev, filp);
1255 DRM_COPY_FROM_USER_IOCTL(clear, (drm_r128_clear_t __user *) data,
1258 RING_SPACE_TEST_WITH_RETURN(dev_priv);
/* Never trust more cliprects than the SAREA can hold. */
1260 if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
1261 sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
1263 r128_cce_dispatch_clear(dev, &clear);
1266 /* Make sure we restore the 3D state next time.
1268 dev_priv->sarea_priv->dirty |= R128_UPLOAD_CONTEXT | R128_UPLOAD_MASKS;
/* Enable page flipping: save the current CRTC offset registers so they
 * can be restored on cleanup, point the CRTC at the front buffer, enable
 * the flip control bit, and initialize the current-page bookkeeping. */
1273 static int r128_do_init_pageflip(drm_device_t * dev)
1275 drm_r128_private_t *dev_priv = dev->dev_private;
1278 dev_priv->crtc_offset = R128_READ(R128_CRTC_OFFSET);
1279 dev_priv->crtc_offset_cntl = R128_READ(R128_CRTC_OFFSET_CNTL);
1281 R128_WRITE(R128_CRTC_OFFSET, dev_priv->front_offset);
1282 R128_WRITE(R128_CRTC_OFFSET_CNTL,
1283 dev_priv->crtc_offset_cntl | R128_CRTC_OFFSET_FLIP_CNTL);
1285 dev_priv->page_flipping = 1;
1286 dev_priv->current_page = 0;
1287 dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;
/* Disable page flipping: restore the saved CRTC registers, and if we are
 * currently showing the back page, flip once more so the front buffer is
 * displayed again before turning flipping off. */
1292 static int r128_do_cleanup_pageflip(drm_device_t * dev)
1294 drm_r128_private_t *dev_priv = dev->dev_private;
1297 R128_WRITE(R128_CRTC_OFFSET, dev_priv->crtc_offset);
1298 R128_WRITE(R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl);
1300 if (dev_priv->current_page != 0) {
1301 r128_cce_dispatch_flip(dev);
1305 dev_priv->page_flipping = 0;
1309 /* Swapping and flipping are different operations, need different ioctls.
1310 * They can & should be intermixed to support multiple 3d windows.
/* Ioctl entry point for page flips: validates lock and ring space,
 * lazily initializes page flipping on first use, then dispatches the
 * flip. */
1313 static int r128_cce_flip(DRM_IOCTL_ARGS)
1316 drm_r128_private_t *dev_priv = dev->dev_private;
1317 DRM_DEBUG("%s\n", __FUNCTION__);
1319 LOCK_TEST_WITH_RETURN(dev, filp);
1321 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1323 if (!dev_priv->page_flipping)
1324 r128_do_init_pageflip(dev);
1326 r128_cce_dispatch_flip(dev);
/* Ioctl entry point for buffer swaps: validates lock and ring space,
 * clamps the cliprect count, dispatches the back->front blit, and marks
 * state dirty so it is re-emitted on the next rendering dispatch. */
1332 static int r128_cce_swap(DRM_IOCTL_ARGS)
1335 drm_r128_private_t *dev_priv = dev->dev_private;
1336 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
1337 DRM_DEBUG("%s\n", __FUNCTION__);
1339 LOCK_TEST_WITH_RETURN(dev, filp);
1341 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1343 if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
1344 sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
1346 r128_cce_dispatch_swap(dev);
1347 dev_priv->sarea_priv->dirty |= (R128_UPLOAD_CONTEXT |
/* Ioctl entry point for vertex-buffer dispatch: validates the lock,
 * driver init, buffer index/primitive range, and buffer ownership,
 * copies the request from userspace, then hands the DMA buffer to
 * r128_cce_dispatch_vertex(). Returns 0 or a DRM_ERR code. */
1354 static int r128_cce_vertex(DRM_IOCTL_ARGS)
1357 drm_r128_private_t *dev_priv = dev->dev_private;
1358 drm_device_dma_t *dma = dev->dma;
1360 drm_r128_buf_priv_t *buf_priv;
1361 drm_r128_vertex_t vertex;
1363 LOCK_TEST_WITH_RETURN(dev, filp);
/* Driver not initialized: reject. */
1366 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1367 return DRM_ERR(EINVAL);
1370 DRM_COPY_FROM_USER_IOCTL(vertex, (drm_r128_vertex_t __user *) data,
1373 DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
1374 DRM_CURRENTPID, vertex.idx, vertex.count, vertex.discard);
/* Validate the user-supplied buffer index. */
1376 if (vertex.idx < 0 || vertex.idx >= dma->buf_count) {
1377 DRM_ERROR("buffer index %d (of %d max)\n",
1378 vertex.idx, dma->buf_count - 1);
1379 return DRM_ERR(EINVAL);
/* Validate the primitive type. */
1381 if (vertex.prim < 0 ||
1382 vertex.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
1383 DRM_ERROR("buffer prim %d\n", vertex.prim);
1384 return DRM_ERR(EINVAL);
1387 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1388 VB_AGE_TEST_WITH_RETURN(dev_priv);
1390 buf = dma->buflist[vertex.idx];
1391 buf_priv = buf->dev_private;
/* Only the owner of the DMA buffer may dispatch it. */
1393 if (buf->filp != filp) {
1394 DRM_ERROR("process %d using buffer owned by %p\n",
1395 DRM_CURRENTPID, buf->filp);
1396 return DRM_ERR(EINVAL);
1399 DRM_ERROR("sending pending buffer %d\n", vertex.idx);
1400 return DRM_ERR(EINVAL);
1403 buf->used = vertex.count;
1404 buf_priv->prim = vertex.prim;
1405 buf_priv->discard = vertex.discard;
1407 r128_cce_dispatch_vertex(dev, buf);
/* Ioctl handler: submit an indexed-primitive DMA buffer.
 * Validates buffer index, primitive type, ownership, index alignment,
 * and the start/end window before dispatching the element list.
 */
1413 static int r128_cce_indices(DRM_IOCTL_ARGS)
1416 drm_r128_private_t *dev_priv = dev->dev_private;
1417 drm_device_dma_t *dma = dev->dma;
1419 drm_r128_buf_priv_t *buf_priv;
1420 drm_r128_indices_t elts;
1423 LOCK_TEST_WITH_RETURN(dev, filp);
/* Reached when the CCE was never initialized. */
1426 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1427 return DRM_ERR(EINVAL);
1430 DRM_COPY_FROM_USER_IOCTL(elts, (drm_r128_indices_t __user *) data,
1433 DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID,
1434 elts.idx, elts.start, elts.end, elts.discard);
/* Reject out-of-range buffer indices from user space. */
1436 if (elts.idx < 0 || elts.idx >= dma->buf_count) {
1437 DRM_ERROR("buffer index %d (of %d max)\n",
1438 elts.idx, dma->buf_count - 1);
1439 return DRM_ERR(EINVAL);
1441 if (elts.prim < 0 || elts.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
1442 DRM_ERROR("buffer prim %d\n", elts.prim);
1443 return DRM_ERR(EINVAL);
1446 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1447 VB_AGE_TEST_WITH_RETURN(dev_priv);
1449 buf = dma->buflist[elts.idx];
1450 buf_priv = buf->dev_private;
/* A client may only submit buffers it owns. */
1452 if (buf->filp != filp) {
1453 DRM_ERROR("process %d using buffer owned by %p\n",
1454 DRM_CURRENTPID, buf->filp);
1455 return DRM_ERR(EINVAL);
/* Reached when the buffer is already queued on the ring. */
1458 DRM_ERROR("sending pending buffer %d\n", elts.idx);
1459 return DRM_ERR(EINVAL);
/* Number of 16-bit indices in the [start, end) byte window. */
1462 count = (elts.end - elts.start) / sizeof(u16);
/* Rewind past the primitive header that precedes the index data. */
1463 elts.start -= R128_INDEX_PRIM_OFFSET;
/* Index data must be 8-byte aligned for the CCE. */
1465 if (elts.start & 0x7) {
1466 DRM_ERROR("misaligned buffer 0x%x\n", elts.start);
1467 return DRM_ERR(EINVAL);
/* The header must lie within the portion already written (buf->used). */
1469 if (elts.start < buf->used) {
1470 DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used);
1471 return DRM_ERR(EINVAL);
1474 buf->used = elts.end;
1475 buf_priv->prim = elts.prim;
1476 buf_priv->discard = elts.discard;
1478 r128_cce_dispatch_indices(dev, buf, elts.start, elts.end, count);
/* Ioctl handler: perform a hostdata blit from a DMA buffer.
 * Validates the buffer index, then delegates to
 * r128_cce_dispatch_blit(), whose return value is propagated.
 */
1484 static int r128_cce_blit(DRM_IOCTL_ARGS)
1487 drm_device_dma_t *dma = dev->dma;
1488 drm_r128_private_t *dev_priv = dev->dev_private;
1489 drm_r128_blit_t blit;
1492 LOCK_TEST_WITH_RETURN(dev, filp);
1494 DRM_COPY_FROM_USER_IOCTL(blit, (drm_r128_blit_t __user *) data,
1497 DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit.idx);
/* Reject out-of-range buffer indices from user space. */
1499 if (blit.idx < 0 || blit.idx >= dma->buf_count) {
1500 DRM_ERROR("buffer index %d (of %d max)\n",
1501 blit.idx, dma->buf_count - 1);
1502 return DRM_ERR(EINVAL);
1505 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1506 VB_AGE_TEST_WITH_RETURN(dev_priv);
/* Ownership/pending checks for the blit happen in the dispatch helper. */
1508 ret = r128_cce_dispatch_blit(filp, dev, &blit);
/* Ioctl handler: depth-buffer span/pixel read and write operations,
 * dispatched on depth.func. ret defaults to EINVAL so an unrecognized
 * func value is rejected.
 */
1514 static int r128_cce_depth(DRM_IOCTL_ARGS)
1517 drm_r128_private_t *dev_priv = dev->dev_private;
1518 drm_r128_depth_t depth;
1521 LOCK_TEST_WITH_RETURN(dev, filp);
1523 DRM_COPY_FROM_USER_IOCTL(depth, (drm_r128_depth_t __user *) data,
1526 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1528 ret = DRM_ERR(EINVAL);
1529 switch (depth.func) {
/* NOTE(review): no break statements are visible between these cases,
 * which would make every case fall through and execute all later
 * dispatch calls, with the last call's result overwriting ret.
 * Upstream r128_state.c has a break after each dispatch — confirm
 * whether the breaks were lost here or the fallthrough is intended. */
1530 case R128_WRITE_SPAN:
1531 ret = r128_cce_dispatch_write_span(dev, &depth);
1532 case R128_WRITE_PIXELS:
1533 ret = r128_cce_dispatch_write_pixels(dev, &depth);
1534 case R128_READ_SPAN:
1535 ret = r128_cce_dispatch_read_span(dev, &depth);
1536 case R128_READ_PIXELS:
1537 ret = r128_cce_dispatch_read_pixels(dev, &depth);
/* Ioctl handler: upload a new 32x32 polygon stipple pattern.
 * Copies the 32-word mask from user space (EFAULT on failure), then
 * emits it to the hardware.
 */
1544 static int r128_cce_stipple(DRM_IOCTL_ARGS)
1547 drm_r128_private_t *dev_priv = dev->dev_private;
1548 drm_r128_stipple_t stipple;
1551 LOCK_TEST_WITH_RETURN(dev, filp);
1553 DRM_COPY_FROM_USER_IOCTL(stipple, (drm_r128_stipple_t __user *) data,
/* The pattern itself is a user pointer inside the argument struct;
 * fetch all 32 rows (32 bits each) in a second copy. */
1556 if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32)))
1557 return DRM_ERR(EFAULT);
1559 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1561 r128_cce_dispatch_stipple(dev, mask);
/* Ioctl handler: submit an indirect buffer of raw CCE commands.
 * The buffer contents are not verified, so (per the comment below and
 * the ioctl table) this is restricted to privileged clients.
 */
1567 static int r128_cce_indirect(DRM_IOCTL_ARGS)
1570 drm_r128_private_t *dev_priv = dev->dev_private;
1571 drm_device_dma_t *dma = dev->dma;
1573 drm_r128_buf_priv_t *buf_priv;
1574 drm_r128_indirect_t indirect;
1579 LOCK_TEST_WITH_RETURN(dev, filp);
/* Reached when the CCE was never initialized. */
1582 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1583 return DRM_ERR(EINVAL);
1586 DRM_COPY_FROM_USER_IOCTL(indirect, (drm_r128_indirect_t __user *) data,
1589 DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n",
1590 indirect.idx, indirect.start, indirect.end, indirect.discard);
/* Reject out-of-range buffer indices from user space. */
1592 if (indirect.idx < 0 || indirect.idx >= dma->buf_count) {
1593 DRM_ERROR("buffer index %d (of %d max)\n",
1594 indirect.idx, dma->buf_count - 1);
1595 return DRM_ERR(EINVAL);
1598 buf = dma->buflist[indirect.idx];
1599 buf_priv = buf->dev_private;
/* A client may only submit buffers it owns. */
1601 if (buf->filp != filp) {
1602 DRM_ERROR("process %d using buffer owned by %p\n",
1603 DRM_CURRENTPID, buf->filp);
1604 return DRM_ERR(EINVAL);
/* Reached when the buffer is already queued on the ring. */
1607 DRM_ERROR("sending pending buffer %d\n", indirect.idx);
1608 return DRM_ERR(EINVAL);
/* The command window must start at or after data already consumed. */
1611 if (indirect.start < buf->used) {
1612 DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
1613 indirect.start, buf->used);
1614 return DRM_ERR(EINVAL);
1617 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1618 VB_AGE_TEST_WITH_RETURN(dev_priv);
1620 buf->used = indirect.end;
1621 buf_priv->discard = indirect.discard;
1624 /* Wait for the 3D stream to idle before the indirect buffer
1625 * containing 2D acceleration commands is processed.
/* NOTE(review): RADEON_WAIT_UNTIL_3D_IDLE() is a Radeon-driver macro
 * appearing in this R128 driver; upstream keeps this call under
 * "#if 0" — verify whether it is compiled here and that the macro is
 * actually defined for r128 builds. */
1628 RADEON_WAIT_UNTIL_3D_IDLE();
1632 /* Dispatch the indirect buffer full of commands from the
1633 * X server. This is insecure and is thus only available to
1634 * privileged clients.
1636 r128_cce_dispatch_indirect(dev, buf, indirect.start, indirect.end);
/* Ioctl handler: query a driver parameter and copy the integer result
 * back to the user-supplied pointer (param.value). Unknown parameter
 * IDs yield EINVAL; a failed copy-out yields EFAULT.
 */
1642 static int r128_getparam(DRM_IOCTL_ARGS)
1645 drm_r128_private_t *dev_priv = dev->dev_private;
1646 drm_r128_getparam_t param;
/* Reached when the CCE was never initialized. */
1650 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1651 return DRM_ERR(EINVAL);
1654 DRM_COPY_FROM_USER_IOCTL(param, (drm_r128_getparam_t __user *) data,
1657 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
1659 switch (param.param) {
/* IRQ number is the only parameter visible in this switch. */
1660 case R128_PARAM_IRQ_NR:
1664 return DRM_ERR(EINVAL);
1667 if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
1668 DRM_ERROR("copy_to_user\n");
1669 return DRM_ERR(EFAULT);
/* DRM driver hook: called before a file handle is released.
 * Tears down page-flipping state (restoring the front buffer as the
 * display source) if it was active when the client exits.
 */
1675 void r128_driver_prerelease(drm_device_t * dev, DRMFILE filp)
1677 if (dev->dev_private) {
1678 drm_r128_private_t *dev_priv = dev->dev_private;
1679 if (dev_priv->page_flipping) {
1680 r128_do_cleanup_pageflip(dev);
/* DRM driver hook: called before device takedown; releases all CCE
 * resources via r128_do_cleanup_cce(). */
1685 void r128_driver_pretakedown(drm_device_t * dev)
1687 r128_do_cleanup_cce(dev);
/* Ioctl dispatch table, indexed by DRM_IOCTL_NR of each command.
 * The two flags after each handler are DRM's auth/root descriptors:
 * the first requires an authenticated client; the second (1) restricts
 * the call to privileged (root/master) clients — note that INIT,
 * CCE_START/STOP/RESET, and INDIRECT are privileged.
 */
1690 drm_ioctl_desc_t r128_ioctls[] = {
1691 [DRM_IOCTL_NR(DRM_R128_INIT)] = {r128_cce_init, 1, 1},
1692 [DRM_IOCTL_NR(DRM_R128_CCE_START)] = {r128_cce_start, 1, 1},
1693 [DRM_IOCTL_NR(DRM_R128_CCE_STOP)] = {r128_cce_stop, 1, 1},
1694 [DRM_IOCTL_NR(DRM_R128_CCE_RESET)] = {r128_cce_reset, 1, 1},
1695 [DRM_IOCTL_NR(DRM_R128_CCE_IDLE)] = {r128_cce_idle, 1, 0},
1696 [DRM_IOCTL_NR(DRM_R128_RESET)] = {r128_engine_reset, 1, 0},
1697 [DRM_IOCTL_NR(DRM_R128_FULLSCREEN)] = {r128_fullscreen, 1, 0},
1698 [DRM_IOCTL_NR(DRM_R128_SWAP)] = {r128_cce_swap, 1, 0},
1699 [DRM_IOCTL_NR(DRM_R128_FLIP)] = {r128_cce_flip, 1, 0},
1700 [DRM_IOCTL_NR(DRM_R128_CLEAR)] = {r128_cce_clear, 1, 0},
1701 [DRM_IOCTL_NR(DRM_R128_VERTEX)] = {r128_cce_vertex, 1, 0},
1702 [DRM_IOCTL_NR(DRM_R128_INDICES)] = {r128_cce_indices, 1, 0},
1703 [DRM_IOCTL_NR(DRM_R128_BLIT)] = {r128_cce_blit, 1, 0},
1704 [DRM_IOCTL_NR(DRM_R128_DEPTH)] = {r128_cce_depth, 1, 0},
1705 [DRM_IOCTL_NR(DRM_R128_STIPPLE)] = {r128_cce_stipple, 1, 0},
1706 [DRM_IOCTL_NR(DRM_R128_INDIRECT)] = {r128_cce_indirect, 1, 1},
1707 [DRM_IOCTL_NR(DRM_R128_GETPARAM)] = {r128_getparam, 1, 0},
/* Number of entries in the table, exported for the DRM core. */
1710 int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);