/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"
#include "r600_reg_safe.h"

static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc);
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
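
/*
 * The reloc parser is called through a function pointer so that an
 * alternate implementation could be substituted; only the MM variant
 * is wired up in this port.
 */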

#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
#endif /* FREEBSD_WIP */

struct r600_cs_track {
	/* configuration we mirror so that we use the same code between kms/ums */
	u32			group_size;
	u32			nbanks;
	u32			npipes;
	/* value we track */
	u32			sq_config;
	u32			log_nsamples;
	u32			nsamples;
	u32			cb_color_base_last[8];
	struct radeon_bo	*cb_color_bo[8];
	u64			cb_color_bo_mc[8];
	u64			cb_color_bo_offset[8];
	struct radeon_bo	*cb_color_frag_bo[8];
	u64			cb_color_frag_offset[8];
	struct radeon_bo	*cb_color_tile_bo[8];
	u64			cb_color_tile_offset[8];
	u32			cb_color_mask[8];
	u32			cb_color_info[8];
	u32			cb_color_view[8];
	u32			cb_color_size[8];
	u32			cb_color_size_idx[8]; /* unused */
	u32			cb_target_mask;
	u32			cb_shader_mask;  /* unused */
	bool			cb_dirty;
	bool			is_resolve;
	u32			vgt_strmout_en;
	u32			vgt_strmout_buffer_en;
	struct radeon_bo	*vgt_strmout_bo[4];
	u64			vgt_strmout_bo_mc[4]; /* unused */
	u32			vgt_strmout_bo_offset[4];
	u32			vgt_strmout_size[4];
	bool			streamout_dirty;
	u32			db_depth_control;
	u32			db_depth_info;
	u32			db_depth_size_idx;
	u32			db_depth_view;
	u32			db_depth_size;
	u32			db_offset;
	struct radeon_bo	*db_bo;
	u64			db_bo_mc;
	bool			db_dirty;
	bool			sx_misc_kill_all_prims;
	struct radeon_bo	*htile_bo;
	u64			htile_offset;
	u32			htile_surface;
};

#define FMT_8_BIT(fmt, vc)   [fmt] = { 1, 1, 1, vc, CHIP_R600 }
#define FMT_16_BIT(fmt, vc)  [fmt] = { 1, 1, 2, vc, CHIP_R600 }
#define FMT_24_BIT(fmt)      [fmt] = { 1, 1, 4, 0, CHIP_R600 }
#define FMT_32_BIT(fmt, vc)  [fmt] = { 1, 1, 4, vc, CHIP_R600 }
#define FMT_48_BIT(fmt)      [fmt] = { 1, 1, 8, 0, CHIP_R600 }
#define FMT_64_BIT(fmt, vc)  [fmt] = { 1, 1, 8, vc, CHIP_R600 }
#define FMT_96_BIT(fmt)      [fmt] = { 1, 1, 12, 0, CHIP_R600 }
#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc, CHIP_R600 }

struct gpu_formats {
	unsigned blockwidth;
	unsigned blockheight;
	unsigned blocksize;
	unsigned valid_color;
	enum radeon_family min_family;
};
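
/*
 * Per-format block geometry: blockwidth x blockheight pixels per block and
 * blocksize bytes per block.  valid_color marks formats usable for color
 * buffers; min_family gates formats that only newer ASICs support (entries
 * that omit it default to 0, i.e. allowed on every family).
 */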
static const struct gpu_formats color_formats_table[] = {
	/* 8 bit */
	FMT_8_BIT(V_038004_COLOR_8, 1),
	FMT_8_BIT(V_038004_COLOR_4_4, 1),
	FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
	FMT_8_BIT(V_038004_FMT_1, 0),

	/* 16-bit */
	FMT_16_BIT(V_038004_COLOR_16, 1),
	FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
	FMT_16_BIT(V_038004_COLOR_8_8, 1),
	FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
	FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
	FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
	FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
	FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),

	/* 24-bit */
	FMT_24_BIT(V_038004_FMT_8_8_8),

	/* 32-bit */
	FMT_32_BIT(V_038004_COLOR_32, 1),
	FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_16_16, 1),
	FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_8_24, 1),
	FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_24_8, 1),
	FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
	FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
	FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
	FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
	FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
	FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
	FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
	FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),

	/* 48-bit */
	FMT_48_BIT(V_038004_FMT_16_16_16),
	FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),

	/* 64-bit */
	FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
	FMT_64_BIT(V_038004_COLOR_32_32, 1),
	FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
	FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
	FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),

	/* 96-bit */
	FMT_96_BIT(V_038004_FMT_32_32_32),
	FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),

	/* 128-bit */
	FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
	FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),

	[V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
	[V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },

	/* block compressed formats */
	[V_038004_FMT_BC1] = { 4, 4, 8, 0 },
	[V_038004_FMT_BC2] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC3] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC4] = { 4, 4, 8, 0 },
	[V_038004_FMT_BC5] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR }, /* Evergreen-only */
	[V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR }, /* Evergreen-only */

	/* The other Evergreen formats */
	[V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR },
};

bool r600_fmt_is_valid_color(u32 format)
{
	if (format >= ARRAY_SIZE(color_formats_table))
		return false;

	if (color_formats_table[format].valid_color)
		return true;

	return false;
}

bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family)
{
	if (format >= ARRAY_SIZE(color_formats_table))
		return false;

	if (family < color_formats_table[format].min_family)
		return false;

	if (color_formats_table[format].blockwidth > 0)
		return true;

	return false;
}

int r600_fmt_get_blocksize(u32 format)
{
	if (format >= ARRAY_SIZE(color_formats_table))
		return 0;

	return color_formats_table[format].blocksize;
}

int r600_fmt_get_nblocksx(u32 format, u32 w)
{
	unsigned bw;

	if (format >= ARRAY_SIZE(color_formats_table))
		return 0;

	bw = color_formats_table[format].blockwidth;
	if (bw == 0)
		return 0;

	return (w + bw - 1) / bw;
}

int r600_fmt_get_nblocksy(u32 format, u32 h)
{
	unsigned bh;

	if (format >= ARRAY_SIZE(color_formats_table))
		return 0;

	bh = color_formats_table[format].blockheight;
	if (bh == 0)
		return 0;

	return (h + bh - 1) / bh;
}

struct array_mode_checker {
	int array_mode;
	u32 group_size;
	u32 nbanks;
	u32 npipes;
	u32 nsamples;
	u32 blocksize;
};

/* returns alignment in pixels for pitch/height/depth and bytes for base */
static int r600_get_array_mode_alignment(struct array_mode_checker *values,
					 u32 *pitch_align,
					 u32 *height_align,
					 u32 *depth_align,
					 u64 *base_align)
{
	u32 tile_width = 8, tile_height = 8;
	u32 macro_tile_width = values->nbanks;
	u32 macro_tile_height = values->npipes;
	u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
	u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;
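
	/* An r600 micro tile is 8x8 pixels; a macro tile spans nbanks x npipes
	 * micro tiles, so macro_tile_bytes is the footprint of one macro tile
	 * across all samples. */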
	switch (values->array_mode) {
	case ARRAY_LINEAR_GENERAL:
		/* technically tile_width/_height for pitch/height */
		*pitch_align = 1; /* tile_width */
		*height_align = 1; /* tile_height */
		*depth_align = 1;
		*base_align = 1;
		break;
	case ARRAY_LINEAR_ALIGNED:
		*pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
		*height_align = 1;
		*depth_align = 1;
		*base_align = values->group_size;
		break;
	case ARRAY_1D_TILED_THIN1:
		*pitch_align = max((u32)tile_width,
				   (u32)(values->group_size /
					 (tile_height * values->blocksize * values->nsamples)));
		*height_align = tile_height;
		*depth_align = 1;
		*base_align = values->group_size;
		break;
	case ARRAY_2D_TILED_THIN1:
		*pitch_align = max((u32)macro_tile_width * tile_width,
				   (u32)((values->group_size * values->nbanks) /
					 (values->blocksize * values->nsamples * tile_width)));
		*height_align = macro_tile_height * tile_height;
		*depth_align = 1;
		*base_align = max(macro_tile_bytes,
				  (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void r600_cs_track_init(struct r600_cs_track *track)
{
	int i;

	/* assume DX9 mode */
	track->sq_config = DX9_CONSTS;
	for (i = 0; i < 8; i++) {
		track->cb_color_base_last[i] = 0;
		track->cb_color_size[i] = 0;
		track->cb_color_size_idx[i] = 0;
		track->cb_color_info[i] = 0;
		track->cb_color_view[i] = 0xFFFFFFFF;
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
		track->cb_color_bo_mc[i] = 0xFFFFFFFF;
		track->cb_color_frag_bo[i] = NULL;
		track->cb_color_frag_offset[i] = 0xFFFFFFFF;
		track->cb_color_tile_bo[i] = NULL;
		track->cb_color_tile_offset[i] = 0xFFFFFFFF;
		track->cb_color_mask[i] = 0xFFFFFFFF;
	}
	track->is_resolve = false;
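	/* effectively out-of-range defaults (16 samples, all-ones masks and
	 * offsets): state that userspace leaves unprogrammed is caught by the
	 * validators instead of silently passing */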
	track->nsamples = 16;
	track->log_nsamples = 4;
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;
	track->cb_dirty = true;

	track->db_bo_mc = 0xFFFFFFFF;
	/* assume the biggest format and that htile is enabled */
	track->db_depth_info = 7 | (1 << 25);
	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_size_idx = 0;
	track->db_depth_control = 0xFFFFFFFF;
	track->db_dirty = true;
	track->htile_bo = NULL;
	track->htile_offset = 0xFFFFFFFF;
	track->htile_surface = 0;

	for (i = 0; i < 4; i++) {
		track->vgt_strmout_size[i] = 0;
		track->vgt_strmout_bo[i] = NULL;
		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
		track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
	}
	track->streamout_dirty = true;
	track->sx_misc_kill_all_prims = false;
}

static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
	struct r600_cs_track *track = p->track;
	u32 slice_tile_max, size, tmp;
	u32 height, height_align, pitch, pitch_align, depth_align;
	u64 base_offset, base_align;
	struct array_mode_checker array_check;
	volatile u32 *ib = p->ib.ptr;
	unsigned array_mode;
	u32 format;

	/* When resolve is used, the second colorbuffer always has 1 sample. */
	unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples;

	size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
	format = G_0280A0_FORMAT(track->cb_color_info[i]);
	if (!r600_fmt_is_valid_color(format)) {
		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
			 __func__, __LINE__, format,
			 i, track->cb_color_info[i]);
		return -EINVAL;
	}
	/* pitch in pixels */
	pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
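	/* SLICE_TILE_MAX counts 8x8 tiles (64 pixels each), so the total pixel
	 * count divided by the pitch gives the surface height */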
	slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
	slice_tile_max *= 64;
	height = slice_tile_max / pitch;

	array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);

	base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
	array_check.array_mode = array_mode;
	array_check.group_size = track->group_size;
	array_check.nbanks = track->nbanks;
	array_check.npipes = track->npipes;
	array_check.nsamples = nsamples;
	array_check.blocksize = r600_fmt_get_blocksize(format);
	if (r600_get_array_mode_alignment(&array_check,
					  &pitch_align, &height_align, &depth_align, &base_align)) {
		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
			 track->cb_color_info[i]);
		return -EINVAL;
	}
	switch (array_mode) {
	case V_0280A0_ARRAY_LINEAR_GENERAL:
		break;
	case V_0280A0_ARRAY_LINEAR_ALIGNED:
		break;
	case V_0280A0_ARRAY_1D_TILED_THIN1:
		/* avoid breaking userspace */
		if (height > 7)
			height &= ~0x7;
		break;
	case V_0280A0_ARRAY_2D_TILED_THIN1:
		break;
	default:
		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
			 track->cb_color_info[i]);
		return -EINVAL;
	}

	if (!IS_ALIGNED(pitch, pitch_align)) {
		dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, pitch, pitch_align, array_mode);
		return -EINVAL;
	}
	if (!IS_ALIGNED(height, height_align)) {
		dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, height, height_align, array_mode);
		return -EINVAL;
	}
	if (!IS_ALIGNED(base_offset, base_align)) {
		dev_warn(p->dev, "%s offset[%d] 0x%jx 0x%jx, %d not aligned\n", __func__, i,
			 (uintmax_t)base_offset, (uintmax_t)base_align, array_mode);
		return -EINVAL;
	}

	/* check offset */
	tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
	      r600_fmt_get_blocksize(format) * nsamples;
	switch (array_mode) {
	default:
	case V_0280A0_ARRAY_LINEAR_GENERAL:
	case V_0280A0_ARRAY_LINEAR_ALIGNED:
		tmp += track->cb_color_view[i] & 0xFF;
		break;
	case V_0280A0_ARRAY_1D_TILED_THIN1:
	case V_0280A0_ARRAY_2D_TILED_THIN1:
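		/* tiled: tmp currently holds one slice; CB_COLOR_VIEW's
		 * SLICE_MAX is the last slice index, so this scales the
		 * size to (SLICE_MAX + 1) slices */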
		tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp;
		break;
	}
	if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
		if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
			/* the initial DDX does bad things with the CB size occasionally */
			/* it rounds up height too far for slice tile max but the BO is smaller */
			/* r600c,g also seem to flush at bad times in some apps resulting in
			 * bogus values here. So for linear just allow anything to avoid breaking
			 * broken userspace.
			 */
		} else {
			dev_warn(p->dev, "%s offset[%d] %d %ju %d %lu too big (%d %d) (%d %d %d)\n",
				 __func__, i, array_mode,
				 (uintmax_t)track->cb_color_bo_offset[i], tmp,
				 radeon_bo_size(track->cb_color_bo[i]),
				 pitch, height, r600_fmt_get_nblocksx(format, pitch),
				 r600_fmt_get_nblocksy(format, height),
				 r600_fmt_get_blocksize(format));
			return -EINVAL;
		}
	}

	/* limit max tile */
	tmp = (height * pitch) >> 6;
	if (tmp < slice_tile_max)
		slice_tile_max = tmp;
	tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
		S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
	ib[track->cb_color_size_idx[i]] = tmp;

	/* FMASK/CMASK */
	switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
	case V_0280A0_TILE_DISABLE:
		break;
	case V_0280A0_FRAG_ENABLE:
		if (track->nsamples > 1) {
			uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]);
			/* the tile size is 8x8, but the size is in units of bits.
			 * for bytes, do just * 8. */
			uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1);

			if (bytes + track->cb_color_frag_offset[i] >
			    radeon_bo_size(track->cb_color_frag_bo[i])) {
				dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
					 "(tile_max=%u, bytes=%u, offset=%ju, bo_size=%lu)\n",
					 __func__, tile_max, bytes,
					 (uintmax_t)track->cb_color_frag_offset[i],
					 radeon_bo_size(track->cb_color_frag_bo[i]));
				return -EINVAL;
			}
		}
		/* fall through */
	case V_0280A0_CLEAR_ENABLE:
	{
		uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
		/* One block = 128x128 pixels, one 8x8 tile has 4 bits.
		 * (128*128) / (8*8) / 2 = 128 bytes per block. */
		uint32_t bytes = (block_max + 1) * 128;

		if (bytes + track->cb_color_tile_offset[i] >
		    radeon_bo_size(track->cb_color_tile_bo[i])) {
			dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
				 "(block_max=%u, bytes=%u, offset=%ju, bo_size=%lu)\n",
				 __func__, block_max, bytes,
				 (uintmax_t)track->cb_color_tile_offset[i],
				 radeon_bo_size(track->cb_color_tile_bo[i]));
			return -EINVAL;
		}
		break;
	}
	default:
		dev_warn(p->dev, "%s invalid tile mode\n", __func__);
		return -EINVAL;
	}
	return 0;
}

static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
{
	struct r600_cs_track *track = p->track;
	u32 nviews, bpe, ntiles, size, slice_tile_max, tmp;
	u32 height_align, pitch_align, depth_align;
	u32 pitch = 8192;
	u32 height = 8192;
	u64 base_offset, base_align;
	struct array_mode_checker array_check;
	int array_mode;
	volatile u32 *ib = p->ib.ptr;

	if (track->db_bo == NULL) {
		dev_warn(p->dev, "z/stencil with no depth buffer\n");
		return -EINVAL;
	}
	switch (G_028010_FORMAT(track->db_depth_info)) {
	case V_028010_DEPTH_16:
		bpe = 2;
		break;
	case V_028010_DEPTH_X8_24:
	case V_028010_DEPTH_8_24:
	case V_028010_DEPTH_X8_24_FLOAT:
	case V_028010_DEPTH_8_24_FLOAT:
	case V_028010_DEPTH_32_FLOAT:
		bpe = 4;
		break;
	case V_028010_DEPTH_X24_8_32_FLOAT:
		bpe = 8;
		break;
	default:
		dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
		return -EINVAL;
	}
	if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
		if (!track->db_depth_size_idx) {
			dev_warn(p->dev, "z/stencil buffer size not set\n");
			return -EINVAL;
		}
		tmp = radeon_bo_size(track->db_bo) - track->db_offset;
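		/* bytes -> pixels (/ bpe) -> 8x8 tiles (>> 6), the unit
		 * SLICE_TILE_MAX is expressed in */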
		tmp = (tmp / bpe) >> 6;
		if (!tmp) {
			dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
				 track->db_depth_size, bpe, track->db_offset,
				 radeon_bo_size(track->db_bo));
			return -EINVAL;
		}
		ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
	} else {
		size = radeon_bo_size(track->db_bo);
		/* pitch in pixels */
		pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
		slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
		slice_tile_max *= 64;
		height = slice_tile_max / pitch;

		base_offset = track->db_bo_mc + track->db_offset;
		array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
		array_check.array_mode = array_mode;
		array_check.group_size = track->group_size;
		array_check.nbanks = track->nbanks;
		array_check.npipes = track->npipes;
		array_check.nsamples = track->nsamples;
		array_check.blocksize = bpe;
		if (r600_get_array_mode_alignment(&array_check,
						  &pitch_align, &height_align, &depth_align, &base_align)) {
			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
				 G_028010_ARRAY_MODE(track->db_depth_info),
				 track->db_depth_info);
			return -EINVAL;
		}
		switch (array_mode) {
		case V_028010_ARRAY_1D_TILED_THIN1:
			/* don't break userspace */
			height &= ~0x7;
			break;
		case V_028010_ARRAY_2D_TILED_THIN1:
			break;
		default:
			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
				 G_028010_ARRAY_MODE(track->db_depth_info),
				 track->db_depth_info);
			return -EINVAL;
		}

		if (!IS_ALIGNED(pitch, pitch_align)) {
			dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
				 __func__, __LINE__, pitch, pitch_align, array_mode);
			return -EINVAL;
		}
		if (!IS_ALIGNED(height, height_align)) {
			dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
				 __func__, __LINE__, height, height_align, array_mode);
			return -EINVAL;
		}
		if (!IS_ALIGNED(base_offset, base_align)) {
			dev_warn(p->dev, "%s offset 0x%jx, 0x%jx, %d not aligned\n", __func__,
				 (uintmax_t)base_offset, (uintmax_t)base_align, array_mode);
			return -EINVAL;
		}

		ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
		nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
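		/* each of the ntiles 8x8 tiles covers 64 pixels of bpe bytes,
		 * replicated for every view and every sample */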
		tmp = ntiles * bpe * 64 * nviews * track->nsamples;
		if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
			dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
				 array_mode,
				 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
				 radeon_bo_size(track->db_bo));
			return -EINVAL;
		}
	}

	/* hyperz */
	if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
		unsigned nbx, nby;

		if (track->htile_bo == NULL) {
			dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
				 __func__, __LINE__, track->db_depth_info);
			return -EINVAL;
		}
		if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
			dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
				 __func__, __LINE__, track->db_depth_size);
			return -EINVAL;
		}

		nbx = pitch;
		nby = height;
		if (G_028D24_LINEAR(track->htile_surface)) {
			/* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */
			nbx = roundup2(nbx, 16 * 8);
			/* nby is npipes htiles aligned == npipes * 8 pixel aligned */
			nby = roundup(nby, track->npipes * 8);
		} else {
			/* always assume 8x8 htile */
			/* align is htile align * 8, htile align varies according to
			 * number of pipes and tile width and nby
			 */
			switch (track->npipes) {
			case 8:
				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
				nbx = roundup2(nbx, 64 * 8);
				nby = roundup2(nby, 64 * 8);
				break;
			case 4:
				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
				nbx = roundup2(nbx, 64 * 8);
				nby = roundup2(nby, 32 * 8);
				break;
			case 2:
				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
				nbx = roundup2(nbx, 32 * 8);
				nby = roundup2(nby, 32 * 8);
				break;
			case 1:
				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
				nbx = roundup2(nbx, 32 * 8);
				nby = roundup2(nby, 16 * 8);
				break;
			default:
				dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
					 __func__, __LINE__, track->npipes);
				return -EINVAL;
			}
		}
		/* compute number of htile */
		nbx = nbx >> 3;
		nby = nby >> 3;
		/* size must be aligned on npipes * 2K boundary */
		size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
		size += track->htile_offset;

		if (size > radeon_bo_size(track->htile_bo)) {
			dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
				 __func__, __LINE__, radeon_bo_size(track->htile_bo),
				 size, nbx, nby);
			return -EINVAL;
		}
	}

	track->db_dirty = false;
	return 0;
}

static int r600_cs_track_check(struct radeon_cs_parser *p)
{
	struct r600_cs_track *track = p->track;
	u32 tmp;
	int r, i;

	/* on legacy kernel we don't perform advanced check */
	if (p->rdev == NULL)
		return 0;

	/* check streamout */
	if (track->streamout_dirty && track->vgt_strmout_en) {
		for (i = 0; i < 4; i++) {
			if (track->vgt_strmout_buffer_en & (1 << i)) {
				if (track->vgt_strmout_bo[i]) {
					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
						(u64)track->vgt_strmout_size[i];
					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
						DRM_ERROR("streamout %d bo too small: 0x%jx, 0x%lx\n",
							  i, (uintmax_t)offset,
							  radeon_bo_size(track->vgt_strmout_bo[i]));
						return -EINVAL;
					}
				} else {
					dev_warn(p->dev, "No buffer for streamout %d\n", i);
					return -EINVAL;
				}
			}
		}
		track->streamout_dirty = false;
	}

	if (track->sx_misc_kill_all_prims)
		return 0;

	/* check that we have a cb for each enabled target, we don't check
	 * shader_mask because it seems mesa isn't always setting it :(
	 */
	if (track->cb_dirty) {
		tmp = track->cb_target_mask;

		/* We must check both colorbuffers for RESOLVE. */
		if (track->is_resolve) {
			tmp |= 0xff;
		}

		for (i = 0; i < 8; i++) {
			if ((tmp >> (i * 4)) & 0xF) {
				/* at least one component is enabled */
				if (track->cb_color_bo[i] == NULL) {
					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
						 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
					return -EINVAL;
				}
				/* perform rewrite of CB_COLOR[0-7]_SIZE */
				r = r600_cs_track_validate_cb(p, i);
				if (r)
					return r;
			}
		}
		track->cb_dirty = false;
	}

	/* Check depth buffer */
	if (track->db_dirty &&
	    G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID &&
	    (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
	     G_028800_Z_ENABLE(track->db_depth_control))) {
		r = r600_cs_track_validate_db(p);
		if (r)
			return r;
	}

	return 0;
}

/**
 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser:	parser structure holding parsing context.
 * @pkt:	where to store packet information
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet
 * type is unknown.
 */
static int r600_cs_packet_parse(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt,
				unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}

/**
 * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3
 * @parser:	parser structure holding parsing context.
 * @data:	pointer to relocation data
 * @offset_start:	starting offset
 * @offset_mask:	offset mask (to align start offset on)
 * @reloc:	reloc information
 *
 * Check if the next packet is a relocation packet3, do bo validation and
 * compute the GPU offset using the provided start.
 */
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}

/**
 * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
 * @parser:	parser structure holding parsing context.
 *
 * Check if the next packet is a relocation packet3.
 */
static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return 0;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		return 0;
	}
	return 1;
}

/**
 * r600_cs_packet_parse_vline() - parse userspace VLINE packet
 * @parser:	parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched off crtc and nulls out the
 * wait in that case.
 */
static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, wait_reg_mem;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg, wait_reg_mem_info;
	volatile uint32_t *ib;

	ib = p->ib.ptr;

	/* parse the WAIT_REG_MEM */
	r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
	if (r)
		return r;

	/* check it's a WAIT_REG_MEM */
	if (wait_reg_mem.type != PACKET_TYPE3 ||
	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
		return -EINVAL;
	}

	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
	/* bit 4 is reg (0) or mem (1) */
	if (wait_reg_mem_info & 0x10) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
		return -EINVAL;
	}
	/* waiting for value to be equal */
	if ((wait_reg_mem_info & 0x7) != 0x3) {
		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
		return -EINVAL;
	}
	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
		return -EINVAL;
	}

	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
		return -EINVAL;
	}

	/* jump over the NOP */
	r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += wait_reg_mem.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
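	/* the crtc_id sits in the reloc NOP that follows: skip the 2-dword
	 * VLINE_START_END write and the 7-dword WAIT_REG_MEM to reach it */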
	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
	reg = CP_PACKET0_GET_REG(header);

	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		return -EINVAL;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
		ib[h_idx + 4] = PACKET2(0);
		ib[h_idx + 5] = PACKET2(0);
		ib[h_idx + 6] = PACKET2(0);
		ib[h_idx + 7] = PACKET2(0);
		ib[h_idx + 8] = PACKET2(0);
	} else if (crtc_id == 1) {
		switch (reg) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= ~R600_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			return -EINVAL;
		}
		ib[h_idx] = header;
		ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
	}

	return 0;
}

static int r600_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
		r = r600_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			return r;
		}
		break;
	default:
		DRM_ERROR("Forbidden register 0x%04X in cs at %d\n",
			  reg, idx);
		return -EINVAL;
	}
	return 0;
}

static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
				 struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = r600_packet0_check(p, pkt, idx, reg);
		if (r) {
			return r;
		}
	}
	return 0;
}

/**
 * r600_cs_check_reg() - check if register is authorized or not
 * @parser:	parser structure holding parsing context
 * @reg:	register we are testing
 * @idx:	index into the cs buffer
 *
 * This function will test against r600_reg_safe_bm and return 0
 * if the register is safe. If the register is not flagged as safe
 * this function will test it against a list of registers needing
 * special handling.
 */
static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	struct r600_cs_track *track = (struct r600_cs_track *)p->track;
	struct radeon_cs_reloc *reloc;
	u32 m, i, tmp, *ib;
	int r;
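
	/* r600_reg_safe_bm has one bit per dword register: reg >> 7 picks the
	 * 32-bit bitmap word and (reg >> 2) & 31 the bit within it.  A clear
	 * bit means the register is blanket-safe; a set bit routes it to the
	 * switch below for special handling. */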
	i = (reg >> 7);
	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	m = 1 << ((reg >> 2) & 31);
	if (!(r600_reg_safe_bm[i] & m))
		return 0;
	ib = p->ib.ptr;
	switch (reg) {
	/* force following reg to 0 in an attempt to disable out buffer
	 * which will need us to better understand how it works to perform
	 * security check on it (Jerome)
	 */
	case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
	case R_008C44_SQ_ESGS_RING_SIZE:
	case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
	case R_008C54_SQ_ESTMP_RING_SIZE:
	case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
	case R_008C74_SQ_FBUF_RING_SIZE:
	case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
	case R_008C5C_SQ_GSTMP_RING_SIZE:
	case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
	case R_008C4C_SQ_GSVS_RING_SIZE:
	case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
	case R_008C6C_SQ_PSTMP_RING_SIZE:
	case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
	case R_008C7C_SQ_REDUC_RING_SIZE:
	case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
	case R_008C64_SQ_VSTMP_RING_SIZE:
	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
		/* get value to populate the IB, don't remove */
		tmp = radeon_get_ib_value(p, idx);
		ib[idx] = 0;
		break;
	case SQ_CONFIG:
		track->sq_config = radeon_get_ib_value(p, idx);
		break;
	case R_028800_DB_DEPTH_CONTROL:
		track->db_depth_control = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case R_028010_DB_DEPTH_INFO:
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
		    r600_cs_packet_next_is_pkt3_nop(p)) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
					 "0x%04X\n", reg);
				return -EINVAL;
			}
			track->db_depth_info = radeon_get_ib_value(p, idx);
			ib[idx] &= C_028010_ARRAY_MODE;
			track->db_depth_info &= C_028010_ARRAY_MODE;
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
			} else {
				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
			}
		} else {
			track->db_depth_info = radeon_get_ib_value(p, idx);
		}
		track->db_dirty = true;
		break;
	case R_028004_DB_DEPTH_VIEW:
		track->db_depth_view = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case R_028000_DB_DEPTH_SIZE:
		track->db_depth_size = radeon_get_ib_value(p, idx);
		track->db_depth_size_idx = idx;
		track->db_dirty = true;
		break;
	case R_028AB0_VGT_STRMOUT_EN:
		track->vgt_strmout_en = radeon_get_ib_value(p, idx);
		track->streamout_dirty = true;
		break;
	case R_028B20_VGT_STRMOUT_BUFFER_EN:
		track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
		track->streamout_dirty = true;
		break;
	case VGT_STRMOUT_BUFFER_BASE_0:
	case VGT_STRMOUT_BUFFER_BASE_1:
	case VGT_STRMOUT_BUFFER_BASE_2:
	case VGT_STRMOUT_BUFFER_BASE_3:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->vgt_strmout_bo[tmp] = reloc->robj;
		track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset;
		track->streamout_dirty = true;
		break;
	case VGT_STRMOUT_BUFFER_SIZE_0:
	case VGT_STRMOUT_BUFFER_SIZE_1:
	case VGT_STRMOUT_BUFFER_SIZE_2:
	case VGT_STRMOUT_BUFFER_SIZE_3:
		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
		/* size in register is DWs, convert to bytes */
		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
		track->streamout_dirty = true;
		break;
	case CP_COHER_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case R_028238_CB_TARGET_MASK:
		track->cb_target_mask = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case R_02823C_CB_SHADER_MASK:
		track->cb_shader_mask = radeon_get_ib_value(p, idx);
		break;
	case R_028C04_PA_SC_AA_CONFIG:
		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
		track->log_nsamples = tmp;
		track->nsamples = 1 << tmp;
		track->cb_dirty = true;
		break;
	case R_028808_CB_COLOR_CONTROL:
		tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx));
		track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX;
		track->cb_dirty = true;
		break;
	case R_0280A0_CB_COLOR0_INFO:
	case R_0280A4_CB_COLOR1_INFO:
	case R_0280A8_CB_COLOR2_INFO:
	case R_0280AC_CB_COLOR3_INFO:
	case R_0280B0_CB_COLOR4_INFO:
	case R_0280B4_CB_COLOR5_INFO:
	case R_0280B8_CB_COLOR6_INFO:
	case R_0280BC_CB_COLOR7_INFO:
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
		    r600_cs_packet_next_is_pkt3_nop(p)) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
			} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
			}
		} else {
			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		}
		track->cb_dirty = true;
		break;
	case R_028080_CB_COLOR0_VIEW:
	case R_028084_CB_COLOR1_VIEW:
	case R_028088_CB_COLOR2_VIEW:
	case R_02808C_CB_COLOR3_VIEW:
	case R_028090_CB_COLOR4_VIEW:
	case R_028094_CB_COLOR5_VIEW:
	case R_028098_CB_COLOR6_VIEW:
	case R_02809C_CB_COLOR7_VIEW:
		tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case R_028060_CB_COLOR0_SIZE:
	case R_028064_CB_COLOR1_SIZE:
	case R_028068_CB_COLOR2_SIZE:
	case R_02806C_CB_COLOR3_SIZE:
	case R_028070_CB_COLOR4_SIZE:
	case R_028074_CB_COLOR5_SIZE:
	case R_028078_CB_COLOR6_SIZE:
	case R_02807C_CB_COLOR7_SIZE:
		tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
		track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_size_idx[tmp] = idx;
		track->cb_dirty = true;
		break;
		/* These registers were added late; there is userspace
		 * which does provide relocations for them but sets a
		 * 0 offset. In order to avoid breaking old userspace
		 * we detect this and set the address to point to the
		 * last CB_COLOR0_BASE. Note that if userspace doesn't
		 * set CB_COLOR0_BASE before these registers we will
		 * report an error. Old userspace always set
		 * CB_COLOR0_BASE before any of these.
		 */
	case R_0280E0_CB_COLOR0_FRAG:
	case R_0280E4_CB_COLOR1_FRAG:
	case R_0280E8_CB_COLOR2_FRAG:
	case R_0280EC_CB_COLOR3_FRAG:
	case R_0280F0_CB_COLOR4_FRAG:
	case R_0280F4_CB_COLOR5_FRAG:
	case R_0280F8_CB_COLOR6_FRAG:
	case R_0280FC_CB_COLOR7_FRAG:
		tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
			if (!track->cb_color_base_last[tmp]) {
				dev_err(p->dev, "Broken old userspace? No cb_color0_base supplied before trying to write 0x%08X\n", reg);
				return -EINVAL;
			}
			track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
			track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
			ib[idx] = track->cb_color_base_last[tmp];
		} else {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			track->cb_color_frag_bo[tmp] = reloc->robj;
			track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
			track->cb_dirty = true;
		}
		break;
	case R_0280C0_CB_COLOR0_TILE:
	case R_0280C4_CB_COLOR1_TILE:
	case R_0280C8_CB_COLOR2_TILE:
	case R_0280CC_CB_COLOR3_TILE:
	case R_0280D0_CB_COLOR4_TILE:
	case R_0280D4_CB_COLOR5_TILE:
	case R_0280D8_CB_COLOR6_TILE:
	case R_0280DC_CB_COLOR7_TILE:
		tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
			if (!track->cb_color_base_last[tmp]) {
				dev_err(p->dev, "Broken old userspace? No cb_color0_base supplied before trying to write 0x%08X\n", reg);
				return -EINVAL;
			}
			track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
			track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
			ib[idx] = track->cb_color_base_last[tmp];
		} else {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			track->cb_color_tile_bo[tmp] = reloc->robj;
			track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
			track->cb_dirty = true;
		}
		break;
	case R_028100_CB_COLOR0_MASK:
	case R_028104_CB_COLOR1_MASK:
	case R_028108_CB_COLOR2_MASK:
	case R_02810C_CB_COLOR3_MASK:
	case R_028110_CB_COLOR4_MASK:
	case R_028114_CB_COLOR5_MASK:
	case R_028118_CB_COLOR6_MASK:
	case R_02811C_CB_COLOR7_MASK:
		tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
		track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx);
		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
			track->cb_dirty = true;
		}
		break;
	case CB_COLOR0_BASE:
	case CB_COLOR1_BASE:
	case CB_COLOR2_BASE:
	case CB_COLOR3_BASE:
	case CB_COLOR4_BASE:
	case CB_COLOR5_BASE:
	case CB_COLOR6_BASE:
	case CB_COLOR7_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_BASE) / 4;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
		track->cb_dirty = true;
		break;
	case DB_DEPTH_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_offset = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_bo = reloc->robj;
		track->db_bo_mc = reloc->lobj.gpu_offset;
		track->db_dirty = true;
		break;
	case DB_HTILE_DATA_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->htile_offset = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->htile_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case DB_HTILE_SURFACE:
		track->htile_surface = radeon_get_ib_value(p, idx);
		/* force 8x8 htile width and height */
		ib[idx] |= 3;
		track->db_dirty = true;
		break;
	case SQ_PGM_START_FS:
	case SQ_PGM_START_ES:
	case SQ_PGM_START_VS:
	case SQ_PGM_START_GS:
	case SQ_PGM_START_PS:
	case SQ_ALU_CONST_CACHE_GS_0:
	case SQ_ALU_CONST_CACHE_GS_1:
	case SQ_ALU_CONST_CACHE_GS_2:
	case SQ_ALU_CONST_CACHE_GS_3:
	case SQ_ALU_CONST_CACHE_GS_4:
	case SQ_ALU_CONST_CACHE_GS_5:
	case SQ_ALU_CONST_CACHE_GS_6:
	case SQ_ALU_CONST_CACHE_GS_7:
	case SQ_ALU_CONST_CACHE_GS_8:
	case SQ_ALU_CONST_CACHE_GS_9:
	case SQ_ALU_CONST_CACHE_GS_10:
	case SQ_ALU_CONST_CACHE_GS_11:
	case SQ_ALU_CONST_CACHE_GS_12:
	case SQ_ALU_CONST_CACHE_GS_13:
	case SQ_ALU_CONST_CACHE_GS_14:
	case SQ_ALU_CONST_CACHE_GS_15:
	case SQ_ALU_CONST_CACHE_PS_0:
	case SQ_ALU_CONST_CACHE_PS_1:
	case SQ_ALU_CONST_CACHE_PS_2:
	case SQ_ALU_CONST_CACHE_PS_3:
	case SQ_ALU_CONST_CACHE_PS_4:
	case SQ_ALU_CONST_CACHE_PS_5:
	case SQ_ALU_CONST_CACHE_PS_6:
	case SQ_ALU_CONST_CACHE_PS_7:
	case SQ_ALU_CONST_CACHE_PS_8:
	case SQ_ALU_CONST_CACHE_PS_9:
	case SQ_ALU_CONST_CACHE_PS_10:
	case SQ_ALU_CONST_CACHE_PS_11:
	case SQ_ALU_CONST_CACHE_PS_12:
	case SQ_ALU_CONST_CACHE_PS_13:
	case SQ_ALU_CONST_CACHE_PS_14:
	case SQ_ALU_CONST_CACHE_PS_15:
	case SQ_ALU_CONST_CACHE_VS_0:
	case SQ_ALU_CONST_CACHE_VS_1:
	case SQ_ALU_CONST_CACHE_VS_2:
	case SQ_ALU_CONST_CACHE_VS_3:
	case SQ_ALU_CONST_CACHE_VS_4:
	case SQ_ALU_CONST_CACHE_VS_5:
	case SQ_ALU_CONST_CACHE_VS_6:
	case SQ_ALU_CONST_CACHE_VS_7:
	case SQ_ALU_CONST_CACHE_VS_8:
	case SQ_ALU_CONST_CACHE_VS_9:
	case SQ_ALU_CONST_CACHE_VS_10:
	case SQ_ALU_CONST_CACHE_VS_11:
	case SQ_ALU_CONST_CACHE_VS_12:
	case SQ_ALU_CONST_CACHE_VS_13:
	case SQ_ALU_CONST_CACHE_VS_14:
	case SQ_ALU_CONST_CACHE_VS_15:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case SX_MEMORY_EXPORT_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONFIG_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case R_028350_SX_MISC:
		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
		break;
	default:
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}

unsigned r600_mip_minify(unsigned size, unsigned level)
{
	unsigned val;

	val = max(1U, size >> level);
	if (level > 0)
		val = roundup_pow_of_two(val);
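	/* levels above the base are rounded up to the next power of two,
	 * which matches how the hardware lays out npot mip chains */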
	return val;
}

static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
			      unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format,
			      unsigned block_align, unsigned height_align, unsigned base_align,
			      unsigned *l0_size, unsigned *mipmap_size)
{
	unsigned offset, i, level;
	unsigned width, height, depth, size;
	unsigned blocksize;
	unsigned nbx, nby;
	unsigned nlevels = llevel - blevel + 1;

	blocksize = r600_fmt_get_blocksize(format);

	w0 = r600_mip_minify(w0, 0);
	h0 = r600_mip_minify(h0, 0);
	d0 = r600_mip_minify(d0, 0);
	for (i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
		width = r600_mip_minify(w0, i);
		nbx = r600_fmt_get_nblocksx(format, width);

		nbx = roundup(nbx, block_align);

		height = r600_mip_minify(h0, i);
		nby = r600_fmt_get_nblocksy(format, height);
		nby = roundup(nby, height_align);

		depth = r600_mip_minify(d0, i);

		size = nbx * nby * blocksize * nsamples;
		if (nfaces)
			size *= nfaces;
		else
			size *= depth;

		if (i == 0 || i == 1)
			offset = roundup(offset, base_align);
		if (i == 0)
			*l0_size = size;
		offset += size;
	}
	*mipmap_size = offset;
	if (llevel == 0)
		*mipmap_size = *l0_size;
	if (!blevel)
		*mipmap_size -= *l0_size;
}

/**
 * r600_check_texture_resource() - check if a texture resource is valid
 * @p:		parser structure holding parsing context
 * @idx:	index into the cs buffer
 * @texture:	texture's bo structure
 * @mipmap:	mipmap's bo structure
 *
 * This function will check that the resource has valid fields and that
 * the texture and mipmap bo objects are big enough to cover this resource.
 */
static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
				       struct radeon_bo *texture,
				       struct radeon_bo *mipmap,
				       u64 base_offset,
				       u64 mip_offset,
				       u32 tiling_flags)
{
	struct r600_cs_track *track = p->track;
	u32 dim, nfaces, llevel, blevel, w0, h0, d0;
	u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5;
	u32 height_align, pitch, pitch_align, depth_align;
	u32 barray, larray;
	u64 base_align;
	struct array_mode_checker array_check;
	u32 format;
	bool is_array = false;

	/* on legacy kernel we don't perform advanced check */
	if (p->rdev == NULL)
		return 0;

	/* convert to bytes */
	base_offset <<= 8;
	mip_offset <<= 8;

	word0 = radeon_get_ib_value(p, idx + 0);
	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
		if (tiling_flags & RADEON_TILING_MACRO)
			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
		else if (tiling_flags & RADEON_TILING_MICRO)
			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
	}
	word1 = radeon_get_ib_value(p, idx + 1);
	word2 = radeon_get_ib_value(p, idx + 2) << 8;
	word3 = radeon_get_ib_value(p, idx + 3) << 8;
	word4 = radeon_get_ib_value(p, idx + 4);
	word5 = radeon_get_ib_value(p, idx + 5);
	dim = G_038000_DIM(word0);
	w0 = G_038000_TEX_WIDTH(word0) + 1;
	pitch = (G_038000_PITCH(word0) + 1) * 8;
	h0 = G_038004_TEX_HEIGHT(word1) + 1;
	d0 = G_038004_TEX_DEPTH(word1);
	format = G_038004_DATA_FORMAT(word1);
	blevel = G_038010_BASE_LEVEL(word4);
	llevel = G_038014_LAST_LEVEL(word5);
	/* pitch in texels */
	array_check.array_mode = G_038000_TILE_MODE(word0);
	array_check.group_size = track->group_size;
	array_check.nbanks = track->nbanks;
	array_check.npipes = track->npipes;
	array_check.nsamples = 1;
	array_check.blocksize = r600_fmt_get_blocksize(format);

	nfaces = 1;
	switch (dim) {
	case V_038000_SQ_TEX_DIM_1D:
	case V_038000_SQ_TEX_DIM_2D:
	case V_038000_SQ_TEX_DIM_3D:
		break;
	case V_038000_SQ_TEX_DIM_CUBEMAP:
		if (p->family >= CHIP_RV770)
			nfaces = 8;
		else
			nfaces = 6;
		break;
	case V_038000_SQ_TEX_DIM_1D_ARRAY:
	case V_038000_SQ_TEX_DIM_2D_ARRAY:
		is_array = true;
		break;
	case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
		is_array = true;
		/* fall through */
	case V_038000_SQ_TEX_DIM_2D_MSAA:
		array_check.nsamples = 1 << llevel;
		llevel = 0;
		break;
	default:
		dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
		return -EINVAL;
	}
	if (!r600_fmt_is_valid_texture(format, p->family)) {
		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
			 __func__, __LINE__, format);
		return -EINVAL;
	}

	if (r600_get_array_mode_alignment(&array_check,
					  &pitch_align, &height_align, &depth_align, &base_align)) {
		dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
			 __func__, __LINE__, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}

	/* XXX check height as well... */

	if (!IS_ALIGNED(pitch, pitch_align)) {
		dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}
	if (!IS_ALIGNED(base_offset, base_align)) {
		dev_warn(p->dev, "%s:%d tex base offset (0x%jx, 0x%jx, %d) invalid\n",
			 __func__, __LINE__, (uintmax_t)base_offset, (uintmax_t)base_align, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}
	if (!IS_ALIGNED(mip_offset, base_align)) {
		dev_warn(p->dev, "%s:%d tex mip offset (0x%jx, 0x%jx, %d) invalid\n",
			 __func__, __LINE__, (uintmax_t)mip_offset, (uintmax_t)base_align, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}

	if (blevel > llevel) {
		dev_warn(p->dev, "texture blevel %d > llevel %d\n",
			 blevel, llevel);
	}
	if (is_array) {
		barray = G_038014_BASE_ARRAY(word5);
		larray = G_038014_LAST_ARRAY(word5);

		nfaces = larray - barray + 1;
	}
	r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format,
			  pitch_align, height_align, base_align,
			  &l0_size, &mipmap_size);
	/* using get ib will give us the offset into the texture bo */
	if ((l0_size + word2) > radeon_bo_size(texture)) {
		dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
			 w0, h0, pitch_align, height_align,
			 array_check.array_mode, format, word2,
			 l0_size, radeon_bo_size(texture));
		dev_warn(p->dev, "alignments %d %d %d %jd\n", pitch, pitch_align, height_align, (uintmax_t)base_align);
		return -EINVAL;
	}
	/* using get ib will give us the offset into the mipmap bo */
	if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
		/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
		  w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
	}
	return 0;
}

static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	u32 m, i;

	i = (reg >> 7);
	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return false;
	}
	m = 1 << ((reg >> 2) & 31);
	if (!(r600_reg_safe_bm[i] & m))
		return true;
	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
	return false;
}

static int r600_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct r600_cs_track *track;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct r600_cs_track *)p->track;
	ib = p->ib.ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);

	switch (pkt->opcode) {
	case PACKET3_SET_PREDICATION:
	{
		int pred_op;
		int tmp;
		uint64_t offset;

		if (pkt->count != 1) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		tmp = radeon_get_ib_value(p, idx + 1);
		pred_op = (tmp >> 16) & 0x7;

		/* for the clear predicate operation */
		if (pred_op == 0)
			return 0;

		if (pred_op > 2) {
			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
			return -EINVAL;
		}

		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 (idx_value & 0xfffffff0) +
			 ((u64)(tmp & 0xff) << 32);

		ib[idx + 0] = offset;
		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
	}
	break;
	case PACKET3_START_3D_CMDBUF:
		if (p->family >= CHIP_RV770 || pkt->count) {
			DRM_ERROR("bad START_3D\n");
			return -EINVAL;
		}
		break;
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
			return -EINVAL;
		}
		break;
	case PACKET3_DRAW_INDEX:
	{
		uint64_t offset;
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 radeon_get_ib_value(p, idx+0) +
			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;

		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	}
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD_BE:
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			uint64_t offset;

			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}

			offset = reloc->lobj.gpu_offset +
				 (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_CP_DMA:
	{
		u32 command, size;
		u64 offset, tmp;
		if (pkt->count != 4) {
			DRM_ERROR("bad CP DMA\n");
			return -EINVAL;
		}
		command = radeon_get_ib_value(p, idx+4);
		size = command & 0x1fffff;
		if (command & PACKET3_CP_DMA_CMD_SAS) {
			/* src address space is register */
			DRM_ERROR("CP DMA SAS not supported\n");
			return -EINVAL;
		} else {
			if (command & PACKET3_CP_DMA_CMD_SAIC) {
				DRM_ERROR("CP DMA SAIC only supported for registers\n");
				return -EINVAL;
			}
			/* src address space is memory */
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad CP DMA SRC\n");
				return -EINVAL;
			}

			tmp = radeon_get_ib_value(p, idx) +
				((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

			offset = reloc->lobj.gpu_offset + tmp;

			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
				dev_warn(p->dev, "CP DMA src buffer too small (%ju %lu)\n",
					 (uintmax_t)tmp + size, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}

			ib[idx] = offset;
			ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
		}
		if (command & PACKET3_CP_DMA_CMD_DAS) {
			/* dst address space is register */
			DRM_ERROR("CP DMA DAS not supported\n");
			return -EINVAL;
		} else {
			/* dst address space is memory */
			if (command & PACKET3_CP_DMA_CMD_DAIC) {
				DRM_ERROR("CP DMA DAIC only supported for registers\n");
				return -EINVAL;
			}
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad CP DMA DST\n");
				return -EINVAL;
			}

			tmp = radeon_get_ib_value(p, idx+2) +
				((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);

			offset = reloc->lobj.gpu_offset + tmp;

			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
				dev_warn(p->dev, "CP DMA dst buffer too small (%ju %lu)\n",
					 (uintmax_t)tmp + size, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}

			ib[idx+2] = offset;
			ib[idx+3] = upper_32_bits(offset) & 0xff;
		}
		break;
	}
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			uint64_t offset;

			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			offset = reloc->lobj.gpu_offset +
				 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

			ib[idx+1] = offset & 0xfffffff8;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
	{
		uint64_t offset;

		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = offset & 0xfffffffc;
		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
		break;
	}
	case PACKET3_SET_CONFIG_REG:
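		/* dword 0 of the payload gives the start offset (in dwords)
		 * from the base of the config-reg space; one value follows
		 * per register, so end_reg is the last register written */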
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 7) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 7); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 size, offset, base_offset, mip_offset;
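
			/* each SET_RESOURCE descriptor is 7 dwords; dword 6
			 * tells texture and vertex-buffer resources apart */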
2037 switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
2038 case SQ_TEX_VTX_VALID_TEXTURE:
2040 r = r600_cs_packet_next_reloc(p, &reloc);
2042 DRM_ERROR("bad SET_RESOURCE\n");
2045 base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
2046 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
2047 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
2048 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
2049 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
2050 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
2052 texture = reloc->robj;
2054 r = r600_cs_packet_next_reloc(p, &reloc);
2056 DRM_ERROR("bad SET_RESOURCE\n");
2059 mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
2060 mipmap = reloc->robj;
2061 r = r600_check_texture_resource(p, idx+(i*7)+1,
2063 base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
2064 mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
2065 reloc->lobj.tiling_flags);
2068 ib[idx+1+(i*7)+2] += base_offset;
2069 ib[idx+1+(i*7)+3] += mip_offset;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
			{
				u64 offset64;
				/* vtx base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
				size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
						 size + offset, radeon_bo_size(reloc->robj));
					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
				}
				offset64 = reloc->lobj.gpu_offset + offset;
				/* patch dwords 0 and 2 of this 7-dword record */
				ib[idx+1+(i*7)+0] = offset64;
				ib[idx+1+(i*7)+2] = (ib[idx+1+(i*7)+2] & 0xffffff00) |
						    (upper_32_bits(offset64) & 0xff);
				break;
			}
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
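	/*
	 * Layout note: each SET_RESOURCE record above is 7 dwords.  For
	 * textures, dword 0 carries the tile mode and dwords 2 and 3 the
	 * base and mip addresses in 256-byte units; for vertex buffers,
	 * dword 0 holds the buffer offset, dword 1 the size minus one,
	 * and the low byte of dword 2 the upper address bits 39:32.
	 */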
	case PACKET3_SET_ALU_CONST:
		if (track->sq_config & DX9_CONSTS) {
			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
			end_reg = 4 * pkt->count + start_reg - 4;
			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
			    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
				DRM_ERROR("bad SET_ALU_CONST\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_STRMOUT_BASE_UPDATE:
		/* RS780 and RS880 also need this */
		if (p->family < CHIP_RS780) {
			DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
			return -EINVAL;
		}
		if (pkt->count != 1) {
			DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
			return -EINVAL;
		}
		if (idx_value > 3) {
			DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
			return -EINVAL;
		}
		{
			u64 offset;

			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
				return -EINVAL;
			}

			if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
				return -EINVAL;
			}

			offset = radeon_get_ib_value(p, idx+1) << 8;
			if (offset != track->vgt_strmout_bo_offset[idx_value]) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%jx, 0x%x\n",
					  (uintmax_t)offset, track->vgt_strmout_bo_offset[idx_value]);
				return -EINVAL;
			}

			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%jx, 0x%lx\n",
					  (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_SURFACE_BASE_UPDATE:
		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_STRMOUT_BUFFER_UPDATE:
		if (pkt->count != 4) {
			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
			return -EINVAL;
		}
		/* Updating memory at DST_ADDRESS. */
		if (idx_value & 0x1) {
			u64 offset;
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%jx, 0x%lx\n",
					  (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		/* Reading data from SRC_ADDRESS. */
		if (((idx_value >> 1) & 0x3) == 2) {
			u64 offset;
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%jx, 0x%lx\n",
					  (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		}
		break;
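	/*
	 * Note on the address encoding used above and below: GPU
	 * addresses in these packets are 40 bits, split as the low
	 * 32 bits in one dword and bits 39:32 in the low byte of the
	 * next.  For example, a relocated address of 0x123456780 is
	 * written back as 0x23456780 and 0x01.
	 */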
	case PACKET3_MEM_WRITE:
	{
		u64 offset;

		if (pkt->count != 3) {
			DRM_ERROR("bad MEM_WRITE (invalid count)\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
			return -EINVAL;
		}
		offset = radeon_get_ib_value(p, idx+0);
		offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
		if (offset & 0x7) {
			DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
			return -EINVAL;
		}
		if ((offset + 8) > radeon_bo_size(reloc->robj)) {
			DRM_ERROR("bad MEM_WRITE bo too small: 0x%jx, 0x%lx\n",
				  (uintmax_t)offset + 8, radeon_bo_size(reloc->robj));
			return -EINVAL;
		}
		offset += reloc->lobj.gpu_offset;
		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;
		break;
	}
	case PACKET3_COPY_DW:
		if (pkt->count != 4) {
			DRM_ERROR("bad COPY_DW (invalid count)\n");
			return -EINVAL;
		}
		if (idx_value & 0x1) {
			u64 offset;
			/* SRC is memory. */
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW src bo too small: 0x%jx, 0x%lx\n",
					  (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		} else {
			/* SRC is a reg. */
			reg = radeon_get_ib_value(p, idx+1) << 2;
			if (!r600_is_safe_reg(p, reg, idx+1))
				return -EINVAL;
		}
		if (idx_value & 0x2) {
			u64 offset;
			/* DST is memory. */
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW dst bo too small: 0x%jx, 0x%lx\n",
					  (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		} else {
			/* DST is a reg. */
			reg = radeon_get_ib_value(p, idx+3) << 2;
			if (!r600_is_safe_reg(p, reg, idx+3))
				return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
int r600_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r600_cs_track *track;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = malloc(sizeof(*track),
		    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
		if (track == NULL)
			return -ENOMEM;
		r600_cs_track_init(track);
		if (p->rdev->family < CHIP_RV770) {
			track->npipes = p->rdev->config.r600.tiling_npipes;
			track->nbanks = p->rdev->config.r600.tiling_nbanks;
			track->group_size = p->rdev->config.r600.tiling_group_size;
		} else if (p->rdev->family <= CHIP_RV740) {
			track->npipes = p->rdev->config.rv770.tiling_npipes;
			track->nbanks = p->rdev->config.rv770.tiling_nbanks;
			track->group_size = p->rdev->config.rv770.tiling_group_size;
		}
		p->track = track;
	}
	do {
		r = r600_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			free(p->track, DRM_MEM_DRIVER);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r600_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r600_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			free(p->track, DRM_MEM_DRIVER);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			free(p->track, DRM_MEM_DRIVER);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib.length_dw; r++) {
		DRM_INFO("%05d 0x%08X\n", r, p->ib.ptr[r]);
		DRM_MDELAY(1);
	}
#endif
	free(p->track, DRM_MEM_DRIVER);
	p->track = NULL;
	return 0;
}
/**
 * r600_dma_cs_next_reloc() - parse next reloc
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	reloc information
 *
 * Return the next reloc, do bo validation and compute
 * GPU offset using the provided start.
 **/
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
			   struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	unsigned idx;

	*cs_reloc = NULL;
	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	idx = p->dma_reloc_idx;
	if (idx >= p->nrelocs) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	*cs_reloc = p->relocs_ptr[idx];
	p->dma_reloc_idx++;
	return 0;
}
#define GET_DMA_CMD(h)		(((h) & 0xf0000000) >> 28)
#define GET_DMA_COUNT(h)	((h) & 0x0000ffff)
#define GET_DMA_T(h)		(((h) & 0x00800000) >> 23)
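/*
 * Example decode using the helpers above: a header dword of
 * 0x20800040 yields cmd = 0x2 (bits 31:28), tiled = 1 (bit 23)
 * and count = 0x40 dwords (bits 15:0).
 */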
/**
 * r600_dma_cs_parse() - parse the DMA IB
 * @p:		parser structure holding parsing context.
 *
 * Parses the DMA IB from the CS ioctl and updates
 * the GPU addresses based on the reloc information and
 * checks for errors. (R6xx-R7xx)
 * Returns 0 for success and an error on failure.
 **/
int r600_dma_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	struct radeon_cs_reloc *src_reloc, *dst_reloc;
	u32 header, cmd, count, tiled;
	volatile u32 *ib = p->ib.ptr;
	u32 idx, idx_value;
	u64 src_offset, dst_offset;
	int r;

	do {
		if (p->idx >= ib_chunk->length_dw) {
			DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
				  p->idx, ib_chunk->length_dw);
			return -EINVAL;
		}
		idx = p->idx;
		header = radeon_get_ib_value(p, idx);
		cmd = GET_DMA_CMD(header);
		count = GET_DMA_COUNT(header);
		tiled = GET_DMA_T(header);

		switch (cmd) {
		case DMA_PACKET_WRITE:
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_WRITE\n");
				return -EINVAL;
			}
			if (tiled) {
				/* tiled addresses are in 256-byte units */
				dst_offset = radeon_get_ib_value(p, idx+1);
				dst_offset <<= 8;

				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
				p->idx += count + 5;
			} else {
				dst_offset = radeon_get_ib_value(p, idx+1);
				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;

				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
				ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
				p->idx += count + 3;
			}
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn(p->dev, "DMA write buffer too small (%ju %lu)\n",
					 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			break;
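		/*
		 * COPY comes in three flavors below: tiled (bit 31 of
		 * dword 2 selects which side is detiled), 7xx linear
		 * (upper address bytes of dst and src in dwords 3 and 4)
		 * and r6xx linear (both upper bytes packed into dword 3).
		 */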
		case DMA_PACKET_COPY:
			r = r600_dma_cs_next_reloc(p, &src_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_COPY\n");
				return -EINVAL;
			}
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_COPY\n");
				return -EINVAL;
			}
			if (tiled) {
				idx_value = radeon_get_ib_value(p, idx + 2);
				/* detile bit */
				if (idx_value & (1U << 31)) {
					/* tiled src, linear dst */
					src_offset = radeon_get_ib_value(p, idx+1);
					src_offset <<= 8;
					ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);

					dst_offset = radeon_get_ib_value(p, idx+5);
					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
					ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
				} else {
					/* linear src, tiled dst */
					src_offset = radeon_get_ib_value(p, idx+5);
					src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
					ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;

					dst_offset = radeon_get_ib_value(p, idx+1);
					dst_offset <<= 8;
					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
				}
				p->idx += 7;
			} else {
				if (p->family >= CHIP_RV770) {
					src_offset = radeon_get_ib_value(p, idx+2);
					src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
					dst_offset = radeon_get_ib_value(p, idx+1);
					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;

					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
					ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
					p->idx += 5;
				} else {
					src_offset = radeon_get_ib_value(p, idx+2);
					src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
					dst_offset = radeon_get_ib_value(p, idx+1);
					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;

					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
					ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16;
					p->idx += 4;
				}
			}
			if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
				dev_warn(p->dev, "DMA copy src buffer too small (%ju %lu)\n",
					 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
				return -EINVAL;
			}
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn(p->dev, "DMA write dst buffer too small (%ju %lu)\n",
					 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			break;
		case DMA_PACKET_CONSTANT_FILL:
			if (p->family < CHIP_RV770) {
				DRM_ERROR("Constant Fill is 7xx only !\n");
				return -EINVAL;
			}
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
				return -EINVAL;
			}
			dst_offset = radeon_get_ib_value(p, idx+1);
			/* address bits 39:32 live in bits 23:16 of dword 3 */
			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn(p->dev, "DMA constant fill buffer too small (%ju %lu)\n",
					 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
			ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
			p->idx += 4;
			break;
		case DMA_PACKET_NOP:
			p->idx += 1;
			break;
		default:
			DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
			return -EINVAL;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib.length_dw; r++) {
		DRM_INFO("%05d 0x%08X\n", r, p->ib.ptr[r]);
		DRM_MDELAY(1);
	}
#endif
	return 0;
}