/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/radeon/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

void r100_cs_dump_packet(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt);
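
/*
 * Resolve the relocation chunk: look up every GEM handle it names and
 * build the list of buffer objects that must be validated before the IB
 * runs.  Handles that appear more than once share a single reloc entry.
 */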
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct drm_device *ddev = p->rdev->ddev;
	struct radeon_cs_chunk *chunk;
	unsigned i, j;
	bool duplicate;

	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	chunk = &p->chunks[p->chunk_relocs_idx];
	p->dma_reloc_idx = 0;
	/* FIXME: we assume that each reloc uses 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs_ptr = malloc(p->nrelocs * sizeof(void *),
	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
	if (p->relocs_ptr == NULL) {
		return -ENOMEM;
	}
	p->relocs = malloc(p->nrelocs * sizeof(struct radeon_cs_reloc),
	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;

		duplicate = false;
		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		for (j = 0; j < i; j++) {
			if (r->handle == p->relocs[j].handle) {
				p->relocs_ptr[i] = &p->relocs[j];
				duplicate = true;
				break;
			}
		}

		if (!duplicate) {
			p->relocs[i].gobj = drm_gem_object_lookup(ddev,
								  p->filp,
								  r->handle);
			if (p->relocs[i].gobj == NULL) {
				DRM_ERROR("gem object lookup failed 0x%x\n",
					  r->handle);
				return -ENOENT;
			}
			p->relocs_ptr[i] = &p->relocs[i];
			p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
			p->relocs[i].lobj.bo = p->relocs[i].robj;
			p->relocs[i].lobj.wdomain = r->write_domain;
			p->relocs[i].lobj.rdomain = r->read_domains;
			p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
			p->relocs[i].handle = r->handle;
			p->relocs[i].flags = r->flags;
			radeon_bo_list_add_object(&p->relocs[i].lobj,
						  &p->validated);
		} else
			p->relocs[i].handle = 0;
	}
	return radeon_bo_list_validate(&p->validated);
}
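
/*
 * Map the ring id supplied by userspace (plus, for compute and DMA, the
 * submission priority) onto one of the hardware ring indices.
 */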
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
	p->priority = priority;

	switch (ring) {
	default:
		DRM_ERROR("unknown ring id: %d\n", ring);
		return -EINVAL;
	case RADEON_CS_RING_GFX:
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_COMPUTE:
		if (p->rdev->family >= CHIP_TAHITI) {
			if (p->priority > 0)
				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
		} else
			p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_DMA:
		if (p->rdev->family >= CHIP_CAYMAN) {
			if (p->priority > 0)
				p->ring = R600_RING_TYPE_DMA_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
		} else if (p->rdev->family >= CHIP_R600) {
			p->ring = R600_RING_TYPE_DMA_INDEX;
		} else {
			return -EINVAL;
		}
		break;
	}
	return 0;
}
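
/*
 * Record that the IB must wait on @fence before it runs; only the later
 * of the two fences per ring is kept.
 */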
static void radeon_cs_sync_to(struct radeon_cs_parser *p,
			      struct radeon_fence *fence)
{
	struct radeon_fence *other;

	if (!fence)
		return;

	other = p->ib.sync_to[fence->ring];
	p->ib.sync_to[fence->ring] = radeon_fence_later(fence, other);
}
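
/*
 * Make the IB wait on the last fence of every buffer it references, so
 * that work still queued on other rings completes first.
 */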
static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
	int i;

	for (i = 0; i < p->nrelocs; i++) {
		if (!p->relocs[i].robj)
			continue;

		radeon_cs_sync_to(p, p->relocs[i].robj->tbo.sync_obj);
	}
}
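
/*
 * First half of the CS ioctl: copy in the user's chunk table, classify
 * each chunk (IB, relocs, const IB, flags), pick the target ring, and
 * set up the two-page bounce buffers used to stream the IB on AGP.
 */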
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;
	u32 ring = RADEON_CS_RING_GFX;
	s32 priority = 0;

	if (!cs->num_chunks) {
		return 0;
	}
	/* get chunks */
	INIT_LIST_HEAD(&p->validated);
	p->idx = 0;
	p->ib.sa_bo = NULL;
	p->ib.semaphore = NULL;
	p->const_ib.sa_bo = NULL;
	p->const_ib.semaphore = NULL;
	p->chunk_ib_idx = -1;
	p->chunk_relocs_idx = -1;
	p->chunk_flags_idx = -1;
	p->chunk_const_ib_idx = -1;
	p->chunks_array = malloc(cs->num_chunks * sizeof(uint64_t),
	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
			       sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->cs_flags = 0;
	p->nchunks = cs->num_chunks;
	p->chunks = malloc(p->nchunks * sizeof(struct radeon_cs_chunk),
	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
		if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
				       sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].kdata = NULL;
		p->chunks[i].chunk_id = user_chunk.chunk_id;

		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs_idx = i;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib_idx = i;
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
			p->chunk_const_ib_idx = i;
			/* zero length CONST IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags_idx = i;
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;

		cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
		if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
		    (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
			size = p->chunks[i].length_dw * sizeof(uint32_t);
			p->chunks[i].kdata = malloc(size, DRM_MEM_DRIVER, M_NOWAIT);
			if (p->chunks[i].kdata == NULL) {
				return -ENOMEM;
			}
			if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
					       p->chunks[i].user_ptr, size)) {
				return -EFAULT;
			}
			if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
				p->cs_flags = p->chunks[i].kdata[0];
				if (p->chunks[i].length_dw > 1)
					ring = p->chunks[i].kdata[1];
				if (p->chunks[i].length_dw > 2)
					priority = (s32)p->chunks[i].kdata[2];
			}
		}
	}

	/* these are KMS only */
	if (p->rdev) {
		if ((p->cs_flags & RADEON_CS_USE_VM) &&
		    !p->rdev->vm_manager.enabled) {
			DRM_ERROR("VM not active on asic!\n");
			return -EINVAL;
		}

		/* we only support VM on SI+ */
		if ((p->rdev->family >= CHIP_TAHITI) &&
		    ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
			DRM_ERROR("VM required on SI+!\n");
			return -EINVAL;
		}

		if (radeon_cs_get_ring(p, ring, priority))
			return -EINVAL;
	}

	/* deal with non-vm */
	if ((p->chunk_ib_idx != -1) &&
	    ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
	    (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
		if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
			DRM_ERROR("cs IB too big: %d\n",
				  p->chunks[p->chunk_ib_idx].length_dw);
			return -EINVAL;
		}
		if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
			p->chunks[p->chunk_ib_idx].kpage[0] = malloc(PAGE_SIZE, DRM_MEM_DRIVER, M_NOWAIT);
			p->chunks[p->chunk_ib_idx].kpage[1] = malloc(PAGE_SIZE, DRM_MEM_DRIVER, M_NOWAIT);
			if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
			    p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
				free(p->chunks[p->chunk_ib_idx].kpage[0], DRM_MEM_DRIVER);
				free(p->chunks[p->chunk_ib_idx].kpage[1], DRM_MEM_DRIVER);
				p->chunks[p->chunk_ib_idx].kpage[0] = NULL;
				p->chunks[p->chunk_ib_idx].kpage[1] = NULL;
				return -ENOMEM;
			}
		}
		p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
		p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
		p->chunks[p->chunk_ib_idx].last_copied_page = -1;
		p->chunks[p->chunk_ib_idx].last_page_index =
			((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
	}

	return 0;
}
/**
 * cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, then unvalidate the buffers, otherwise just free the
 * memory used by the parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	if (!error) {
		ttm_eu_fence_buffer_objects(&parser->validated,
					    parser->ib.fence);
	} else {
		ttm_eu_backoff_reservation(&parser->validated);
	}

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			if (parser->relocs[i].gobj)
				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
		}
	}
	free(parser->track, DRM_MEM_DRIVER);
	free(parser->relocs, DRM_MEM_DRIVER);
	free(parser->relocs_ptr, DRM_MEM_DRIVER);
	for (i = 0; i < parser->nchunks; i++) {
		free(parser->chunks[i].kdata, DRM_MEM_DRIVER);
		if ((parser->rdev->flags & RADEON_IS_AGP)) {
			free(parser->chunks[i].kpage[0], DRM_MEM_DRIVER);
			free(parser->chunks[i].kpage[1], DRM_MEM_DRIVER);
		}
	}
	free(parser->chunks, DRM_MEM_DRIVER);
	free(parser->chunks_array, DRM_MEM_DRIVER);
	radeon_ib_free(parser->rdev, &parser->ib);
	radeon_ib_free(parser->rdev, &parser->const_ib);
}
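
/*
 * Submit the IB chunk of a non-VM (physical addressing) command stream:
 * allocate an indirect buffer, run the per-ASIC packet checker on it and
 * schedule it on the selected ring.
 */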
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM)
		return 0;

	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached).
	 */
	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  NULL, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	r = radeon_cs_parse(rdev, parser->ring, parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}
	r = radeon_cs_finish_pages(parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}
	radeon_cs_sync_rings(parser);
	r = radeon_ib_schedule(rdev, &parser->ib, NULL);
	if (r) {
		DRM_ERROR("Failed to schedule IB !\n");
	}
	return r;
}
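
/*
 * Refresh the VM page-table entries for the ring temporary BO and for
 * every buffer object on the validated list.
 */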
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
				   struct radeon_vm *vm)
{
	struct radeon_device *rdev = parser->rdev;
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	int r;

	r = radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
	if (r) {
		return r;
	}
	list_for_each_entry(lobj, &parser->validated, tv.head) {
		bo = lobj->bo;
		r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
		if (r) {
			return r;
		}
	}
	return 0;
}
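
/*
 * Submit a VM command stream: copy in the (optional, SI+) const IB and
 * the main IB from userspace, let the ring-specific parser check them,
 * then update the page tables and schedule both under the VM locks.
 */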
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return 0;

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
		ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
		r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
				  vm, ib_chunk->length_dw * 4);
		if (r) {
			DRM_ERROR("Failed to get const ib !\n");
			return r;
		}
		parser->const_ib.is_const_ib = true;
		parser->const_ib.length_dw = ib_chunk->length_dw;
		/* Copy the packet into the IB */
		if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
				       ib_chunk->length_dw * 4)) {
			return -EFAULT;
		}
		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
		if (r) {
			return r;
		}
	}

	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
	if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
		DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
		return -EINVAL;
	}
	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  vm, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	/* Copy the packet into the IB */
	if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
			       ib_chunk->length_dw * 4)) {
		return -EFAULT;
	}
	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
	if (r) {
		return r;
	}

	sx_xlock(&rdev->vm_manager.lock);
	sx_xlock(&vm->mutex);
	r = radeon_vm_alloc_pt(rdev, vm);
	if (r) {
		goto out;
	}
	r = radeon_bo_vm_update_pte(parser, vm);
	if (r) {
		goto out;
	}
	radeon_cs_sync_rings(parser);
	radeon_cs_sync_to(parser, vm->fence);
	radeon_cs_sync_to(parser, radeon_vm_grab_id(rdev, vm, parser->ring));

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
	} else {
		r = radeon_ib_schedule(rdev, &parser->ib, NULL);
	}

	if (!r) {
		radeon_vm_fence(rdev, vm, parser->ib.fence);
	}

out:
	radeon_vm_add_to_lru(rdev, vm);
	sx_xunlock(&vm->mutex);
	sx_xunlock(&rdev->vm_manager.lock);
	return r;
}
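
/*
 * If the submission failed because of a lockup (-EDEADLK), reset the GPU
 * and return -EAGAIN so userspace retries the ioctl.
 */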
static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
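
/*
 * Top-level DRM_RADEON_CS ioctl handler: initialize the parser, resolve
 * relocations, then hand the stream to the non-VM or VM submission path.
 */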
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	sx_slock(&rdev->exclusive_lock);
	if (!rdev->accel_working) {
		sx_sunlock(&rdev->exclusive_lock);
		return -EBUSY;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r);
		sx_sunlock(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}
	r = radeon_cs_parser_relocs(&parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
		radeon_cs_parser_fini(&parser, r);
		sx_sunlock(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}
	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
	r = radeon_cs_ib_vm_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
out:
	radeon_cs_parser_fini(&parser, r);
	sx_sunlock(&rdev->exclusive_lock);
	r = radeon_cs_handle_lockup(rdev, r);
	return r;
}
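
/*
 * Copy any IB pages the parser never touched (everything after
 * last_copied_page) from userspace into the indirect buffer.
 */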
int radeon_cs_finish_pages(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int i;
	int size = PAGE_SIZE;

	for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
		if (i == ibc->last_page_index) {
			size = (ibc->length_dw * 4) % PAGE_SIZE;
			if (size == 0)
				size = PAGE_SIZE;
		}

		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
				       (char *)ibc->user_ptr + (i * PAGE_SIZE),
				       size))
			return -EFAULT;
	}
	return 0;
}
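
/*
 * Fault in IB pages up to and including pg_idx.  Without AGP the data is
 * copied straight into the IB; with AGP it goes through one of the two
 * bounce pages first and is then copied out to the uncached IB.
 */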
static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
{
	int new_page;
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int i;
	int size = PAGE_SIZE;
	bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
		false : true;

	for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
				       (char *)ibc->user_ptr + (i * PAGE_SIZE),
				       PAGE_SIZE)) {
			p->parser_error = -EFAULT;
			return 0;
		}
	}

	if (pg_idx == ibc->last_page_index) {
		size = (ibc->length_dw * 4) % PAGE_SIZE;
		if (size == 0)
			size = PAGE_SIZE;
	}

	new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
	if (copy1)
		ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));

	if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
			       (char *)ibc->user_ptr + (pg_idx * PAGE_SIZE),
			       size)) {
		p->parser_error = -EFAULT;
		return 0;
	}

	/* copy to IB for non single case */
	if (!copy1)
		memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);

	ibc->last_copied_page = pg_idx;
	ibc->kpage_idx[new_page] = pg_idx;

	return new_page;
}
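
/*
 * Return dword @idx of the IB, pulling the containing page in from
 * userspace via the page cache above if it is not already resident.
 */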
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}