/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>
#include <sys/sf_buf.h>

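/*
 * Return the memory-manager node backing the bo's current placement to
 * its memory type manager.
 */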
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

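/*
 * Move a buffer through system memory using its TTM: unbind from the old
 * placement (if any), adjust caching, then bind to the new placement.
 */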
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}

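/*
 * Serialize driver io_mem_reserve()/io_mem_free() calls for one memory
 * type.  A no-op when the manager advertises io_reserve_fastpath.
 */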
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible) {
		if (sx_xlock_sig(&man->io_reserve_mutex))
			return (-EINTR);
		else
			return (0);
	}

	sx_xlock(&man->io_reserve_mutex);
	return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	sx_xunlock(&man->io_reserve_mutex);
}

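/*
 * Kick the least-recently-used bo off the io reserve LRU so that a failed
 * driver io_mem_reserve() call can be retried.
 */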
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

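/*
 * Reserve the io space backing @mem, retrying after LRU eviction when the
 * driver reports -EAGAIN.  The reservation is reference counted unless
 * the fastpath is enabled.
 */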
static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
			      struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}

static void ttm_mem_io_free(struct ttm_bo_device *bdev,
			    struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}

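/*
 * Reserve io space on behalf of a CPU (VM) mapping and track the bo on
 * the manager's io reserve LRU so the reservation can be revoked by
 * eviction.
 */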
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

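/*
 * Map an entire io region into kernel virtual address space, honoring
 * the WC placement flag.  Returns with *virtual == NULL for placements
 * that are not io memory (e.g. system RAM).
 */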
static
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		addr = pmap_mapdev_attr(mem->bus.base + mem->bus.offset,
		    mem->bus.size, (mem->placement & TTM_PL_FLAG_WC) ?
		    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		pmap_unmapdev((vm_offset_t)virtual, mem->bus.size);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

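/*
 * One-page copy helpers used by ttm_bo_move_memcpy(): io->io, io->ttm
 * and ttm->io variants.  The ttm variants temporarily map the vm_page
 * with pmap_mapdev_attr() to honor the requested caching attribute.
 */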
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		/* iowrite32(ioread32(srcP++), dstP++); */
		*dstP++ = *srcP++;
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				vm_memattr_t prot)
{
	vm_page_t d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

	/* XXXKIB can't sleep ? */
	dst = pmap_mapdev_attr(VM_PAGE_TO_PHYS(d), PAGE_SIZE, prot);
	if (!dst)
		return -ENOMEM;

	memcpy(dst, src, PAGE_SIZE);

	pmap_unmapdev((vm_offset_t)dst, PAGE_SIZE);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				vm_memattr_t prot)
{
	vm_page_t s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = pmap_mapdev_attr(VM_PAGE_TO_PHYS(s), PAGE_SIZE, prot);
	if (!src)
		return -ENOMEM;

	memcpy(dst, src, PAGE_SIZE);

	pmap_unmapdev((vm_offset_t)src, PAGE_SIZE);

	return 0;
}

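/*
 * Fallback move implemented with a CPU copy: map both placements (when
 * they are io regions) and copy the buffer page by page.  The copy runs
 * backwards when source and destination overlap within the same memory
 * type.
 */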
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret) {
			/* if we fail here don't nuke the mm node
			 * as the bo still owns it */
			old_copy.mm_node = NULL;
			goto out1;
		}
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			vm_memattr_t prot = ttm_io_prot(old_mem->placement);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			vm_memattr_t prot = ttm_io_prot(new_mem->placement);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret) {
			/* failing here, means keep old copy as-is */
			old_copy.mm_node = NULL;
			goto out1;
		}
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	ttm_bo_mem_put(bo, &old_copy);
	return ret;
}

MALLOC_DEFINE(M_TTM_TRANSF_OBJ, "ttm_transf_obj", "TTM Transfer Objects");

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	free(bo, M_TTM_TRANSF_OBJ);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 */
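/*
 * Illustrative call sequence (hypothetical driver code; "my_fence" and
 * "my_blit" are placeholders, not TTM API):
 *
 *	my_blit(bo, old_mem, new_mem, &my_fence);	// queue GPU copy
 *	ttm_buffer_object_transfer(bo, &ghost);		// ghost keeps old data
 *	// when my_fence signals, ttm_bo_unref(&ghost) releases the
 *	// old placement.
 *
 * In this file the caller is ttm_bo_move_accel_cleanup() below.
 */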
static int
ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
    struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = malloc(sizeof(*fbo), M_TTM_TRANSF_OBJ, M_WAITOK);
	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	atomic_set(&fbo->cpu_writers, 0);

	mtx_lock(&bdev->fence_lock);
	if (bo->sync_obj)
		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	else
		fbo->sync_obj = NULL;
	mtx_unlock(&bdev->fence_lock);
	refcount_init(&fbo->list_kref, 1);
	refcount_init(&fbo->kref, 1);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;

	*new_obj = fbo;
	return 0;
}

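/*
 * Translate TTM caching flags into a FreeBSD VM memory attribute for use
 * with pmap_mapdev_attr() and friends.
 */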
vm_memattr_t
ttm_io_prot(uint32_t caching_flags)
{
#if defined(__i386__) || defined(__amd64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		return (VM_MEMATTR_WRITE_COMBINING);
	else
		/*
		 * We do not support i386, look at the linux source
		 * for the reason of the comment.
		 */
		return (VM_MEMATTR_UNCACHEABLE);
#else
#error Port me
#endif
}

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		map->virtual = pmap_mapdev_attr(bo->mem.bus.base +
		    bo->mem.bus.offset + offset, size,
		    (mem->placement & TTM_PL_FLAG_WC) ?
		    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
		map->size = size;
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	vm_memattr_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int i;
	int ret;

	MPASS(ttm != NULL);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */
		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->sf = sf_buf_alloc(map->page, 0);
		map->virtual = (void *)sf_buf_kva(map->sf);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			VM_MEMATTR_WRITE_COMBINING :
			ttm_io_prot(mem->placement);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->num_pages = num_pages;
		map->virtual = (void *)kmem_alloc_nofault(kernel_map,
		    num_pages * PAGE_SIZE);
		if (map->virtual != NULL) {
			for (i = 0; i < num_pages; i++) {
				pmap_page_set_memattr(ttm->pages[start_page +
				    i], prot);
			}
			pmap_qenter((vm_offset_t)map->virtual,
			    &ttm->pages[start_page], num_pages);
		}
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

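/*
 * Map num_pages of @bo, starting at start_page, into kernel address
 * space, dispatching to ttm_bo_ioremap() for io memory and to
 * ttm_bo_kmap_ttm() for system pages.  Undo with ttm_bo_kunmap().
 */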
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	MPASS(list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}

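/*
 * Typical CPU-access pattern (sketch only; the caller is assumed to hold
 * a reservation on @bo as the TTM API requires):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *
 *	if (ttm_bo_kmap(bo, 0, bo->num_pages, &map) == 0) {
 *		memset(ttm_kmap_obj_virtual(&map, &is_iomem), 0,
 *		    bo->num_pages << PAGE_SHIFT);
 *		ttm_bo_kunmap(&map);
 *	}
 */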
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		pmap_unmapdev((vm_offset_t)map->virtual, map->size);
		break;
	case ttm_bo_map_vmap:
		pmap_qremove((vm_offset_t)(map->virtual), map->num_pages);
		kmem_free(kernel_map, (vm_offset_t)map->virtual,
		    map->num_pages * PAGE_SIZE);
		break;
	case ttm_bo_map_kmap:
		sf_buf_free(map->sf);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		MPASS(0);
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
	map->sf = NULL;
}

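/*
 * Complete an accelerated (GPU) move: attach @sync_obj as the bo's new
 * fence, then either wait for it (eviction path) or hand the old
 * placement to a ghost object that is freed once the fence signals.
 */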
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	mtx_lock(&bdev->fence_lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		mtx_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		mtx_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}