/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
/*
 * Number of pages the Linux fault handler prefaults around the faulting
 * address; the FreeBSD fault path below does not use it.
 */
#define TTM_BO_VM_NUM_PREFAULT 16
RB_GENERATE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
    ttm_bo_cmp_rb_tree_items);

/*
 * Order buffer objects by the starting page of their drm_mm vm_node, so
 * the red-black tree doubles as an address-space index.
 */
int
ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
    struct ttm_buffer_object *b)
{

	if (a->vm_node->start < b->vm_node->start) {
		return (-1);
	} else if (a->vm_node->start > b->vm_node->start) {
		return (1);
	} else {
		return (0);
	}
}
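/*
 * For context, a sketch of how objects enter this tree (the RB_INSERT
 * call site lives in ttm_bo.c in the stock TTM code; it is shown here
 * only as an illustration):
 *
 *	rw_wlock(&bdev->vm_lock);
 *	RB_INSERT(ttm_bo_device_buffer_objects, &bdev->addr_space_rb, bo);
 *	rw_wunlock(&bdev->vm_lock);
 */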
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
    unsigned long page_start,
    unsigned long num_pages)
{
	unsigned long cur_offset;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *best_bo = NULL;

	/*
	 * Descend the tree, remembering the closest object that starts
	 * at or before page_start.
	 */
	bo = RB_ROOT(&bdev->addr_space_rb);
	while (bo != NULL) {
		cur_offset = bo->vm_node->start;
		if (page_start >= cur_offset) {
			best_bo = bo;
			if (page_start == cur_offset)
				break;
			bo = RB_RIGHT(bo, vm_rb);
		} else
			bo = RB_LEFT(bo, vm_rb);
	}

	if (unlikely(best_bo == NULL))
		return NULL;

	/* The requested range must lie entirely within the object. */
	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
	    (page_start + num_pages)))
		return NULL;

	return best_bo;
}
static int
ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{
	struct ttm_buffer_object *bo = vm_obj->handle;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_tt *ttm = NULL;
	vm_page_t m, m1;
	int ret;
	int retval = VM_PAGER_OK;
	struct ttm_mem_type_manager *man =
	    &bdev->man[bo->mem.mem_type];
	vm_object_pip_add(vm_obj, 1);
	if (*mres != NULL)
		(void)vm_page_remove(*mres);

retry:
	VM_OBJECT_WUNLOCK(vm_obj);
	m = NULL;

reserve:
	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY) {
			kern_yield(PRI_USER);
			goto reserve;
		}
	}
	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
		case -EINTR:
			kern_yield(PRI_USER);
			goto reserve;
		default:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	}
	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * buffer move.
	 */
	mtx_lock(&bdev->fence_lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		/*
		 * Here, the behavior differs between Linux and FreeBSD.
		 *
		 * On Linux, the wait is interruptible (3rd argument to
		 * ttm_bo_wait). There must be some mechanism to resume
		 * page fault handling, once the signal is processed.
		 *
		 * On FreeBSD, the wait is uninterruptible. This is not a
		 * problem as we can't end up with an unkillable process
		 * here, because the wait will eventually time out.
		 *
		 * An example of why an interruptible wait would be a
		 * problem is the Xorg process, which uses SIGALRM
		 * internally. The signal could interrupt the wait,
		 * causing the page fault to fail and the process to
		 * receive SIGSEGV.
		 */
		ret = ttm_bo_wait(bo, false, false, false);
		mtx_unlock(&bdev->fence_lock);
		if (unlikely(ret != 0)) {
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	} else
		mtx_unlock(&bdev->fence_lock);
	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}
	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by the bo->mutex, as we should
	 * be the only writers. There shouldn't really be any readers
	 * of these bits except within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
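	/*
	 * For reference, a sketch of what the memattr selection below
	 * amounts to (assuming ttm_io_prot() maps TTM caching flags the
	 * way the stock ttm_bo_util.c does):
	 *
	 *	TTM_PL_FLAG_CACHED -> VM_MEMATTR_WRITE_BACK
	 *	TTM_PL_FLAG_WC     -> VM_MEMATTR_WRITE_COMBINING
	 *	otherwise          -> VM_MEMATTR_UNCACHEABLE
	 */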
	if (!bo->mem.bus.is_iomem) {
		/* Allocate all pages at once, the most common usage. */
		ttm = bo->ttm;
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
	}
	if (bo->mem.bus.is_iomem) {
		m = PHYS_TO_VM_PAGE(bo->mem.bus.base + bo->mem.bus.offset +
		    offset);
		KASSERT((m->flags & PG_FICTITIOUS) != 0,
		    ("physical address %#jx not fictitious",
		    (uintmax_t)(bo->mem.bus.base + bo->mem.bus.offset
		    + offset)));
		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
	} else {
		ttm = bo->ttm;
		m = ttm->pages[OFF_TO_IDX(offset)];
		if (unlikely(!m)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
		pmap_page_set_memattr(m,
		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
	}
	VM_OBJECT_WLOCK(vm_obj);
	if (vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL) == 0) {
		ttm_mem_io_unlock(man);
		ttm_bo_unreserve(bo);
		goto retry;
	}
	m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
	if (m1 == NULL) {
		if (vm_page_insert(m, vm_obj, OFF_TO_IDX(offset))) {
			vm_page_xunbusy(m);
			VM_OBJECT_WUNLOCK(vm_obj);
			vm_wait(vm_obj);
			VM_OBJECT_WLOCK(vm_obj);
			ttm_mem_io_unlock(man);
			ttm_bo_unreserve(bo);
			goto retry;
		}
	} else {
		KASSERT(m == m1,
		    ("inconsistent insert bo %p m %p m1 %p offset %jx",
		    bo, m, m1, (uintmax_t)offset));
	}
	vm_page_valid(m);
	if (*mres != NULL) {
		KASSERT(*mres != m, ("losing %p %p", *mres, m));
		vm_page_free(*mres);
	}
	*mres = m;
out_io_unlock1:
	ttm_mem_io_unlock(man);
out_unlock1:
	ttm_bo_unreserve(bo);
	vm_object_pip_wakeup(vm_obj);
	return (retval);

out_io_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_io_unlock1;

out_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_unlock1;
}
static int
ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	/*
	 * On Linux, a reference to the buffer object is acquired here.
	 * The reason is that this function is not called when the
	 * mmap() is initialized, but only when a process forks, for
	 * instance. Therefore on Linux, the reference on the bo is
	 * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's
	 * then released in ttm_bo_vm_close().
	 *
	 * Here, this function is called during mmap() initialization.
	 * Thus, the reference acquired in ttm_bo_mmap_single() is
	 * sufficient.
	 */
	*color = 0;
	return (0);
}
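/*
 * Reference flow on FreeBSD, summarizing the comment above (sketch):
 * ttm_bo_mmap_single() takes the reference (refcount_acquire(&bo->kref)),
 * cdev_pager_allocate() stores the bo as vm_obj->handle, and
 * ttm_bo_vm_dtor() drops it with ttm_bo_unref() when the VM object dies.
 */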
static void
ttm_bo_vm_dtor(void *handle)
{
	struct ttm_buffer_object *bo = handle;

	ttm_bo_unref(&bo);
}
static struct cdev_pager_ops ttm_pager_ops = {
	.cdev_pg_fault = ttm_bo_vm_fault,
	.cdev_pg_ctor = ttm_bo_vm_ctor,
	.cdev_pg_dtor = ttm_bo_vm_dtor
};
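/*
 * These hooks are driven by the OBJT_MGTDEVICE pager: cdev_pg_ctor runs
 * when the VM object is created in ttm_bo_mmap_single() (via
 * cdev_pager_allocate()), cdev_pg_fault services each page fault on the
 * object, and cdev_pg_dtor runs when the last reference to the VM
 * object is dropped.
 */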
int
ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **obj_res, int nprot)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	struct vm_object *vm_obj;
	int ret;

	rw_wlock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
	if (likely(bo != NULL))
		refcount_acquire(&bo->kref);
	rw_wunlock(&bdev->vm_lock);

	if (unlikely(bo == NULL)) {
		printf("[TTM] Could not find buffer object to map\n");
		return (-EINVAL);
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo);
	if (unlikely(ret != 0))
		goto out_unref;

	vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
	    size, nprot, 0, curthread->td_ucred);
	if (vm_obj == NULL) {
		ret = -EINVAL;
		goto out_unref;
	}
	/*
	 * Note: We're transferring the bo reference to vm_obj->handle here.
	 */
	*offset = 0;
	*obj_res = vm_obj;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
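/*
 * A hypothetical driver wrapper, to show the intended call path (the
 * names radeon_mmap_single and rdev are illustrative, not part of this
 * API):
 *
 *	static int
 *	radeon_mmap_single(struct drm_device *dev, vm_ooffset_t *offset,
 *	    vm_size_t size, struct vm_object **obj_res, int nprot)
 *	{
 *		struct radeon_device *rdev = dev->dev_private;
 *
 *		return (ttm_bo_mmap_single(&rdev->mman.bdev, offset,
 *		    size, obj_res, nprot));
 *	}
 */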
void
ttm_bo_release_mmap(struct ttm_buffer_object *bo)
{
	vm_object_t vm_obj;
	vm_page_t m;
	int i;

	vm_obj = cdev_pager_lookup(bo);
	if (vm_obj == NULL)
		return;

	VM_OBJECT_WLOCK(vm_obj);
retry:
	for (i = 0; i < bo->num_pages; i++) {
		m = vm_page_lookup(vm_obj, i);
		if (m == NULL)
			continue;
		if (vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL) == 0)
			goto retry;
		cdev_pager_free_page(vm_obj, m);
	}
	VM_OBJECT_WUNLOCK(vm_obj);

	vm_object_deallocate(vm_obj);
}
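/*
 * The helpers below keep their Linux prototypes (struct file, loff_t,
 * __user buffers). ttm_bo_io() and ttm_bo_fbdev_io() implement
 * file-style read/write on a buffer object by kmapping the touched
 * page range and copying through the kernel map.
 */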
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;
	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, loff_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;
	return io_size;
}