/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

/*
 * Retained from the Linux version of this file, where the fault handler
 * prefaults up to this many pages; it is not referenced in this file.
 */
#define TTM_BO_VM_NUM_PREFAULT 16

RB_GENERATE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
    ttm_bo_cmp_rb_tree_items);

int
ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
    struct ttm_buffer_object *b)
{

        if (a->vm_node->start < b->vm_node->start) {
                return (-1);
        } else if (a->vm_node->start > b->vm_node->start) {
                return (1);
        } else {
                return (0);
        }
}

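/*
 * Find the buffer object whose drm_mm node covers the page range
 * [page_start, page_start + num_pages).  The tree is keyed by
 * vm_node->start, so the walk descends to the bo with the greatest
 * start not exceeding page_start and then checks that the range fits.
 * For example (illustrative numbers only): with objects starting at
 * pages 0, 32 and 64, a lookup with page_start = 40 records the object
 * at 32 as best_bo, descends right, rejects 64, and then succeeds only
 * if that object is at least 8 + num_pages pages long.
 */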
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
                                                     unsigned long page_start,
                                                     unsigned long num_pages)
{
        unsigned long cur_offset;
        struct ttm_buffer_object *bo;
        struct ttm_buffer_object *best_bo = NULL;

        bo = RB_ROOT(&bdev->addr_space_rb);
        while (bo != NULL) {
                cur_offset = bo->vm_node->start;
                if (page_start >= cur_offset) {
                        best_bo = bo;
                        if (page_start == cur_offset)
                                break;
                        bo = RB_RIGHT(bo, vm_rb);
                } else
                        bo = RB_LEFT(bo, vm_rb);
        }

        if (unlikely(best_bo == NULL))
                return (NULL);

        if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
                     (page_start + num_pages)))
                return (NULL);

        return (best_bo);
}

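/*
 * cdev_pg_fault handler for the OBJT_MGTDEVICE pager object created in
 * ttm_bo_mmap_single() below.  It is entered and must return with the
 * VM object write-locked (note how the error paths relock before
 * jumping to the common exits).  On VM_PAGER_OK, *mres is the busied,
 * valid page backing the faulting offset: either the fictitious page
 * of an iomem aperture or a page of the populated ttm.
 */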
static int
ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{
        struct ttm_buffer_object *bo = vm_obj->handle;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_tt *ttm = NULL;
        vm_page_t m, m1;
        int ret;
        int retval = VM_PAGER_OK;
        struct ttm_mem_type_manager *man =
                &bdev->man[bo->mem.mem_type];

        vm_object_pip_add(vm_obj, 1);
        if (*mres != NULL) {
                (void)vm_page_remove(*mres);
        }
retry:
        VM_OBJECT_WUNLOCK(vm_obj);
        m = NULL;

reserve:
        /*
         * Spin until the reservation succeeds.  Only -EBUSY is
         * retried; any other error falls through.
         */
        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (unlikely(ret != 0)) {
                if (ret == -EBUSY) {
                        kern_yield(PRI_USER);
                        goto reserve;
                }
        }

        if (bdev->driver->fault_reserve_notify) {
                ret = bdev->driver->fault_reserve_notify(bo);
                switch (ret) {
                case 0:
                        break;
                case -EBUSY:
                case -ERESTARTSYS:
                case -EINTR:
                        kern_yield(PRI_USER);
                        goto reserve;
                default:
                        retval = VM_PAGER_ERROR;
                        goto out_unlock;
                }
        }

        /*
         * Wait for buffer data in transit, due to a pipelined
         * move.
         */

        mtx_lock(&bdev->fence_lock);
        if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
                /*
                 * Here, the behavior differs between Linux and FreeBSD.
                 *
                 * On Linux, the wait is interruptible (3rd argument to
                 * ttm_bo_wait).  There must be some mechanism to resume
                 * page fault handling, once the signal is processed.
                 *
                 * On FreeBSD, the wait is uninterruptible.  This is not
                 * a problem as we can't end up with an unkillable
                 * process here, because the wait will eventually time
                 * out.
                 *
                 * An interruptible wait would be a problem for a
                 * process such as Xorg, which uses SIGALRM internally.
                 * The signal could interrupt the wait, causing the page
                 * fault to fail and the process to receive SIGSEGV.
                 */
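                /*
                 * For comparison, the Linux fault handler makes roughly
                 * this call instead, with the interruptible argument
                 * set:
                 *
                 *	ret = ttm_bo_wait(bo, false, true, false);
                 */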
                ret = ttm_bo_wait(bo, false, false, false);
                mtx_unlock(&bdev->fence_lock);
                if (unlikely(ret != 0)) {
                        retval = VM_PAGER_ERROR;
                        goto out_unlock;
                }
        } else
                mtx_unlock(&bdev->fence_lock);

        ret = ttm_mem_io_lock(man, true);
        if (unlikely(ret != 0)) {
                retval = VM_PAGER_ERROR;
                goto out_unlock;
        }
        ret = ttm_mem_io_reserve_vm(bo);
        if (unlikely(ret != 0)) {
                retval = VM_PAGER_ERROR;
                goto out_io_unlock;
        }

        /*
         * (Comment retained from the Linux version; FreeBSD has no vma
         * or mmap_sem at this point.)
         *
         * Strictly, we're not allowed to modify vma->vm_page_prot here,
         * since the mmap_sem is only held in read mode.  However, we
         * modify only the caching bits of vma->vm_page_prot and
         * consider those bits protected by the bo->mutex, as we should
         * be the only writers.  There shouldn't really be any readers
         * of these bits except within vm_insert_mixed()?  fork?
         *
         * TODO: Add a list of vmas to the bo, and change the
         * vma->vm_page_prot when the object changes caching policy, with
         * the correct locks held.
         */
        if (!bo->mem.bus.is_iomem) {
                /* Allocate all pages at once, the most common usage. */
                ttm = bo->ttm;
                if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
                        retval = VM_PAGER_ERROR;
                        goto out_io_unlock;
                }
        }

        if (bo->mem.bus.is_iomem) {
                m = PHYS_TO_VM_PAGE(bo->mem.bus.base + bo->mem.bus.offset +
                    offset);
                KASSERT((m->flags & PG_FICTITIOUS) != 0,
                    ("physical address %#jx not fictitious",
                    (uintmax_t)(bo->mem.bus.base + bo->mem.bus.offset
                    + offset)));
                pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
        } else {
                ttm = bo->ttm;
                m = ttm->pages[OFF_TO_IDX(offset)];
                if (unlikely(!m)) {
                        retval = VM_PAGER_ERROR;
                        goto out_io_unlock;
                }
                /* Cached placements map write-back; others follow ttm_io_prot(). */
                pmap_page_set_memattr(m,
                    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
                    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
        }

        VM_OBJECT_WLOCK(vm_obj);
        if (vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL) == 0) {
                /* The page is busy; drop everything and retry the fault. */
                ttm_mem_io_unlock(man);
                ttm_bo_unreserve(bo);
                goto retry;
        }
        m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
        /* XXX This looks like it should just be vm_page_replace? */
        if (m1 == NULL) {
                if (vm_page_insert(m, vm_obj, OFF_TO_IDX(offset))) {
                        vm_page_xunbusy(m);
                        VM_OBJECT_WUNLOCK(vm_obj);
                        vm_wait(vm_obj);
                        VM_OBJECT_WLOCK(vm_obj);
                        ttm_mem_io_unlock(man);
                        ttm_bo_unreserve(bo);
                        goto retry;
                }
        } else {
                KASSERT(m == m1,
                    ("inconsistent insert bo %p m %p m1 %p offset %jx",
                    bo, m, m1, (uintmax_t)offset));
        }
        vm_page_valid(m);
        if (*mres != NULL) {
                KASSERT(*mres != m, ("losing %p %p", *mres, m));
                vm_page_xunbusy(*mres);
                vm_page_free(*mres);
        }
        *mres = m;

out_io_unlock1:
        ttm_mem_io_unlock(man);
out_unlock1:
        ttm_bo_unreserve(bo);
        vm_object_pip_wakeup(vm_obj);
        return (retval);

out_io_unlock:
        VM_OBJECT_WLOCK(vm_obj);
        goto out_io_unlock1;

out_unlock:
        VM_OBJECT_WLOCK(vm_obj);
        goto out_unlock1;
}

static int
ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

        /*
         * On Linux, a reference to the buffer object is acquired here.
         * The reason is that this function is not called when the
         * mmap() is initialized, but only when a process forks for
         * instance. Therefore on Linux, the reference on the bo is
         * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's
         * then released in ttm_bo_vm_close().
         *
         * Here, this function is called during mmap() initialization.
         * Thus, the reference acquired in ttm_bo_mmap_single() is
         * sufficient.
         */

        *color = 0;
        return (0);
}

static void
ttm_bo_vm_dtor(void *handle)
{
        struct ttm_buffer_object *bo = handle;

        ttm_bo_unref(&bo);
}

static struct cdev_pager_ops ttm_pager_ops = {
        .cdev_pg_fault = ttm_bo_vm_fault,
        .cdev_pg_ctor = ttm_bo_vm_ctor,
        .cdev_pg_dtor = ttm_bo_vm_dtor
};
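
/*
 * How the pieces above fit together (a sketch): cdev_pager_allocate()
 * in ttm_bo_mmap_single() below creates an OBJT_MGTDEVICE object and
 * calls cdev_pg_ctor at mmap() time; every fault on that object goes
 * through cdev_pg_fault; and cdev_pg_dtor runs when the last reference
 * to the object is dropped, releasing the bo reference that
 * ttm_bo_mmap_single() transferred to vm_obj->handle.
 */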

int
ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **obj_res, int nprot)
{
        struct ttm_bo_driver *driver;
        struct ttm_buffer_object *bo;
        struct vm_object *vm_obj;
        int ret;

        rw_wlock(&bdev->vm_lock);
        bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
        if (likely(bo != NULL))
                refcount_acquire(&bo->kref);
        rw_wunlock(&bdev->vm_lock);

        if (unlikely(bo == NULL)) {
                printf("[TTM] Could not find buffer object to map\n");
                return (-EINVAL);
        }

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = -EPERM;
                goto out_unref;
        }
        ret = driver->verify_access(bo);
        if (unlikely(ret != 0))
                goto out_unref;

        vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
            size, nprot, 0, curthread->td_ucred);
        if (vm_obj == NULL) {
                ret = -EINVAL;
                goto out_unref;
        }
        /*
         * Note: We're transferring the bo reference to vm_obj->handle here.
         */
        *offset = 0;
        *obj_res = vm_obj;
        return (0);
out_unref:
        ttm_bo_unref(&bo);
        return (ret);
}
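
/*
 * A driver typically reaches ttm_bo_mmap_single() from its own
 * mmap_single callback.  A minimal sketch, with all foo_* names
 * hypothetical:
 *
 *	static int
 *	foo_mmap_single(struct drm_device *dev, vm_ooffset_t *offset,
 *	    vm_size_t size, struct vm_object **obj_res, int nprot)
 *	{
 *		struct foo_softc *sc = dev->dev_private;
 *
 *		return (ttm_bo_mmap_single(&sc->bdev, offset, size,
 *		    obj_res, nprot));
 *	}
 *
 * On success, the returned VM object owns the buffer object reference
 * taken above; ttm_bo_vm_dtor() releases it when the object dies.
 */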

/*
 * Tear down user mappings of the buffer object: look up the pager
 * object backing the bo and evict every resident page from it.
 */
void
ttm_bo_release_mmap(struct ttm_buffer_object *bo)
{
        vm_object_t vm_obj;
        vm_page_t m;
        int i;

        vm_obj = cdev_pager_lookup(bo);
        if (vm_obj == NULL)
                return;

        VM_OBJECT_WLOCK(vm_obj);
retry:
        for (i = 0; i < bo->num_pages; i++) {
                m = vm_page_lookup(vm_obj, i);
                if (m == NULL)
                        continue;
                if (vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL) == 0)
                        goto retry;
                cdev_pager_free_page(vm_obj, m);
        }
        VM_OBJECT_WUNLOCK(vm_obj);

        vm_object_deallocate(vm_obj);
}

/* The following Linux-only helpers are compiled out. */
#if 0
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
        if (vma->vm_pgoff != 0)
                return -EACCES;

        vma->vm_ops = &ttm_bo_vm_ops;
        vma->vm_private_data = ttm_bo_reference(bo);
        vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
        return 0;
}

ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
                  const char __user *wbuf, char __user *rbuf, size_t count,
                  loff_t *f_pos, bool write)
{
        struct ttm_buffer_object *bo;
        struct ttm_bo_driver *driver;
        struct ttm_bo_kmap_obj map;
        unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
        unsigned long kmap_offset;
        unsigned long kmap_end;
        unsigned long kmap_num;
        size_t io_size;
        unsigned int page_offset;
        char *virtual;
        int ret;
        bool no_wait = false;
        bool dummy;

        read_lock(&bdev->vm_lock);
        bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
        if (likely(bo != NULL))
                ttm_bo_reference(bo);
        read_unlock(&bdev->vm_lock);

        if (unlikely(bo == NULL))
                return -EFAULT;

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = -EPERM;
                goto out_unref;
        }

        ret = driver->verify_access(bo, filp);
        if (unlikely(ret != 0))
                goto out_unref;

        kmap_offset = dev_offset - bo->vm_node->start;
        if (unlikely(kmap_offset >= bo->num_pages)) {
                ret = -EFBIG;
                goto out_unref;
        }

        page_offset = *f_pos & ~PAGE_MASK;
        io_size = bo->num_pages - kmap_offset;
        io_size = (io_size << PAGE_SHIFT) - page_offset;
        if (count < io_size)
                io_size = count;

        kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
        kmap_num = kmap_end - kmap_offset + 1;

        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

        switch (ret) {
        case 0:
                break;
        case -EBUSY:
                ret = -EAGAIN;
                goto out_unref;
        default:
                goto out_unref;
        }

        ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
        if (unlikely(ret != 0)) {
                ttm_bo_unreserve(bo);
                goto out_unref;
        }

        virtual = ttm_kmap_obj_virtual(&map, &dummy);
        virtual += page_offset;

        if (write)
                ret = copy_from_user(virtual, wbuf, io_size);
        else
                ret = copy_to_user(rbuf, virtual, io_size);

        ttm_bo_kunmap(&map);
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        if (unlikely(ret != 0))
                return -EFBIG;

        *f_pos += io_size;

        return io_size;
out_unref:
        ttm_bo_unref(&bo);
        return ret;
}

ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
                        char __user *rbuf, size_t count, loff_t *f_pos,
                        bool write)
{
        struct ttm_bo_kmap_obj map;
        unsigned long kmap_offset;
        unsigned long kmap_end;
        unsigned long kmap_num;
        size_t io_size;
        unsigned int page_offset;
        char *virtual;
        int ret;
        bool no_wait = false;
        bool dummy;

        kmap_offset = (*f_pos >> PAGE_SHIFT);
        if (unlikely(kmap_offset >= bo->num_pages))
                return -EFBIG;

        page_offset = *f_pos & ~PAGE_MASK;
        io_size = bo->num_pages - kmap_offset;
        io_size = (io_size << PAGE_SHIFT) - page_offset;
        if (count < io_size)
                io_size = count;

        kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
        kmap_num = kmap_end - kmap_offset + 1;

        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

        switch (ret) {
        case 0:
                break;
        case -EBUSY:
                return -EAGAIN;
        default:
                return ret;
        }

        ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
        if (unlikely(ret != 0)) {
                ttm_bo_unreserve(bo);
                return ret;
        }

        virtual = ttm_kmap_obj_virtual(&map, &dummy);
        virtual += page_offset;

        if (write)
                ret = copy_from_user(virtual, wbuf, io_size);
        else
                ret = copy_to_user(rbuf, virtual, io_size);

        ttm_bo_kunmap(&map);
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        if (unlikely(ret != 0))
                return ret;

        *f_pos += io_size;

        return io_size;
}
#endif