1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  * Copyright (c) 2011 The FreeBSD Foundation
27  * All rights reserved.
28  *
29  * This software was developed by Konstantin Belousov under sponsorship from
30  * the FreeBSD Foundation.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  *
41  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
42  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51  * SUCH DAMAGE.
52  */
53
54 #include <sys/cdefs.h>
55 __FBSDID("$FreeBSD$");
56
57 #include <dev/drm2/drmP.h>
58 #include <dev/drm2/i915/i915_drm.h>
59 #include <dev/drm2/i915/i915_drv.h>
60 #include <dev/drm2/i915/intel_drv.h>
61
62 #include <sys/resourcevar.h>
63 #include <sys/sched.h>
64 #include <sys/sf_buf.h>
65
66 #include <vm/vm.h>
67 #include <vm/vm_pageout.h>
68
69 #include <machine/md_var.h>
70
71 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
72 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
73 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
74                                                     unsigned alignment,
75                                                     bool map_and_fenceable,
76                                                     bool nonblocking);
77 static int i915_gem_phys_pwrite(struct drm_device *dev,
78                                 struct drm_i915_gem_object *obj,
79                                 struct drm_i915_gem_pwrite *args,
80                                 struct drm_file *file);
81
82 static void i915_gem_write_fence(struct drm_device *dev, int reg,
83                                  struct drm_i915_gem_object *obj);
84 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
85                                          struct drm_i915_fence_reg *fence,
86                                          bool enable);
87
88 static void i915_gem_inactive_shrink(void *);
89 static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
90 static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
91 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
92
93 static int i915_gem_object_get_pages_range(struct drm_i915_gem_object *obj,
94     off_t start, off_t end);
95
96 static vm_page_t i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex,
97     bool *fresh);
98
99 MALLOC_DEFINE(DRM_I915_GEM, "i915gem", "Allocations from i915 gem");
100 long i915_gem_wired_pages_cnt;
101
102 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
103 {
104         if (obj->tiling_mode)
105                 i915_gem_release_mmap(obj);
106
107         /* As we do not have an associated fence register, we will force
108          * a tiling change if we ever need to acquire one.
109          */
110         obj->fence_dirty = false;
111         obj->fence_reg = I915_FENCE_REG_NONE;
112 }
113
114 /* some bookkeeping */
115 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
116                                   size_t size)
117 {
118         dev_priv->mm.object_count++;
119         dev_priv->mm.object_memory += size;
120 }
121
122 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
123                                      size_t size)
124 {
125         dev_priv->mm.object_count--;
126         dev_priv->mm.object_memory -= size;
127 }
128
129 static int
130 i915_gem_wait_for_error(struct drm_device *dev)
131 {
132         struct drm_i915_private *dev_priv = dev->dev_private;
133         struct completion *x = &dev_priv->error_completion;
134         int ret;
135
136         if (!atomic_read(&dev_priv->mm.wedged))
137                 return 0;
138
139         /*
140          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
141          * userspace. If it takes that long something really bad is going on and
142          * we should simply try to bail out and fail as gracefully as possible.
143          */
144         ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
145         if (ret == 0) {
146                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
147                 return -EIO;
148         } else if (ret < 0) {
149                 return ret;
150         }
151
152         if (atomic_read(&dev_priv->mm.wedged)) {
153                 /* GPU is hung, bump the completion count to account for
154                  * the token we just consumed so that we never hit zero and
155                  * end up waiting upon a subsequent completion event that
156                  * will never happen.
157                  */
158                 mtx_lock(&x->lock);
159                 x->done++;
160                 mtx_unlock(&x->lock);
161         }
162         return 0;
163 }
164
165 int i915_mutex_lock_interruptible(struct drm_device *dev)
166 {
167         int ret;
168
169         ret = i915_gem_wait_for_error(dev);
170         if (ret)
171                 return ret;
172
173         /*
174          * Interruptible shall it be. It might indeed be, if dev_lock is
175          * changed to an sx lock.
176          */
177         ret = sx_xlock_sig(&dev->dev_struct_lock);
178         if (ret)
179                 return -EINTR;
180
181         WARN_ON(i915_verify_lists(dev));
182         return 0;
183 }
184
185 static inline bool
186 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
187 {
188         return obj->gtt_space && !obj->active;
189 }
190
191 int
192 i915_gem_init_ioctl(struct drm_device *dev, void *data,
193                     struct drm_file *file)
194 {
195         struct drm_i915_gem_init *args = data;
196
197         if (drm_core_check_feature(dev, DRIVER_MODESET))
198                 return -ENODEV;
199
200         if (args->gtt_start >= args->gtt_end ||
201             (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
202                 return -EINVAL;
203
204         /* GEM with user mode setting was never supported on ilk and later. */
205         if (INTEL_INFO(dev)->gen >= 5)
206                 return -ENODEV;
207
208         /*
209          * XXXKIB. The second-time initialization should be guarded
210          * against.
211          */
212         DRM_LOCK(dev);
213         i915_gem_init_global_gtt(dev, args->gtt_start,
214                                  args->gtt_end, args->gtt_end);
215         DRM_UNLOCK(dev);
216
217         return 0;
218 }
219
220 int
221 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
222                             struct drm_file *file)
223 {
224         struct drm_i915_private *dev_priv = dev->dev_private;
225         struct drm_i915_gem_get_aperture *args = data;
226         struct drm_i915_gem_object *obj;
227         size_t pinned;
228
229         pinned = 0;
230         DRM_LOCK(dev);
231         list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
232                 if (obj->pin_count)
233                         pinned += obj->gtt_space->size;
234         DRM_UNLOCK(dev);
235
236         args->aper_size = dev_priv->mm.gtt_total;
237         args->aper_available_size = args->aper_size - pinned;
238
239         return 0;
240 }
241
242 static int
243 i915_gem_create(struct drm_file *file,
244                 struct drm_device *dev,
245                 uint64_t size,
246                 uint32_t *handle_p)
247 {
248         struct drm_i915_gem_object *obj;
249         int ret;
250         u32 handle;
251
252         size = roundup(size, PAGE_SIZE);
253         if (size == 0)
254                 return -EINVAL;
255
256         /* Allocate the new object */
257         obj = i915_gem_alloc_object(dev, size);
258         if (obj == NULL)
259                 return -ENOMEM;
260
261         ret = drm_gem_handle_create(file, &obj->base, &handle);
262         if (ret) {
263                 drm_gem_object_release(&obj->base);
264                 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
265                 free(obj, DRM_I915_GEM);
266                 return ret;
267         }
268
269         /* drop reference from allocate - handle holds it now */
270         drm_gem_object_unreference(&obj->base);
271         CTR2(KTR_DRM, "object_create %p %x", obj, size);
272
273         *handle_p = handle;
274         return 0;
275 }
276
277 int
278 i915_gem_dumb_create(struct drm_file *file,
279                      struct drm_device *dev,
280                      struct drm_mode_create_dumb *args)
281 {
282         /* have to work out size/pitch and return them */
283         args->pitch = roundup2(args->width * ((args->bpp + 7) / 8), 64);
284         args->size = args->pitch * args->height;
285         return i915_gem_create(file, dev,
286                                args->size, &args->handle);
287 }
288
289 int i915_gem_dumb_destroy(struct drm_file *file,
290                           struct drm_device *dev,
291                           uint32_t handle)
292 {
293         return drm_gem_handle_delete(file, handle);
294 }
295
296 /**
297  * Creates a new mm object and returns a handle to it.
298  */
299 int
300 i915_gem_create_ioctl(struct drm_device *dev, void *data,
301                       struct drm_file *file)
302 {
303         struct drm_i915_gem_create *args = data;
304
305         return i915_gem_create(file, dev,
306                                args->size, &args->handle);
307 }
308
309 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
310 {
311         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
312
313         return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
314                 obj->tiling_mode != I915_TILING_NONE;
315 }
316
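/*
 * Copy helpers for objects that need bit-17 swizzle compensation (see
 * i915_gem_object_needs_bit17_swizzle() above).  The copy proceeds one
 * 64-byte cacheline at a time and XORs the GPU-side offset with 64,
 * i.e. flips address bit 6, so that the CPU view matches the layout the
 * hardware uses for pages whose physical address has bit 17 set.
 */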
317 static inline int
318 __copy_to_user_swizzled(char __user *cpu_vaddr,
319                         const char *gpu_vaddr, int gpu_offset,
320                         int length)
321 {
322         int ret, cpu_offset = 0;
323
324         while (length > 0) {
325                 int cacheline_end = roundup2(gpu_offset + 1, 64);
326                 int this_length = min(cacheline_end - gpu_offset, length);
327                 int swizzled_gpu_offset = gpu_offset ^ 64;
328
329                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
330                                      gpu_vaddr + swizzled_gpu_offset,
331                                      this_length);
332                 if (ret)
333                         return ret + length;
334
335                 cpu_offset += this_length;
336                 gpu_offset += this_length;
337                 length -= this_length;
338         }
339
340         return 0;
341 }
342
343 static inline int
344 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
345                           const char __user *cpu_vaddr,
346                           int length)
347 {
348         int ret, cpu_offset = 0;
349
350         while (length > 0) {
351                 int cacheline_end = roundup2(gpu_offset + 1, 64);
352                 int this_length = min(cacheline_end - gpu_offset, length);
353                 int swizzled_gpu_offset = gpu_offset ^ 64;
354
355                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
356                                        cpu_vaddr + cpu_offset,
357                                        this_length);
358                 if (ret)
359                         return ret + length;
360
361                 cpu_offset += this_length;
362                 gpu_offset += this_length;
363                 length -= this_length;
364         }
365
366         return 0;
367 }
368
369 /* Per-page copy function for the shmem pread fastpath.
370  * Flushes invalid cachelines before reading the target if
371  * needs_clflush is set. */
372 static int
373 shmem_pread_fast(vm_page_t page, int shmem_page_offset, int page_length,
374                  char __user *user_data,
375                  bool page_do_bit17_swizzling, bool needs_clflush)
376 {
377         char *vaddr;
378         struct sf_buf *sf;
379         int ret;
380
381         if (unlikely(page_do_bit17_swizzling))
382                 return -EINVAL;
383
384         sched_pin();
385         sf = sf_buf_alloc(page, SFB_NOWAIT | SFB_CPUPRIVATE);
386         if (sf == NULL) {
387                 sched_unpin();
388                 return (-EFAULT);
389         }
390         vaddr = (char *)sf_buf_kva(sf);
391         if (needs_clflush)
392                 drm_clflush_virt_range(vaddr + shmem_page_offset,
393                                        page_length);
394         ret = __copy_to_user_inatomic(user_data,
395                                       vaddr + shmem_page_offset,
396                                       page_length);
397         sf_buf_free(sf);
398         sched_unpin();
399
400         return ret ? -EFAULT : 0;
401 }
402
403 static void
404 shmem_clflush_swizzled_range(char *addr, unsigned long length,
405                              bool swizzled)
406 {
407         if (unlikely(swizzled)) {
408                 unsigned long start = (unsigned long) addr;
409                 unsigned long end = (unsigned long) addr + length;
410
411                 /* For swizzling simply ensure that we always flush both
412                  * channels. Lame, but simple and it works. Swizzled
413                  * pwrite/pread is far from a hotpath - current userspace
414                  * doesn't use it at all. */
415                 start = round_down(start, 128);
416                 end = round_up(end, 128);
417
418                 drm_clflush_virt_range((void *)start, end - start);
419         } else {
420                 drm_clflush_virt_range(addr, length);
421         }
422
423 }
424
425 /* Only difference to the fast-path function is that this can handle bit17
426  * and uses non-atomic copy and kmap functions. */
427 static int
428 shmem_pread_slow(vm_page_t page, int shmem_page_offset, int page_length,
429                  char __user *user_data,
430                  bool page_do_bit17_swizzling, bool needs_clflush)
431 {
432         char *vaddr;
433         struct sf_buf *sf;
434         int ret;
435
436         sf = sf_buf_alloc(page, 0);
437         vaddr = (char *)sf_buf_kva(sf);
438         if (needs_clflush)
439                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
440                                              page_length,
441                                              page_do_bit17_swizzling);
442
443         if (page_do_bit17_swizzling)
444                 ret = __copy_to_user_swizzled(user_data,
445                                               vaddr, shmem_page_offset,
446                                               page_length);
447         else
448                 ret = __copy_to_user(user_data,
449                                      vaddr + shmem_page_offset,
450                                      page_length);
451         sf_buf_free(sf);
452
453         return ret ? -EFAULT : 0;
454 }
455
456 static int
457 i915_gem_shmem_pread(struct drm_device *dev,
458                      struct drm_i915_gem_object *obj,
459                      struct drm_i915_gem_pread *args,
460                      struct drm_file *file)
461 {
462         char __user *user_data;
463         ssize_t remain;
464         off_t offset;
465         int shmem_page_offset, page_length, ret = 0;
466         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
467         int hit_slowpath = 0;
468         int prefaulted = 0;
469         int needs_clflush = 0;
470
471         user_data = to_user_ptr(args->data_ptr);
472         remain = args->size;
473
474         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
475
476         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
477                 /* If we're not in the cpu read domain, set ourself into the gtt
478                  * read domain and manually flush cachelines (if required). This
479                  * optimizes for the case when the gpu will dirty the data
480                  * anyway again before the next pread happens. */
481                 if (obj->cache_level == I915_CACHE_NONE)
482                         needs_clflush = 1;
483                 if (obj->gtt_space) {
484                         ret = i915_gem_object_set_to_gtt_domain(obj, false);
485                         if (ret)
486                                 return ret;
487                 }
488         }
489
490         ret = i915_gem_object_get_pages(obj);
491         if (ret)
492                 return ret;
493
494         i915_gem_object_pin_pages(obj);
495
496         offset = args->offset;
497
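        /*
         * Walk the resident pages of the backing VM object.  For each page,
         * try the atomic fastpath copy first (shmem_pread_fast()); if that
         * fails, drop the DRM lock, prefault the user buffer and retry with
         * the sleepable slowpath (shmem_pread_slow()).
         */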
498         VM_OBJECT_WLOCK(obj->base.vm_obj);
499         for (vm_page_t page = vm_page_find_least(obj->base.vm_obj,
500             OFF_TO_IDX(offset));; page = vm_page_next(page)) {
501                 VM_OBJECT_WUNLOCK(obj->base.vm_obj);
502
503                 if (remain <= 0)
504                         break;
505
506                 /* Operation in this page
507                  *
508                  * shmem_page_offset = offset within page in shmem file
509                  * page_length = bytes to copy for this page
510                  */
511                 shmem_page_offset = offset_in_page(offset);
512                 page_length = remain;
513                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
514                         page_length = PAGE_SIZE - shmem_page_offset;
515
516                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
517                         (page_to_phys(page) & (1 << 17)) != 0;
518
519                 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
520                                        user_data, page_do_bit17_swizzling,
521                                        needs_clflush);
522                 if (ret == 0)
523                         goto next_page;
524
525                 hit_slowpath = 1;
526                 DRM_UNLOCK(dev);
527
528                 if (!prefaulted) {
529                         ret = fault_in_multipages_writeable(user_data, remain);
530                         /* Userspace is tricking us, but we've already clobbered
531                          * its pages with the prefault and promised to write the
532                          * data up to the first fault. Hence ignore any errors
533                          * and just continue. */
534                         (void)ret;
535                         prefaulted = 1;
536                 }
537
538                 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
539                                        user_data, page_do_bit17_swizzling,
540                                        needs_clflush);
541
542                 DRM_LOCK(dev);
543
544 next_page:
545                 vm_page_reference(page);
546
547                 if (ret)
548                         goto out;
549
550                 remain -= page_length;
551                 user_data += page_length;
552                 offset += page_length;
553                 VM_OBJECT_WLOCK(obj->base.vm_obj);
554         }
555
556 out:
557         i915_gem_object_unpin_pages(obj);
558
559         if (hit_slowpath) {
560                 /* Fixup: Kill any reinstated backing storage pages */
561                 if (obj->madv == __I915_MADV_PURGED)
562                         i915_gem_object_truncate(obj);
563         }
564
565         return ret;
566 }
567
568 /**
569  * Reads data from the object referenced by handle.
570  *
571  * On error, the contents of *data are undefined.
572  */
573 int
574 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
575                      struct drm_file *file)
576 {
577         struct drm_i915_gem_pread *args = data;
578         struct drm_i915_gem_object *obj;
579         int ret = 0;
580
581         if (args->size == 0)
582                 return 0;
583
584         if (!useracc(to_user_ptr(args->data_ptr), args->size, VM_PROT_WRITE))
585                 return -EFAULT;
586
587         ret = i915_mutex_lock_interruptible(dev);
588         if (ret)
589                 return ret;
590
591         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
592         if (&obj->base == NULL) {
593                 ret = -ENOENT;
594                 goto unlock;
595         }
596
597         /* Bounds check source.  */
598         if (args->offset > obj->base.size ||
599             args->size > obj->base.size - args->offset) {
600                 ret = -EINVAL;
601                 goto out;
602         }
603
604 #ifdef FREEBSD_WIP
605         /* prime objects have no backing filp to GEM pread/pwrite
606          * pages from.
607          */
608         if (!obj->base.filp) {
609                 ret = -EINVAL;
610                 goto out;
611         }
612 #endif /* FREEBSD_WIP */
613
614         CTR3(KTR_DRM, "pread %p %jx %jx", obj, args->offset, args->size);
615
616         ret = i915_gem_shmem_pread(dev, obj, args, file);
617
618 out:
619         drm_gem_object_unreference(&obj->base);
620 unlock:
621         DRM_UNLOCK(dev);
622         return ret;
623 }
624
625 /* This is the fast write path which cannot handle
626  * page faults in the source data
627  */
628
629 static inline int
630 fast_user_write(vm_paddr_t mapping_addr,
631                 off_t page_base, int page_offset,
632                 char __user *user_data,
633                 int length)
634 {
635         void __iomem *vaddr_atomic;
636         void *vaddr;
637         unsigned long unwritten;
638
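        /*
         * Map the target aperture page write-combining for just this copy;
         * the temporary mapping is torn down with pmap_unmapdev() below.
         */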
639         vaddr_atomic = pmap_mapdev_attr(mapping_addr + page_base,
640             length, PAT_WRITE_COMBINING);
641         /* We can use the cpu mem copy function because this is X86. */
642         vaddr = (char __force*)vaddr_atomic + page_offset;
643         unwritten = __copy_from_user_inatomic_nocache(vaddr,
644                                                       user_data, length);
645         pmap_unmapdev((vm_offset_t)vaddr_atomic, length);
646         return unwritten;
647 }
648
649 /**
650  * This is the fast pwrite path, where we copy the data directly from the
651  * user into the GTT, uncached.
652  */
653 static int
654 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
655                          struct drm_i915_gem_object *obj,
656                          struct drm_i915_gem_pwrite *args,
657                          struct drm_file *file)
658 {
659         drm_i915_private_t *dev_priv = dev->dev_private;
660         ssize_t remain;
661         off_t offset, page_base;
662         char __user *user_data;
663         int page_offset, page_length, ret;
664
665         ret = i915_gem_object_pin(obj, 0, true, true);
666         if (ret)
667                 goto out;
668
669         ret = i915_gem_object_set_to_gtt_domain(obj, true);
670         if (ret)
671                 goto out_unpin;
672
673         ret = i915_gem_object_put_fence(obj);
674         if (ret)
675                 goto out_unpin;
676
677         user_data = to_user_ptr(args->data_ptr);
678         remain = args->size;
679
680         offset = obj->gtt_offset + args->offset;
681
682         while (remain > 0) {
683                 /* Operation in this page
684                  *
685                  * page_base = page offset within aperture
686                  * page_offset = offset within page
687                  * page_length = bytes to copy for this page
688                  */
689                 page_base = offset & ~PAGE_MASK;
690                 page_offset = offset_in_page(offset);
691                 page_length = remain;
692                 if ((page_offset + remain) > PAGE_SIZE)
693                         page_length = PAGE_SIZE - page_offset;
694
695                 /* If we get a fault while copying data, then (presumably) our
696                  * source page isn't available.  Return the error and we'll
697                  * retry in the slow path.
698                  */
699                 if (fast_user_write(dev_priv->mm.gtt_base_addr, page_base,
700                                     page_offset, user_data, page_length)) {
701                         ret = -EFAULT;
702                         goto out_unpin;
703                 }
704
705                 remain -= page_length;
706                 user_data += page_length;
707                 offset += page_length;
708         }
709
710 out_unpin:
711         i915_gem_object_unpin(obj);
712 out:
713         return ret;
714 }
715
716 /* Per-page copy function for the shmem pwrite fastpath.
717  * Flushes invalid cachelines before writing to the target if
718  * needs_clflush_before is set and flushes out any written cachelines after
719  * writing if needs_clflush is set. */
720 static int
721 shmem_pwrite_fast(vm_page_t page, int shmem_page_offset, int page_length,
722                   char __user *user_data,
723                   bool page_do_bit17_swizzling,
724                   bool needs_clflush_before,
725                   bool needs_clflush_after)
726 {
727         char *vaddr;
728         struct sf_buf *sf;
729         int ret;
730
731         if (unlikely(page_do_bit17_swizzling))
732                 return -EINVAL;
733
734         sched_pin();
735         sf = sf_buf_alloc(page, SFB_NOWAIT | SFB_CPUPRIVATE);
736         if (sf == NULL) {
737                 sched_unpin();
738                 return (-EFAULT);
739         }
740         vaddr = (char *)sf_buf_kva(sf);
741         if (needs_clflush_before)
742                 drm_clflush_virt_range(vaddr + shmem_page_offset,
743                                        page_length);
744         ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
745                                                 user_data,
746                                                 page_length);
747         if (needs_clflush_after)
748                 drm_clflush_virt_range(vaddr + shmem_page_offset,
749                                        page_length);
750         sf_buf_free(sf);
751         sched_unpin();
752
753         return ret ? -EFAULT : 0;
754 }
755
756 /* Only difference to the fast-path function is that this can handle bit17
757  * and uses non-atomic copy and kmap functions. */
758 static int
759 shmem_pwrite_slow(vm_page_t page, int shmem_page_offset, int page_length,
760                   char __user *user_data,
761                   bool page_do_bit17_swizzling,
762                   bool needs_clflush_before,
763                   bool needs_clflush_after)
764 {
765         char *vaddr;
766         struct sf_buf *sf;
767         int ret;
768
769         sf = sf_buf_alloc(page, 0);
770         vaddr = (char *)sf_buf_kva(sf);
771         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
772                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
773                                              page_length,
774                                              page_do_bit17_swizzling);
775         if (page_do_bit17_swizzling)
776                 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
777                                                 user_data,
778                                                 page_length);
779         else
780                 ret = __copy_from_user(vaddr + shmem_page_offset,
781                                        user_data,
782                                        page_length);
783         if (needs_clflush_after)
784                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
785                                              page_length,
786                                              page_do_bit17_swizzling);
787         sf_buf_free(sf);
788
789         return ret ? -EFAULT : 0;
790 }
791
792 static int
793 i915_gem_shmem_pwrite(struct drm_device *dev,
794                       struct drm_i915_gem_object *obj,
795                       struct drm_i915_gem_pwrite *args,
796                       struct drm_file *file)
797 {
798         ssize_t remain;
799         off_t offset;
800         char __user *user_data;
801         int shmem_page_offset, page_length, ret = 0;
802         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
803         int hit_slowpath = 0;
804         int needs_clflush_after = 0;
805         int needs_clflush_before = 0;
806
807         user_data = to_user_ptr(args->data_ptr);
808         remain = args->size;
809
810         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
811
812         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
813                 /* If we're not in the cpu write domain, set ourself into the gtt
814                  * write domain and manually flush cachelines (if required). This
815                  * optimizes for the case when the gpu will use the data
816                  * right away and we therefore have to clflush anyway. */
817                 if (obj->cache_level == I915_CACHE_NONE)
818                         needs_clflush_after = 1;
819                 if (obj->gtt_space) {
820                         ret = i915_gem_object_set_to_gtt_domain(obj, true);
821                         if (ret)
822                                 return ret;
823                 }
824         }
825         /* Same trick applies to invalidating partially written cachelines before
826          * writing.  */
827         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
828             && obj->cache_level == I915_CACHE_NONE)
829                 needs_clflush_before = 1;
830
831         ret = i915_gem_object_get_pages(obj);
832         if (ret)
833                 return ret;
834
835         i915_gem_object_pin_pages(obj);
836
837         offset = args->offset;
838         obj->dirty = 1;
839
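        /*
         * Same resident-page walk as i915_gem_shmem_pread(): try the atomic
         * fastpath first and fall back to the sleepable slowpath with the
         * DRM lock dropped, dirtying each page once it has been written.
         */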
840         VM_OBJECT_WLOCK(obj->base.vm_obj);
841         for (vm_page_t page = vm_page_find_least(obj->base.vm_obj,
842             OFF_TO_IDX(offset));; page = vm_page_next(page)) {
843                 VM_OBJECT_WUNLOCK(obj->base.vm_obj);
844                 int partial_cacheline_write;
845
846                 if (remain <= 0)
847                         break;
848
849                 /* Operation in this page
850                  *
851                  * shmem_page_offset = offset within page in shmem file
852                  * page_length = bytes to copy for this page
853                  */
854                 shmem_page_offset = offset_in_page(offset);
855
856                 page_length = remain;
857                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
858                         page_length = PAGE_SIZE - shmem_page_offset;
859
860                 /* If we don't overwrite a cacheline completely we need to be
861                  * careful to have up-to-date data by first clflushing. Don't
862                  * overcomplicate things and flush the entire patch. */
863                 partial_cacheline_write = needs_clflush_before &&
864                         ((shmem_page_offset | page_length)
865                                 & (cpu_clflush_line_size - 1));
866
867                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
868                         (page_to_phys(page) & (1 << 17)) != 0;
869
870                 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
871                                         user_data, page_do_bit17_swizzling,
872                                         partial_cacheline_write,
873                                         needs_clflush_after);
874                 if (ret == 0)
875                         goto next_page;
876
877                 hit_slowpath = 1;
878                 DRM_UNLOCK(dev);
879                 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
880                                         user_data, page_do_bit17_swizzling,
881                                         partial_cacheline_write,
882                                         needs_clflush_after);
883
884                 DRM_LOCK(dev);
885
886 next_page:
887                 vm_page_dirty(page);
888                 vm_page_reference(page);
889
890                 if (ret)
891                         goto out;
892
893                 remain -= page_length;
894                 user_data += page_length;
895                 offset += page_length;
896                 VM_OBJECT_WLOCK(obj->base.vm_obj);
897         }
898
899 out:
900         i915_gem_object_unpin_pages(obj);
901
902         if (hit_slowpath) {
903                 /* Fixup: Kill any reinstated backing storage pages */
904                 if (obj->madv == __I915_MADV_PURGED)
905                         i915_gem_object_truncate(obj);
906                 /* and flush dirty cachelines in case the object isn't in the cpu write
907                  * domain anymore. */
908                 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
909                         i915_gem_clflush_object(obj);
910                         i915_gem_chipset_flush(dev);
911                 }
912         }
913
914         if (needs_clflush_after)
915                 i915_gem_chipset_flush(dev);
916
917         return ret;
918 }
919
920 /**
921  * Writes data to the object referenced by handle.
922  *
923  * On error, the contents of the buffer that were to be modified are undefined.
924  */
925 int
926 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
927                       struct drm_file *file)
928 {
929         struct drm_i915_gem_pwrite *args = data;
930         struct drm_i915_gem_object *obj;
931         int ret;
932
933         if (args->size == 0)
934                 return 0;
935
936         if (!useracc(to_user_ptr(args->data_ptr), args->size, VM_PROT_READ))
937                 return -EFAULT;
938
939         ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
940                                            args->size);
941         if (ret)
942                 return -EFAULT;
943
944         ret = i915_mutex_lock_interruptible(dev);
945         if (ret)
946                 return ret;
947
948         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
949         if (&obj->base == NULL) {
950                 ret = -ENOENT;
951                 goto unlock;
952         }
953
954         /* Bounds check destination. */
955         if (args->offset > obj->base.size ||
956             args->size > obj->base.size - args->offset) {
957                 ret = -EINVAL;
958                 goto out;
959         }
960
961 #ifdef FREEBSD_WIP
962         /* prime objects have no backing filp to GEM pread/pwrite
963          * pages from.
964          */
965         if (!obj->base.filp) {
966                 ret = -EINVAL;
967                 goto out;
968         }
969 #endif /* FREEBSD_WIP */
970
971         CTR3(KTR_DRM, "pwrite %p %jx %jx", obj, args->offset, args->size);
972
973         ret = -EFAULT;
974         /* We can only do the GTT pwrite on untiled buffers, as otherwise
975          * it would end up going through the fenced access, and we'll get
976          * different detiling behavior between reading and writing.
977          * pread/pwrite currently are reading and writing from the CPU
978          * perspective, requiring manual detiling by the client.
979          */
980         if (obj->phys_obj) {
981                 ret = i915_gem_phys_pwrite(dev, obj, args, file);
982                 goto out;
983         }
984
985         if (obj->cache_level == I915_CACHE_NONE &&
986             obj->tiling_mode == I915_TILING_NONE &&
987             obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
988                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
989                 /* Note that the gtt paths might fail with non-page-backed user
990                  * pointers (e.g. gtt mappings when moving data between
991                  * textures). Fall back to the shmem path in that case. */
992         }
993
994         if (ret == -EFAULT || ret == -ENOSPC)
995                 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
996
997 out:
998         drm_gem_object_unreference(&obj->base);
999 unlock:
1000         DRM_UNLOCK(dev);
1001         return ret;
1002 }
1003
1004 int
1005 i915_gem_check_wedge(struct drm_i915_private *dev_priv,
1006                      bool interruptible)
1007 {
1008         if (atomic_read(&dev_priv->mm.wedged)) {
1009                 struct completion *x = &dev_priv->error_completion;
1010                 bool recovery_complete;
1011
1012                 /* Give the error handler a chance to run. */
1013                 mtx_lock(&x->lock);
1014                 recovery_complete = x->done > 0;
1015                 mtx_unlock(&x->lock);
1016
1017                 /* Non-interruptible callers can't handle -EAGAIN, hence return
1018                  * -EIO unconditionally for these. */
1019                 if (!interruptible)
1020                         return -EIO;
1021
1022                 /* Recovery complete, but still wedged means reset failure. */
1023                 if (recovery_complete)
1024                         return -EIO;
1025
1026                 return -EAGAIN;
1027         }
1028
1029         return 0;
1030 }
1031
1032 /*
1033  * Compare seqno against outstanding lazy request. Emit a request if they are
1034  * equal.
1035  */
1036 static int
1037 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
1038 {
1039         int ret;
1040
1041         DRM_LOCK_ASSERT(ring->dev);
1042
1043         ret = 0;
1044         if (seqno == ring->outstanding_lazy_request)
1045                 ret = i915_add_request(ring, NULL, NULL);
1046
1047         return ret;
1048 }
1049
1050 /**
1051  * __wait_seqno - wait until execution of seqno has finished
1052  * @ring: the ring expected to report seqno
1053  * @seqno: duh!
1054  * @interruptible: do an interruptible wait (normally yes)
1055  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1056  *
1057  * Returns 0 if the seqno was found within the allotted time. Else returns the
1058  * errno with remaining time filled in timeout argument.
1059  */
1060 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1061                         bool interruptible, struct timespec *timeout)
1062 {
1063         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1064         struct timespec before, now, wait_time={1,0};
1065         sbintime_t timeout_sbt;
1066         long end;
1067         bool wait_forever = true;
1068         int ret, flags;
1069
1070         if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1071                 return 0;
1072
1073         CTR2(KTR_DRM, "request_wait_begin %s %d", ring->name, seqno);
1074
1075         if (timeout != NULL) {
1076                 wait_time = *timeout;
1077                 wait_forever = false;
1078         }
1079
1080         timeout_sbt = tstosbt(wait_time);
1081
1082         if (WARN_ON(!ring->irq_get(ring)))
1083                 return -ENODEV;
1084
1085         /* Record current time in case interrupted by signal, or wedged */
1086         getrawmonotonic(&before);
1087
1088 #define EXIT_COND \
1089         (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
1090         atomic_read(&dev_priv->mm.wedged))
1091         flags = interruptible ? PCATCH : 0;
1092         mtx_lock(&dev_priv->irq_lock);
1093         do {
1094                 if (EXIT_COND) {
1095                         end = 1;
1096                 } else {
1097                         ret = -msleep_sbt(&ring->irq_queue, &dev_priv->irq_lock, flags,
1098                             "915gwr", timeout_sbt, 0, 0);
1099
1100                         /*
1101                          * NOTE Linux<->FreeBSD: Convert msleep_sbt() return
1102                          * value to something close to wait_event*_timeout()
1103                          * functions used on Linux.
1104                          *
1105                          * >0 -> condition is true (end = time remaining)
1106                          * =0 -> sleep timed out
1107                          * <0 -> error (interrupted)
1108                          *
1109                          * We fake the remaining time by returning 1. We
1110                          * compute a proper value later.
1111                          */
1112                         if (EXIT_COND)
1113                                 /* We fake a remaining time of 1 tick. */
1114                                 end = 1;
1115                         else if (ret == -EINTR || ret == -ERESTART)
1116                                 /* Interrupted. */
1117                                 end = -ERESTARTSYS;
1118                         else
1119                                 /* Timeout. */
1120                                 end = 0;
1121                 }
1122
1123                 ret = i915_gem_check_wedge(dev_priv, interruptible);
1124                 if (ret)
1125                         end = ret;
1126         } while (end == 0 && wait_forever);
1127         mtx_unlock(&dev_priv->irq_lock);
1128
1129         getrawmonotonic(&now);
1130
1131         ring->irq_put(ring);
1132         CTR3(KTR_DRM, "request_wait_end %s %d %d", ring->name, seqno, end);
1133 #undef EXIT_COND
1134
1135         if (timeout) {
1136                 timespecsub(&now, &before);
1137                 timespecsub(timeout, &now);
1138         }
1139
1140         switch (end) {
1141         case -EIO:
1142         case -EAGAIN: /* Wedged */
1143         case -ERESTARTSYS: /* Signal */
1144         case -ETIMEDOUT: /* Timeout */
1145                 return (int)end;
1146         case 0: /* Timeout */
1147                 return -ETIMEDOUT;
1148         default: /* Completed */
1149                 WARN_ON(end < 0); /* We're not aware of other errors */
1150                 return 0;
1151         }
1152 }
1153
1154 /**
1155  * Waits for a sequence number to be signaled, and cleans up the
1156  * request and object lists appropriately for that event.
1157  */
1158 int
1159 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1160 {
1161         struct drm_device *dev = ring->dev;
1162         struct drm_i915_private *dev_priv = dev->dev_private;
1163         bool interruptible = dev_priv->mm.interruptible;
1164         int ret;
1165
1166         DRM_LOCK_ASSERT(dev);
1167         BUG_ON(seqno == 0);
1168
1169         ret = i915_gem_check_wedge(dev_priv, interruptible);
1170         if (ret)
1171                 return ret;
1172
1173         ret = i915_gem_check_olr(ring, seqno);
1174         if (ret)
1175                 return ret;
1176
1177         return __wait_seqno(ring, seqno, interruptible, NULL);
1178 }
1179
1180 /**
1181  * Ensures that all rendering to the object has completed and the object is
1182  * safe to unbind from the GTT or access from the CPU.
1183  */
1184 static __must_check int
1185 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1186                                bool readonly)
1187 {
1188         struct intel_ring_buffer *ring = obj->ring;
1189         u32 seqno;
1190         int ret;
1191
1192         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1193         if (seqno == 0)
1194                 return 0;
1195
1196         ret = i915_wait_seqno(ring, seqno);
1197         if (ret)
1198                 return ret;
1199
1200         i915_gem_retire_requests_ring(ring);
1201
1202         /* Manually manage the write flush as we may have not yet
1203          * retired the buffer.
1204          */
1205         if (obj->last_write_seqno &&
1206             i915_seqno_passed(seqno, obj->last_write_seqno)) {
1207                 obj->last_write_seqno = 0;
1208                 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1209         }
1210
1211         return 0;
1212 }
1213
1214 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1215  * as the object state may change during this call.
1216  */
1217 static __must_check int
1218 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1219                                             bool readonly)
1220 {
1221         struct drm_device *dev = obj->base.dev;
1222         struct drm_i915_private *dev_priv = dev->dev_private;
1223         struct intel_ring_buffer *ring = obj->ring;
1224         u32 seqno;
1225         int ret;
1226
1227         DRM_LOCK_ASSERT(dev);
1228         BUG_ON(!dev_priv->mm.interruptible);
1229
1230         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1231         if (seqno == 0)
1232                 return 0;
1233
1234         ret = i915_gem_check_wedge(dev_priv, true);
1235         if (ret)
1236                 return ret;
1237
1238         ret = i915_gem_check_olr(ring, seqno);
1239         if (ret)
1240                 return ret;
1241
1242         DRM_UNLOCK(dev);
1243         ret = __wait_seqno(ring, seqno, true, NULL);
1244         DRM_LOCK(dev);
1245
1246         i915_gem_retire_requests_ring(ring);
1247
1248         /* Manually manage the write flush as we may have not yet
1249          * retired the buffer.
1250          */
1251         if (ret == 0 &&
1252             obj->last_write_seqno &&
1253             i915_seqno_passed(seqno, obj->last_write_seqno)) {
1254                 obj->last_write_seqno = 0;
1255                 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1256         }
1257
1258         return ret;
1259 }
1260
1261 /**
1262  * Called when user space prepares to use an object with the CPU, either
1263  * through the mmap ioctl's mapping or a GTT mapping.
1264  */
1265 int
1266 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1267                           struct drm_file *file)
1268 {
1269         struct drm_i915_gem_set_domain *args = data;
1270         struct drm_i915_gem_object *obj;
1271         uint32_t read_domains = args->read_domains;
1272         uint32_t write_domain = args->write_domain;
1273         int ret;
1274
1275         /* Only handle setting domains to types used by the CPU. */
1276         if (write_domain & I915_GEM_GPU_DOMAINS)
1277                 return -EINVAL;
1278
1279         if (read_domains & I915_GEM_GPU_DOMAINS)
1280                 return -EINVAL;
1281
1282         /* Having something in the write domain implies it's in the read
1283          * domain, and only that read domain.  Enforce that in the request.
1284          */
1285         if (write_domain != 0 && read_domains != write_domain)
1286                 return -EINVAL;
1287
1288         ret = i915_mutex_lock_interruptible(dev);
1289         if (ret)
1290                 return ret;
1291
1292         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1293         if (&obj->base == NULL) {
1294                 ret = -ENOENT;
1295                 goto unlock;
1296         }
1297
1298         /* Try to flush the object off the GPU without holding the lock.
1299          * We will repeat the flush holding the lock in the normal manner
1300          * to catch cases where we are gazumped.
1301          */
1302         ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
1303         if (ret)
1304                 goto unref;
1305
1306         if (read_domains & I915_GEM_DOMAIN_GTT) {
1307                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1308
1309                 /* Silently promote "you're not bound, there was nothing to do"
1310                  * to success, since the client was just asking us to
1311                  * make sure everything was done.
1312                  */
1313                 if (ret == -EINVAL)
1314                         ret = 0;
1315         } else {
1316                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1317         }
1318
1319 unref:
1320         drm_gem_object_unreference(&obj->base);
1321 unlock:
1322         DRM_UNLOCK(dev);
1323         return ret;
1324 }
1325
1326 /**
1327  * Called when user space has done writes to this buffer
1328  */
1329 int
1330 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1331                          struct drm_file *file)
1332 {
1333         struct drm_i915_gem_sw_finish *args = data;
1334         struct drm_i915_gem_object *obj;
1335         int ret = 0;
1336
1337         ret = i915_mutex_lock_interruptible(dev);
1338         if (ret)
1339                 return ret;
1340
1341         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1342         if (&obj->base == NULL) {
1343                 ret = -ENOENT;
1344                 goto unlock;
1345         }
1346
1347         /* Pinned buffers may be scanout, so flush the cache */
1348         if (obj->pin_count)
1349                 i915_gem_object_flush_cpu_write_domain(obj);
1350
1351         drm_gem_object_unreference(&obj->base);
1352 unlock:
1353         DRM_UNLOCK(dev);
1354         return ret;
1355 }
1356
1357 /**
1358  * Maps the contents of an object, returning the address it is mapped
1359  * into.
1360  *
1361  * While the mapping holds a reference on the contents of the object, it doesn't
1362  * imply a ref on the object itself.
1363  */
1364 int
1365 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1366                     struct drm_file *file)
1367 {
1368         struct drm_i915_gem_mmap *args = data;
1369         struct drm_gem_object *obj;
1370         struct proc *p;
1371         vm_map_t map;
1372         vm_offset_t addr;
1373         vm_size_t size;
1374         int error, rv;
1375
1376         obj = drm_gem_object_lookup(dev, file, args->handle);
1377         if (obj == NULL)
1378                 return -ENOENT;
1379
1380 #ifdef FREEBSD_WIP
1381         /* prime objects have no backing filp to GEM mmap
1382          * pages from.
1383          */
1384         if (!obj->filp) {
1385                 drm_gem_object_unreference_unlocked(obj);
1386                 return -EINVAL;
1387         }
1388 #endif /* FREEBSD_WIP */
1389
1390         error = 0;
1391         if (args->size == 0)
1392                 goto out;
1393         p = curproc;
1394         map = &p->p_vmspace->vm_map;
1395         size = round_page(args->size);
1396         PROC_LOCK(p);
1397         if (map->size + size > lim_cur_proc(p, RLIMIT_VMEM)) {
1398                 PROC_UNLOCK(p);
1399                 error = -ENOMEM;
1400                 goto out;
1401         }
1402         PROC_UNLOCK(p);
1403
1404         addr = 0;
1405         vm_object_reference(obj->vm_obj);
1406         rv = vm_map_find(map, obj->vm_obj, args->offset, &addr, args->size, 0,
1407             VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1408             VM_PROT_READ | VM_PROT_WRITE, MAP_INHERIT_SHARE);
1409         if (rv != KERN_SUCCESS) {
1410                 vm_object_deallocate(obj->vm_obj);
1411                 error = -vm_mmap_to_errno(rv);
1412         } else {
1413                 args->addr_ptr = (uint64_t)addr;
1414         }
1415 out:
1416         drm_gem_object_unreference_unlocked(obj);
1417         return (error);
1418 }
1419
1420 static int
1421 i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
1422     vm_ooffset_t foff, struct ucred *cred, u_short *color)
1423 {
1424
1425         /*
1426          * NOTE Linux<->FreeBSD: drm_gem_mmap_single() takes care of
1427          * calling drm_gem_object_reference(). That's why we don't
1428          * do this here. i915_gem_pager_dtor(), below, will call
1429          * drm_gem_object_unreference().
1430          *
1431          * On Linux, drm_gem_vm_open() references the object because
1432          * it's called when the mapping is copied. drm_gem_vm_open() is not
1433          * called when the mapping is created. So the possible sequences
1434          * are:
1435          *     1. drm_gem_mmap():     ref++
1436          *     2. drm_gem_vm_close(): ref--
1437          *
1438          *     1. drm_gem_mmap():     ref++
1439          *     2. drm_gem_vm_open():  ref++ (for the copied vma)
1440          *     3. drm_gem_vm_close(): ref-- (for the copied vma)
1441          *     4. drm_gem_vm_close(): ref-- (for the initial vma)
1442          *
1443          * On FreeBSD, i915_gem_pager_ctor() is called once during the
1444          * creation of the mapping. No callback is called when the
1445          * mapping is shared during a fork(). i915_gem_pager_dtor() is
1446          * called when the last reference to the mapping is dropped. So
1447          * the only sequence is:
1448          *     1. drm_gem_mmap_single(): ref++
1449          *     2. i915_gem_pager_ctor(): <noop>
1450          *     3. i915_gem_pager_dtor(): ref--
1451          */
1452
1453         *color = 0; /* XXXKIB */
1454         return (0);
1455 }
1456
1457 /**
1458  * i915_gem_fault - fault a page into the GTT
1459  * vma: VMA in question
1460  * vmf: fault info
1461  *
1462  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1463  * from userspace.  The fault handler takes care of binding the object to
1464  * the GTT (if needed), allocating and programming a fence register (again,
1465  * only if needed based on whether the old reg is still valid or the object
1466  * is tiled) and inserting a new PTE into the faulting process.
1467  *
1468  * Note that the faulting process may involve evicting existing objects
1469  * from the GTT and/or fence registers to make room.  So performance may
1470  * suffer if the GTT working set is large or there are few fence registers
1471  * left.
1472  */
1473
1474 int i915_intr_pf;
1475
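/*
 * NOTE Linux<->FreeBSD: on FreeBSD the work described above is done by
 * i915_gem_pager_populate() below, which the VM fault handler invokes
 * through the GEM object's pager instead of a vm_operations fault
 * callback.
 */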
1476 static int
1477 i915_gem_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type,
1478     vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
1479 {
1480         struct drm_gem_object *gem_obj = vm_obj->handle;
1481         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
1482         struct drm_device *dev = obj->base.dev;
1483         drm_i915_private_t *dev_priv = dev->dev_private;
1484         vm_page_t page;
1485         int ret = 0;
1486         bool write = (max_prot & VM_PROT_WRITE) != 0;
1487         bool pinned;
1488
1489         VM_OBJECT_WUNLOCK(vm_obj);
1490 retry:
1491         ret = 0;
1492         pinned = 0;
1493         page = NULL;
1494
1495         if (i915_intr_pf) {
1496                 ret = i915_mutex_lock_interruptible(dev);
1497                 if (ret != 0)
1498                         goto out;
1499         } else
1500                 DRM_LOCK(dev);
1501
1502         /*
1503          * Since the object lock was dropped, another thread might have
1504          * faulted on the same GTT address and instantiated the
1505          * mapping for the page.  Recheck.
1506          */
1507         VM_OBJECT_WLOCK(vm_obj);
1508         page = vm_page_lookup(vm_obj, pidx);
1509         if (page != NULL) {
1510                 if (vm_page_busied(page)) {
1511                         DRM_UNLOCK(dev);
1512                         vm_page_lock(page);
1513                         VM_OBJECT_WUNLOCK(vm_obj);
1514                         vm_page_busy_sleep(page, "915pee", false);
1515                         goto retry;
1516                 }
1517                 goto have_page;
1518         } else
1519                 VM_OBJECT_WUNLOCK(vm_obj);
1520
1521         /* Now bind it into the GTT if needed */
1522         ret = i915_gem_object_pin(obj, 0, true, false);
1523         if (ret)
1524                 goto unlock;
1525         pinned = 1;
1526
1527         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1528         if (ret)
1529                 goto unpin;
1530
1531         ret = i915_gem_object_get_fence(obj);
1532         if (ret)
1533                 goto unpin;
1534
1535         obj->fault_mappable = true;
1536
1537         page = PHYS_TO_VM_PAGE(dev_priv->mm.gtt_base_addr + obj->gtt_offset +
1538             IDX_TO_OFF(pidx));
1539         if (page == NULL) {
1540                 ret = -EFAULT;
1541                 goto unpin;
1542         }
1543         KASSERT((page->flags & PG_FICTITIOUS) != 0,
1544             ("physical address %#jx not fictitious, page %p",
1545             (uintmax_t)(dev_priv->mm.gtt_base_addr + obj->gtt_offset +
1546             IDX_TO_OFF(pidx)), page));
1547         KASSERT(page->wire_count == 1, ("wire_count not 1 %p", page));
1548
1549         VM_OBJECT_WLOCK(vm_obj);
1550         if (vm_page_busied(page)) {
1551                 i915_gem_object_unpin(obj);
1552                 DRM_UNLOCK(dev);
1553                 vm_page_lock(page);
1554                 VM_OBJECT_WUNLOCK(vm_obj);
1555                 vm_page_busy_sleep(page, "915pbs", false);
1556                 goto retry;
1557         }
1558         if (vm_page_insert(page, vm_obj, pidx)) {
1559                 i915_gem_object_unpin(obj);
1560                 DRM_UNLOCK(dev);
1561                 VM_OBJECT_WUNLOCK(vm_obj);
1562                 VM_WAIT;
1563                 goto retry;
1564         }
1565         page->valid = VM_PAGE_BITS_ALL;
1566 have_page:
1567         vm_page_xbusy(page);
1568
1569         CTR4(KTR_DRM, "fault %p %jx %x phys %x", gem_obj, pidx, fault_type,
1570             page->phys_addr);
1571         if (pinned) {
1572                 /*
1573                  * We may have not pinned the object if the page was
1574                  * found by the call to vm_page_lookup().
1575                  */
1576                 i915_gem_object_unpin(obj);
1577         }
1578         DRM_UNLOCK(dev);
1579         *first = *last = pidx;
1580         return (VM_PAGER_OK);
1581
1582 unpin:
1583         i915_gem_object_unpin(obj);
1584 unlock:
1585         DRM_UNLOCK(dev);
1586 out:
1587         KASSERT(ret != 0, ("i915_gem_pager_populate: wrong return"));
1588         CTR4(KTR_DRM, "fault_fail %p %jx %x err %d", gem_obj, pidx, fault_type,
1589             -ret);
1590         if (ret == -ERESTARTSYS) {
1591                 /*
1592                  * NOTE Linux<->FreeBSD: Convert Linux' -ERESTARTSYS to
1593                  * the more common -EINTR, so the page fault is retried.
1594                  */
1595                 ret = -EINTR;
1596         }
1597         if (ret == -EAGAIN || ret == -EIO || ret == -EINTR) {
1598                 kern_yield(PRI_USER);
1599                 goto retry;
1600         }
1601         VM_OBJECT_WLOCK(vm_obj);
1602         return (VM_PAGER_ERROR);
1603 }
1604
1605 static void
1606 i915_gem_pager_dtor(void *handle)
1607 {
1608         struct drm_gem_object *obj = handle;
1609         struct drm_device *dev = obj->dev;
1610
1611         DRM_LOCK(dev);
1612         drm_gem_object_unreference(obj);
1613         DRM_UNLOCK(dev);
1614 }
1615
1616 struct cdev_pager_ops i915_gem_pager_ops = {
1617         .cdev_pg_populate       = i915_gem_pager_populate,
1618         .cdev_pg_ctor           = i915_gem_pager_ctor,
1619         .cdev_pg_dtor           = i915_gem_pager_dtor,
1620 };
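/*
 * NOTE: this table is what ties the populate/ctor/dtor callbacks above to the
 * VM system; the drm2 core is expected to hand it to cdev_pager_allocate()
 * from drm_gem_mmap_single() when a GEM object is mmapped, so that GTT faults
 * end up in i915_gem_pager_populate().
 */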
1621
1622 /**
1623  * i915_gem_release_mmap - remove physical page mappings
1624  * @obj: obj in question
1625  *
1626  * Preserve the reservation of the mmapping with the DRM core code, but
1627  * relinquish ownership of the pages back to the system.
1628  *
1629  * It is vital that we remove the page mapping if we have mapped a tiled
1630  * object through the GTT and then lose the fence register due to
1631  * resource pressure. Similarly if the object has been moved out of the
1632  * aperture, then pages mapped into userspace must be revoked. Removing the
1633  * mapping will then trigger a page fault on the next user access, allowing
1634  * fixup by i915_gem_fault().
1635  */
1636 void
1637 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1638 {
1639         vm_object_t devobj;
1640         vm_page_t page;
1641         int i, page_count;
1642
1643         if (!obj->fault_mappable)
1644                 return;
1645
1646         CTR3(KTR_DRM, "release_mmap %p %x %x", obj, obj->gtt_offset,
1647             OFF_TO_IDX(obj->base.size));
1648         devobj = cdev_pager_lookup(obj);
1649         if (devobj != NULL) {
1650                 page_count = OFF_TO_IDX(obj->base.size);
1651
1652                 VM_OBJECT_WLOCK(devobj);
1653 retry:
1654                 for (i = 0; i < page_count; i++) {
1655                         page = vm_page_lookup(devobj, i);
1656                         if (page == NULL)
1657                                 continue;
1658                         if (vm_page_sleep_if_busy(page, "915unm"))
1659                                 goto retry;
1660                         cdev_pager_free_page(devobj, page);
1661                 }
1662                 VM_OBJECT_WUNLOCK(devobj);
1663                 vm_object_deallocate(devobj);
1664         }
1665
1666         obj->fault_mappable = false;
1667 }
1668
1669 static uint32_t
1670 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1671 {
1672         uint32_t gtt_size;
1673
1674         if (INTEL_INFO(dev)->gen >= 4 ||
1675             tiling_mode == I915_TILING_NONE)
1676                 return size;
1677
1678         /* Previous chips need a power-of-two fence region when tiling */
1679         if (INTEL_INFO(dev)->gen == 3)
1680                 gtt_size = 1024*1024;
1681         else
1682                 gtt_size = 512*1024;
1683
1684         while (gtt_size < size)
1685                 gtt_size <<= 1;
1686
1687         return gtt_size;
1688 }
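/*
 * Worked example (illustrative only): on gen3 a 1300 KiB tiled object starts
 * from the 1 MiB minimum fence region; since 1 MiB < 1300 KiB the size is
 * doubled once and 2 MiB is returned.  Gen4+ hardware and untiled objects get
 * the object size back unchanged.
 */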
1689
1690 /**
1691  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1692  * @obj: object to check
1693  *
1694  * Return the required GTT alignment for an object, taking into account
1695  * potential fence register mapping.
1696  */
1697 static uint32_t
1698 i915_gem_get_gtt_alignment(struct drm_device *dev,
1699                            uint32_t size,
1700                            int tiling_mode)
1701 {
1702         /*
1703          * Minimum alignment is 4k (GTT page size), but might be greater
1704          * if a fence register is needed for the object.
1705          */
1706         if (INTEL_INFO(dev)->gen >= 4 ||
1707             tiling_mode == I915_TILING_NONE)
1708                 return 4096;
1709
1710         /*
1711          * Previous chips need to be aligned to the size of the smallest
1712          * fence register that can contain the object.
1713          */
1714         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1715 }
1716
1717 /**
1718  * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1719  *                                       unfenced object
1720  * @dev: the device
1721  * @size: size of the object
1722  * @tiling_mode: tiling mode of the object
1723  *
1724  * Return the required GTT alignment for an object, only taking into account
1725  * unfenced tiled surface requirements.
1726  */
1727 uint32_t
1728 i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1729                                     uint32_t size,
1730                                     int tiling_mode)
1731 {
1732         /*
1733          * Minimum alignment is 4k (GTT page size) for sane hw.
1734          */
1735         if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
1736             tiling_mode == I915_TILING_NONE)
1737                 return 4096;
1738
1739         /* Previous hardware however needs to be aligned to a power-of-two
1740          * tile height. The simplest method for determining this is to reuse
1741  * the power-of-two fence size computed for the object.
1742          */
1743         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1744 }
1745
1746 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1747 {
1748         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1749         int ret;
1750
1751         if (obj->base.on_map)
1752                 return 0;
1753
1754         dev_priv->mm.shrinker_no_lock_stealing = true;
1755
1756         ret = drm_gem_create_mmap_offset(&obj->base);
1757         if (ret != -ENOSPC)
1758                 goto out;
1759
1760         /* Badly fragmented mmap space? The only way we can recover
1761          * space is by destroying unwanted objects. We can't randomly release
1762          * mmap_offsets as userspace expects them to be persistent for the
1763          * lifetime of the objects. The closest we can do is to release the
1764          * offsets on purgeable objects by truncating them and marking them
1765          * purged, which prevents userspace from ever using those objects again.
1766          */
1767         i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1768         ret = drm_gem_create_mmap_offset(&obj->base);
1769         if (ret != -ENOSPC)
1770                 goto out;
1771
1772         i915_gem_shrink_all(dev_priv);
1773         ret = drm_gem_create_mmap_offset(&obj->base);
1774 out:
1775         dev_priv->mm.shrinker_no_lock_stealing = false;
1776
1777         return ret;
1778 }
1779
1780 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1781 {
1782         if (!obj->base.on_map)
1783                 return;
1784
1785         drm_gem_free_mmap_offset(&obj->base);
1786 }
1787
1788 int
1789 i915_gem_mmap_gtt(struct drm_file *file,
1790                   struct drm_device *dev,
1791                   uint32_t handle,
1792                   uint64_t *offset)
1793 {
1794         struct drm_i915_private *dev_priv = dev->dev_private;
1795         struct drm_i915_gem_object *obj;
1796         int ret;
1797
1798         ret = i915_mutex_lock_interruptible(dev);
1799         if (ret)
1800                 return ret;
1801
1802         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1803         if (&obj->base == NULL) {
1804                 ret = -ENOENT;
1805                 goto unlock;
1806         }
1807
1808         if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1809                 ret = -E2BIG;
1810                 goto out;
1811         }
1812
1813         if (obj->madv != I915_MADV_WILLNEED) {
1814                 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1815                 ret = -EINVAL;
1816                 goto out;
1817         }
1818
1819         ret = i915_gem_object_create_mmap_offset(obj);
1820         if (ret)
1821                 goto out;
1822
1823         *offset = DRM_GEM_MAPPING_OFF(obj->base.map_list.key) |
1824             DRM_GEM_MAPPING_KEY;
1825
1826 out:
1827         drm_gem_object_unreference(&obj->base);
1828 unlock:
1829         DRM_UNLOCK(dev);
1830         return ret;
1831 }
1832
1833 /**
1834  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1835  * @dev: DRM device
1836  * @data: GTT mapping ioctl data
1837  * @file: GEM object info
1838  *
1839  * Simply returns the fake offset to userspace so it can mmap it.
1840  * The mmap call will end up in drm_gem_mmap(), which will set things
1841  * up so we can get faults in the handler above.
1842  *
1843  * The fault handler will take care of binding the object into the GTT
1844  * (since it may have been evicted to make room for something), allocating
1845  * a fence register, and mapping the appropriate aperture address into
1846  * userspace.
1847  */
1848 int
1849 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1850                         struct drm_file *file)
1851 {
1852         struct drm_i915_gem_mmap_gtt *args = data;
1853
1854         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1855 }
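/*
 * Illustrative userspace usage (not part of this file; "fd", "handle" and
 * "size" are placeholders, structure and ioctl names per i915_drm.h):
 *
 *      struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *
 *      if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) == 0)
 *              ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                  fd, arg.offset);
 *
 * The returned "fake" offset is only meaningful to mmap(2) on the DRM fd;
 * faults on the resulting mapping are serviced by the pager above.
 */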
1856
1857 /* Immediately discard the backing storage */
1858 static void
1859 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1860 {
1861         vm_object_t vm_obj;
1862
1863         vm_obj = obj->base.vm_obj;
1864         VM_OBJECT_WLOCK(vm_obj);
1865         vm_object_page_remove(vm_obj, 0, 0, false);
1866         VM_OBJECT_WUNLOCK(vm_obj);
1867         i915_gem_object_free_mmap_offset(obj);
1868
1869         obj->madv = __I915_MADV_PURGED;
1870 }
1871
1872 static inline int
1873 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1874 {
1875         return obj->madv == I915_MADV_DONTNEED;
1876 }
1877
1878 static void
1879 i915_gem_object_put_pages_range_locked(struct drm_i915_gem_object *obj,
1880     vm_pindex_t si, vm_pindex_t ei)
1881 {
1882         vm_object_t vm_obj;
1883         vm_page_t page;
1884         vm_pindex_t i;
1885
1886         vm_obj = obj->base.vm_obj;
1887         VM_OBJECT_ASSERT_LOCKED(vm_obj);
1888         for (i = si,  page = vm_page_lookup(vm_obj, i); i < ei;
1889             page = vm_page_next(page), i++) {
1890                 KASSERT(page->pindex == i, ("pindex %jx %jx",
1891                     (uintmax_t)page->pindex, (uintmax_t)i));
1892                 vm_page_lock(page);
1893                 vm_page_unwire(page, PQ_INACTIVE);
1894                 if (page->wire_count == 0)
1895                         atomic_add_long(&i915_gem_wired_pages_cnt, -1);
1896                 vm_page_unlock(page);
1897         }
1898 }
1899
1900 #define GEM_PARANOID_CHECK_GTT 0
1901 #if GEM_PARANOID_CHECK_GTT
1902 static void
1903 i915_gem_assert_pages_not_mapped(struct drm_device *dev, vm_page_t *ma,
1904     int page_count)
1905 {
1906         struct drm_i915_private *dev_priv;
1907         vm_paddr_t pa;
1908         unsigned long start, end;
1909         u_int i;
1910         int j;
1911
1912         dev_priv = dev->dev_private;
1913         start = OFF_TO_IDX(dev_priv->mm.gtt_start);
1914         end = OFF_TO_IDX(dev_priv->mm.gtt_end);
1915         for (i = start; i < end; i++) {
1916                 pa = intel_gtt_read_pte_paddr(i);
1917                 for (j = 0; j < page_count; j++) {
1918                         if (pa == VM_PAGE_TO_PHYS(ma[j])) {
1919                                 panic("Page %p in GTT pte index %d pte %x",
1920                                     ma[j], i, intel_gtt_read_pte(i));
1921                         }
1922                 }
1923         }
1924 }
1925 #endif
1926
1927 static void
1928 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1929 {
1930         int page_count = obj->base.size / PAGE_SIZE;
1931         int ret, i;
1932
1933         BUG_ON(obj->madv == __I915_MADV_PURGED);
1934
1935         ret = i915_gem_object_set_to_cpu_domain(obj, true);
1936         if (ret) {
1937                 /* In the event of a disaster, abandon all caches and
1938                  * hope for the best.
1939                  */
1940                 WARN_ON(ret != -EIO);
1941                 i915_gem_clflush_object(obj);
1942                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1943         }
1944
1945         if (i915_gem_object_needs_bit17_swizzle(obj))
1946                 i915_gem_object_save_bit_17_swizzle(obj);
1947
1948         if (obj->madv == I915_MADV_DONTNEED)
1949                 obj->dirty = 0;
1950
1951         VM_OBJECT_WLOCK(obj->base.vm_obj);
1952 #if GEM_PARANOID_CHECK_GTT
1953         i915_gem_assert_pages_not_mapped(obj->base.dev, obj->pages, page_count);
1954 #endif
1955         for (i = 0; i < page_count; i++) {
1956                 vm_page_t page = obj->pages[i];
1957
1958                 if (obj->dirty)
1959                         vm_page_dirty(page);
1960
1961                 if (obj->madv == I915_MADV_WILLNEED)
1962                         vm_page_reference(page);
1963
1964                 vm_page_lock(page);
1965                 vm_page_unwire(obj->pages[i], PQ_ACTIVE);
1966                 vm_page_unlock(page);
1967                 atomic_add_long(&i915_gem_wired_pages_cnt, -1);
1968         }
1969         VM_OBJECT_WUNLOCK(obj->base.vm_obj);
1970         obj->dirty = 0;
1971
1972         free(obj->pages, DRM_I915_GEM);
1973         obj->pages = NULL;
1974 }
1975
1976 static int
1977 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1978 {
1979         const struct drm_i915_gem_object_ops *ops = obj->ops;
1980
1981         if (obj->pages == NULL)
1982                 return 0;
1983
1984         BUG_ON(obj->gtt_space);
1985
1986         if (obj->pages_pin_count)
1987                 return -EBUSY;
1988
1989         /* ->put_pages might need to allocate memory for the bit17 swizzle
1990          * array, hence protect them from being reaped by removing them from gtt
1991          * lists early. */
1992         list_del(&obj->gtt_list);
1993
1994         ops->put_pages(obj);
1995         obj->pages = NULL;
1996
1997         if (i915_gem_object_is_purgeable(obj))
1998                 i915_gem_object_truncate(obj);
1999
2000         return 0;
2001 }
2002
2003 static long
2004 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
2005                   bool purgeable_only)
2006 {
2007         struct drm_i915_gem_object *obj, *next;
2008         long count = 0;
2009
2010         list_for_each_entry_safe(obj, next,
2011                                  &dev_priv->mm.unbound_list,
2012                                  gtt_list) {
2013                 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
2014                     i915_gem_object_put_pages(obj) == 0) {
2015                         count += obj->base.size >> PAGE_SHIFT;
2016                         if (target != -1 && count >= target)
2017                                 return count;
2018                 }
2019         }
2020
2021         list_for_each_entry_safe(obj, next,
2022                                  &dev_priv->mm.inactive_list,
2023                                  mm_list) {
2024                 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
2025                     i915_gem_object_unbind(obj) == 0 &&
2026                     i915_gem_object_put_pages(obj) == 0) {
2027                         count += obj->base.size >> PAGE_SHIFT;
2028                         if (target != -1 && count >= target)
2029                                 return count;
2030                 }
2031         }
2032
2033         return count;
2034 }
2035
2036 static long
2037 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
2038 {
2039         return __i915_gem_shrink(dev_priv, target, true);
2040 }
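/*
 * Example: the mmap-offset path above calls
 * i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT), which stops as soon
 * as that many pages have been reclaimed from purgeable (I915_MADV_DONTNEED)
 * objects; a target of -1 scans every candidate and returns the total number
 * of pages reclaimed.
 */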
2041
2042 static void
2043 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
2044 {
2045         struct drm_i915_gem_object *obj, *next;
2046
2047         i915_gem_evict_everything(dev_priv->dev);
2048
2049         list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
2050                 i915_gem_object_put_pages(obj);
2051 }
2052
2053 static int
2054 i915_gem_object_get_pages_range(struct drm_i915_gem_object *obj,
2055     off_t start, off_t end)
2056 {
2057         vm_object_t vm_obj;
2058         vm_page_t page;
2059         vm_pindex_t si, ei, i;
2060         bool need_swizzle, fresh;
2061
2062         need_swizzle = i915_gem_object_needs_bit17_swizzle(obj) != 0;
2063         vm_obj = obj->base.vm_obj;
2064         si = OFF_TO_IDX(trunc_page(start));
2065         ei = OFF_TO_IDX(round_page(end));
2066         VM_OBJECT_WLOCK(vm_obj);
2067         for (i = si; i < ei; i++) {
2068                 page = i915_gem_wire_page(vm_obj, i, &fresh);
2069                 if (page == NULL)
2070                         goto failed;
2071                 if (need_swizzle && fresh)
2072                         i915_gem_object_do_bit_17_swizzle_page(obj, page);
2073         }
2074         VM_OBJECT_WUNLOCK(vm_obj);
2075         return (0);
2076 failed:
2077         i915_gem_object_put_pages_range_locked(obj, si, i);
2078         VM_OBJECT_WUNLOCK(vm_obj);
2079         return (-EIO);
2080 }
2081
2082 static int
2083 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2084 {
2085         vm_object_t vm_obj;
2086         vm_page_t page;
2087         vm_pindex_t i, page_count;
2088         int res;
2089
2090         /* Assert that the object is not currently in any GPU domain. As it
2091          * wasn't in the GTT, there shouldn't be any way it could have been in
2092          * a GPU cache
2093          */
2094         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2095         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2096         KASSERT(obj->pages == NULL, ("Obj already has pages"));
2097
2098         page_count = OFF_TO_IDX(obj->base.size);
2099         obj->pages = malloc(page_count * sizeof(vm_page_t), DRM_I915_GEM,
2100             M_WAITOK);
2101         res = i915_gem_object_get_pages_range(obj, 0, obj->base.size);
2102         if (res != 0) {
2103                 free(obj->pages, DRM_I915_GEM);
2104                 obj->pages = NULL;
2105                 return (res);
2106         }
2107         vm_obj = obj->base.vm_obj;
2108         VM_OBJECT_WLOCK(vm_obj);
2109         for (i = 0, page = vm_page_lookup(vm_obj, 0); i < page_count;
2110             i++, page = vm_page_next(page)) {
2111                 KASSERT(page->pindex == i, ("pindex %jx %jx",
2112                     (uintmax_t)page->pindex, (uintmax_t)i));
2113                 obj->pages[i] = page;
2114         }
2115         VM_OBJECT_WUNLOCK(vm_obj);
2116         return (0);
2117 }
2118
2119 /* Ensure that the associated pages are gathered from the backing storage
2120  * and pinned into our object. i915_gem_object_get_pages() may be called
2121  * multiple times before they are released by a single call to
2122  * i915_gem_object_put_pages() - once the pages are no longer referenced
2123  * either as a result of memory pressure (reaping pages under the shrinker)
2124  * or as the object is itself released.
2125  */
2126 int
2127 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2128 {
2129         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2130         const struct drm_i915_gem_object_ops *ops = obj->ops;
2131         int ret;
2132
2133         if (obj->pages)
2134                 return 0;
2135
2136         BUG_ON(obj->pages_pin_count);
2137
2138         ret = ops->get_pages(obj);
2139         if (ret)
2140                 return ret;
2141
2142         list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
2143         return 0;
2144 }
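/*
 * Typical caller pattern (a sketch; i915_gem_object_pin_pages() and
 * i915_gem_object_unpin_pages() are assumed to be the pages_pin_count helpers
 * from i915_drv.h):
 *
 *      ret = i915_gem_object_get_pages(obj);
 *      if (ret == 0) {
 *              i915_gem_object_pin_pages(obj);
 *              ... use obj->pages[] ...
 *              i915_gem_object_unpin_pages(obj);
 *      }
 *
 * The backing pages are only dropped later by i915_gem_object_put_pages(),
 * e.g. under shrinker pressure, once pages_pin_count has reached zero.
 */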
2145
2146 void
2147 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
2148                                struct intel_ring_buffer *ring)
2149 {
2150         struct drm_device *dev = obj->base.dev;
2151         struct drm_i915_private *dev_priv = dev->dev_private;
2152         u32 seqno = intel_ring_get_seqno(ring);
2153
2154         BUG_ON(ring == NULL);
2155         obj->ring = ring;
2156
2157         /* Add a reference if we're newly entering the active list. */
2158         if (!obj->active) {
2159                 drm_gem_object_reference(&obj->base);
2160                 obj->active = 1;
2161         }
2162
2163         /* Move from whatever list we were on to the tail of execution. */
2164         list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
2165         list_move_tail(&obj->ring_list, &ring->active_list);
2166
2167         obj->last_read_seqno = seqno;
2168
2169         if (obj->fenced_gpu_access) {
2170                 obj->last_fenced_seqno = seqno;
2171
2172                 /* Bump MRU to take account of the delayed flush */
2173                 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2174                         struct drm_i915_fence_reg *reg;
2175
2176                         reg = &dev_priv->fence_regs[obj->fence_reg];
2177                         list_move_tail(&reg->lru_list,
2178                                        &dev_priv->mm.fence_list);
2179                 }
2180         }
2181 }
2182
2183 static void
2184 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2185 {
2186         struct drm_device *dev = obj->base.dev;
2187         struct drm_i915_private *dev_priv = dev->dev_private;
2188
2189         BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
2190         BUG_ON(!obj->active);
2191
2192         list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2193
2194         list_del_init(&obj->ring_list);
2195         obj->ring = NULL;
2196
2197         obj->last_read_seqno = 0;
2198         obj->last_write_seqno = 0;
2199         obj->base.write_domain = 0;
2200
2201         obj->last_fenced_seqno = 0;
2202         obj->fenced_gpu_access = false;
2203
2204         obj->active = 0;
2205         drm_gem_object_unreference(&obj->base);
2206
2207         WARN_ON(i915_verify_lists(dev));
2208 }
2209
2210 static int
2211 i915_gem_handle_seqno_wrap(struct drm_device *dev)
2212 {
2213         struct drm_i915_private *dev_priv = dev->dev_private;
2214         struct intel_ring_buffer *ring;
2215         int ret, i, j;
2216
2217         /* The hardware uses various monotonic 32-bit counters; if we
2218          * detect that they will wrap around, we need to idle the GPU
2219          * and reset those counters.
2220          */
2221         ret = 0;
2222         for_each_ring(ring, dev_priv, i) {
2223                 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
2224                         ret |= ring->sync_seqno[j] != 0;
2225         }
2226         if (ret == 0)
2227                 return ret;
2228
2229         ret = i915_gpu_idle(dev);
2230         if (ret)
2231                 return ret;
2232
2233         i915_gem_retire_requests(dev);
2234         for_each_ring(ring, dev_priv, i) {
2235                 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
2236                         ring->sync_seqno[j] = 0;
2237         }
2238
2239         return 0;
2240 }
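/*
 * Illustrative note: request-completion checks go through
 * i915_seqno_passed(), which compares via a signed 32-bit difference, so e.g.
 * i915_seqno_passed(0x00000002, 0xfffffffe) is true across a wrap.  The
 * per-ring sync_seqno[] values are plain snapshots with no such wrap-safety,
 * which is why they are zeroed here after idling the GPU.
 */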
2241
2242 int
2243 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2244 {
2245         struct drm_i915_private *dev_priv = dev->dev_private;
2246
2247         /* reserve 0 for non-seqno */
2248         if (dev_priv->next_seqno == 0) {
2249                 int ret = i915_gem_handle_seqno_wrap(dev);
2250                 if (ret)
2251                         return ret;
2252
2253                 dev_priv->next_seqno = 1;
2254         }
2255
2256         *seqno = dev_priv->next_seqno++;
2257         return 0;
2258 }
2259
2260 int
2261 i915_add_request(struct intel_ring_buffer *ring,
2262                  struct drm_file *file,
2263                  u32 *out_seqno)
2264 {
2265         drm_i915_private_t *dev_priv = ring->dev->dev_private;
2266         struct drm_i915_gem_request *request;
2267         u32 request_ring_position;
2268         int was_empty;
2269         int ret;
2270
2271         /*
2272          * Emit any outstanding flushes - execbuf can fail to emit the flush
2273          * after having emitted the batchbuffer command. Hence we need to fix
2274          * things up similar to emitting the lazy request. The difference here
2275          * is that the flush _must_ happen before the next request, no matter
2276          * what.
2277          */
2278         ret = intel_ring_flush_all_caches(ring);
2279         if (ret)
2280                 return ret;
2281
2282         request = malloc(sizeof(*request), DRM_I915_GEM, M_NOWAIT);
2283         if (request == NULL)
2284                 return -ENOMEM;
2285
2286
2287         /* Record the position of the start of the request so that
2288          * should we detect the updated seqno part-way through the
2289          * GPU processing the request, we never over-estimate the
2290          * position of the head.
2291          */
2292         request_ring_position = intel_ring_get_tail(ring);
2293
2294         ret = ring->add_request(ring);
2295         if (ret) {
2296                 free(request, DRM_I915_GEM);
2297                 return ret;
2298         }
2299
2300         request->seqno = intel_ring_get_seqno(ring);
2301         request->ring = ring;
2302         request->tail = request_ring_position;
2303         request->emitted_jiffies = jiffies;
2304         was_empty = list_empty(&ring->request_list);
2305         list_add_tail(&request->list, &ring->request_list);
2306         request->file_priv = NULL;
2307
2308         if (file) {
2309                 struct drm_i915_file_private *file_priv = file->driver_priv;
2310
2311                 mtx_lock(&file_priv->mm.lock);
2312                 request->file_priv = file_priv;
2313                 list_add_tail(&request->client_list,
2314                               &file_priv->mm.request_list);
2315                 mtx_unlock(&file_priv->mm.lock);
2316         }
2317
2318         CTR2(KTR_DRM, "request_add %s %d", ring->name, request->seqno);
2319         ring->outstanding_lazy_request = 0;
2320
2321         if (!dev_priv->mm.suspended) {
2322                 if (i915_enable_hangcheck) {
2323                         callout_schedule(&dev_priv->hangcheck_timer,
2324                             DRM_I915_HANGCHECK_PERIOD);
2325                 }
2326                 if (was_empty) {
2327                         taskqueue_enqueue_timeout(dev_priv->wq,
2328                             &dev_priv->mm.retire_work, hz);
2329                         intel_mark_busy(dev_priv->dev);
2330                 }
2331         }
2332
2333         if (out_seqno)
2334                 *out_seqno = request->seqno;
2335         return 0;
2336 }
2337
2338 static inline void
2339 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2340 {
2341         struct drm_i915_file_private *file_priv = request->file_priv;
2342
2343         if (!file_priv)
2344                 return;
2345
2346         mtx_lock(&file_priv->mm.lock);
2347         if (request->file_priv) {
2348                 list_del(&request->client_list);
2349                 request->file_priv = NULL;
2350         }
2351         mtx_unlock(&file_priv->mm.lock);
2352 }
2353
2354 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2355                                       struct intel_ring_buffer *ring)
2356 {
2357         if (ring->dev != NULL)
2358                 DRM_LOCK_ASSERT(ring->dev);
2359
2360         while (!list_empty(&ring->request_list)) {
2361                 struct drm_i915_gem_request *request;
2362
2363                 request = list_first_entry(&ring->request_list,
2364                                            struct drm_i915_gem_request,
2365                                            list);
2366
2367                 list_del(&request->list);
2368                 i915_gem_request_remove_from_client(request);
2369                 free(request, DRM_I915_GEM);
2370         }
2371
2372         while (!list_empty(&ring->active_list)) {
2373                 struct drm_i915_gem_object *obj;
2374
2375                 obj = list_first_entry(&ring->active_list,
2376                                        struct drm_i915_gem_object,
2377                                        ring_list);
2378
2379                 i915_gem_object_move_to_inactive(obj);
2380         }
2381 }
2382
2383 static void i915_gem_reset_fences(struct drm_device *dev)
2384 {
2385         struct drm_i915_private *dev_priv = dev->dev_private;
2386         int i;
2387
2388         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2389                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2390
2391                 i915_gem_write_fence(dev, i, NULL);
2392
2393                 if (reg->obj)
2394                         i915_gem_object_fence_lost(reg->obj);
2395
2396                 reg->pin_count = 0;
2397                 reg->obj = NULL;
2398                 INIT_LIST_HEAD(&reg->lru_list);
2399         }
2400
2401         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
2402 }
2403
2404 void i915_gem_reset(struct drm_device *dev)
2405 {
2406         struct drm_i915_private *dev_priv = dev->dev_private;
2407         struct drm_i915_gem_object *obj;
2408         struct intel_ring_buffer *ring;
2409         int i;
2410
2411         for_each_ring(ring, dev_priv, i)
2412                 i915_gem_reset_ring_lists(dev_priv, ring);
2413
2414         /* Move everything out of the GPU domains to ensure we do any
2415          * necessary invalidation upon reuse.
2416          */
2417         list_for_each_entry(obj,
2418                             &dev_priv->mm.inactive_list,
2419                             mm_list)
2420         {
2421                 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
2422         }
2423
2424         /* The fence registers are invalidated so clear them out */
2425         i915_gem_reset_fences(dev);
2426 }
2427
2428 /**
2429  * This function clears the request list as sequence numbers are passed.
2430  */
2431 void
2432 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2433 {
2434         uint32_t seqno;
2435
2436         if (list_empty(&ring->request_list))
2437                 return;
2438
2439         WARN_ON(i915_verify_lists(ring->dev));
2440
2441         seqno = ring->get_seqno(ring, true);
2442         CTR2(KTR_DRM, "retire_request_ring %s %d", ring->name, seqno);
2443
2444         while (!list_empty(&ring->request_list)) {
2445                 struct drm_i915_gem_request *request;
2446
2447                 request = list_first_entry(&ring->request_list,
2448                                            struct drm_i915_gem_request,
2449                                            list);
2450
2451                 if (!i915_seqno_passed(seqno, request->seqno))
2452                         break;
2453
2454                 CTR2(KTR_DRM, "retire_request_seqno_passed %s %d",
2455                     ring->name, seqno);
2456                 /* We know the GPU must have read the request to have
2457                  * sent us the seqno + interrupt, so use the position
2458          * of the tail of the request to update the last known position
2459                  * of the GPU head.
2460                  */
2461                 ring->last_retired_head = request->tail;
2462
2463                 list_del(&request->list);
2464                 i915_gem_request_remove_from_client(request);
2465                 free(request, DRM_I915_GEM);
2466         }
2467
2468         /* Move any buffers on the active list that are no longer referenced
2469          * by the ringbuffer to the flushing/inactive lists as appropriate.
2470          */
2471         while (!list_empty(&ring->active_list)) {
2472                 struct drm_i915_gem_object *obj;
2473
2474                 obj = list_first_entry(&ring->active_list,
2475                                       struct drm_i915_gem_object,
2476                                       ring_list);
2477
2478                 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2479                         break;
2480
2481                 i915_gem_object_move_to_inactive(obj);
2482         }
2483
2484         if (unlikely(ring->trace_irq_seqno &&
2485                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2486                 ring->irq_put(ring);
2487                 ring->trace_irq_seqno = 0;
2488         }
2489
2490         WARN_ON(i915_verify_lists(ring->dev));
2491 }
2492
2493 void
2494 i915_gem_retire_requests(struct drm_device *dev)
2495 {
2496         drm_i915_private_t *dev_priv = dev->dev_private;
2497         struct intel_ring_buffer *ring;
2498         int i;
2499
2500         for_each_ring(ring, dev_priv, i)
2501                 i915_gem_retire_requests_ring(ring);
2502 }
2503
2504 static void
2505 i915_gem_retire_work_handler(void *arg, int pending)
2506 {
2507         drm_i915_private_t *dev_priv;
2508         struct drm_device *dev;
2509         struct intel_ring_buffer *ring;
2510         bool idle;
2511         int i;
2512
2513         dev_priv = arg;
2514         dev = dev_priv->dev;
2515
2516         /* Come back later if the device is busy... */
2517         if (!sx_try_xlock(&dev->dev_struct_lock)) {
2518                 taskqueue_enqueue_timeout(dev_priv->wq,
2519                     &dev_priv->mm.retire_work, hz);
2520                 return;
2521         }
2522
2523         CTR0(KTR_DRM, "retire_task");
2524
2525         i915_gem_retire_requests(dev);
2526
2527         /* Send a periodic flush down the ring so we don't hold onto GEM
2528          * objects indefinitely.
2529          */
2530         idle = true;
2531         for_each_ring(ring, dev_priv, i) {
2532                 if (ring->gpu_caches_dirty)
2533                         i915_add_request(ring, NULL, NULL);
2534
2535                 idle &= list_empty(&ring->request_list);
2536         }
2537
2538         if (!dev_priv->mm.suspended && !idle)
2539                 taskqueue_enqueue_timeout(dev_priv->wq,
2540                     &dev_priv->mm.retire_work, hz);
2541         if (idle)
2542                 intel_mark_idle(dev);
2543
2544         DRM_UNLOCK(dev);
2545 }
2546
2547 /**
2548  * Ensures that an object will eventually get non-busy by flushing any required
2549  * write domains, emitting any outstanding lazy request and retiring any
2550  * completed requests.
2551  */
2552 static int
2553 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2554 {
2555         int ret;
2556
2557         if (obj->active) {
2558                 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2559                 if (ret)
2560                         return ret;
2561
2562                 i915_gem_retire_requests_ring(obj->ring);
2563         }
2564
2565         return 0;
2566 }
2567
2568 /**
2569  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2570  * @DRM_IOCTL_ARGS: standard ioctl arguments
2571  *
2572  * Returns 0 if successful, else an error is returned with the remaining time in
2573  * the timeout parameter.
2574  *  -ETIMEDOUT: object is still busy after timeout
2575  *  -ERESTARTSYS: signal interrupted the wait
2576  *  -ENOENT: object doesn't exist
2577  * Also possible, but rare:
2578  *  -EAGAIN: GPU wedged
2579  *  -ENOMEM: damn
2580  *  -ENODEV: Internal IRQ fail
2581  *  -E?: The add request failed
2582  *
2583  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2584  * non-zero timeout parameter the wait ioctl will wait for the given number of
2585  * nanoseconds on an object becoming unbusy. Since the wait itself does so
2586  * without holding struct_mutex the object may become re-busied before this
2587  * function completes. A similar but shorter race condition exists in the busy
2588  * ioctl.
2589  */
2590 int
2591 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2592 {
2593         struct drm_i915_gem_wait *args = data;
2594         struct drm_i915_gem_object *obj;
2595         struct intel_ring_buffer *ring = NULL;
2596         struct timespec timeout_stack, *timeout = NULL;
2597         u32 seqno = 0;
2598         int ret = 0;
2599
2600         if (args->timeout_ns >= 0) {
2601                 timeout_stack.tv_sec = args->timeout_ns / 1000000000;
2602                 timeout_stack.tv_nsec = args->timeout_ns % 1000000000;
2603                 timeout = &timeout_stack;
2604         }
2605
2606         ret = i915_mutex_lock_interruptible(dev);
2607         if (ret)
2608                 return ret;
2609
2610         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2611         if (&obj->base == NULL) {
2612                 DRM_UNLOCK(dev);
2613                 return -ENOENT;
2614         }
2615
2616         /* Need to make sure the object gets inactive eventually. */
2617         ret = i915_gem_object_flush_active(obj);
2618         if (ret)
2619                 goto out;
2620
2621         if (obj->active) {
2622                 seqno = obj->last_read_seqno;
2623                 ring = obj->ring;
2624         }
2625
2626         if (seqno == 0)
2627                  goto out;
2628
2629         /* Do this after OLR check to make sure we make forward progress polling
2630          * on this IOCTL with a 0 timeout (like busy ioctl)
2631          */
2632         if (!args->timeout_ns) {
2633                 ret = -ETIMEDOUT;
2634                 goto out;
2635         }
2636
2637         drm_gem_object_unreference(&obj->base);
2638         DRM_UNLOCK(dev);
2639
2640         ret = __wait_seqno(ring, seqno, true, timeout);
2641         if (timeout) {
2642                 args->timeout_ns = timeout->tv_sec * 1000000000LL + timeout->tv_nsec;
2643         }
2644         return ret;
2645
2646 out:
2647         drm_gem_object_unreference(&obj->base);
2648         DRM_UNLOCK(dev);
2649         return ret;
2650 }
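/*
 * Illustrative userspace usage (not part of this file; "fd" and "handle" are
 * placeholders, structure and ioctl names per i915_drm.h).  A timeout_ns of
 * 1000000000 waits for at most one second:
 *
 *      struct drm_i915_gem_wait wait = {
 *              .bo_handle = handle,
 *              .timeout_ns = 1000000000,
 *      };
 *      ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *
 * On success the remaining time is written back into wait.timeout_ns, as
 * described in the comment above.
 */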
2651
2652 /**
2653  * i915_gem_object_sync - sync an object to a ring.
2654  *
2655  * @obj: object which may be in use on another ring.
2656  * @to: ring we wish to use the object on. May be NULL.
2657  *
2658  * This code is meant to abstract object synchronization with the GPU.
2659  * Calling with NULL implies synchronizing the object with the CPU
2660  * rather than a particular GPU ring.
2661  *
2662  * Returns 0 if successful, else propagates up the lower layer error.
2663  */
2664 int
2665 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2666                      struct intel_ring_buffer *to)
2667 {
2668         struct intel_ring_buffer *from = obj->ring;
2669         u32 seqno;
2670         int ret, idx;
2671
2672         if (from == NULL || to == from)
2673                 return 0;
2674
2675         if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2676                 return i915_gem_object_wait_rendering(obj, false);
2677
2678         idx = intel_ring_sync_index(from, to);
2679
2680         seqno = obj->last_read_seqno;
2681         if (seqno <= from->sync_seqno[idx])
2682                 return 0;
2683
2684         ret = i915_gem_check_olr(obj->ring, seqno);
2685         if (ret)
2686                 return ret;
2687
2688         ret = to->sync_to(to, from, seqno);
2689         if (!ret)
2690                 /* We use last_read_seqno because sync_to()
2691                  * might have just caused seqno wrap under
2692                  * the radar.
2693                  */
2694                 from->sync_seqno[idx] = obj->last_read_seqno;
2695
2696         return ret;
2697 }
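/*
 * Example (sketch): when the execbuffer path is about to use on the blitter
 * ring a buffer that the render ring last wrote, it calls
 * i915_gem_object_sync(obj, blt_ring).  With semaphores enabled this emits a
 * GPU-side wait for the render ring's seqno instead of stalling the CPU; with
 * semaphores disabled, or with to == NULL, it falls back to
 * i915_gem_object_wait_rendering().
 */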
2698
2699 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2700 {
2701         u32 old_write_domain, old_read_domains;
2702
2703         /* Act as a barrier for all accesses through the GTT */
2704         mb();
2705
2706         /* Force a pagefault for domain tracking on next user access */
2707         i915_gem_release_mmap(obj);
2708
2709         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2710                 return;
2711
2712         old_read_domains = obj->base.read_domains;
2713         old_write_domain = obj->base.write_domain;
2714
2715         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2716         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2717
2718         CTR3(KTR_DRM, "object_change_domain finish gtt %p %x %x",
2719             obj, old_read_domains, old_write_domain);
2720 }
2721
2722 /**
2723  * Unbinds an object from the GTT aperture.
2724  */
2725 int
2726 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2727 {
2728         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2729         int ret = 0;
2730
2731         if (obj->gtt_space == NULL)
2732                 return 0;
2733
2734         if (obj->pin_count)
2735                 return -EBUSY;
2736
2737         BUG_ON(obj->pages == NULL);
2738
2739         ret = i915_gem_object_finish_gpu(obj);
2740         if (ret)
2741                 return ret;
2742         /* Continue on if we fail due to EIO; the GPU is hung, so we
2743          * should be safe, and we need to clean up or else we might
2744          * cause memory corruption through use-after-free.
2745          */
2746
2747         i915_gem_object_finish_gtt(obj);
2748
2749         /* release the fence reg _after_ flushing */
2750         ret = i915_gem_object_put_fence(obj);
2751         if (ret)
2752                 return ret;
2753
2754         if (obj->has_global_gtt_mapping)
2755                 i915_gem_gtt_unbind_object(obj);
2756         if (obj->has_aliasing_ppgtt_mapping) {
2757                 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2758                 obj->has_aliasing_ppgtt_mapping = 0;
2759         }
2760         i915_gem_gtt_finish_object(obj);
2761
2762         list_del(&obj->mm_list);
2763         list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
2764         /* Avoid an unnecessary call to unbind on rebind. */
2765         obj->map_and_fenceable = true;
2766
2767         drm_mm_put_block(obj->gtt_space);
2768         obj->gtt_space = NULL;
2769         obj->gtt_offset = 0;
2770
2771         return 0;
2772 }
2773
2774 int i915_gpu_idle(struct drm_device *dev)
2775 {
2776         drm_i915_private_t *dev_priv = dev->dev_private;
2777         struct intel_ring_buffer *ring;
2778         int ret, i;
2779
2780         /* Flush everything onto the inactive list. */
2781         for_each_ring(ring, dev_priv, i) {
2782                 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2783                 if (ret)
2784                         return ret;
2785
2786                 ret = intel_ring_idle(ring);
2787                 if (ret)
2788                         return ret;
2789         }
2790
2791         return 0;
2792 }
2793
2794 static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
2795                                         struct drm_i915_gem_object *obj)
2796 {
2797         drm_i915_private_t *dev_priv = dev->dev_private;
2798         uint64_t val;
2799
2800         if (obj) {
2801                 u32 size = obj->gtt_space->size;
2802
2803                 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2804                                  0xfffff000) << 32;
2805                 val |= obj->gtt_offset & 0xfffff000;
2806                 val |= (uint64_t)((obj->stride / 128) - 1) <<
2807                         SANDYBRIDGE_FENCE_PITCH_SHIFT;
2808
2809                 if (obj->tiling_mode == I915_TILING_Y)
2810                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2811                 val |= I965_FENCE_REG_VALID;
2812         } else
2813                 val = 0;
2814
2815         I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
2816         POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
2817 }
2818
2819 static void i965_write_fence_reg(struct drm_device *dev, int reg,
2820                                  struct drm_i915_gem_object *obj)
2821 {
2822         drm_i915_private_t *dev_priv = dev->dev_private;
2823         uint64_t val;
2824
2825         if (obj) {
2826                 u32 size = obj->gtt_space->size;
2827
2828                 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2829                                  0xfffff000) << 32;
2830                 val |= obj->gtt_offset & 0xfffff000;
2831                 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2832                 if (obj->tiling_mode == I915_TILING_Y)
2833                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2834                 val |= I965_FENCE_REG_VALID;
2835         } else
2836                 val = 0;
2837
2838         I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
2839         POSTING_READ(FENCE_REG_965_0 + reg * 8);
2840 }
2841
2842 static void i915_write_fence_reg(struct drm_device *dev, int reg,
2843                                  struct drm_i915_gem_object *obj)
2844 {
2845         drm_i915_private_t *dev_priv = dev->dev_private;
2846         u32 val;
2847
2848         if (obj) {
2849                 u32 size = obj->gtt_space->size;
2850                 int pitch_val;
2851                 int tile_width;
2852
2853                 WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2854                      (size & -size) != size ||
2855                      (obj->gtt_offset & (size - 1)),
2856                      "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2857                      obj->gtt_offset, obj->map_and_fenceable, size);
2858
2859                 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2860                         tile_width = 128;
2861                 else
2862                         tile_width = 512;
2863
2864                 /* Note: pitch better be a power of two tile widths */
2865                 pitch_val = obj->stride / tile_width;
2866                 pitch_val = ffs(pitch_val) - 1;
2867
2868                 val = obj->gtt_offset;
2869                 if (obj->tiling_mode == I915_TILING_Y)
2870                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2871                 val |= I915_FENCE_SIZE_BITS(size);
2872                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2873                 val |= I830_FENCE_REG_VALID;
2874         } else
2875                 val = 0;
2876
2877         if (reg < 8)
2878                 reg = FENCE_REG_830_0 + reg * 4;
2879         else
2880                 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2881
2882         I915_WRITE(reg, val);
2883         POSTING_READ(reg);
2884 }
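/*
 * Worked example (illustrative only): a gen3 X-tiled object with a 2048-byte
 * stride uses 512-byte tiles, so pitch_val = ffs(2048 / 512) - 1 = 2; the
 * register thus encodes the pitch as log2 of the stride measured in tiles.
 */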
2885
2886 static void i830_write_fence_reg(struct drm_device *dev, int reg,
2887                                 struct drm_i915_gem_object *obj)
2888 {
2889         drm_i915_private_t *dev_priv = dev->dev_private;
2890         uint32_t val;
2891
2892         if (obj) {
2893                 u32 size = obj->gtt_space->size;
2894                 uint32_t pitch_val;
2895
2896                 WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2897                      (size & -size) != size ||
2898                      (obj->gtt_offset & (size - 1)),
2899                      "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2900                      obj->gtt_offset, size);
2901
2902                 pitch_val = obj->stride / 128;
2903                 pitch_val = ffs(pitch_val) - 1;
2904
2905                 val = obj->gtt_offset;
2906                 if (obj->tiling_mode == I915_TILING_Y)
2907                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2908                 val |= I830_FENCE_SIZE_BITS(size);
2909                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2910                 val |= I830_FENCE_REG_VALID;
2911         } else
2912                 val = 0;
2913
2914         I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2915         POSTING_READ(FENCE_REG_830_0 + reg * 4);
2916 }
2917
2918 static void i915_gem_write_fence(struct drm_device *dev, int reg,
2919                                  struct drm_i915_gem_object *obj)
2920 {
2921         switch (INTEL_INFO(dev)->gen) {
2922         case 7:
2923         case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
2924         case 5:
2925         case 4: i965_write_fence_reg(dev, reg, obj); break;
2926         case 3: i915_write_fence_reg(dev, reg, obj); break;
2927         case 2: i830_write_fence_reg(dev, reg, obj); break;
2928         default: break;
2929         }
2930 }
2931
2932 static inline int fence_number(struct drm_i915_private *dev_priv,
2933                                struct drm_i915_fence_reg *fence)
2934 {
2935         return fence - dev_priv->fence_regs;
2936 }
2937
2938 static void i915_gem_write_fence__ipi(void *data)
2939 {
2940         wbinvd();
2941 }
2942
2943 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2944                                          struct drm_i915_fence_reg *fence,
2945                                          bool enable)
2946 {
2947         struct drm_device *dev = obj->base.dev;
2948         struct drm_i915_private *dev_priv = dev->dev_private;
2949         int fence_reg = fence_number(dev_priv, fence);
2950
2951         /* In order to fully serialize access to the fenced region and
2952          * the update to the fence register we need to take extreme
2953          * measures on SNB+. In theory, the write to the fence register
2954          * flushes all memory transactions before, and coupled with the
2955          * mb() placed around the register write we serialise all memory
2956          * operations with respect to the changes in the tiler. Yet, on
2957          * SNB+ we need to take a step further and emit an explicit wbinvd()
2958          * on each processor in order to manually flush all memory
2959          * transactions before updating the fence register.
2960          */
2961         if (HAS_LLC(obj->base.dev))
2962                 on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
2963         i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);
2964
2965         if (enable) {
2966                 obj->fence_reg = fence_reg;
2967                 fence->obj = obj;
2968                 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2969         } else {
2970                 obj->fence_reg = I915_FENCE_REG_NONE;
2971                 fence->obj = NULL;
2972                 list_del_init(&fence->lru_list);
2973         }
2974 }
2975
2976 static int
2977 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
2978 {
2979         if (obj->last_fenced_seqno) {
2980                 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2981                 if (ret)
2982                         return ret;
2983
2984                 obj->last_fenced_seqno = 0;
2985         }
2986
2987         /* Ensure that all CPU reads are completed before installing a fence
2988          * and all writes before removing the fence.
2989          */
2990         if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2991                 mb();
2992
2993         obj->fenced_gpu_access = false;
2994         return 0;
2995 }
2996
2997 int
2998 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2999 {
3000         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3001         int ret;
3002
3003         ret = i915_gem_object_flush_fence(obj);
3004         if (ret)
3005                 return ret;
3006
3007         if (obj->fence_reg == I915_FENCE_REG_NONE)
3008                 return 0;
3009
3010         i915_gem_object_update_fence(obj,
3011                                      &dev_priv->fence_regs[obj->fence_reg],
3012                                      false);
3013         i915_gem_object_fence_lost(obj);
3014
3015         return 0;
3016 }
3017
3018 static struct drm_i915_fence_reg *
3019 i915_find_fence_reg(struct drm_device *dev)
3020 {
3021         struct drm_i915_private *dev_priv = dev->dev_private;
3022         struct drm_i915_fence_reg *reg, *avail;
3023         int i;
3024
3025         /* First try to find a free reg */
3026         avail = NULL;
3027         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3028                 reg = &dev_priv->fence_regs[i];
3029                 if (!reg->obj)
3030                         return reg;
3031
3032                 if (!reg->pin_count)
3033                         avail = reg;
3034         }
3035
3036         if (avail == NULL)
3037                 return NULL;
3038
3039         /* None available, try to steal one or wait for a user to finish */
3040         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3041                 if (reg->pin_count)
3042                         continue;
3043
3044                 return reg;
3045         }
3046
3047         return NULL;
3048 }
3049
3050 /**
3051  * i915_gem_object_get_fence - set up fencing for an object
3052  * @obj: object to map through a fence reg
3053  *
3054  * When mapping objects through the GTT, userspace wants to be able to write
3055  * to them without having to worry about swizzling if the object is tiled.
3056  * This function walks the fence regs looking for a free one for @obj,
3057  * stealing one if it can't find any.
3058  *
3059  * It then sets up the reg based on the object's properties: address, pitch
3060  * and tiling format.
3061  *
3062  * For an untiled surface, this removes any existing fence.
3063  */
3064 int
3065 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3066 {
3067         struct drm_device *dev = obj->base.dev;
3068         struct drm_i915_private *dev_priv = dev->dev_private;
3069         bool enable = obj->tiling_mode != I915_TILING_NONE;
3070         struct drm_i915_fence_reg *reg;
3071         int ret;
3072
3073         /* Have we updated the tiling parameters upon the object and so
3074          * will need to serialise the write to the associated fence register?
3075          */
3076         if (obj->fence_dirty) {
3077                 ret = i915_gem_object_flush_fence(obj);
3078                 if (ret)
3079                         return ret;
3080         }
3081
3082         /* Just update our place in the LRU if our fence is getting reused. */
3083         if (obj->fence_reg != I915_FENCE_REG_NONE) {
3084                 reg = &dev_priv->fence_regs[obj->fence_reg];
3085                 if (!obj->fence_dirty) {
3086                         list_move_tail(&reg->lru_list,
3087                                        &dev_priv->mm.fence_list);
3088                         return 0;
3089                 }
3090         } else if (enable) {
3091                 reg = i915_find_fence_reg(dev);
3092                 if (reg == NULL)
3093                         return -EDEADLK;
3094
3095                 if (reg->obj) {
3096                         struct drm_i915_gem_object *old = reg->obj;
3097
3098                         ret = i915_gem_object_flush_fence(old);
3099                         if (ret)
3100                                 return ret;
3101
3102                         i915_gem_object_fence_lost(old);
3103                 }
3104         } else
3105                 return 0;
3106
3107         i915_gem_object_update_fence(obj, reg, enable);
3108         obj->fence_dirty = false;
3109
3110         return 0;
3111 }
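/*
 * Illustrative usage sketch (not a verbatim caller from this file): a tiled
 * object that will be written through the GTT is typically pinned with a
 * mappable, fenceable placement and then given a fence:
 *
 *      ret = i915_gem_object_pin(obj, 0, true, false);
 *      if (ret == 0)
 *              ret = i915_gem_object_get_fence(obj);
 *      ...
 *      i915_gem_object_unpin(obj);
 *
 * i915_gem_object_put_fence() is the explicit counterpart that releases a
 * fence when the caller no longer needs detiled access.
 */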
3112
3113 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3114                                      struct drm_mm_node *gtt_space,
3115                                      unsigned long cache_level)
3116 {
3117         struct drm_mm_node *other;
3118
3119         /* On non-LLC machines we have to be careful when putting differing
3120          * types of snoopable memory together to avoid the prefetcher
3121          * crossing memory domains and dying.
3122          */
3123         if (HAS_LLC(dev))
3124                 return true;
3125
3126         if (gtt_space == NULL)
3127                 return true;
3128
3129         if (list_empty(&gtt_space->node_list))
3130                 return true;
3131
3132         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3133         if (other->allocated && !other->hole_follows && other->color != cache_level)
3134                 return false;
3135
3136         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3137         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3138                 return false;
3139
3140         return true;
3141 }
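/*
 * Example of the rule above (illustrative): on a non-LLC part, a node with
 * color I915_CACHE_LLC placed immediately next to an allocated neighbour of
 * color I915_CACHE_NONE, with no guard hole between them, is rejected;
 * leaving a hole (hole_follows) between the differing cache levels makes
 * the placement acceptable.
 */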
3142
3143 static void i915_gem_verify_gtt(struct drm_device *dev)
3144 {
3145 #if WATCH_GTT
3146         struct drm_i915_private *dev_priv = dev->dev_private;
3147         struct drm_i915_gem_object *obj;
3148         int err = 0;
3149
3150         list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
3151                 if (obj->gtt_space == NULL) {
3152                         DRM_ERROR("object found on GTT list with no space reserved\n");
3153                         err++;
3154                         continue;
3155                 }
3156
3157                 if (obj->cache_level != obj->gtt_space->color) {
3158                         DRM_ERROR("object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
3159                                obj->gtt_space->start,
3160                                obj->gtt_space->start + obj->gtt_space->size,
3161                                obj->cache_level,
3162                                obj->gtt_space->color);
3163                         err++;
3164                         continue;
3165                 }
3166
3167                 if (!i915_gem_valid_gtt_space(dev,
3168                                               obj->gtt_space,
3169                                               obj->cache_level)) {
3170                         DRM_ERROR("invalid GTT space found at [%08lx, %08lx] - color=%x\n",
3171                                obj->gtt_space->start,
3172                                obj->gtt_space->start + obj->gtt_space->size,
3173                                obj->cache_level);
3174                         err++;
3175                         continue;
3176                 }
3177         }
3178
3179         WARN_ON(err);
3180 #endif
3181 }
3182
3183 /**
3184  * Finds free space in the GTT aperture and binds the object there.
3185  */
3186 static int
3187 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
3188                             unsigned alignment,
3189                             bool map_and_fenceable,
3190                             bool nonblocking)
3191 {
3192         struct drm_device *dev = obj->base.dev;
3193         drm_i915_private_t *dev_priv = dev->dev_private;
3194         struct drm_mm_node *node;
3195         u32 size, fence_size, fence_alignment, unfenced_alignment;
3196         bool mappable, fenceable;
3197         int ret;
3198
3199         if (obj->madv != I915_MADV_WILLNEED) {
3200                 DRM_ERROR("Attempting to bind a purgeable object\n");
3201                 return -EINVAL;
3202         }
3203
3204         fence_size = i915_gem_get_gtt_size(dev,
3205                                            obj->base.size,
3206                                            obj->tiling_mode);
3207         fence_alignment = i915_gem_get_gtt_alignment(dev,
3208                                                      obj->base.size,
3209                                                      obj->tiling_mode);
3210         unfenced_alignment =
3211                 i915_gem_get_unfenced_gtt_alignment(dev,
3212                                                     obj->base.size,
3213                                                     obj->tiling_mode);
3214
3215         if (alignment == 0)
3216                 alignment = map_and_fenceable ? fence_alignment :
3217                                                 unfenced_alignment;
3218         if (map_and_fenceable && alignment & (fence_alignment - 1)) {
3219                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
3220                 return -EINVAL;
3221         }
3222
3223         size = map_and_fenceable ? fence_size : obj->base.size;
3224
3225         /* If the object is bigger than the entire aperture, reject it early
3226          * before evicting everything in a vain attempt to find space.
3227          */
3228         if (obj->base.size >
3229             (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
3230                 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
3231                 return -E2BIG;
3232         }
3233
3234         ret = i915_gem_object_get_pages(obj);
3235         if (ret)
3236                 return ret;
3237
3238         i915_gem_object_pin_pages(obj);
3239
3240         node = malloc(sizeof(*node), DRM_MEM_MM, M_NOWAIT | M_ZERO);
3241         if (node == NULL) {
3242                 i915_gem_object_unpin_pages(obj);
3243                 return -ENOMEM;
3244         }
3245
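        /*
         * Search for a suitably sized and aligned hole in the GTT; if none
         * is found, evict something and retry from search_free until either
         * the insertion succeeds or eviction itself fails.
         */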
3246  search_free:
3247         if (map_and_fenceable)
3248                 ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
3249                                                           size, alignment, obj->cache_level,
3250                                                           0, dev_priv->mm.gtt_mappable_end);
3251         else
3252                 ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
3253                                                  size, alignment, obj->cache_level);
3254         if (ret) {
3255                 ret = i915_gem_evict_something(dev, size, alignment,
3256                                                obj->cache_level,
3257                                                map_and_fenceable,
3258                                                nonblocking);
3259                 if (ret == 0)
3260                         goto search_free;
3261
3262                 i915_gem_object_unpin_pages(obj);
3263                 free(node, DRM_MEM_MM);
3264                 return ret;
3265         }
3266         if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
3267                 i915_gem_object_unpin_pages(obj);
3268                 drm_mm_put_block(node);
3269                 return -EINVAL;
3270         }
3271
3272         ret = i915_gem_gtt_prepare_object(obj);
3273         if (ret) {
3274                 i915_gem_object_unpin_pages(obj);
3275                 drm_mm_put_block(node);
3276                 return ret;
3277         }
3278
3279         list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
3280         list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3281
3282         obj->gtt_space = node;
3283         obj->gtt_offset = node->start;
3284
3285         fenceable =
3286                 node->size == fence_size &&
3287                 (node->start & (fence_alignment - 1)) == 0;
3288
3289         mappable =
3290                 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
3291
3292         obj->map_and_fenceable = mappable && fenceable;
3293
3294         i915_gem_object_unpin_pages(obj);
3295         CTR4(KTR_DRM, "object_bind %p %x %x %d", obj, obj->gtt_offset,
3296             obj->base.size, map_and_fenceable);
3297         i915_gem_verify_gtt(dev);
3298         return 0;
3299 }
3300
3301 void
3302 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
3303 {
3304         /* If we don't have a page list set up, then we're not pinned
3305          * to GPU, and we can ignore the cache flush because it'll happen
3306          * again at bind time.
3307          */
3308         if (obj->pages == NULL)
3309                 return;
3310
3311         /* If the GPU is snooping the contents of the CPU cache,
3312          * we do not need to manually clear the CPU cache lines.  However,
3313          * the caches are only snooped when the render cache is
3314          * flushed/invalidated.  As we always have to emit invalidations
3315          * and flushes when moving into and out of the RENDER domain, correct
3316          * snooping behaviour occurs naturally as the result of our domain
3317          * tracking.
3318          */
3319         if (obj->cache_level != I915_CACHE_NONE)
3320                 return;
3321
3322         CTR1(KTR_DRM, "object_clflush %p", obj);
3323
3324         drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
3325 }
3326
3327 /** Flushes the GTT write domain for the object if it's dirty. */
3328 static void
3329 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3330 {
3331         uint32_t old_write_domain;
3332
3333         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3334                 return;
3335
3336         /* No actual flushing is required for the GTT write domain.  Writes
3337          * to it immediately go to main memory as far as we know, so there's
3338          * no chipset flush.  It also doesn't land in render cache.
3339          *
3340          * However, we do have to enforce the order so that all writes through
3341          * the GTT land before any writes to the device, such as updates to
3342          * the GATT itself.
3343          */
3344         wmb();
3345
3346         old_write_domain = obj->base.write_domain;
3347         obj->base.write_domain = 0;
3348
3349         CTR3(KTR_DRM, "object_change_domain flush gtt_write %p %x %x", obj,
3350             obj->base.read_domains, old_write_domain);
3351 }
3352
3353 /** Flushes the CPU write domain for the object if it's dirty. */
3354 static void
3355 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3356 {
3357         uint32_t old_write_domain;
3358
3359         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3360                 return;
3361
3362         i915_gem_clflush_object(obj);
3363         i915_gem_chipset_flush(obj->base.dev);
3364         old_write_domain = obj->base.write_domain;
3365         obj->base.write_domain = 0;
3366
3367         CTR3(KTR_DRM, "object_change_domain flush_cpu_write %p %x %x", obj,
3368             obj->base.read_domains, old_write_domain);
3369 }
3370
3371 /**
3372  * Moves a single object to the GTT read, and possibly write domain.
3373  *
3374  * This function returns when the move is complete, including waiting on
3375  * flushes to occur.
3376  */
3377 int
3378 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3379 {
3380         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
3381         uint32_t old_write_domain, old_read_domains;
3382         int ret;
3383
3384         /* Not valid to be called on unbound objects. */
3385         if (obj->gtt_space == NULL)
3386                 return -EINVAL;
3387
3388         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3389                 return 0;
3390
3391         ret = i915_gem_object_wait_rendering(obj, !write);
3392         if (ret)
3393                 return ret;
3394
3395         i915_gem_object_flush_cpu_write_domain(obj);
3396
3397         old_write_domain = obj->base.write_domain;
3398         old_read_domains = obj->base.read_domains;
3399
3400         /* It should now be out of any other write domains, and we can update
3401          * the domain values for our changes.
3402          */
3403         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3404         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3405         if (write) {
3406                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3407                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3408                 obj->dirty = 1;
3409         }
3410
3411         CTR3(KTR_DRM, "object_change_domain set_to_gtt %p %x %x", obj,
3412             old_read_domains, old_write_domain);
3413
3414         /* And bump the LRU for this access */
3415         if (i915_gem_object_is_inactive(obj))
3416                 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3417
3418         return 0;
3419 }
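/*
 * Usage sketch (illustrative): before the CPU writes to an already-bound
 * object through a GTT mapping, callers move it into the GTT domain so any
 * stale CPU-cache data is flushed and the object is marked dirty:
 *
 *      ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *      if (ret)
 *              return ret;
 *      (CPU writes through the GTT mapping follow)
 */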
3420
3421 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3422                                     enum i915_cache_level cache_level)
3423 {
3424         struct drm_device *dev = obj->base.dev;
3425         drm_i915_private_t *dev_priv = dev->dev_private;
3426         int ret;
3427
3428         if (obj->cache_level == cache_level)
3429                 return 0;
3430
3431         if (obj->pin_count) {
3432                 DRM_DEBUG("can not change the cache level of pinned objects\n");
3433                 return -EBUSY;
3434         }
3435
3436         if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
3437                 ret = i915_gem_object_unbind(obj);
3438                 if (ret)
3439                         return ret;
3440         }
3441
3442         if (obj->gtt_space) {
3443                 ret = i915_gem_object_finish_gpu(obj);
3444                 if (ret)
3445                         return ret;
3446
3447                 i915_gem_object_finish_gtt(obj);
3448
3449                 /* Before SandyBridge, you could not use tiling or fence
3450                  * registers with snooped memory, so relinquish any fences
3451                  * currently pointing to our region in the aperture.
3452                  */
3453                 if (INTEL_INFO(dev)->gen < 6) {
3454                         ret = i915_gem_object_put_fence(obj);
3455                         if (ret)
3456                                 return ret;
3457                 }
3458
3459                 if (obj->has_global_gtt_mapping)
3460                         i915_gem_gtt_bind_object(obj, cache_level);
3461                 if (obj->has_aliasing_ppgtt_mapping)
3462                         i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3463                                                obj, cache_level);
3464
3465                 obj->gtt_space->color = cache_level;
3466         }
3467
3468         if (cache_level == I915_CACHE_NONE) {
3469                 u32 old_read_domains, old_write_domain;
3470
3471                 /* If we're coming from LLC cached, then we haven't
3472                  * actually been tracking whether the data is in the
3473                  * CPU cache or not, since we only allow one bit set
3474                  * in obj->write_domain and have been skipping the clflushes.
3475                  * Just set it to the CPU cache for now.
3476                  */
3477                 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3478                 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
3479
3480                 old_read_domains = obj->base.read_domains;
3481                 old_write_domain = obj->base.write_domain;
3482
3483                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3484                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3485
3486                 CTR3(KTR_DRM, "object_change_domain set_cache_level %p %x %x",
3487                     obj, old_read_domains, old_write_domain);
3488         }
3489
3490         obj->cache_level = cache_level;
3491         i915_gem_verify_gtt(dev);
3492         return 0;
3493 }
3494
3495 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3496                                struct drm_file *file)
3497 {
3498         struct drm_i915_gem_caching *args = data;
3499         struct drm_i915_gem_object *obj;
3500         int ret;
3501
3502         ret = i915_mutex_lock_interruptible(dev);
3503         if (ret)
3504                 return ret;
3505
3506         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3507         if (&obj->base == NULL) {
3508                 ret = -ENOENT;
3509                 goto unlock;
3510         }
3511
3512         args->caching = obj->cache_level != I915_CACHE_NONE;
3513
3514         drm_gem_object_unreference(&obj->base);
3515 unlock:
3516         DRM_UNLOCK(dev);
3517         return ret;
3518 }
3519
3520 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3521                                struct drm_file *file)
3522 {
3523         struct drm_i915_gem_caching *args = data;
3524         struct drm_i915_gem_object *obj;
3525         enum i915_cache_level level;
3526         int ret;
3527
3528         switch (args->caching) {
3529         case I915_CACHING_NONE:
3530                 level = I915_CACHE_NONE;
3531                 break;
3532         case I915_CACHING_CACHED:
3533                 level = I915_CACHE_LLC;
3534                 break;
3535         default:
3536                 return -EINVAL;
3537         }
3538
3539         ret = i915_mutex_lock_interruptible(dev);
3540         if (ret)
3541                 return ret;
3542
3543         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3544         if (&obj->base == NULL) {
3545                 ret = -ENOENT;
3546                 goto unlock;
3547         }
3548
3549         ret = i915_gem_object_set_cache_level(obj, level);
3550
3551         drm_gem_object_unreference(&obj->base);
3552 unlock:
3553         DRM_UNLOCK(dev);
3554         return ret;
3555 }
3556
3557 static bool is_pin_display(struct drm_i915_gem_object *obj)
3558 {
3559         /* There are 3 sources that pin objects:
3560          *   1. The display engine (scanouts, sprites, cursors);
3561          *   2. Reservations for execbuffer;
3562          *   3. The user.
3563          *
3564          * We can ignore reservations as we hold the struct_mutex and
3565          * are only called outside of the reservation path.  The user
3566          * can only increment pin_count once, and so if after
3567          * subtracting the potential reference by the user, any pin_count
3568          * remains, it must be due to another use by the display engine.
3569          */
3570         return obj->pin_count - !!obj->user_pin_count;
3571 }
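/*
 * Example (illustrative): pin_count == 2 with user_pin_count == 1 leaves a
 * remainder after discounting the user pin, so the object is treated as
 * pinned by the display engine; pin_count == 1 with user_pin_count == 1
 * does not.
 */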
3572
3573 /*
3574  * Prepare buffer for display plane (scanout, cursors, etc).
3575  * Can be called from an uninterruptible phase (modesetting) and allows
3576  * any flushes to be pipelined (for pageflips).
3577  */
3578 int
3579 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3580                                      u32 alignment,
3581                                      struct intel_ring_buffer *pipelined)
3582 {
3583         u32 old_read_domains, old_write_domain;
3584         int ret;
3585
3586         if (pipelined != obj->ring) {
3587                 ret = i915_gem_object_sync(obj, pipelined);
3588                 if (ret)
3589                         return ret;
3590         }
3591
3592         /* Mark the pin_display early so that we account for the
3593          * display coherency whilst setting up the cache domains.
3594          */
3595         obj->pin_display = true;
3596
3597         /* The display engine is not coherent with the LLC cache on gen6.  As
3598          * a result, we make sure that the pinning that is about to occur is
3599          * done with uncached PTEs. This is the lowest common denominator for all
3600          * chipsets.
3601          *
3602          * However for gen6+, we could do better by using the GFDT bit instead
3603          * of uncaching, which would allow us to flush all the LLC-cached data
3604          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3605          */
3606         ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
3607         if (ret)
3608                 goto err_unpin_display;
3609
3610         /* As the user may map the buffer once pinned in the display plane
3611          * (e.g. libkms for the bootup splash), we have to ensure that we
3612          * always use map_and_fenceable for all scanout buffers.
3613          */
3614         ret = i915_gem_object_pin(obj, alignment, true, false);
3615         if (ret)
3616                 goto err_unpin_display;
3617
3618         i915_gem_object_flush_cpu_write_domain(obj);
3619
3620         old_write_domain = obj->base.write_domain;
3621         old_read_domains = obj->base.read_domains;
3622
3623         /* It should now be out of any other write domains, and we can update
3624          * the domain values for our changes.
3625          */
3626         obj->base.write_domain = 0;
3627         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3628
3629         CTR3(KTR_DRM, "object_change_domain pin_to_display_plane %p %x %x",
3630             obj, old_read_domains, old_write_domain);
3631
3632         return 0;
3633
3634 err_unpin_display:
3635         obj->pin_display = is_pin_display(obj);
3636         return ret;
3637 }
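/*
 * Illustrative pairing (not a verbatim caller; alignment and pipelined stand
 * in for the caller's values): modesetting code pins a framebuffer for
 * scanout and releases it again when the plane is torn down:
 *
 *      ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
 *      if (ret)
 *              return ret;
 *      ...
 *      i915_gem_object_unpin_from_display_plane(obj);
 */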
3638
3639 void
3640 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3641 {
3642         i915_gem_object_unpin(obj);
3643         obj->pin_display = is_pin_display(obj);
3644 }
3645
3646 int
3647 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3648 {
3649         int ret;
3650
3651         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3652                 return 0;
3653
3654         ret = i915_gem_object_wait_rendering(obj, false);
3655         if (ret)
3656                 return ret;
3657
3658         /* Ensure that we invalidate the GPU's caches and TLBs. */
3659         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3660         return 0;
3661 }
3662
3663 /**
3664  * Moves a single object to the CPU read, and possibly write domain.
3665  *
3666  * This function returns when the move is complete, including waiting on
3667  * flushes to occur.
3668  */
3669 int
3670 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3671 {
3672         uint32_t old_write_domain, old_read_domains;
3673         int ret;
3674
3675         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3676                 return 0;
3677
3678         ret = i915_gem_object_wait_rendering(obj, !write);
3679         if (ret)
3680                 return ret;
3681
3682         i915_gem_object_flush_gtt_write_domain(obj);
3683
3684         old_write_domain = obj->base.write_domain;
3685         old_read_domains = obj->base.read_domains;
3686
3687         /* Flush the CPU cache if it's still invalid. */
3688         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3689                 i915_gem_clflush_object(obj);
3690
3691                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3692         }
3693
3694         /* It should now be out of any other write domains, and we can update
3695          * the domain values for our changes.
3696          */
3697         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3698
3699         /* If we're writing through the CPU, then the GPU read domains will
3700          * need to be invalidated at next use.
3701          */
3702         if (write) {
3703                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3704                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3705         }
3706
3707         CTR3(KTR_DRM, "object_change_domain set_to_cpu %p %x %x", obj,
3708             old_read_domains, old_write_domain);
3709
3710         return 0;
3711 }
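/*
 * Usage sketch (illustrative): before the CPU reads data produced by the
 * GPU, an object is moved into the CPU read domain, which clflushes the
 * pages if the CPU's view is stale:
 *
 *      ret = i915_gem_object_set_to_cpu_domain(obj, false);
 */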
3712
3713 /* Throttle our rendering by waiting until the ring has completed our requests
3714  * emitted over 20 msec ago.
3715  *
3716  * Note that if we were to use the current jiffies each time around the loop,
3717  * we wouldn't escape the function with any frames outstanding if the time to
3718  * render a frame was over 20ms.
3719  *
3720  * This should get us reasonable parallelism between CPU and GPU but also
3721  * relatively low latency when blocking on a particular request to finish.
3722  */
3723 static int
3724 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3725 {
3726         struct drm_i915_private *dev_priv = dev->dev_private;
3727         struct drm_i915_file_private *file_priv = file->driver_priv;
3728         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3729         struct drm_i915_gem_request *request;
3730         struct intel_ring_buffer *ring = NULL;
3731         u32 seqno = 0;
3732         int ret;
3733
3734         if (atomic_read(&dev_priv->mm.wedged))
3735                 return -EIO;
3736
3737         mtx_lock(&file_priv->mm.lock);
3738         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3739                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3740                         break;
3741
3742                 ring = request->ring;
3743                 seqno = request->seqno;
3744         }
3745         mtx_unlock(&file_priv->mm.lock);
3746
3747         if (seqno == 0)
3748                 return 0;
3749
3750         ret = __wait_seqno(ring, seqno, true, NULL);
3751         if (ret == 0)
3752                 taskqueue_enqueue_timeout(dev_priv->wq,
3753                     &dev_priv->mm.retire_work, 0);
3754
3755         return ret;
3756 }
3757
3758 int
3759 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3760                     uint32_t alignment,
3761                     bool map_and_fenceable,
3762                     bool nonblocking)
3763 {
3764         int ret;
3765
3766         if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3767                 return -EBUSY;
3768
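        /*
         * If the object is already bound but at an incompatible alignment,
         * or without the mappable/fenceable placement the caller now needs,
         * unbind it here so it can be rebound below with the new constraints.
         */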
3769         if (obj->gtt_space != NULL) {
3770                 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3771                     (map_and_fenceable && !obj->map_and_fenceable)) {
3772                         WARN(obj->pin_count,
3773                              "bo is already pinned with incorrect alignment:"
3774                              " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3775                              " obj->map_and_fenceable=%d\n",
3776                              obj->gtt_offset, alignment,
3777                              map_and_fenceable,
3778                              obj->map_and_fenceable);
3779                         ret = i915_gem_object_unbind(obj);
3780                         if (ret)
3781                                 return ret;
3782                 }
3783         }
3784
3785         if (obj->gtt_space == NULL) {
3786                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3787
3788                 ret = i915_gem_object_bind_to_gtt(obj, alignment,
3789                                                   map_and_fenceable,
3790                                                   nonblocking);
3791                 if (ret)
3792                         return ret;
3793
3794                 if (!dev_priv->mm.aliasing_ppgtt)
3795                         i915_gem_gtt_bind_object(obj, obj->cache_level);
3796         }
3797
3798         if (!obj->has_global_gtt_mapping && map_and_fenceable)
3799                 i915_gem_gtt_bind_object(obj, obj->cache_level);
3800
3801         obj->pin_count++;
3802         obj->pin_mappable |= map_and_fenceable;
3803
3804         return 0;
3805 }
3806
3807 void
3808 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3809 {
3810         BUG_ON(obj->pin_count == 0);
3811         BUG_ON(obj->gtt_space == NULL);
3812
3813         if (--obj->pin_count == 0)
3814                 obj->pin_mappable = false;
3815 }
3816
3817 int
3818 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3819                    struct drm_file *file)
3820 {
3821         struct drm_i915_gem_pin *args = data;
3822         struct drm_i915_gem_object *obj;
3823         int ret;
3824
3825         ret = i915_mutex_lock_interruptible(dev);
3826         if (ret)
3827                 return ret;
3828
3829         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3830         if (&obj->base == NULL) {
3831                 ret = -ENOENT;
3832                 goto unlock;
3833         }
3834
3835         if (obj->madv != I915_MADV_WILLNEED) {
3836                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3837                 ret = -EINVAL;
3838                 goto out;
3839         }
3840
3841         if (obj->pin_filp != NULL && obj->pin_filp != file) {
3842                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3843                           args->handle);
3844                 ret = -EINVAL;
3845                 goto out;
3846         }
3847
3848         if (obj->user_pin_count == 0) {
3849                 ret = i915_gem_object_pin(obj, args->alignment, true, false);
3850                 if (ret)
3851                         goto out;
3852         }
3853
3854         obj->user_pin_count++;
3855         obj->pin_filp = file;
3856
3857         /* XXX - flush the CPU caches for pinned objects
3858          * as the X server doesn't manage domains yet
3859          */
3860         i915_gem_object_flush_cpu_write_domain(obj);
3861         args->offset = obj->gtt_offset;
3862 out:
3863         drm_gem_object_unreference(&obj->base);
3864 unlock:
3865         DRM_UNLOCK(dev);
3866         return ret;
3867 }
3868
3869 int
3870 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3871                      struct drm_file *file)
3872 {
3873         struct drm_i915_gem_pin *args = data;
3874         struct drm_i915_gem_object *obj;
3875         int ret;
3876
3877         ret = i915_mutex_lock_interruptible(dev);
3878         if (ret)
3879                 return ret;
3880
3881         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3882         if (&obj->base == NULL) {
3883                 ret = -ENOENT;
3884                 goto unlock;
3885         }
3886
3887         if (obj->pin_filp != file) {
3888                 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3889                           args->handle);
3890                 ret = -EINVAL;
3891                 goto out;
3892         }
3893         obj->user_pin_count--;
3894         if (obj->user_pin_count == 0) {
3895                 obj->pin_filp = NULL;
3896                 i915_gem_object_unpin(obj);
3897         }
3898
3899 out:
3900         drm_gem_object_unreference(&obj->base);
3901 unlock:
3902         DRM_UNLOCK(dev);
3903         return ret;
3904 }
3905
3906 int
3907 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3908                     struct drm_file *file)
3909 {
3910         struct drm_i915_gem_busy *args = data;
3911         struct drm_i915_gem_object *obj;
3912         int ret;
3913
3914         ret = i915_mutex_lock_interruptible(dev);
3915         if (ret)
3916                 return ret;
3917
3918         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3919         if (&obj->base == NULL) {
3920                 ret = -ENOENT;
3921                 goto unlock;
3922         }
3923
3924         /* Count all active objects as busy, even if they are currently not used
3925          * by the gpu. Users of this interface expect objects to eventually
3926          * become non-busy without any further actions, therefore emit any
3927          * necessary flushes here.
3928          */
3929         ret = i915_gem_object_flush_active(obj);
3930
3931         args->busy = obj->active;
3932         if (obj->ring) {
3933                 BUILD_BUG_ON(I915_NUM_RINGS > 16);
3934                 args->busy |= intel_ring_flag(obj->ring) << 16;
3935         }
3936
3937         drm_gem_object_unreference(&obj->base);
3938 unlock:
3939         DRM_UNLOCK(dev);
3940         return ret;
3941 }
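/*
 * Result encoding sketch (illustrative, matching the assignments above):
 * the low bit of args->busy reflects obj->active and the ring flag is
 * shifted into the upper half, so userspace may decode it roughly as:
 *
 *      still_busy = busy & 1;
 *      ring_flags = busy >> 16;
 */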
3942
3943 int
3944 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3945                         struct drm_file *file_priv)
3946 {
3947         return i915_gem_ring_throttle(dev, file_priv);
3948 }
3949
3950 int
3951 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3952                        struct drm_file *file_priv)
3953 {
3954         struct drm_i915_gem_madvise *args = data;
3955         struct drm_i915_gem_object *obj;
3956         int ret;
3957
3958         switch (args->madv) {
3959         case I915_MADV_DONTNEED:
3960         case I915_MADV_WILLNEED:
3961             break;
3962         default:
3963             return -EINVAL;
3964         }
3965
3966         ret = i915_mutex_lock_interruptible(dev);
3967         if (ret)
3968                 return ret;
3969
3970         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3971         if (&obj->base == NULL) {
3972                 ret = -ENOENT;
3973                 goto unlock;
3974         }
3975
3976         if (obj->pin_count) {
3977                 ret = -EINVAL;
3978                 goto out;
3979         }
3980
3981         if (obj->madv != __I915_MADV_PURGED)
3982                 obj->madv = args->madv;
3983
3984         /* if the object is no longer attached, discard its backing storage */
3985         if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
3986                 i915_gem_object_truncate(obj);
3987
3988         args->retained = obj->madv != __I915_MADV_PURGED;
3989
3990 out:
3991         drm_gem_object_unreference(&obj->base);
3992 unlock:
3993         DRM_UNLOCK(dev);
3994         return ret;
3995 }
3996
3997 void i915_gem_object_init(struct drm_i915_gem_object *obj,
3998                           const struct drm_i915_gem_object_ops *ops)
3999 {
4000         INIT_LIST_HEAD(&obj->mm_list);
4001         INIT_LIST_HEAD(&obj->gtt_list);
4002         INIT_LIST_HEAD(&obj->ring_list);
4003         INIT_LIST_HEAD(&obj->exec_list);
4004
4005         obj->ops = ops;
4006
4007         obj->fence_reg = I915_FENCE_REG_NONE;
4008         obj->madv = I915_MADV_WILLNEED;
4009         /* Avoid an unnecessary call to unbind on the first bind. */
4010         obj->map_and_fenceable = true;
4011
4012         i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4013 }
4014
4015 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4016         .get_pages = i915_gem_object_get_pages_gtt,
4017         .put_pages = i915_gem_object_put_pages_gtt,
4018 };
4019
4020 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4021                                                   size_t size)
4022 {
4023         struct drm_i915_gem_object *obj;
4024
4025         obj = malloc(sizeof(*obj), DRM_I915_GEM, M_WAITOK | M_ZERO);
4026         if (obj == NULL)
4027                 return NULL;
4028
4029         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4030                 free(obj, DRM_I915_GEM);
4031                 return NULL;
4032         }
4033
4034 #ifdef FREEBSD_WIP
4035         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4036         if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4037                 /* 965gm cannot relocate objects above 4GiB. */
4038                 mask &= ~__GFP_HIGHMEM;
4039                 mask |= __GFP_DMA32;
4040         }
4041
4042         mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
4043         mapping_set_gfp_mask(mapping, mask);
4044 #endif /* FREEBSD_WIP */
4045
4046         i915_gem_object_init(obj, &i915_gem_object_ops);
4047
4048         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4049         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4050
4051         if (HAS_LLC(dev)) {
4052                 /* On some devices, we can have the GPU use the LLC (the CPU
4053                  * cache) for about a 10% performance improvement
4054                  * compared to uncached.  Graphics requests other than
4055                  * display scanout are coherent with the CPU in
4056                  * accessing this cache.  This means in this mode we
4057                  * don't need to clflush on the CPU side, and on the
4058                  * GPU side we only need to flush internal caches to
4059                  * get data visible to the CPU.
4060                  *
4061                  * However, we maintain the display planes as UC, and so
4062                  * need to rebind when first used as such.
4063                  */
4064                 obj->cache_level = I915_CACHE_LLC;
4065         } else
4066                 obj->cache_level = I915_CACHE_NONE;
4067
4068         return obj;
4069 }
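/*
 * Usage sketch (illustrative): callers allocate a backing object, use it,
 * and drop their reference under the DRM lock when done:
 *
 *      obj = i915_gem_alloc_object(dev, size);
 *      if (obj == NULL)
 *              return -ENOMEM;
 *      ...
 *      drm_gem_object_unreference(&obj->base);
 */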
4070
4071 int i915_gem_init_object(struct drm_gem_object *obj)
4072 {
4073         printf("i915_gem_init_object called\n");
4074
4075         return 0;
4076 }
4077
4078 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4079 {
4080         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4081         struct drm_device *dev = obj->base.dev;
4082         drm_i915_private_t *dev_priv = dev->dev_private;
4083
4084         CTR1(KTR_DRM, "object_destroy_tail %p", obj);
4085
4086         if (obj->phys_obj)
4087                 i915_gem_detach_phys_object(dev, obj);
4088
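        /*
         * Teardown must not fail: clear any pins and unbind the object; if
         * the unbind would have been interrupted by a signal, retry with
         * dev_priv->mm.interruptible temporarily disabled.
         */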
4089         obj->pin_count = 0;
4090         if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
4091                 bool was_interruptible;
4092
4093                 was_interruptible = dev_priv->mm.interruptible;
4094                 dev_priv->mm.interruptible = false;
4095
4096                 WARN_ON(i915_gem_object_unbind(obj));
4097
4098                 dev_priv->mm.interruptible = was_interruptible;
4099         }
4100
4101         obj->pages_pin_count = 0;
4102         i915_gem_object_put_pages(obj);
4103         i915_gem_object_free_mmap_offset(obj);
4104
4105         BUG_ON(obj->pages);
4106
4107 #ifdef FREEBSD_WIP
4108         if (obj->base.import_attach)
4109                 drm_prime_gem_destroy(&obj->base, NULL);
4110 #endif /* FREEBSD_WIP */
4111
4112         drm_gem_object_release(&obj->base);
4113         i915_gem_info_remove_obj(dev_priv, obj->base.size);
4114
4115         free(obj->bit_17, DRM_I915_GEM);
4116         free(obj, DRM_I915_GEM);
4117 }
4118
4119 int
4120 i915_gem_idle(struct drm_device *dev)
4121 {
4122         drm_i915_private_t *dev_priv = dev->dev_private;
4123         int ret;
4124
4125         DRM_LOCK(dev);
4126
4127         if (dev_priv->mm.suspended) {
4128                 DRM_UNLOCK(dev);
4129                 return 0;
4130         }
4131
4132         ret = i915_gpu_idle(dev);
4133         if (ret) {
4134                 DRM_UNLOCK(dev);
4135                 return ret;
4136         }
4137         i915_gem_retire_requests(dev);
4138
4139         /* Under UMS, be paranoid and evict. */
4140         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4141                 i915_gem_evict_everything(dev);
4142
4143         i915_gem_reset_fences(dev);
4144
4145         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
4146          * We need to replace this with a semaphore, or something.
4147          * And not confound mm.suspended!
4148          */
4149         dev_priv->mm.suspended = 1;
4150         callout_stop(&dev_priv->hangcheck_timer);
4151
4152         i915_kernel_lost_context(dev);
4153         i915_gem_cleanup_ringbuffer(dev);
4154
4155         DRM_UNLOCK(dev);
4156
4157         /* Cancel the retire work handler, which should be idle now. */
4158         taskqueue_cancel_timeout(dev_priv->wq, &dev_priv->mm.retire_work, NULL);
4159
4160         return 0;
4161 }
4162
4163 void i915_gem_l3_remap(struct drm_device *dev)
4164 {
4165         drm_i915_private_t *dev_priv = dev->dev_private;
4166         u32 misccpctl;
4167         int i;
4168
4169         if (!HAS_L3_GPU_CACHE(dev))
4170                 return;
4171
4172         if (!dev_priv->l3_parity.remap_info)
4173                 return;
4174
4175         misccpctl = I915_READ(GEN7_MISCCPCTL);
4176         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
4177         POSTING_READ(GEN7_MISCCPCTL);
4178
4179         for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4180                 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
4181                 if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
4182                         DRM_DEBUG("0x%x was already programmed to %x\n",
4183                                   GEN7_L3LOG_BASE + i, remap);
4184                 if (remap && !dev_priv->l3_parity.remap_info[i/4])
4185                         DRM_DEBUG_DRIVER("Clearing remapped register\n");
4186                 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
4187         }
4188
4189         /* Make sure all the writes land before disabling dop clock gating */
4190         POSTING_READ(GEN7_L3LOG_BASE);
4191
4192         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
4193 }
4194
4195 void i915_gem_init_swizzling(struct drm_device *dev)
4196 {
4197         drm_i915_private_t *dev_priv = dev->dev_private;
4198
4199         if (INTEL_INFO(dev)->gen < 5 ||
4200             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4201                 return;
4202
4203         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4204                                  DISP_TILE_SURFACE_SWIZZLING);
4205
4206         if (IS_GEN5(dev))
4207                 return;
4208
4209         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4210         if (IS_GEN6(dev))
4211                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4212         else
4213                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4214 }
4215
4216 static bool
4217 intel_enable_blt(struct drm_device *dev)
4218 {
4219         if (!HAS_BLT(dev))
4220                 return false;
4221
4222         /* The blitter was dysfunctional on early prototypes */
4223         if (IS_GEN6(dev) && pci_get_revid(dev->dev) < 8) {
4224                 DRM_INFO("BLT not supported on this pre-production hardware;"
4225                          " graphics performance will be degraded.\n");
4226                 return false;
4227         }
4228
4229         return true;
4230 }
4231
4232 int
4233 i915_gem_init_hw(struct drm_device *dev)
4234 {
4235         drm_i915_private_t *dev_priv = dev->dev_private;
4236         int ret;
4237
4238 #ifdef FREEBSD_WIP
4239         if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4240                 return -EIO;
4241 #endif /* FREEBSD_WIP */
4242
4243         if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
4244                 I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
4245
4246         i915_gem_l3_remap(dev);
4247
4248         i915_gem_init_swizzling(dev);
4249
4250         ret = intel_init_render_ring_buffer(dev);
4251         if (ret)
4252                 return ret;
4253
4254         if (HAS_BSD(dev)) {
4255                 ret = intel_init_bsd_ring_buffer(dev);
4256                 if (ret)
4257                         goto cleanup_render_ring;
4258         }
4259
4260         if (intel_enable_blt(dev)) {
4261                 ret = intel_init_blt_ring_buffer(dev);
4262                 if (ret)
4263                         goto cleanup_bsd_ring;
4264         }
4265
4266         dev_priv->next_seqno = 1;
4267
4268         /*
4269          * XXX: There was some w/a described somewhere suggesting loading
4270          * contexts before PPGTT.
4271          */
4272         i915_gem_context_init(dev);
4273         i915_gem_init_ppgtt(dev);
4274
4275         return 0;
4276
4277 cleanup_bsd_ring:
4278         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4279 cleanup_render_ring:
4280         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4281         return ret;
4282 }
4283
4284 static bool
4285 intel_enable_ppgtt(struct drm_device *dev)
4286 {
4287         if (i915_enable_ppgtt >= 0)
4288                 return i915_enable_ppgtt;
4289
4290 #ifdef CONFIG_INTEL_IOMMU
4291         /* Disable ppgtt on SNB if VT-d is on. */
4292         if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
4293                 return false;
4294 #endif
4295
4296         return true;
4297 }
4298
4299 int i915_gem_init(struct drm_device *dev)
4300 {
4301         struct drm_i915_private *dev_priv = dev->dev_private;
4302         unsigned long gtt_size, mappable_size;
4303         int ret;
4304
4305         gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
4306         mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
4307
4308         DRM_LOCK(dev);
4309         if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
4310                 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
4311                  * aperture accordingly when using aliasing ppgtt. */
4312                 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
4313
4314                 i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
4315
4316                 ret = i915_gem_init_aliasing_ppgtt(dev);
4317                 if (ret) {
4318                         DRM_UNLOCK(dev);
4319                         return ret;
4320                 }
4321         } else {
4322                 /* Let GEM Manage all of the aperture.
4323                  *
4324                  * However, leave one page at the end still bound to the scratch
4325                  * page.  There are a number of places where the hardware
4326                  * apparently prefetches past the end of the object, and we've
4327                  * seen multiple hangs with the GPU head pointer stuck in a
4328                  * batchbuffer bound at the last page of the aperture.  One page
4329                  * should be enough to keep any prefetching inside of the
4330                  * aperture.
4331                  */
4332                 i915_gem_init_global_gtt(dev, 0, mappable_size,
4333                                          gtt_size);
4334         }
4335
4336         ret = i915_gem_init_hw(dev);
4337         DRM_UNLOCK(dev);
4338         if (ret) {
4339                 i915_gem_cleanup_aliasing_ppgtt(dev);
4340                 return ret;
4341         }
4342
4343         /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4344         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4345                 dev_priv->dri1.allow_batchbuffer = 1;
4346         return 0;
4347 }
4348
4349 void
4350 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4351 {
4352         drm_i915_private_t *dev_priv = dev->dev_private;
4353         struct intel_ring_buffer *ring;
4354         int i;
4355
4356         for_each_ring(ring, dev_priv, i)
4357                 intel_cleanup_ring_buffer(ring);
4358 }
4359
4360 int
4361 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4362                        struct drm_file *file_priv)
4363 {
4364         drm_i915_private_t *dev_priv = dev->dev_private;
4365         int ret;
4366
4367         if (drm_core_check_feature(dev, DRIVER_MODESET))
4368                 return 0;
4369
4370         if (atomic_read(&dev_priv->mm.wedged)) {
4371                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4372                 atomic_set(&dev_priv->mm.wedged, 0);
4373         }
4374
4375         DRM_LOCK(dev);
4376         dev_priv->mm.suspended = 0;
4377
4378         ret = i915_gem_init_hw(dev);
4379         if (ret != 0) {
4380                 DRM_UNLOCK(dev);
4381                 return ret;
4382         }
4383
4384         BUG_ON(!list_empty(&dev_priv->mm.active_list));
4385         DRM_UNLOCK(dev);
4386
4387         ret = drm_irq_install(dev);
4388         if (ret)
4389                 goto cleanup_ringbuffer;
4390
4391         return 0;
4392
4393 cleanup_ringbuffer:
4394         DRM_LOCK(dev);
4395         i915_gem_cleanup_ringbuffer(dev);
4396         dev_priv->mm.suspended = 1;
4397         DRM_UNLOCK(dev);
4398
4399         return ret;
4400 }
4401
4402 int
4403 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4404                        struct drm_file *file_priv)
4405 {
4406         if (drm_core_check_feature(dev, DRIVER_MODESET))
4407                 return 0;
4408
4409         drm_irq_uninstall(dev);
4410         return i915_gem_idle(dev);
4411 }
4412
4413 void
4414 i915_gem_lastclose(struct drm_device *dev)
4415 {
4416         int ret;
4417
4418         if (drm_core_check_feature(dev, DRIVER_MODESET))
4419                 return;
4420
4421         ret = i915_gem_idle(dev);
4422         if (ret)
4423                 DRM_ERROR("failed to idle hardware: %d\n", ret);
4424 }
4425
4426 static void
4427 init_ring_lists(struct intel_ring_buffer *ring)
4428 {
4429         INIT_LIST_HEAD(&ring->active_list);
4430         INIT_LIST_HEAD(&ring->request_list);
4431 }
4432
4433 void
4434 i915_gem_load(struct drm_device *dev)
4435 {
4436         int i;
4437         drm_i915_private_t *dev_priv = dev->dev_private;
4438
4439         INIT_LIST_HEAD(&dev_priv->mm.active_list);
4440         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4441         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4442         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4443         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4444         for (i = 0; i < I915_NUM_RINGS; i++)
4445                 init_ring_lists(&dev_priv->ring[i]);
4446         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4447                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4448         TIMEOUT_TASK_INIT(dev_priv->wq, &dev_priv->mm.retire_work, 0,
4449             i915_gem_retire_work_handler, dev_priv);
4450         init_completion(&dev_priv->error_completion);
4451
4452         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4453         if (IS_GEN3(dev)) {
4454                 I915_WRITE(MI_ARB_STATE,
4455                            _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4456         }
4457
4458         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4459
4460         /* Old X drivers will take 0-2 for front, back, depth buffers */
4461         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4462                 dev_priv->fence_reg_start = 3;
4463
4464         if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4465                 dev_priv->num_fence_regs = 16;
4466         else
4467                 dev_priv->num_fence_regs = 8;
4468
4469         /* Initialize fence registers to zero */
4470         i915_gem_reset_fences(dev);
4471
4472         i915_gem_detect_bit_6_swizzle(dev);
4473         DRM_INIT_WAITQUEUE(&dev_priv->pending_flip_queue);
4474
4475         dev_priv->mm.interruptible = true;
4476
4477         dev_priv->mm.inactive_shrinker = EVENTHANDLER_REGISTER(vm_lowmem,
4478             i915_gem_inactive_shrink, dev, EVENTHANDLER_PRI_ANY);
4479 }
4480
4481 /*
4482  * Create a physically contiguous memory object for this object
4483  * e.g. for cursor + overlay regs
4484  */
4485 static int i915_gem_init_phys_object(struct drm_device *dev,
4486                                      int id, int size, int align)
4487 {
4488         drm_i915_private_t *dev_priv = dev->dev_private;
4489         struct drm_i915_gem_phys_object *phys_obj;
4490         int ret;
4491
4492         if (dev_priv->mm.phys_objs[id - 1] || !size)
4493                 return 0;
4494
4495         phys_obj = malloc(sizeof(struct drm_i915_gem_phys_object),
4496             DRM_I915_GEM, M_WAITOK | M_ZERO);
4497         if (!phys_obj)
4498                 return -ENOMEM;
4499
4500         phys_obj->id = id;
4501
4502         phys_obj->handle = drm_pci_alloc(dev, size, align, BUS_SPACE_MAXADDR);
4503         if (!phys_obj->handle) {
4504                 ret = -ENOMEM;
4505                 goto kfree_obj;
4506         }
4507 #ifdef CONFIG_X86
4508         pmap_change_attr((vm_offset_t)phys_obj->handle->vaddr,
4509             size / PAGE_SIZE, PAT_WRITE_COMBINING);
4510 #endif
4511
4512         dev_priv->mm.phys_objs[id - 1] = phys_obj;
4513
4514         return 0;
4515 kfree_obj:
4516         free(phys_obj, DRM_I915_GEM);
4517         return ret;
4518 }
4519
4520 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4521 {
4522         drm_i915_private_t *dev_priv = dev->dev_private;
4523         struct drm_i915_gem_phys_object *phys_obj;
4524
4525         if (!dev_priv->mm.phys_objs[id - 1])
4526                 return;
4527
4528         phys_obj = dev_priv->mm.phys_objs[id - 1];
4529         if (phys_obj->cur_obj) {
4530                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4531         }
4532
4533 #ifdef FREEBSD_WIP
4534 #ifdef CONFIG_X86
4535         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4536 #endif
4537 #endif /* FREEBSD_WIP */
4538
4539         drm_pci_free(dev, phys_obj->handle);
4540         free(phys_obj, DRM_I915_GEM);
4541         dev_priv->mm.phys_objs[id - 1] = NULL;
4542 }
4543
4544 void i915_gem_free_all_phys_object(struct drm_device *dev)
4545 {
4546         int i;
4547
4548         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4549                 i915_gem_free_phys_object(dev, i);
4550 }
4551
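/*
 * Detach a GEM object from its physically contiguous backing store: copy the
 * contents back into the object's regular pages, flush the CPU caches, and
 * release the association with the phys object.
 */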
4552 void i915_gem_detach_phys_object(struct drm_device *dev,
4553                                  struct drm_i915_gem_object *obj)
4554 {
4555         struct sf_buf *sf;
4556         char *vaddr;
4557         char *dst;
4558         int i;
4559         int page_count;
4560
4561         if (!obj->phys_obj)
4562                 return;
4563         vaddr = obj->phys_obj->handle->vaddr;
4564
4565         page_count = obj->base.size / PAGE_SIZE;
4566         VM_OBJECT_WLOCK(obj->base.vm_obj);
4567         for (i = 0; i < page_count; i++) {
4568                 vm_page_t page = i915_gem_wire_page(obj->base.vm_obj, i, NULL);
4569                 if (page == NULL)
4570                         continue; /* XXX */
4571
4572                 VM_OBJECT_WUNLOCK(obj->base.vm_obj);
4573                 sf = sf_buf_alloc(page, 0);
4574                 if (sf != NULL) {
4575                         dst = (char *)sf_buf_kva(sf);
4576                         memcpy(dst, vaddr + IDX_TO_OFF(i), PAGE_SIZE);
4577                         sf_buf_free(sf);
4578                 }
4579                 drm_clflush_pages(&page, 1);
4580
4581                 VM_OBJECT_WLOCK(obj->base.vm_obj);
4582                 vm_page_reference(page);
4583                 vm_page_lock(page);
4584                 vm_page_dirty(page);
4585                 vm_page_unwire(page, PQ_INACTIVE);
4586                 vm_page_unlock(page);
4587                 atomic_add_long(&i915_gem_wired_pages_cnt, -1);
4588         }
4589         VM_OBJECT_WUNLOCK(obj->base.vm_obj);
4590         i915_gem_chipset_flush(dev);
4591
4592         obj->phys_obj->cur_obj = NULL;
4593         obj->phys_obj = NULL;
4594 }
4595
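/*
 * Attach a GEM object to the phys object identified by 'id', creating the
 * phys object on first use, and copy the object's current pages into the
 * contiguous backing store so the hardware can be pointed at it.
 */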
4596 int
4597 i915_gem_attach_phys_object(struct drm_device *dev,
4598                             struct drm_i915_gem_object *obj,
4599                             int id,
4600                             int align)
4601 {
4602         drm_i915_private_t *dev_priv = dev->dev_private;
4603         struct sf_buf *sf;
4604         char *dst, *src;
4605         int ret = 0;
4606         int page_count;
4607         int i;
4608
4609         if (id > I915_MAX_PHYS_OBJECT)
4610                 return -EINVAL;
4611
4612         if (obj->phys_obj) {
4613                 if (obj->phys_obj->id == id)
4614                         return 0;
4615                 i915_gem_detach_phys_object(dev, obj);
4616         }
4617
4618         /* create the phys object for this id on first use */
4619         if (!dev_priv->mm.phys_objs[id - 1]) {
4620                 ret = i915_gem_init_phys_object(dev, id,
4621                                                 obj->base.size, align);
4622                 if (ret) {
4623                         DRM_ERROR("failed to init phys object %d size: %zu\n",
4624                                   id, obj->base.size);
4625                         return ret;
4626                 }
4627         }
4628
4629         /* bind the phys object to this GEM object */
4630         obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4631         obj->phys_obj->cur_obj = obj;
4632
4633         page_count = obj->base.size / PAGE_SIZE;
4634
4635         VM_OBJECT_WLOCK(obj->base.vm_obj);
4636         for (i = 0; i < page_count; i++) {
4637                 vm_page_t page = i915_gem_wire_page(obj->base.vm_obj, i, NULL);
4638                 if (page == NULL) {
4639                         ret = -EIO;
4640                         break;
4641                 }
4642                 VM_OBJECT_WUNLOCK(obj->base.vm_obj);
4643                 sf = sf_buf_alloc(page, 0);
4644                 src = (char *)sf_buf_kva(sf);
4645                 dst = (char *)obj->phys_obj->handle->vaddr + IDX_TO_OFF(i);
4646                 memcpy(dst, src, PAGE_SIZE);
4647                 sf_buf_free(sf);
4648
4649                 VM_OBJECT_WLOCK(obj->base.vm_obj);
4650
4651                 vm_page_reference(page);
4652                 vm_page_lock(page);
4653                 vm_page_unwire(page, PQ_INACTIVE);
4654                 vm_page_unlock(page);
4655                 atomic_add_long(&i915_gem_wired_pages_cnt, -1);
4656         }
4657         VM_OBJECT_WUNLOCK(obj->base.vm_obj);
4658
4659         return ret;
4660 }
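/*
 * Illustrative sketch only (not compiled): roughly how a display-cursor
 * path is expected to pair the two helpers above.  The object, pipe and
 * alignment values are placeholders, not taken from this file.
 */
#if 0
        /* Pin the cursor BO into a physically contiguous DMA buffer. */
        ret = i915_gem_attach_phys_object(dev, obj,
            I915_GEM_PHYS_CURSOR_0 + pipe, PAGE_SIZE);
        if (ret != 0)
                return (ret);

        /* ... program the cursor base with the buffer's bus address ... */

        /* When the cursor is replaced or torn down, undo the attachment. */
        i915_gem_detach_phys_object(dev, obj);
#endif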
4661
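/*
 * pwrite fast path for objects backed by a phys object: copy the user data
 * straight into the contiguous buffer.  A non-faulting copy is attempted
 * first under the DRM lock; if it cannot complete, the lock is dropped and
 * a normal, faulting copy_from_user() is used instead (see the comment in
 * the function body).
 */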
4662 static int
4663 i915_gem_phys_pwrite(struct drm_device *dev,
4664                      struct drm_i915_gem_object *obj,
4665                      struct drm_i915_gem_pwrite *args,
4666                      struct drm_file *file_priv)
4667 {
4668         void *vaddr = (char *)obj->phys_obj->handle->vaddr + args->offset;
4669         char __user *user_data = to_user_ptr(args->data_ptr);
4670
4671         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4672                 unsigned long unwritten;
4673
4674                 /* The physical object once assigned is fixed for the lifetime
4675                  * of the obj, so we can safely drop the lock and continue
4676                  * to access vaddr.
4677                  */
4678                 DRM_UNLOCK(dev);
4679                 unwritten = copy_from_user(vaddr, user_data, args->size);
4680                 DRM_LOCK(dev);
4681                 if (unwritten)
4682                         return -EFAULT;
4683         }
4684
4685         i915_gem_chipset_flush(dev);
4686         return 0;
4687 }
4688
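/*
 * Per-file GEM teardown, invoked from the driver's file-close path: any
 * requests still owned by this client are unlinked from its list and lose
 * their file_priv back-pointer; the requests themselves stay queued and are
 * retired normally.
 */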
4689 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4690 {
4691         struct drm_i915_file_private *file_priv = file->driver_priv;
4692
4693         /* Clean up our request list when the client is going away, so that
4694          * later retire_requests won't dereference our soon-to-be-gone
4695          * file_priv.
4696          */
4697         mtx_lock(&file_priv->mm.lock);
4698         while (!list_empty(&file_priv->mm.request_list)) {
4699                 struct drm_i915_gem_request *request;
4700
4701                 request = list_first_entry(&file_priv->mm.request_list,
4702                                            struct drm_i915_gem_request,
4703                                            client_list);
4704                 list_del(&request->client_list);
4705                 request->file_priv = NULL;
4706         }
4707         mtx_unlock(&file_priv->mm.lock);
4708 }
4709
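/*
 * vm_lowmem eventhandler registered in i915_gem_load() above.  When memory
 * is tight and the struct lock can be taken without sleeping, first discard
 * purgeable objects, then shrink other inactive/unbound objects; if the
 * second pass recovers almost nothing relative to the first, fall back to
 * i915_gem_shrink_all().
 */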
4710 static void
4711 i915_gem_inactive_shrink(void *arg)
4712 {
4713         struct drm_device *dev = arg;
4714         struct drm_i915_private *dev_priv = dev->dev_private;
4715         int pass1, pass2;
4716
4717         if (!sx_try_xlock(&dev->dev_struct_lock)) {
4718                 return;
4719         }
4720
4721         CTR0(KTR_DRM, "gem_lowmem");
4722
4723         pass1 = i915_gem_purge(dev_priv, -1);
4724         pass2 = __i915_gem_shrink(dev_priv, -1, false);
4725
4726         if (pass2 <= pass1 / 100)
4727                 i915_gem_shrink_all(dev_priv);
4728
4729         DRM_UNLOCK(dev);
4730 }
4731
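/*
 * Grab and wire a single page of the VM object backing a GEM object.  The
 * object must be write-locked by the caller.  Pages that are not resident
 * and not present in the pager are zero-filled; *fresh (if non-NULL) reports
 * whether the page had to be read from the pager.  On success the page is
 * returned wired, valid and unbusied, and the caller is responsible for
 * unwiring it; NULL is returned if the pager read fails.
 */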
4732 static vm_page_t
4733 i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex, bool *fresh)
4734 {
4735         vm_page_t page;
4736         int rv;
4737
4738         VM_OBJECT_ASSERT_WLOCKED(object);
4739         page = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
4740         if (page->valid != VM_PAGE_BITS_ALL) {
4741                 if (vm_pager_has_page(object, pindex, NULL, NULL)) {
4742                         rv = vm_pager_get_pages(object, &page, 1, NULL, NULL);
4743                         if (rv != VM_PAGER_OK) {
4744                                 vm_page_lock(page);
4745                                 vm_page_free(page);
4746                                 vm_page_unlock(page);
4747                                 return (NULL);
4748                         }
4749                         if (fresh != NULL)
4750                                 *fresh = true;
4751                 } else {
4752                         pmap_zero_page(page);
4753                         page->valid = VM_PAGE_BITS_ALL;
4754                         page->dirty = 0;
4755                         if (fresh != NULL)
4756                                 *fresh = false;
4757                 }
4758         } else if (fresh != NULL) {
4759                 *fresh = false;
4760         }
4761         vm_page_lock(page);
4762         vm_page_wire(page);
4763         vm_page_unlock(page);
4764         vm_page_xunbusy(page);
4765         atomic_add_long(&i915_gem_wired_pages_cnt, 1);
4766         return (page);
4767 }