1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  * Copyright (c) 2011 The FreeBSD Foundation
27  * All rights reserved.
28  *
29  * This software was developed by Konstantin Belousov under sponsorship from
30  * the FreeBSD Foundation.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  *
41  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
42  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51  * SUCH DAMAGE.
52  */
53
54 #include <sys/cdefs.h>
55 __FBSDID("$FreeBSD$");
56
57 #include <dev/drm2/drmP.h>
58 #include <dev/drm2/i915/i915_drm.h>
59 #include <dev/drm2/i915/i915_drv.h>
60 #include <dev/drm2/i915/intel_drv.h>
61
62 #include <sys/resourcevar.h>
63 #include <sys/sched.h>
64 #include <sys/sf_buf.h>
65
66 #include <vm/vm.h>
67 #include <vm/vm_pageout.h>
68
69 #include <machine/md_var.h>
70
71 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
72 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
73 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
74                                                     unsigned alignment,
75                                                     bool map_and_fenceable,
76                                                     bool nonblocking);
77 static int i915_gem_phys_pwrite(struct drm_device *dev,
78                                 struct drm_i915_gem_object *obj,
79                                 struct drm_i915_gem_pwrite *args,
80                                 struct drm_file *file);
81
82 static void i915_gem_write_fence(struct drm_device *dev, int reg,
83                                  struct drm_i915_gem_object *obj);
84 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
85                                          struct drm_i915_fence_reg *fence,
86                                          bool enable);
87
88 static void i915_gem_inactive_shrink(void *);
89 static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
90 static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
91 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
92
93 static int i915_gem_object_get_pages_range(struct drm_i915_gem_object *obj,
94     off_t start, off_t end);
95
96 static vm_page_t i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex,
97     bool *fresh);
98
99 MALLOC_DEFINE(DRM_I915_GEM, "i915gem", "Allocations from i915 gem");
100 long i915_gem_wired_pages_cnt;
101
102 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
103 {
104         if (obj->tiling_mode)
105                 i915_gem_release_mmap(obj);
106
107         /* As we do not have an associated fence register, we will force
108          * a tiling change if we ever need to acquire one.
109          */
110         obj->fence_dirty = false;
111         obj->fence_reg = I915_FENCE_REG_NONE;
112 }
113
114 /* some bookkeeping */
115 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
116                                   size_t size)
117 {
118         dev_priv->mm.object_count++;
119         dev_priv->mm.object_memory += size;
120 }
121
122 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
123                                      size_t size)
124 {
125         dev_priv->mm.object_count--;
126         dev_priv->mm.object_memory -= size;
127 }
128
129 static int
130 i915_gem_wait_for_error(struct drm_device *dev)
131 {
132         struct drm_i915_private *dev_priv = dev->dev_private;
133         struct completion *x = &dev_priv->error_completion;
134         int ret;
135
136         if (!atomic_read(&dev_priv->mm.wedged))
137                 return 0;
138
139         /*
140          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
141          * userspace. If it takes that long something really bad is going on and
142          * we should simply try to bail out and fail as gracefully as possible.
143          */
144         ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
145         if (ret == 0) {
146                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
147                 return -EIO;
148         } else if (ret < 0) {
149                 return ret;
150         }
151
152         if (atomic_read(&dev_priv->mm.wedged)) {
153                 /* GPU is hung, bump the completion count to account for
154                  * the token we just consumed so that we never hit zero and
155                  * end up waiting upon a subsequent completion event that
156                  * will never happen.
157                  */
158                 mtx_lock(&x->lock);
159                 x->done++;
160                 mtx_unlock(&x->lock);
161         }
162         return 0;
163 }
164
165 int i915_mutex_lock_interruptible(struct drm_device *dev)
166 {
167         int ret;
168
169         ret = i915_gem_wait_for_error(dev);
170         if (ret)
171                 return ret;
172
173         /*
174          * Interruptible it shall be: dev_struct_lock is an sx lock, so
175          * sx_xlock_sig() below can be interrupted by a signal.
176          */
177         ret = sx_xlock_sig(&dev->dev_struct_lock);
178         if (ret)
179                 return -EINTR;
180
181         WARN_ON(i915_verify_lists(dev));
182         return 0;
183 }
184
185 static inline bool
186 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
187 {
188         return obj->gtt_space && !obj->active;
189 }
190
191 int
192 i915_gem_init_ioctl(struct drm_device *dev, void *data,
193                     struct drm_file *file)
194 {
195         struct drm_i915_gem_init *args = data;
196
197         if (drm_core_check_feature(dev, DRIVER_MODESET))
198                 return -ENODEV;
199
200         if (args->gtt_start >= args->gtt_end ||
201             (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
202                 return -EINVAL;
203
204         /* GEM with user mode setting was never supported on ilk and later. */
205         if (INTEL_INFO(dev)->gen >= 5)
206                 return -ENODEV;
207
208         /*
209          * XXXKIB. The second-time initialization should be guarded
210          * against.
211          */
212         DRM_LOCK(dev);
213         i915_gem_init_global_gtt(dev, args->gtt_start,
214                                  args->gtt_end, args->gtt_end);
215         DRM_UNLOCK(dev);
216
217         return 0;
218 }
219
220 int
221 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
222                             struct drm_file *file)
223 {
224         struct drm_i915_private *dev_priv = dev->dev_private;
225         struct drm_i915_gem_get_aperture *args = data;
226         struct drm_i915_gem_object *obj;
227         size_t pinned;
228
229         pinned = 0;
230         DRM_LOCK(dev);
231         list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
232                 if (obj->pin_count)
233                         pinned += obj->gtt_space->size;
234         DRM_UNLOCK(dev);
235
236         args->aper_size = dev_priv->mm.gtt_total;
237         args->aper_available_size = args->aper_size - pinned;
238
239         return 0;
240 }
241
242 static int
243 i915_gem_create(struct drm_file *file,
244                 struct drm_device *dev,
245                 uint64_t size,
246                 uint32_t *handle_p)
247 {
248         struct drm_i915_gem_object *obj;
249         int ret;
250         u32 handle;
251
252         size = roundup(size, PAGE_SIZE);
253         if (size == 0)
254                 return -EINVAL;
255
256         /* Allocate the new object */
257         obj = i915_gem_alloc_object(dev, size);
258         if (obj == NULL)
259                 return -ENOMEM;
260
261         ret = drm_gem_handle_create(file, &obj->base, &handle);
262         if (ret) {
263                 drm_gem_object_release(&obj->base);
264                 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
265                 free(obj, DRM_I915_GEM);
266                 return ret;
267         }
268
269         /* drop reference from allocate - handle holds it now */
270         drm_gem_object_unreference(&obj->base);
271         CTR2(KTR_DRM, "object_create %p %x", obj, size);
272
273         *handle_p = handle;
274         return 0;
275 }
276
277 int
278 i915_gem_dumb_create(struct drm_file *file,
279                      struct drm_device *dev,
280                      struct drm_mode_create_dumb *args)
281 {
282         /* have to work out size/pitch and return them */
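        /*
         * Example: a 1024x768, 32 bpp dumb buffer yields
         * pitch = roundup2(1024 * 4, 64) = 4096 bytes and
         * size = 4096 * 768 = 3 MiB.
         */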
283         args->pitch = roundup2(args->width * ((args->bpp + 7) / 8), 64);
284         args->size = args->pitch * args->height;
285         return i915_gem_create(file, dev,
286                                args->size, &args->handle);
287 }
288
289 int i915_gem_dumb_destroy(struct drm_file *file,
290                           struct drm_device *dev,
291                           uint32_t handle)
292 {
293         return drm_gem_handle_delete(file, handle);
294 }
295
296 /**
297  * Creates a new mm object and returns a handle to it.
298  */
299 int
300 i915_gem_create_ioctl(struct drm_device *dev, void *data,
301                       struct drm_file *file)
302 {
303         struct drm_i915_gem_create *args = data;
304
305         return i915_gem_create(file, dev,
306                                args->size, &args->handle);
307 }
308
309 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
310 {
311         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
312
313         return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
314                 obj->tiling_mode != I915_TILING_NONE;
315 }
316
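/*
 * Copy helpers for objects that use bit-17 swizzling: within each 128-byte
 * block the two 64-byte cachelines are swapped (gpu_offset ^ 64), so the
 * copy is split on cacheline boundaries and each chunk is redirected to
 * its swizzled counterpart.
 */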
317 static inline int
318 __copy_to_user_swizzled(char __user *cpu_vaddr,
319                         const char *gpu_vaddr, int gpu_offset,
320                         int length)
321 {
322         int ret, cpu_offset = 0;
323
324         while (length > 0) {
325                 int cacheline_end = roundup2(gpu_offset + 1, 64);
326                 int this_length = min(cacheline_end - gpu_offset, length);
327                 int swizzled_gpu_offset = gpu_offset ^ 64;
328
329                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
330                                      gpu_vaddr + swizzled_gpu_offset,
331                                      this_length);
332                 if (ret)
333                         return ret + length;
334
335                 cpu_offset += this_length;
336                 gpu_offset += this_length;
337                 length -= this_length;
338         }
339
340         return 0;
341 }
342
343 static inline int
344 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
345                           const char __user *cpu_vaddr,
346                           int length)
347 {
348         int ret, cpu_offset = 0;
349
350         while (length > 0) {
351                 int cacheline_end = roundup2(gpu_offset + 1, 64);
352                 int this_length = min(cacheline_end - gpu_offset, length);
353                 int swizzled_gpu_offset = gpu_offset ^ 64;
354
355                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
356                                        cpu_vaddr + cpu_offset,
357                                        this_length);
358                 if (ret)
359                         return ret + length;
360
361                 cpu_offset += this_length;
362                 gpu_offset += this_length;
363                 length -= this_length;
364         }
365
366         return 0;
367 }
368
369 /* Per-page copy function for the shmem pread fastpath.
370  * Flushes invalid cachelines before reading the target if
371  * needs_clflush is set. */
372 static int
373 shmem_pread_fast(vm_page_t page, int shmem_page_offset, int page_length,
374                  char __user *user_data,
375                  bool page_do_bit17_swizzling, bool needs_clflush)
376 {
377         char *vaddr;
378         struct sf_buf *sf;
379         int ret;
380
381         if (unlikely(page_do_bit17_swizzling))
382                 return -EINVAL;
383
384         sched_pin();
385         sf = sf_buf_alloc(page, SFB_NOWAIT | SFB_CPUPRIVATE);
386         if (sf == NULL) {
387                 sched_unpin();
388                 return (-EFAULT);
389         }
390         vaddr = (char *)sf_buf_kva(sf);
391         if (needs_clflush)
392                 drm_clflush_virt_range(vaddr + shmem_page_offset,
393                                        page_length);
394         ret = __copy_to_user_inatomic(user_data,
395                                       vaddr + shmem_page_offset,
396                                       page_length);
397         sf_buf_free(sf);
398         sched_unpin();
399
400         return ret ? -EFAULT : 0;
401 }
402
403 static void
404 shmem_clflush_swizzled_range(char *addr, unsigned long length,
405                              bool swizzled)
406 {
407         if (unlikely(swizzled)) {
408                 unsigned long start = (unsigned long) addr;
409                 unsigned long end = (unsigned long) addr + length;
410
411                 /* For swizzling simply ensure that we always flush both
412                  * channels. Lame, but simple and it works. Swizzled
413                  * pwrite/pread is far from a hotpath - current userspace
414                  * doesn't use it at all. */
415                 start = round_down(start, 128);
416                 end = round_up(end, 128);
417
418                 drm_clflush_virt_range((void *)start, end - start);
419         } else {
420                 drm_clflush_virt_range(addr, length);
421         }
422
423 }
424
425 /* Only difference to the fast-path function is that this can handle bit17
426  * and uses non-atomic copy and kmap functions. */
427 static int
428 shmem_pread_slow(vm_page_t page, int shmem_page_offset, int page_length,
429                  char __user *user_data,
430                  bool page_do_bit17_swizzling, bool needs_clflush)
431 {
432         char *vaddr;
433         struct sf_buf *sf;
434         int ret;
435
436         sf = sf_buf_alloc(page, 0);
437         vaddr = (char *)sf_buf_kva(sf);
438         if (needs_clflush)
439                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
440                                              page_length,
441                                              page_do_bit17_swizzling);
442
443         if (page_do_bit17_swizzling)
444                 ret = __copy_to_user_swizzled(user_data,
445                                               vaddr, shmem_page_offset,
446                                               page_length);
447         else
448                 ret = __copy_to_user(user_data,
449                                      vaddr + shmem_page_offset,
450                                      page_length);
451         sf_buf_free(sf);
452
453         return ret ? -EFAULT : 0;
454 }
455
456 static int
457 i915_gem_shmem_pread(struct drm_device *dev,
458                      struct drm_i915_gem_object *obj,
459                      struct drm_i915_gem_pread *args,
460                      struct drm_file *file)
461 {
462         char __user *user_data;
463         ssize_t remain;
464         off_t offset;
465         int shmem_page_offset, page_length, ret = 0;
466         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
467         int hit_slowpath = 0;
468         int prefaulted = 0;
469         int needs_clflush = 0;
470
471         user_data = to_user_ptr(args->data_ptr);
472         remain = args->size;
473
474         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
475
476         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
477                 /* If we're not in the cpu read domain, set ourself into the gtt
478                  * read domain and manually flush cachelines (if required). This
479                  * optimizes for the case when the gpu will dirty the data
480                  * anyway again before the next pread happens. */
481                 if (obj->cache_level == I915_CACHE_NONE)
482                         needs_clflush = 1;
483                 if (obj->gtt_space) {
484                         ret = i915_gem_object_set_to_gtt_domain(obj, false);
485                         if (ret)
486                                 return ret;
487                 }
488         }
489
490         ret = i915_gem_object_get_pages(obj);
491         if (ret)
492                 return ret;
493
494         i915_gem_object_pin_pages(obj);
495
496         offset = args->offset;
497
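        /*
         * Walk the object's resident pages with the VM object lock held,
         * dropping it around each per-page copy (and dropping the DRM lock
         * as well when falling back to the sleepable slow path).
         */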
498         VM_OBJECT_WLOCK(obj->base.vm_obj);
499         for (vm_page_t page = vm_page_find_least(obj->base.vm_obj,
500             OFF_TO_IDX(offset));; page = vm_page_next(page)) {
501                 VM_OBJECT_WUNLOCK(obj->base.vm_obj);
502
503                 if (remain <= 0)
504                         break;
505
506                 /* Operation in this page
507                  *
508                  * shmem_page_offset = offset within page in shmem file
509                  * page_length = bytes to copy for this page
510                  */
511                 shmem_page_offset = offset_in_page(offset);
512                 page_length = remain;
513                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
514                         page_length = PAGE_SIZE - shmem_page_offset;
515
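                /*
                 * Only pages whose physical address has bit 17 set need the
                 * software-swizzled copy (see __copy_to_user_swizzled());
                 * the fast path rejects them.
                 */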
516                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
517                         (page_to_phys(page) & (1 << 17)) != 0;
518
519                 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
520                                        user_data, page_do_bit17_swizzling,
521                                        needs_clflush);
522                 if (ret == 0)
523                         goto next_page;
524
525                 hit_slowpath = 1;
526                 DRM_UNLOCK(dev);
527
528                 if (!prefaulted) {
529                         ret = fault_in_multipages_writeable(user_data, remain);
530                         /* Userspace is tricking us, but we've already clobbered
531                          * its pages with the prefault and promised to write the
532                          * data up to the first fault. Hence ignore any errors
533                          * and just continue. */
534                         (void)ret;
535                         prefaulted = 1;
536                 }
537
538                 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
539                                        user_data, page_do_bit17_swizzling,
540                                        needs_clflush);
541
542                 DRM_LOCK(dev);
543
544 next_page:
545                 vm_page_reference(page);
546
547                 if (ret)
548                         goto out;
549
550                 remain -= page_length;
551                 user_data += page_length;
552                 offset += page_length;
553                 VM_OBJECT_WLOCK(obj->base.vm_obj);
554         }
555
556 out:
557         i915_gem_object_unpin_pages(obj);
558
559         if (hit_slowpath) {
560                 /* Fixup: Kill any reinstated backing storage pages */
561                 if (obj->madv == __I915_MADV_PURGED)
562                         i915_gem_object_truncate(obj);
563         }
564
565         return ret;
566 }
567
568 /**
569  * Reads data from the object referenced by handle.
570  *
571  * On error, the contents of *data are undefined.
572  */
573 int
574 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
575                      struct drm_file *file)
576 {
577         struct drm_i915_gem_pread *args = data;
578         struct drm_i915_gem_object *obj;
579         int ret = 0;
580
581         if (args->size == 0)
582                 return 0;
583
584         if (!useracc(to_user_ptr(args->data_ptr), args->size, VM_PROT_WRITE))
585                 return -EFAULT;
586
587         ret = i915_mutex_lock_interruptible(dev);
588         if (ret)
589                 return ret;
590
591         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
592         if (&obj->base == NULL) {
593                 ret = -ENOENT;
594                 goto unlock;
595         }
596
597         /* Bounds check source.  */
598         if (args->offset > obj->base.size ||
599             args->size > obj->base.size - args->offset) {
600                 ret = -EINVAL;
601                 goto out;
602         }
603
604 #ifdef FREEBSD_WIP
605         /* prime objects have no backing filp to GEM pread/pwrite
606          * pages from.
607          */
608         if (!obj->base.filp) {
609                 ret = -EINVAL;
610                 goto out;
611         }
612 #endif /* FREEBSD_WIP */
613
614         CTR3(KTR_DRM, "pread %p %jx %jx", obj, args->offset, args->size);
615
616         ret = i915_gem_shmem_pread(dev, obj, args, file);
617
618 out:
619         drm_gem_object_unreference(&obj->base);
620 unlock:
621         DRM_UNLOCK(dev);
622         return ret;
623 }
624
625 /* This is the fast write path which cannot handle
626  * page faults in the source data
627  */
628
629 static inline int
630 fast_user_write(vm_paddr_t mapping_addr,
631                 off_t page_base, int page_offset,
632                 char __user *user_data,
633                 int length)
634 {
635         void __iomem *vaddr_atomic;
636         void *vaddr;
637         unsigned long unwritten;
638
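        /*
         * Map the target GTT aperture range write-combining for the
         * duration of the copy and unmap it before returning.
         */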
639         vaddr_atomic = pmap_mapdev_attr(mapping_addr + page_base,
640             length, PAT_WRITE_COMBINING);
641         /* We can use the cpu mem copy function because this is X86. */
642         vaddr = (char __force*)vaddr_atomic + page_offset;
643         unwritten = __copy_from_user_inatomic_nocache(vaddr,
644                                                       user_data, length);
645         pmap_unmapdev((vm_offset_t)vaddr_atomic, length);
646         return unwritten;
647 }
648
649 /**
650  * This is the fast pwrite path, where we copy the data directly from the
651  * user into the GTT, uncached.
652  */
653 static int
654 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
655                          struct drm_i915_gem_object *obj,
656                          struct drm_i915_gem_pwrite *args,
657                          struct drm_file *file)
658 {
659         drm_i915_private_t *dev_priv = dev->dev_private;
660         ssize_t remain;
661         off_t offset, page_base;
662         char __user *user_data;
663         int page_offset, page_length, ret;
664
665         ret = i915_gem_object_pin(obj, 0, true, true);
666         if (ret)
667                 goto out;
668
669         ret = i915_gem_object_set_to_gtt_domain(obj, true);
670         if (ret)
671                 goto out_unpin;
672
673         ret = i915_gem_object_put_fence(obj);
674         if (ret)
675                 goto out_unpin;
676
677         user_data = to_user_ptr(args->data_ptr);
678         remain = args->size;
679
680         offset = obj->gtt_offset + args->offset;
681
682         while (remain > 0) {
683                 /* Operation in this page
684                  *
685                  * page_base = page offset within aperture
686                  * page_offset = offset within page
687                  * page_length = bytes to copy for this page
688                  */
689                 page_base = offset & ~PAGE_MASK;
690                 page_offset = offset_in_page(offset);
691                 page_length = remain;
692                 if ((page_offset + remain) > PAGE_SIZE)
693                         page_length = PAGE_SIZE - page_offset;
694
695                 /* If we get a fault while copying data, then (presumably) our
696                  * source page isn't available.  Return the error and we'll
697                  * retry in the slow path.
698                  */
699                 if (fast_user_write(dev_priv->mm.gtt_base_addr, page_base,
700                                     page_offset, user_data, page_length)) {
701                         ret = -EFAULT;
702                         goto out_unpin;
703                 }
704
705                 remain -= page_length;
706                 user_data += page_length;
707                 offset += page_length;
708         }
709
710 out_unpin:
711         i915_gem_object_unpin(obj);
712 out:
713         return ret;
714 }
715
716 /* Per-page copy function for the shmem pwrite fastpath.
717  * Flushes invalid cachelines before writing to the target if
718  * needs_clflush_before is set and flushes out any written cachelines after
719  * writing if needs_clflush is set. */
720 static int
721 shmem_pwrite_fast(vm_page_t page, int shmem_page_offset, int page_length,
722                   char __user *user_data,
723                   bool page_do_bit17_swizzling,
724                   bool needs_clflush_before,
725                   bool needs_clflush_after)
726 {
727         char *vaddr;
728         struct sf_buf *sf;
729         int ret;
730
731         if (unlikely(page_do_bit17_swizzling))
732                 return -EINVAL;
733
734         sched_pin();
735         sf = sf_buf_alloc(page, SFB_NOWAIT | SFB_CPUPRIVATE);
736         if (sf == NULL) {
737                 sched_unpin();
738                 return (-EFAULT);
739         }
740         vaddr = (char *)sf_buf_kva(sf);
741         if (needs_clflush_before)
742                 drm_clflush_virt_range(vaddr + shmem_page_offset,
743                                        page_length);
744         ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
745                                                 user_data,
746                                                 page_length);
747         if (needs_clflush_after)
748                 drm_clflush_virt_range(vaddr + shmem_page_offset,
749                                        page_length);
750         sf_buf_free(sf);
751         sched_unpin();
752
753         return ret ? -EFAULT : 0;
754 }
755
756 /* Only difference to the fast-path function is that this can handle bit17
757  * and uses non-atomic copy and kmap functions. */
758 static int
759 shmem_pwrite_slow(vm_page_t page, int shmem_page_offset, int page_length,
760                   char __user *user_data,
761                   bool page_do_bit17_swizzling,
762                   bool needs_clflush_before,
763                   bool needs_clflush_after)
764 {
765         char *vaddr;
766         struct sf_buf *sf;
767         int ret;
768
769         sf = sf_buf_alloc(page, 0);
770         vaddr = (char *)sf_buf_kva(sf);
771         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
772                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
773                                              page_length,
774                                              page_do_bit17_swizzling);
775         if (page_do_bit17_swizzling)
776                 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
777                                                 user_data,
778                                                 page_length);
779         else
780                 ret = __copy_from_user(vaddr + shmem_page_offset,
781                                        user_data,
782                                        page_length);
783         if (needs_clflush_after)
784                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
785                                              page_length,
786                                              page_do_bit17_swizzling);
787         sf_buf_free(sf);
788
789         return ret ? -EFAULT : 0;
790 }
791
792 static int
793 i915_gem_shmem_pwrite(struct drm_device *dev,
794                       struct drm_i915_gem_object *obj,
795                       struct drm_i915_gem_pwrite *args,
796                       struct drm_file *file)
797 {
798         ssize_t remain;
799         off_t offset;
800         char __user *user_data;
801         int shmem_page_offset, page_length, ret = 0;
802         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
803         int hit_slowpath = 0;
804         int needs_clflush_after = 0;
805         int needs_clflush_before = 0;
806
807         user_data = to_user_ptr(args->data_ptr);
808         remain = args->size;
809
810         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
811
812         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
813                 /* If we're not in the cpu write domain, set ourself into the gtt
814                  * write domain and manually flush cachelines (if required). This
815                  * optimizes for the case when the gpu will use the data
816                  * right away and we therefore have to clflush anyway. */
817                 if (obj->cache_level == I915_CACHE_NONE)
818                         needs_clflush_after = 1;
819                 if (obj->gtt_space) {
820                         ret = i915_gem_object_set_to_gtt_domain(obj, true);
821                         if (ret)
822                                 return ret;
823                 }
824         }
825         /* The same trick applies to invalidating partially written cachelines
826          * before writing. */
827         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
828             && obj->cache_level == I915_CACHE_NONE)
829                 needs_clflush_before = 1;
830
831         ret = i915_gem_object_get_pages(obj);
832         if (ret)
833                 return ret;
834
835         i915_gem_object_pin_pages(obj);
836
837         offset = args->offset;
838         obj->dirty = 1;
839
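        /*
         * Same page walk and locking scheme as the pread path above; in
         * addition, whether a page needs a pre-copy clflush is decided per
         * page (partial cacheline writes only).
         */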
840         VM_OBJECT_WLOCK(obj->base.vm_obj);
841         for (vm_page_t page = vm_page_find_least(obj->base.vm_obj,
842             OFF_TO_IDX(offset));; page = vm_page_next(page)) {
843                 VM_OBJECT_WUNLOCK(obj->base.vm_obj);
844                 int partial_cacheline_write;
845
846                 if (remain <= 0)
847                         break;
848
849                 /* Operation in this page
850                  *
851                  * shmem_page_offset = offset within page in shmem file
852                  * page_length = bytes to copy for this page
853                  */
854                 shmem_page_offset = offset_in_page(offset);
855
856                 page_length = remain;
857                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
858                         page_length = PAGE_SIZE - shmem_page_offset;
859
860                 /* If we don't overwrite a cacheline completely we need to be
861                  * careful to have up-to-date data by first clflushing. Don't
862                  * overcomplicate things and flush the entire chunk. */
863                 partial_cacheline_write = needs_clflush_before &&
864                         ((shmem_page_offset | page_length)
865                                 & (cpu_clflush_line_size - 1));
866
867                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
868                         (page_to_phys(page) & (1 << 17)) != 0;
869
870                 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
871                                         user_data, page_do_bit17_swizzling,
872                                         partial_cacheline_write,
873                                         needs_clflush_after);
874                 if (ret == 0)
875                         goto next_page;
876
877                 hit_slowpath = 1;
878                 DRM_UNLOCK(dev);
879                 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
880                                         user_data, page_do_bit17_swizzling,
881                                         partial_cacheline_write,
882                                         needs_clflush_after);
883
884                 DRM_LOCK(dev);
885
886 next_page:
887                 vm_page_dirty(page);
888                 vm_page_reference(page);
889
890                 if (ret)
891                         goto out;
892
893                 remain -= page_length;
894                 user_data += page_length;
895                 offset += page_length;
896                 VM_OBJECT_WLOCK(obj->base.vm_obj);
897         }
898
899 out:
900         i915_gem_object_unpin_pages(obj);
901
902         if (hit_slowpath) {
903                 /* Fixup: Kill any reinstated backing storage pages */
904                 if (obj->madv == __I915_MADV_PURGED)
905                         i915_gem_object_truncate(obj);
906                 /* and flush dirty cachelines in case the object isn't in the cpu write
907                  * domain anymore. */
908                 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
909                         i915_gem_clflush_object(obj);
910                         i915_gem_chipset_flush(dev);
911                 }
912         }
913
914         if (needs_clflush_after)
915                 i915_gem_chipset_flush(dev);
916
917         return ret;
918 }
919
920 /**
921  * Writes data to the object referenced by handle.
922  *
923  * On error, the contents of the buffer that were to be modified are undefined.
924  */
925 int
926 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
927                       struct drm_file *file)
928 {
929         struct drm_i915_gem_pwrite *args = data;
930         struct drm_i915_gem_object *obj;
931         int ret;
932
933         if (args->size == 0)
934                 return 0;
935
936         if (!useracc(to_user_ptr(args->data_ptr), args->size, VM_PROT_READ))
937                 return -EFAULT;
938
939         ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
940                                            args->size);
941         if (ret)
942                 return -EFAULT;
943
944         ret = i915_mutex_lock_interruptible(dev);
945         if (ret)
946                 return ret;
947
948         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
949         if (&obj->base == NULL) {
950                 ret = -ENOENT;
951                 goto unlock;
952         }
953
954         /* Bounds check destination. */
955         if (args->offset > obj->base.size ||
956             args->size > obj->base.size - args->offset) {
957                 ret = -EINVAL;
958                 goto out;
959         }
960
961 #ifdef FREEBSD_WIP
962         /* prime objects have no backing filp to GEM pread/pwrite
963          * pages from.
964          */
965         if (!obj->base.filp) {
966                 ret = -EINVAL;
967                 goto out;
968         }
969 #endif /* FREEBSD_WIP */
970
971         CTR3(KTR_DRM, "pwrite %p %jx %jx", obj, args->offset, args->size);
972
973         ret = -EFAULT;
974         /* We can only do the GTT pwrite on untiled buffers, as otherwise
975          * it would end up going through the fenced access, and we'll get
976          * different detiling behavior between reading and writing.
977          * pread/pwrite currently are reading and writing from the CPU
978          * perspective, requiring manual detiling by the client.
979          */
980         if (obj->phys_obj) {
981                 ret = i915_gem_phys_pwrite(dev, obj, args, file);
982                 goto out;
983         }
984
985         if (obj->cache_level == I915_CACHE_NONE &&
986             obj->tiling_mode == I915_TILING_NONE &&
987             obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
988                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
989                 /* Note that the gtt paths might fail with non-page-backed user
990                  * pointers (e.g. gtt mappings when moving data between
991                  * textures). Fallback to the shmem path in that case. */
992         }
993
994         if (ret == -EFAULT || ret == -ENOSPC)
995                 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
996
997 out:
998         drm_gem_object_unreference(&obj->base);
999 unlock:
1000         DRM_UNLOCK(dev);
1001         return ret;
1002 }
1003
1004 int
1005 i915_gem_check_wedge(struct drm_i915_private *dev_priv,
1006                      bool interruptible)
1007 {
1008         if (atomic_read(&dev_priv->mm.wedged)) {
1009                 struct completion *x = &dev_priv->error_completion;
1010                 bool recovery_complete;
1011
1012                 /* Give the error handler a chance to run. */
1013                 mtx_lock(&x->lock);
1014                 recovery_complete = x->done > 0;
1015                 mtx_unlock(&x->lock);
1016
1017                 /* Non-interruptible callers can't handle -EAGAIN, hence return
1018                  * -EIO unconditionally for these. */
1019                 if (!interruptible)
1020                         return -EIO;
1021
1022                 /* Recovery complete, but still wedged means reset failure. */
1023                 if (recovery_complete)
1024                         return -EIO;
1025
1026                 return -EAGAIN;
1027         }
1028
1029         return 0;
1030 }
1031
1032 /*
1033  * Compare seqno against outstanding lazy request. Emit a request if they are
1034  * equal.
1035  */
1036 static int
1037 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
1038 {
1039         int ret;
1040
1041         DRM_LOCK_ASSERT(ring->dev);
1042
1043         ret = 0;
1044         if (seqno == ring->outstanding_lazy_request)
1045                 ret = i915_add_request(ring, NULL, NULL);
1046
1047         return ret;
1048 }
1049
1050 /**
1051  * __wait_seqno - wait until execution of seqno has finished
1052  * @ring: the ring expected to report seqno
1053  * @seqno: duh!
1054  * @interruptible: do an interruptible wait (normally yes)
1055  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1056  *
1057  * Returns 0 if the seqno was found within the allotted time. Else returns the
1058  * errno with remaining time filled in timeout argument.
1059  */
1060 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1061                         bool interruptible, struct timespec *timeout)
1062 {
1063         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1064         struct timespec before, now, wait_time={1,0};
1065         sbintime_t timeout_sbt;
1066         long end;
1067         bool wait_forever = true;
1068         int ret, flags;
1069
1070         if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1071                 return 0;
1072
1073         CTR2(KTR_DRM, "request_wait_begin %s %d", ring->name, seqno);
1074
1075         if (timeout != NULL) {
1076                 wait_time = *timeout;
1077                 wait_forever = false;
1078         }
1079
1080         timeout_sbt = tstosbt(wait_time);
1081
1082         if (WARN_ON(!ring->irq_get(ring)))
1083                 return -ENODEV;
1084
1085         /* Record current time in case interrupted by signal, or wedged */
1086         getrawmonotonic(&before);
1087
1088 #define EXIT_COND \
1089         (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
1090         atomic_read(&dev_priv->mm.wedged))
1091         flags = interruptible ? PCATCH : 0;
1092         mtx_lock(&dev_priv->irq_lock);
1093         do {
1094                 if (EXIT_COND) {
1095                         end = 1;
1096                 } else {
1097                         ret = -msleep_sbt(&ring->irq_queue, &dev_priv->irq_lock, flags,
1098                             "915gwr", timeout_sbt, 0, 0);
1099
1100                         /*
1101                          * NOTE Linux<->FreeBSD: Convert msleep_sbt() return
1102                          * value to something close to wait_event*_timeout()
1103                          * functions used on Linux.
1104                          *
1105                          * >0 -> condition is true (end = time remaining)
1106                          * =0 -> sleep timed out
1107                          * <0 -> error (interrupted)
1108                          *
1109                          * We fake the remaining time by returning 1. We
1110                          * compute a proper value later.
1111                          */
1112                         if (EXIT_COND)
1113                                 /* We fake a remaining time of 1 tick. */
1114                                 end = 1;
1115                         else if (ret == -EINTR || ret == -ERESTART)
1116                                 /* Interrupted. */
1117                                 end = -ERESTARTSYS;
1118                         else
1119                                 /* Timeout. */
1120                                 end = 0;
1121                 }
1122
1123                 ret = i915_gem_check_wedge(dev_priv, interruptible);
1124                 if (ret)
1125                         end = ret;
1126         } while (end == 0 && wait_forever);
1127         mtx_unlock(&dev_priv->irq_lock);
1128
1129         getrawmonotonic(&now);
1130
1131         ring->irq_put(ring);
1132         CTR3(KTR_DRM, "request_wait_end %s %d %d", ring->name, seqno, end);
1133 #undef EXIT_COND
1134
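        /*
         * Report the unconsumed part of the caller's timeout: the elapsed
         * time (now - before) is subtracted from *timeout.
         */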
1135         if (timeout) {
1136                 timespecsub(&now, &before);
1137                 timespecsub(timeout, &now);
1138         }
1139
1140         switch (end) {
1141         case -EIO:
1142         case -EAGAIN: /* Wedged */
1143         case -ERESTARTSYS: /* Signal */
1144         case -ETIMEDOUT: /* Timeout */
1145                 return (int)end;
1146         case 0: /* Timeout */
1147                 return -ETIMEDOUT;
1148         default: /* Completed */
1149                 WARN_ON(end < 0); /* We're not aware of other errors */
1150                 return 0;
1151         }
1152 }
1153
1154 /**
1155  * Waits for a sequence number to be signaled, and cleans up the
1156  * request and object lists appropriately for that event.
1157  */
1158 int
1159 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1160 {
1161         struct drm_device *dev = ring->dev;
1162         struct drm_i915_private *dev_priv = dev->dev_private;
1163         bool interruptible = dev_priv->mm.interruptible;
1164         int ret;
1165
1166         DRM_LOCK_ASSERT(dev);
1167         BUG_ON(seqno == 0);
1168
1169         ret = i915_gem_check_wedge(dev_priv, interruptible);
1170         if (ret)
1171                 return ret;
1172
1173         ret = i915_gem_check_olr(ring, seqno);
1174         if (ret)
1175                 return ret;
1176
1177         return __wait_seqno(ring, seqno, interruptible, NULL);
1178 }
1179
1180 /**
1181  * Ensures that all rendering to the object has completed and the object is
1182  * safe to unbind from the GTT or access from the CPU.
1183  */
1184 static __must_check int
1185 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1186                                bool readonly)
1187 {
1188         struct intel_ring_buffer *ring = obj->ring;
1189         u32 seqno;
1190         int ret;
1191
1192         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1193         if (seqno == 0)
1194                 return 0;
1195
1196         ret = i915_wait_seqno(ring, seqno);
1197         if (ret)
1198                 return ret;
1199
1200         i915_gem_retire_requests_ring(ring);
1201
1202         /* Manually manage the write flush as we may have not yet
1203          * retired the buffer.
1204          */
1205         if (obj->last_write_seqno &&
1206             i915_seqno_passed(seqno, obj->last_write_seqno)) {
1207                 obj->last_write_seqno = 0;
1208                 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1209         }
1210
1211         return 0;
1212 }
1213
1214 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1215  * as the object state may change during this call.
1216  */
1217 static __must_check int
1218 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1219                                             bool readonly)
1220 {
1221         struct drm_device *dev = obj->base.dev;
1222         struct drm_i915_private *dev_priv = dev->dev_private;
1223         struct intel_ring_buffer *ring = obj->ring;
1224         u32 seqno;
1225         int ret;
1226
1227         DRM_LOCK_ASSERT(dev);
1228         BUG_ON(!dev_priv->mm.interruptible);
1229
1230         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1231         if (seqno == 0)
1232                 return 0;
1233
1234         ret = i915_gem_check_wedge(dev_priv, true);
1235         if (ret)
1236                 return ret;
1237
1238         ret = i915_gem_check_olr(ring, seqno);
1239         if (ret)
1240                 return ret;
1241
1242         DRM_UNLOCK(dev);
1243         ret = __wait_seqno(ring, seqno, true, NULL);
1244         DRM_LOCK(dev);
1245
1246         i915_gem_retire_requests_ring(ring);
1247
1248         /* Manually manage the write flush as we may have not yet
1249          * retired the buffer.
1250          */
1251         if (ret == 0 &&
1252             obj->last_write_seqno &&
1253             i915_seqno_passed(seqno, obj->last_write_seqno)) {
1254                 obj->last_write_seqno = 0;
1255                 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1256         }
1257
1258         return ret;
1259 }
1260
1261 /**
1262  * Called when user space prepares to use an object with the CPU, either
1263  * through the mmap ioctl's mapping or a GTT mapping.
1264  */
1265 int
1266 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1267                           struct drm_file *file)
1268 {
1269         struct drm_i915_gem_set_domain *args = data;
1270         struct drm_i915_gem_object *obj;
1271         uint32_t read_domains = args->read_domains;
1272         uint32_t write_domain = args->write_domain;
1273         int ret;
1274
1275         /* Only handle setting domains to types used by the CPU. */
1276         if (write_domain & I915_GEM_GPU_DOMAINS)
1277                 return -EINVAL;
1278
1279         if (read_domains & I915_GEM_GPU_DOMAINS)
1280                 return -EINVAL;
1281
1282         /* Having something in the write domain implies it's in the read
1283          * domain, and only that read domain.  Enforce that in the request.
1284          */
1285         if (write_domain != 0 && read_domains != write_domain)
1286                 return -EINVAL;
1287
1288         ret = i915_mutex_lock_interruptible(dev);
1289         if (ret)
1290                 return ret;
1291
1292         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1293         if (&obj->base == NULL) {
1294                 ret = -ENOENT;
1295                 goto unlock;
1296         }
1297
1298         /* Try to flush the object off the GPU without holding the lock.
1299          * We will repeat the flush holding the lock in the normal manner
1300          * to catch cases where we are gazumped.
1301          */
1302         ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
1303         if (ret)
1304                 goto unref;
1305
1306         if (read_domains & I915_GEM_DOMAIN_GTT) {
1307                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1308
1309                 /* Silently promote "you're not bound, there was nothing to do"
1310                  * to success, since the client was just asking us to
1311                  * make sure everything was done.
1312                  */
1313                 if (ret == -EINVAL)
1314                         ret = 0;
1315         } else {
1316                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1317         }
1318
1319 unref:
1320         drm_gem_object_unreference(&obj->base);
1321 unlock:
1322         DRM_UNLOCK(dev);
1323         return ret;
1324 }
1325
1326 /**
1327  * Called when user space has done writes to this buffer
1328  */
1329 int
1330 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1331                          struct drm_file *file)
1332 {
1333         struct drm_i915_gem_sw_finish *args = data;
1334         struct drm_i915_gem_object *obj;
1335         int ret = 0;
1336
1337         ret = i915_mutex_lock_interruptible(dev);
1338         if (ret)
1339                 return ret;
1340
1341         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1342         if (&obj->base == NULL) {
1343                 ret = -ENOENT;
1344                 goto unlock;
1345         }
1346
1347         /* Pinned buffers may be scanout, so flush the cache */
1348         if (obj->pin_count)
1349                 i915_gem_object_flush_cpu_write_domain(obj);
1350
1351         drm_gem_object_unreference(&obj->base);
1352 unlock:
1353         DRM_UNLOCK(dev);
1354         return ret;
1355 }
1356
1357 /**
1358  * Maps the contents of an object, returning the address it is mapped
1359  * into.
1360  *
1361  * While the mapping holds a reference on the contents of the object, it doesn't
1362  * imply a ref on the object itself.
1363  */
1364 int
1365 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1366                     struct drm_file *file)
1367 {
1368         struct drm_i915_gem_mmap *args = data;
1369         struct drm_gem_object *obj;
1370         struct proc *p;
1371         vm_map_t map;
1372         vm_offset_t addr;
1373         vm_size_t size;
1374         int error, rv;
1375
1376         obj = drm_gem_object_lookup(dev, file, args->handle);
1377         if (obj == NULL)
1378                 return -ENOENT;
1379
1380 #ifdef FREEBSD_WIP
1381         /* prime objects have no backing filp to GEM mmap
1382          * pages from.
1383          */
1384         if (!obj->filp) {
1385                 drm_gem_object_unreference_unlocked(obj);
1386                 return -EINVAL;
1387         }
1388 #endif /* FREEBSD_WIP */
1389
1390         error = 0;
1391         if (args->size == 0)
1392                 goto out;
1393         p = curproc;
1394         map = &p->p_vmspace->vm_map;
1395         size = round_page(args->size);
1396         PROC_LOCK(p);
1397         if (map->size + size > lim_cur_proc(p, RLIMIT_VMEM)) {
1398                 PROC_UNLOCK(p);
1399                 error = -ENOMEM;
1400                 goto out;
1401         }
1402         PROC_UNLOCK(p);
1403
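        /*
         * Map the GEM object's backing VM object into the calling process;
         * the extra reference taken here is consumed by the mapping, or
         * released again on failure.
         */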
1404         addr = 0;
1405         vm_object_reference(obj->vm_obj);
1406         rv = vm_map_find(map, obj->vm_obj, args->offset, &addr, args->size, 0,
1407             VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1408             VM_PROT_READ | VM_PROT_WRITE, MAP_INHERIT_SHARE);
1409         if (rv != KERN_SUCCESS) {
1410                 vm_object_deallocate(obj->vm_obj);
1411                 error = -vm_mmap_to_errno(rv);
1412         } else {
1413                 args->addr_ptr = (uint64_t)addr;
1414         }
1415 out:
1416         drm_gem_object_unreference_unlocked(obj);
1417         return (error);
1418 }
1419
1420 static int
1421 i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
1422     vm_ooffset_t foff, struct ucred *cred, u_short *color)
1423 {
1424
1425         /*
1426          * NOTE Linux<->FreeBSD: drm_gem_mmap_single() takes care of
1427          * calling drm_gem_object_reference(). That's why we don't
1428          * do this here. i915_gem_pager_dtor(), below, will call
1429          * drm_gem_object_unreference().
1430          *
1431          * On Linux, drm_gem_vm_open() references the object because
1432          * it's called when the mapping is copied. drm_gem_vm_open() is not
1433          * called when the mapping is created. So the possible sequences
1434          * are:
1435          *     1. drm_gem_mmap():     ref++
1436          *     2. drm_gem_vm_close(): ref--
1437          *
1438          *     1. drm_gem_mmap():     ref++
1439          *     2. drm_gem_vm_open():  ref++ (for the copied vma)
1440          *     3. drm_gem_vm_close(): ref-- (for the copied vma)
1441          *     4. drm_gem_vm_close(): ref-- (for the initial vma)
1442          *
1443          * On FreeBSD, i915_gem_pager_ctor() is called once during the
1444          * creation of the mapping. No callback is called when the
1445          * mapping is shared during a fork(). i915_gem_pager_dtor() is
1446          * called when the last reference to the mapping is dropped. So
1447          * the only sequence is:
1448          *     1. drm_gem_mmap_single(): ref++
1449          *     2. i915_gem_pager_ctor(): <noop>
1450          *     3. i915_gem_pager_dtor(): ref--
1451          */
1452
1453         *color = 0; /* XXXKIB */
1454         return (0);
1455 }
1456
1457 /**
1458  * i915_gem_fault - fault a page into the GTT
1459  * vma: VMA in question
1460  * vmf: fault info
1461  *
1462  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1463  * from userspace.  The fault handler takes care of binding the object to
1464  * the GTT (if needed), allocating and programming a fence register (again,
1465  * only if needed based on whether the old reg is still valid or the object
1466  * is tiled) and inserting a new PTE into the faulting process.
1467  *
1468  * Note that the faulting process may involve evicting existing objects
1469  * from the GTT and/or fence registers to make room.  So performance may
1470  * suffer if the GTT working set is large or there are few fence registers
1471  * left.
1472  */
1473
1474 int i915_intr_pf;
1475
1476 static int
1477 i915_gem_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type,
1478     vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
1479 {
1480         struct drm_gem_object *gem_obj = vm_obj->handle;
1481         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
1482         struct drm_device *dev = obj->base.dev;
1483         drm_i915_private_t *dev_priv = dev->dev_private;
1484         vm_page_t page;
1485         int ret = 0;
1486         bool write = (max_prot & VM_PROT_WRITE) != 0;
1487         bool pinned;
1488
1489         VM_OBJECT_WUNLOCK(vm_obj);
1490 retry:
1491         ret = 0;
1492         pinned = 0;
1493         page = NULL;
1494
1495         if (i915_intr_pf) {
1496                 ret = i915_mutex_lock_interruptible(dev);
1497                 if (ret != 0)
1498                         goto out;
1499         } else
1500                 DRM_LOCK(dev);
1501
1502         /*
1503          * Since the object lock was dropped, another thread might have
1504          * faulted on the same GTT address and instantiated the
1505          * mapping for the page.  Recheck.
1506          */
1507         VM_OBJECT_WLOCK(vm_obj);
1508         page = vm_page_lookup(vm_obj, pidx);
1509         if (page != NULL) {
1510                 if (vm_page_busied(page)) {
1511                         DRM_UNLOCK(dev);
1512                         vm_page_lock(page);
1513                         VM_OBJECT_WUNLOCK(vm_obj);
1514                         vm_page_busy_sleep(page, "915pee", false);
1515                         goto retry;
1516                 }
1517                 goto have_page;
1518         } else
1519                 VM_OBJECT_WUNLOCK(vm_obj);
1520
1521         /* Now bind it into the GTT if needed */
1522         ret = i915_gem_object_pin(obj, 0, true, false);
1523         if (ret)
1524                 goto unlock;
1525         pinned = 1;
1526
1527         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1528         if (ret)
1529                 goto unpin;
1530
1531         ret = i915_gem_object_get_fence(obj);
1532         if (ret)
1533                 goto unpin;
1534
1535         obj->fault_mappable = true;
1536
1537         page = PHYS_TO_VM_PAGE(dev_priv->mm.gtt_base_addr + obj->gtt_offset +
1538             IDX_TO_OFF(pidx));
1539         if (page == NULL) {
1540                 ret = -EFAULT;
1541                 goto unpin;
1542         }
1543         KASSERT((page->flags & PG_FICTITIOUS) != 0,
1544             ("physical address %#jx not fictitious, page %p",
1545             (uintmax_t)(dev_priv->mm.gtt_base_addr + obj->gtt_offset +
1546             IDX_TO_OFF(pidx)), page));
1547         KASSERT(page->wire_count == 1, ("wire_count not 1 %p", page));
1548
1549         VM_OBJECT_WLOCK(vm_obj);
1550         if (vm_page_busied(page)) {
1551                 i915_gem_object_unpin(obj);
1552                 DRM_UNLOCK(dev);
1553                 vm_page_lock(page);
1554                 VM_OBJECT_WUNLOCK(vm_obj);
1555                 vm_page_busy_sleep(page, "915pbs", false);
1556                 goto retry;
1557         }
1558         if (vm_page_insert(page, vm_obj, pidx)) {
1559                 i915_gem_object_unpin(obj);
1560                 DRM_UNLOCK(dev);
1561                 VM_OBJECT_WUNLOCK(vm_obj);
1562                 VM_WAIT;
1563                 goto retry;
1564         }
1565         page->valid = VM_PAGE_BITS_ALL;
1566 have_page:
1567         vm_page_xbusy(page);
1568
1569         CTR4(KTR_DRM, "fault %p %jx %x phys %x", gem_obj, pidx, fault_type,
1570             page->phys_addr);
1571         if (pinned) {
1572                 /*
1573                  * We may not have pinned the object if the page was
1574                  * found by the call to vm_page_lookup().
1575                  */
1576                 i915_gem_object_unpin(obj);
1577         }
1578         DRM_UNLOCK(dev);
1579         *first = *last = pidx;
1580         return (VM_PAGER_OK);
1581
1582 unpin:
1583         i915_gem_object_unpin(obj);
1584 unlock:
1585         DRM_UNLOCK(dev);
1586 out:
1587         KASSERT(ret != 0, ("i915_gem_pager_fault: wrong return"));
1588         CTR4(KTR_DRM, "fault_fail %p %jx %x err %d", gem_obj, pidx, fault_type,
1589             -ret);
1590         if (ret == -ERESTARTSYS) {
1591                 /*
1592                  * NOTE Linux<->FreeBSD: Convert Linux' -ERESTARTSYS to
1593                  * the more common -EINTR, so the page fault is retried.
1594                  */
1595                 ret = -EINTR;
1596         }
1597         if (ret == -EAGAIN || ret == -EIO || ret == -EINTR) {
1598                 kern_yield(PRI_USER);
1599                 goto retry;
1600         }
1601         VM_OBJECT_WLOCK(vm_obj);
1602         return (VM_PAGER_ERROR);
1603 }
1604
1605 static void
1606 i915_gem_pager_dtor(void *handle)
1607 {
1608         struct drm_gem_object *obj = handle;
1609         struct drm_device *dev = obj->dev;
1610
1611         DRM_LOCK(dev);
1612         drm_gem_object_unreference(obj);
1613         DRM_UNLOCK(dev);
1614 }
1615
1616 struct cdev_pager_ops i915_gem_pager_ops = {
1617         .cdev_pg_populate       = i915_gem_pager_populate,
1618         .cdev_pg_ctor           = i915_gem_pager_ctor,
1619         .cdev_pg_dtor           = i915_gem_pager_dtor,
1620 };
1621
1622 /**
1623  * i915_gem_release_mmap - remove physical page mappings
1624  * @obj: obj in question
1625  *
1626  * Preserve the reservation of the mmapping with the DRM core code, but
1627  * relinquish ownership of the pages back to the system.
1628  *
1629  * It is vital that we remove the page mapping if we have mapped a tiled
1630  * object through the GTT and then lose the fence register due to
1631  * resource pressure. Similarly if the object has been moved out of the
1632  * aperture, then pages mapped into userspace must be revoked. Removing the
1633  * mapping will then trigger a page fault on the next user access, allowing
1634  * fixup by i915_gem_fault().
1635  */
1636 void
1637 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1638 {
1639         vm_object_t devobj;
1640         vm_page_t page;
1641         int i, page_count;
1642
1643         if (!obj->fault_mappable)
1644                 return;
1645
1646         CTR3(KTR_DRM, "release_mmap %p %x %x", obj, obj->gtt_offset,
1647             OFF_TO_IDX(obj->base.size));
1648         devobj = cdev_pager_lookup(obj);
1649         if (devobj != NULL) {
1650                 page_count = OFF_TO_IDX(obj->base.size);
1651
1652                 VM_OBJECT_WLOCK(devobj);
1653 retry:
1654                 for (i = 0; i < page_count; i++) {
1655                         page = vm_page_lookup(devobj, i);
1656                         if (page == NULL)
1657                                 continue;
1658                         if (vm_page_sleep_if_busy(page, "915unm"))
1659                                 goto retry;
1660                         cdev_pager_free_page(devobj, page);
1661                 }
1662                 VM_OBJECT_WUNLOCK(devobj);
1663                 vm_object_deallocate(devobj);
1664         }
1665
1666         obj->fault_mappable = false;
1667 }
1668
1669 static uint32_t
1670 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1671 {
1672         uint32_t gtt_size;
1673
1674         if (INTEL_INFO(dev)->gen >= 4 ||
1675             tiling_mode == I915_TILING_NONE)
1676                 return size;
1677
1678         /* Previous chips need a power-of-two fence region when tiling */
1679         if (INTEL_INFO(dev)->gen == 3)
1680                 gtt_size = 1024*1024;
1681         else
1682                 gtt_size = 512*1024;
1683
1684         while (gtt_size < size)
1685                 gtt_size <<= 1;
1686
1687         return gtt_size;
1688 }
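
/*
 * Worked example of the rounding above (illustrative numbers): a 1300 KiB
 * tiled object on gen3 starts from a 1 MiB fence region and doubles once
 * to 2 MiB; the same object on gen2 starts from 512 KiB and doubles twice,
 * also ending at 2 MiB.  Gen4+ and untiled objects take the early return
 * and use the exact object size.
 */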
1689
1690 /**
1691  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1692  * @obj: object to check
1693  *
1694  * Return the required GTT alignment for an object, taking into account
1695  * potential fence register mapping.
1696  */
1697 static uint32_t
1698 i915_gem_get_gtt_alignment(struct drm_device *dev,
1699                            uint32_t size,
1700                            int tiling_mode)
1701 {
1702         /*
1703          * Minimum alignment is 4k (GTT page size), but might be greater
1704          * if a fence register is needed for the object.
1705          */
1706         if (INTEL_INFO(dev)->gen >= 4 ||
1707             tiling_mode == I915_TILING_NONE)
1708                 return 4096;
1709
1710         /*
1711          * Previous chips need to be aligned to the size of the smallest
1712          * fence register that can contain the object.
1713          */
1714         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1715 }
1716
1717 /**
1718  * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1719  *                                       unfenced object
1720  * @dev: the device
1721  * @size: size of the object
1722  * @tiling_mode: tiling mode of the object
1723  *
1724  * Return the required GTT alignment for an object, only taking into account
1725  * unfenced tiled surface requirements.
1726  */
1727 uint32_t
1728 i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1729                                     uint32_t size,
1730                                     int tiling_mode)
1731 {
1732         /*
1733          * Minimum alignment is 4k (GTT page size) for sane hw.
1734          */
1735         if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
1736             tiling_mode == I915_TILING_NONE)
1737                 return 4096;
1738
1739         /* Previous hardware, however, needs to be aligned to a power-of-two
1740          * tile height. The simplest method for determining this is to reuse
1741          * the power-of-two fence size from i915_gem_get_gtt_size().
1742          */
1743         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1744 }
1745
1746 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1747 {
1748         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1749         int ret;
1750
1751         if (obj->base.on_map)
1752                 return 0;
1753
1754         dev_priv->mm.shrinker_no_lock_stealing = true;
1755
1756         ret = drm_gem_create_mmap_offset(&obj->base);
1757         if (ret != -ENOSPC)
1758                 goto out;
1759
1760         /* Badly fragmented mmap space? The only way we can recover
1761          * space is by destroying unwanted objects. We can't randomly release
1762          * mmap_offsets as userspace expects them to be persistent for the
1763          * lifetime of the objects. The closest we can do is to release the
1764          * offsets on purgeable objects by truncating them and marking them purged,
1765          * which prevents userspace from ever using that object again.
1766          */
1767         i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1768         ret = drm_gem_create_mmap_offset(&obj->base);
1769         if (ret != -ENOSPC)
1770                 goto out;
1771
1772         i915_gem_shrink_all(dev_priv);
1773         ret = drm_gem_create_mmap_offset(&obj->base);
1774 out:
1775         dev_priv->mm.shrinker_no_lock_stealing = false;
1776
1777         return ret;
1778 }
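
/*
 * The recovery path above escalates in two steps when the initial
 * drm_gem_create_mmap_offset() call fails with -ENOSPC: first purge only
 * purgeable objects (roughly this object's size worth of pages), then, if
 * that was not enough, evict and shrink everything before one final
 * attempt.  shrinker_no_lock_stealing stays set for the whole sequence,
 * which (as on Linux) keeps the shrinker from trying to steal struct_mutex
 * out from under these allocations.
 */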
1779
1780 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1781 {
1782         if (!obj->base.on_map)
1783                 return;
1784
1785         drm_gem_free_mmap_offset(&obj->base);
1786 }
1787
1788 int
1789 i915_gem_mmap_gtt(struct drm_file *file,
1790                   struct drm_device *dev,
1791                   uint32_t handle,
1792                   uint64_t *offset)
1793 {
1794         struct drm_i915_private *dev_priv = dev->dev_private;
1795         struct drm_i915_gem_object *obj;
1796         int ret;
1797
1798         ret = i915_mutex_lock_interruptible(dev);
1799         if (ret)
1800                 return ret;
1801
1802         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1803         if (&obj->base == NULL) {
1804                 ret = -ENOENT;
1805                 goto unlock;
1806         }
1807
1808         if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1809                 ret = -E2BIG;
1810                 goto out;
1811         }
1812
1813         if (obj->madv != I915_MADV_WILLNEED) {
1814                 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1815                 ret = -EINVAL;
1816                 goto out;
1817         }
1818
1819         ret = i915_gem_object_create_mmap_offset(obj);
1820         if (ret)
1821                 goto out;
1822
1823         *offset = DRM_GEM_MAPPING_OFF(obj->base.map_list.key) |
1824             DRM_GEM_MAPPING_KEY;
1825
1826 out:
1827         drm_gem_object_unreference(&obj->base);
1828 unlock:
1829         DRM_UNLOCK(dev);
1830         return ret;
1831 }
1832
1833 /**
1834  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1835  * @dev: DRM device
1836  * @data: GTT mapping ioctl data
1837  * @file: GEM object info
1838  *
1839  * Simply returns the fake offset to userspace so it can mmap it.
1840  * The mmap call will end up in drm_gem_mmap(), which will set things
1841  * up so we can get faults in the handler above.
1842  *
1843  * The fault handler will take care of binding the object into the GTT
1844  * (since it may have been evicted to make room for something), allocating
1845  * a fence register, and mapping the appropriate aperture address into
1846  * userspace.
1847  */
1848 int
1849 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1850                         struct drm_file *file)
1851 {
1852         struct drm_i915_gem_mmap_gtt *args = data;
1853
1854         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1855 }
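
/*
 * Sketch of the userspace side of this ioctl (illustrative only, error
 * handling omitted): the fake offset returned here is only meaningful as
 * an mmap(2) offset on the DRM device fd.
 *
 *      struct drm_i915_gem_mmap_gtt arg = { .handle = bo_handle };
 *      ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *      ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *          fd, arg.offset);
 *
 * Faults on the resulting mapping end up in i915_gem_pager_populate()
 * above.
 */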
1856
1857 /* Immediately discard the backing storage */
1858 static void
1859 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1860 {
1861         vm_object_t vm_obj;
1862
1863         vm_obj = obj->base.vm_obj;
1864         VM_OBJECT_WLOCK(vm_obj);
1865         vm_object_page_remove(vm_obj, 0, 0, false);
1866         VM_OBJECT_WUNLOCK(vm_obj);
1867         i915_gem_object_free_mmap_offset(obj);
1868
1869         obj->madv = __I915_MADV_PURGED;
1870 }
1871
1872 static inline int
1873 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1874 {
1875         return obj->madv == I915_MADV_DONTNEED;
1876 }
1877
1878 static void
1879 i915_gem_object_put_pages_range_locked(struct drm_i915_gem_object *obj,
1880     vm_pindex_t si, vm_pindex_t ei)
1881 {
1882         vm_object_t vm_obj;
1883         vm_page_t page;
1884         vm_pindex_t i;
1885
1886         vm_obj = obj->base.vm_obj;
1887         VM_OBJECT_ASSERT_LOCKED(vm_obj);
1888         for (i = si,  page = vm_page_lookup(vm_obj, i); i < ei;
1889             page = vm_page_next(page), i++) {
1890                 KASSERT(page->pindex == i, ("pindex %jx %jx",
1891                     (uintmax_t)page->pindex, (uintmax_t)i));
1892                 vm_page_lock(page);
1893                 if (vm_page_unwire(page, PQ_INACTIVE))
1894                         atomic_add_long(&i915_gem_wired_pages_cnt, -1);
1895                 vm_page_unlock(page);
1896         }
1897 }
1898
1899 #define GEM_PARANOID_CHECK_GTT 0
1900 #if GEM_PARANOID_CHECK_GTT
1901 static void
1902 i915_gem_assert_pages_not_mapped(struct drm_device *dev, vm_page_t *ma,
1903     int page_count)
1904 {
1905         struct drm_i915_private *dev_priv;
1906         vm_paddr_t pa;
1907         unsigned long start, end;
1908         u_int i;
1909         int j;
1910
1911         dev_priv = dev->dev_private;
1912         start = OFF_TO_IDX(dev_priv->mm.gtt_start);
1913         end = OFF_TO_IDX(dev_priv->mm.gtt_end);
1914         for (i = start; i < end; i++) {
1915                 pa = intel_gtt_read_pte_paddr(i);
1916                 for (j = 0; j < page_count; j++) {
1917                         if (pa == VM_PAGE_TO_PHYS(ma[j])) {
1918                                 panic("Page %p in GTT pte index %d pte %x",
1919                                     ma[j], i, intel_gtt_read_pte(i));
1920                         }
1921                 }
1922         }
1923 }
1924 #endif
1925
1926 static void
1927 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1928 {
1929         int page_count = obj->base.size / PAGE_SIZE;
1930         int ret, i;
1931
1932         BUG_ON(obj->madv == __I915_MADV_PURGED);
1933
1934         ret = i915_gem_object_set_to_cpu_domain(obj, true);
1935         if (ret) {
1936                 /* In the event of a disaster, abandon all caches and
1937                  * hope for the best.
1938                  */
1939                 WARN_ON(ret != -EIO);
1940                 i915_gem_clflush_object(obj);
1941                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1942         }
1943
1944         if (i915_gem_object_needs_bit17_swizzle(obj))
1945                 i915_gem_object_save_bit_17_swizzle(obj);
1946
1947         if (obj->madv == I915_MADV_DONTNEED)
1948                 obj->dirty = 0;
1949
1950         VM_OBJECT_WLOCK(obj->base.vm_obj);
1951 #if GEM_PARANOID_CHECK_GTT
1952         i915_gem_assert_pages_not_mapped(obj->base.dev, obj->pages, page_count);
1953 #endif
1954         for (i = 0; i < page_count; i++) {
1955                 vm_page_t page = obj->pages[i];
1956
1957                 if (obj->dirty)
1958                         vm_page_dirty(page);
1959
1960                 if (obj->madv == I915_MADV_WILLNEED)
1961                         vm_page_reference(page);
1962
1963                 vm_page_lock(page);
1964                 vm_page_unwire(obj->pages[i], PQ_ACTIVE);
1965                 vm_page_unlock(page);
1966                 atomic_add_long(&i915_gem_wired_pages_cnt, -1);
1967         }
1968         VM_OBJECT_WUNLOCK(obj->base.vm_obj);
1969         obj->dirty = 0;
1970
1971         free(obj->pages, DRM_I915_GEM);
1972         obj->pages = NULL;
1973 }
1974
1975 static int
1976 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1977 {
1978         const struct drm_i915_gem_object_ops *ops = obj->ops;
1979
1980         if (obj->pages == NULL)
1981                 return 0;
1982
1983         BUG_ON(obj->gtt_space);
1984
1985         if (obj->pages_pin_count)
1986                 return -EBUSY;
1987
1988         /* ->put_pages might need to allocate memory for the bit17 swizzle
1989          * array, hence protect them from being reaped by removing them from gtt
1990          * lists early. */
1991         list_del(&obj->gtt_list);
1992
1993         ops->put_pages(obj);
1994         obj->pages = NULL;
1995
1996         if (i915_gem_object_is_purgeable(obj))
1997                 i915_gem_object_truncate(obj);
1998
1999         return 0;
2000 }
2001
2002 static long
2003 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
2004                   bool purgeable_only)
2005 {
2006         struct drm_i915_gem_object *obj, *next;
2007         long count = 0;
2008
2009         list_for_each_entry_safe(obj, next,
2010                                  &dev_priv->mm.unbound_list,
2011                                  gtt_list) {
2012                 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
2013                     i915_gem_object_put_pages(obj) == 0) {
2014                         count += obj->base.size >> PAGE_SHIFT;
2015                         if (target != -1 && count >= target)
2016                                 return count;
2017                 }
2018         }
2019
2020         list_for_each_entry_safe(obj, next,
2021                                  &dev_priv->mm.inactive_list,
2022                                  mm_list) {
2023                 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
2024                     i915_gem_object_unbind(obj) == 0 &&
2025                     i915_gem_object_put_pages(obj) == 0) {
2026                         count += obj->base.size >> PAGE_SHIFT;
2027                         if (target != -1 && count >= target)
2028                                 return count;
2029                 }
2030         }
2031
2032         return count;
2033 }
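
/*
 * Note on the ordering above (a summary, not a contract): unbound objects
 * are scanned first because their pages can be dropped without touching
 * the GTT, while inactive-but-bound objects must be unbound before their
 * pages can go.  Passing a target of -1 disables the early return, so the
 * scan frees everything it can.
 */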
2034
2035 static long
2036 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
2037 {
2038         return __i915_gem_shrink(dev_priv, target, true);
2039 }
2040
2041 static void
2042 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
2043 {
2044         struct drm_i915_gem_object *obj, *next;
2045
2046         i915_gem_evict_everything(dev_priv->dev);
2047
2048         list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
2049                 i915_gem_object_put_pages(obj);
2050 }
2051
2052 static int
2053 i915_gem_object_get_pages_range(struct drm_i915_gem_object *obj,
2054     off_t start, off_t end)
2055 {
2056         vm_object_t vm_obj;
2057         vm_page_t page;
2058         vm_pindex_t si, ei, i;
2059         bool need_swizzle, fresh;
2060
2061         need_swizzle = i915_gem_object_needs_bit17_swizzle(obj) != 0;
2062         vm_obj = obj->base.vm_obj;
2063         si = OFF_TO_IDX(trunc_page(start));
2064         ei = OFF_TO_IDX(round_page(end));
2065         VM_OBJECT_WLOCK(vm_obj);
2066         for (i = si; i < ei; i++) {
2067                 page = i915_gem_wire_page(vm_obj, i, &fresh);
2068                 if (page == NULL)
2069                         goto failed;
2070                 if (need_swizzle && fresh)
2071                         i915_gem_object_do_bit_17_swizzle_page(obj, page);
2072         }
2073         VM_OBJECT_WUNLOCK(vm_obj);
2074         return (0);
2075 failed:
2076         i915_gem_object_put_pages_range_locked(obj, si, i);
2077         VM_OBJECT_WUNLOCK(vm_obj);
2078         return (-EIO);
2079 }
2080
2081 static int
2082 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2083 {
2084         vm_object_t vm_obj;
2085         vm_page_t page;
2086         vm_pindex_t i, page_count;
2087         int res;
2088
2089         /* Assert that the object is not currently in any GPU domain. As it
2090          * wasn't in the GTT, there shouldn't be any way it could have been in
2091          * a GPU cache
2092          */
2093         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2094         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2095         KASSERT(obj->pages == NULL, ("Obj already has pages"));
2096
2097         page_count = OFF_TO_IDX(obj->base.size);
2098         obj->pages = malloc(page_count * sizeof(vm_page_t), DRM_I915_GEM,
2099             M_WAITOK);
2100         res = i915_gem_object_get_pages_range(obj, 0, obj->base.size);
2101         if (res != 0) {
2102                 free(obj->pages, DRM_I915_GEM);
2103                 obj->pages = NULL;
2104                 return (res);
2105         }
2106         vm_obj = obj->base.vm_obj;
2107         VM_OBJECT_WLOCK(vm_obj);
2108         for (i = 0, page = vm_page_lookup(vm_obj, 0); i < page_count;
2109             i++, page = vm_page_next(page)) {
2110                 KASSERT(page->pindex == i, ("pindex %jx %jx",
2111                     (uintmax_t)page->pindex, (uintmax_t)i));
2112                 obj->pages[i] = page;
2113         }
2114         VM_OBJECT_WUNLOCK(vm_obj);
2115         return (0);
2116 }
2117
2118 /* Ensure that the associated pages are gathered from the backing storage
2119  * and pinned into our object. i915_gem_object_get_pages() may be called
2120  * multiple times before they are released by a single call to
2121  * i915_gem_object_put_pages() - once the pages are no longer referenced
2122  * either as a result of memory pressure (reaping pages under the shrinker)
2123  * or as the object is itself released.
2124  */
2125 int
2126 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2127 {
2128         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2129         const struct drm_i915_gem_object_ops *ops = obj->ops;
2130         int ret;
2131
2132         if (obj->pages)
2133                 return 0;
2134
2135         BUG_ON(obj->pages_pin_count);
2136
2137         ret = ops->get_pages(obj);
2138         if (ret)
2139                 return ret;
2140
2141         list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
2142         return 0;
2143 }
2144
2145 void
2146 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
2147                                struct intel_ring_buffer *ring)
2148 {
2149         struct drm_device *dev = obj->base.dev;
2150         struct drm_i915_private *dev_priv = dev->dev_private;
2151         u32 seqno = intel_ring_get_seqno(ring);
2152
2153         BUG_ON(ring == NULL);
2154         obj->ring = ring;
2155
2156         /* Add a reference if we're newly entering the active list. */
2157         if (!obj->active) {
2158                 drm_gem_object_reference(&obj->base);
2159                 obj->active = 1;
2160         }
2161
2162         /* Move from whatever list we were on to the tail of execution. */
2163         list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
2164         list_move_tail(&obj->ring_list, &ring->active_list);
2165
2166         obj->last_read_seqno = seqno;
2167
2168         if (obj->fenced_gpu_access) {
2169                 obj->last_fenced_seqno = seqno;
2170
2171                 /* Bump MRU to take account of the delayed flush */
2172                 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2173                         struct drm_i915_fence_reg *reg;
2174
2175                         reg = &dev_priv->fence_regs[obj->fence_reg];
2176                         list_move_tail(&reg->lru_list,
2177                                        &dev_priv->mm.fence_list);
2178                 }
2179         }
2180 }
2181
2182 static void
2183 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2184 {
2185         struct drm_device *dev = obj->base.dev;
2186         struct drm_i915_private *dev_priv = dev->dev_private;
2187
2188         BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
2189         BUG_ON(!obj->active);
2190
2191         list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2192
2193         list_del_init(&obj->ring_list);
2194         obj->ring = NULL;
2195
2196         obj->last_read_seqno = 0;
2197         obj->last_write_seqno = 0;
2198         obj->base.write_domain = 0;
2199
2200         obj->last_fenced_seqno = 0;
2201         obj->fenced_gpu_access = false;
2202
2203         obj->active = 0;
2204         drm_gem_object_unreference(&obj->base);
2205
2206         WARN_ON(i915_verify_lists(dev));
2207 }
2208
2209 static int
2210 i915_gem_handle_seqno_wrap(struct drm_device *dev)
2211 {
2212         struct drm_i915_private *dev_priv = dev->dev_private;
2213         struct intel_ring_buffer *ring;
2214         int ret, i, j;
2215
2216         /* The hardware uses various monotonic 32-bit counters; if we
2217          * detect that they will wrap around, we need to idle the GPU
2218          * and reset those counters.
2219          */
2220         ret = 0;
2221         for_each_ring(ring, dev_priv, i) {
2222                 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
2223                         ret |= ring->sync_seqno[j] != 0;
2224         }
2225         if (ret == 0)
2226                 return ret;
2227
2228         ret = i915_gpu_idle(dev);
2229         if (ret)
2230                 return ret;
2231
2232         i915_gem_retire_requests(dev);
2233         for_each_ring(ring, dev_priv, i) {
2234                 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
2235                         ring->sync_seqno[j] = 0;
2236         }
2237
2238         return 0;
2239 }
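
/*
 * Background for the wrap handling above (informal): seqnos are 32-bit and
 * monotonically increasing, so after roughly four billion requests
 * next_seqno would wrap past the values still recorded in each ring's
 * sync_seqno[] bookkeeping.  Idling the GPU and zeroing those arrays lets
 * the inter-ring semaphore comparisons start from a clean slate instead of
 * mis-ordering old and new seqnos across the wrap.
 */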
2240
2241 int
2242 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2243 {
2244         struct drm_i915_private *dev_priv = dev->dev_private;
2245
2246         /* reserve 0 for non-seqno */
2247         if (dev_priv->next_seqno == 0) {
2248                 int ret = i915_gem_handle_seqno_wrap(dev);
2249                 if (ret)
2250                         return ret;
2251
2252                 dev_priv->next_seqno = 1;
2253         }
2254
2255         *seqno = dev_priv->next_seqno++;
2256         return 0;
2257 }
2258
2259 int
2260 i915_add_request(struct intel_ring_buffer *ring,
2261                  struct drm_file *file,
2262                  u32 *out_seqno)
2263 {
2264         drm_i915_private_t *dev_priv = ring->dev->dev_private;
2265         struct drm_i915_gem_request *request;
2266         u32 request_ring_position;
2267         int was_empty;
2268         int ret;
2269
2270         /*
2271          * Emit any outstanding flushes - execbuf can fail to emit the flush
2272          * after having emitted the batchbuffer command. Hence we need to fix
2273          * things up similar to emitting the lazy request. The difference here
2274          * is that the flush _must_ happen before the next request, no matter
2275          * what.
2276          */
2277         ret = intel_ring_flush_all_caches(ring);
2278         if (ret)
2279                 return ret;
2280
2281         request = malloc(sizeof(*request), DRM_I915_GEM, M_NOWAIT);
2282         if (request == NULL)
2283                 return -ENOMEM;
2284
2285
2286         /* Record the position of the start of the request so that
2287          * should we detect the updated seqno part-way through the
2288          * GPU processing the request, we never over-estimate the
2289          * position of the head.
2290          */
2291         request_ring_position = intel_ring_get_tail(ring);
2292
2293         ret = ring->add_request(ring);
2294         if (ret) {
2295                 free(request, DRM_I915_GEM);
2296                 return ret;
2297         }
2298
2299         request->seqno = intel_ring_get_seqno(ring);
2300         request->ring = ring;
2301         request->tail = request_ring_position;
2302         request->emitted_jiffies = jiffies;
2303         was_empty = list_empty(&ring->request_list);
2304         list_add_tail(&request->list, &ring->request_list);
2305         request->file_priv = NULL;
2306
2307         if (file) {
2308                 struct drm_i915_file_private *file_priv = file->driver_priv;
2309
2310                 mtx_lock(&file_priv->mm.lock);
2311                 request->file_priv = file_priv;
2312                 list_add_tail(&request->client_list,
2313                               &file_priv->mm.request_list);
2314                 mtx_unlock(&file_priv->mm.lock);
2315         }
2316
2317         CTR2(KTR_DRM, "request_add %s %d", ring->name, request->seqno);
2318         ring->outstanding_lazy_request = 0;
2319
2320         if (!dev_priv->mm.suspended) {
2321                 if (i915_enable_hangcheck) {
2322                         callout_schedule(&dev_priv->hangcheck_timer,
2323                             DRM_I915_HANGCHECK_PERIOD);
2324                 }
2325                 if (was_empty) {
2326                         taskqueue_enqueue_timeout(dev_priv->wq,
2327                             &dev_priv->mm.retire_work, hz);
2328                         intel_mark_busy(dev_priv->dev);
2329                 }
2330         }
2331
2332         if (out_seqno)
2333                 *out_seqno = request->seqno;
2334         return 0;
2335 }
2336
2337 static inline void
2338 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2339 {
2340         struct drm_i915_file_private *file_priv = request->file_priv;
2341
2342         if (!file_priv)
2343                 return;
2344
2345         mtx_lock(&file_priv->mm.lock);
2346         if (request->file_priv) {
2347                 list_del(&request->client_list);
2348                 request->file_priv = NULL;
2349         }
2350         mtx_unlock(&file_priv->mm.lock);
2351 }
2352
2353 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2354                                       struct intel_ring_buffer *ring)
2355 {
2356         if (ring->dev != NULL)
2357                 DRM_LOCK_ASSERT(ring->dev);
2358
2359         while (!list_empty(&ring->request_list)) {
2360                 struct drm_i915_gem_request *request;
2361
2362                 request = list_first_entry(&ring->request_list,
2363                                            struct drm_i915_gem_request,
2364                                            list);
2365
2366                 list_del(&request->list);
2367                 i915_gem_request_remove_from_client(request);
2368                 free(request, DRM_I915_GEM);
2369         }
2370
2371         while (!list_empty(&ring->active_list)) {
2372                 struct drm_i915_gem_object *obj;
2373
2374                 obj = list_first_entry(&ring->active_list,
2375                                        struct drm_i915_gem_object,
2376                                        ring_list);
2377
2378                 i915_gem_object_move_to_inactive(obj);
2379         }
2380 }
2381
2382 static void i915_gem_reset_fences(struct drm_device *dev)
2383 {
2384         struct drm_i915_private *dev_priv = dev->dev_private;
2385         int i;
2386
2387         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2388                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2389
2390                 i915_gem_write_fence(dev, i, NULL);
2391
2392                 if (reg->obj)
2393                         i915_gem_object_fence_lost(reg->obj);
2394
2395                 reg->pin_count = 0;
2396                 reg->obj = NULL;
2397                 INIT_LIST_HEAD(&reg->lru_list);
2398         }
2399
2400         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
2401 }
2402
2403 void i915_gem_reset(struct drm_device *dev)
2404 {
2405         struct drm_i915_private *dev_priv = dev->dev_private;
2406         struct drm_i915_gem_object *obj;
2407         struct intel_ring_buffer *ring;
2408         int i;
2409
2410         for_each_ring(ring, dev_priv, i)
2411                 i915_gem_reset_ring_lists(dev_priv, ring);
2412
2413         /* Move everything out of the GPU domains to ensure we do any
2414          * necessary invalidation upon reuse.
2415          */
2416         list_for_each_entry(obj,
2417                             &dev_priv->mm.inactive_list,
2418                             mm_list)
2419         {
2420                 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
2421         }
2422
2423         /* The fence registers are invalidated so clear them out */
2424         i915_gem_reset_fences(dev);
2425 }
2426
2427 /**
2428  * This function clears the request list as sequence numbers are passed.
2429  */
2430 void
2431 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2432 {
2433         uint32_t seqno;
2434
2435         if (list_empty(&ring->request_list))
2436                 return;
2437
2438         WARN_ON(i915_verify_lists(ring->dev));
2439
2440         seqno = ring->get_seqno(ring, true);
2441         CTR2(KTR_DRM, "retire_request_ring %s %d", ring->name, seqno);
2442
2443         while (!list_empty(&ring->request_list)) {
2444                 struct drm_i915_gem_request *request;
2445
2446                 request = list_first_entry(&ring->request_list,
2447                                            struct drm_i915_gem_request,
2448                                            list);
2449
2450                 if (!i915_seqno_passed(seqno, request->seqno))
2451                         break;
2452
2453                 CTR2(KTR_DRM, "retire_request_seqno_passed %s %d",
2454                     ring->name, seqno);
2455                 /* We know the GPU must have read the request to have
2456                  * sent us the seqno + interrupt, so use the position
2457                  * of the tail of the request to update the last known position
2458                  * of the GPU head.
2459                  */
2460                 ring->last_retired_head = request->tail;
2461
2462                 list_del(&request->list);
2463                 i915_gem_request_remove_from_client(request);
2464                 free(request, DRM_I915_GEM);
2465         }
2466
2467         /* Move any buffers on the active list that are no longer referenced
2468          * by the ringbuffer to the flushing/inactive lists as appropriate.
2469          */
2470         while (!list_empty(&ring->active_list)) {
2471                 struct drm_i915_gem_object *obj;
2472
2473                 obj = list_first_entry(&ring->active_list,
2474                                       struct drm_i915_gem_object,
2475                                       ring_list);
2476
2477                 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2478                         break;
2479
2480                 i915_gem_object_move_to_inactive(obj);
2481         }
2482
2483         if (unlikely(ring->trace_irq_seqno &&
2484                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2485                 ring->irq_put(ring);
2486                 ring->trace_irq_seqno = 0;
2487         }
2488
2489         WARN_ON(i915_verify_lists(ring->dev));
2490 }
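
/*
 * The i915_seqno_passed() checks above compare seqnos with wrap-safe
 * signed arithmetic (conceptually (int32_t)(a - b) >= 0), so an individual
 * ring tolerates the 32-bit wrap by itself; the explicit idle in
 * i915_gem_handle_seqno_wrap() is mainly there for the cross-ring
 * sync_seqno state.
 */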
2491
2492 void
2493 i915_gem_retire_requests(struct drm_device *dev)
2494 {
2495         drm_i915_private_t *dev_priv = dev->dev_private;
2496         struct intel_ring_buffer *ring;
2497         int i;
2498
2499         for_each_ring(ring, dev_priv, i)
2500                 i915_gem_retire_requests_ring(ring);
2501 }
2502
2503 static void
2504 i915_gem_retire_work_handler(void *arg, int pending)
2505 {
2506         drm_i915_private_t *dev_priv;
2507         struct drm_device *dev;
2508         struct intel_ring_buffer *ring;
2509         bool idle;
2510         int i;
2511
2512         dev_priv = arg;
2513         dev = dev_priv->dev;
2514
2515         /* Come back later if the device is busy... */
2516         if (!sx_try_xlock(&dev->dev_struct_lock)) {
2517                 taskqueue_enqueue_timeout(dev_priv->wq,
2518                     &dev_priv->mm.retire_work, hz);
2519                 return;
2520         }
2521
2522         CTR0(KTR_DRM, "retire_task");
2523
2524         i915_gem_retire_requests(dev);
2525
2526         /* Send a periodic flush down the ring so we don't hold onto GEM
2527          * objects indefinitely.
2528          */
2529         idle = true;
2530         for_each_ring(ring, dev_priv, i) {
2531                 if (ring->gpu_caches_dirty)
2532                         i915_add_request(ring, NULL, NULL);
2533
2534                 idle &= list_empty(&ring->request_list);
2535         }
2536
2537         if (!dev_priv->mm.suspended && !idle)
2538                 taskqueue_enqueue_timeout(dev_priv->wq,
2539                     &dev_priv->mm.retire_work, hz);
2540         if (idle)
2541                 intel_mark_idle(dev);
2542
2543         DRM_UNLOCK(dev);
2544 }
2545
2546 /**
2547  * Ensures that an object will eventually get non-busy by flushing any required
2548  * write domains, emitting any outstanding lazy request and retiring any
2549  * completed requests.
2550  */
2551 static int
2552 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2553 {
2554         int ret;
2555
2556         if (obj->active) {
2557                 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2558                 if (ret)
2559                         return ret;
2560
2561                 i915_gem_retire_requests_ring(obj->ring);
2562         }
2563
2564         return 0;
2565 }
2566
2567 /**
2568  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2569  * @DRM_IOCTL_ARGS: standard ioctl arguments
2570  *
2571  * Returns 0 if successful, else an error is returned with the remaining time in
2572  * the timeout parameter.
2573  *  -ETIMEDOUT: object is still busy after timeout
2574  *  -ERESTARTSYS: signal interrupted the wait
2575  *  -ENOENT: object doesn't exist
2576  * Also possible, but rare:
2577  *  -EAGAIN: GPU wedged
2578  *  -ENOMEM: damn
2579  *  -ENODEV: Internal IRQ fail
2580  *  -E?: The add request failed
2581  *
2582  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2583  * non-zero timeout parameter the wait ioctl will wait for the given number of
2584  * nanoseconds on an object becoming unbusy. Since the wait itself does so
2585  * without holding struct_mutex, the object may become re-busied before this
2586  * function completes. A similar but shorter race condition exists in the busy
2587  * ioctl.
2588  */
2589 int
2590 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2591 {
2592         struct drm_i915_gem_wait *args = data;
2593         struct drm_i915_gem_object *obj;
2594         struct intel_ring_buffer *ring = NULL;
2595         struct timespec timeout_stack, *timeout = NULL;
2596         u32 seqno = 0;
2597         int ret = 0;
2598
2599         if (args->timeout_ns >= 0) {
2600                 timeout_stack.tv_sec = args->timeout_ns / 1000000000;
2601                 timeout_stack.tv_nsec = args->timeout_ns % 1000000000;
2602                 timeout = &timeout_stack;
2603         }
2604
2605         ret = i915_mutex_lock_interruptible(dev);
2606         if (ret)
2607                 return ret;
2608
2609         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2610         if (&obj->base == NULL) {
2611                 DRM_UNLOCK(dev);
2612                 return -ENOENT;
2613         }
2614
2615         /* Need to make sure the object gets inactive eventually. */
2616         ret = i915_gem_object_flush_active(obj);
2617         if (ret)
2618                 goto out;
2619
2620         if (obj->active) {
2621                 seqno = obj->last_read_seqno;
2622                 ring = obj->ring;
2623         }
2624
2625         if (seqno == 0)
2626                  goto out;
2627
2628         /* Do this after OLR check to make sure we make forward progress polling
2629          * on this IOCTL with a 0 timeout (like busy ioctl)
2630          */
2631         if (!args->timeout_ns) {
2632                 ret = -ETIMEDOUT;
2633                 goto out;
2634         }
2635
2636         drm_gem_object_unreference(&obj->base);
2637         DRM_UNLOCK(dev);
2638
2639         ret = __wait_seqno(ring, seqno, true, timeout);
2640         if (timeout) {
2641                 args->timeout_ns = timeout->tv_sec * 1000000000 + timeout->tv_nsec;
2642         }
2643         return ret;
2644
2645 out:
2646         drm_gem_object_unreference(&obj->base);
2647         DRM_UNLOCK(dev);
2648         return ret;
2649 }
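
/*
 * Sketch of a userspace caller (illustrative, error handling omitted),
 * waiting up to one second for a buffer to go idle:
 *
 *      struct drm_i915_gem_wait w = {
 *              .bo_handle = bo_handle,
 *              .timeout_ns = 1000000000,
 *      };
 *      ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &w);
 *
 * When a timeout was supplied, w.timeout_ns holds the approximate time
 * remaining on return; a negative timeout_ns on entry waits without a
 * deadline.
 */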
2650
2651 /**
2652  * i915_gem_object_sync - sync an object to a ring.
2653  *
2654  * @obj: object which may be in use on another ring.
2655  * @to: ring we wish to use the object on. May be NULL.
2656  *
2657  * This code is meant to abstract object synchronization with the GPU.
2658  * Calling with NULL implies synchronizing the object with the CPU
2659  * rather than a particular GPU ring.
2660  *
2661  * Returns 0 if successful, else propagates up the lower layer error.
2662  */
2663 int
2664 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2665                      struct intel_ring_buffer *to)
2666 {
2667         struct intel_ring_buffer *from = obj->ring;
2668         u32 seqno;
2669         int ret, idx;
2670
2671         if (from == NULL || to == from)
2672                 return 0;
2673
2674         if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2675                 return i915_gem_object_wait_rendering(obj, false);
2676
2677         idx = intel_ring_sync_index(from, to);
2678
2679         seqno = obj->last_read_seqno;
2680         if (seqno <= from->sync_seqno[idx])
2681                 return 0;
2682
2683         ret = i915_gem_check_olr(obj->ring, seqno);
2684         if (ret)
2685                 return ret;
2686
2687         ret = to->sync_to(to, from, seqno);
2688         if (!ret)
2689                 /* We use last_read_seqno because sync_to()
2690                  * might have just caused seqno wrap under
2691                  * the radar.
2692                  */
2693                 from->sync_seqno[idx] = obj->last_read_seqno;
2694
2695         return ret;
2696 }
2697
2698 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2699 {
2700         u32 old_write_domain, old_read_domains;
2701
2702         /* Act as a barrier for all accesses through the GTT */
2703         mb();
2704
2705         /* Force a pagefault for domain tracking on next user access */
2706         i915_gem_release_mmap(obj);
2707
2708         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2709                 return;
2710
2711         old_read_domains = obj->base.read_domains;
2712         old_write_domain = obj->base.write_domain;
2713
2714         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2715         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2716
2717         CTR3(KTR_DRM, "object_change_domain finish gtt %p %x %x",
2718             obj, old_read_domains, old_write_domain);
2719 }
2720
2721 /**
2722  * Unbinds an object from the GTT aperture.
2723  */
2724 int
2725 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2726 {
2727         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2728         int ret = 0;
2729
2730         if (obj->gtt_space == NULL)
2731                 return 0;
2732
2733         if (obj->pin_count)
2734                 return -EBUSY;
2735
2736         BUG_ON(obj->pages == NULL);
2737
2738         ret = i915_gem_object_finish_gpu(obj);
2739         if (ret)
2740                 return ret;
2741         /* Continue on if we fail due to EIO; the GPU is hung, so we
2742          * should be safe and we need to clean up or else we might
2743          * cause memory corruption through use-after-free.
2744          */
2745
2746         i915_gem_object_finish_gtt(obj);
2747
2748         /* release the fence reg _after_ flushing */
2749         ret = i915_gem_object_put_fence(obj);
2750         if (ret)
2751                 return ret;
2752
2753         if (obj->has_global_gtt_mapping)
2754                 i915_gem_gtt_unbind_object(obj);
2755         if (obj->has_aliasing_ppgtt_mapping) {
2756                 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2757                 obj->has_aliasing_ppgtt_mapping = 0;
2758         }
2759         i915_gem_gtt_finish_object(obj);
2760
2761         list_del(&obj->mm_list);
2762         list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
2763         /* Avoid an unnecessary call to unbind on rebind. */
2764         obj->map_and_fenceable = true;
2765
2766         drm_mm_put_block(obj->gtt_space);
2767         obj->gtt_space = NULL;
2768         obj->gtt_offset = 0;
2769
2770         return 0;
2771 }
2772
2773 int i915_gpu_idle(struct drm_device *dev)
2774 {
2775         drm_i915_private_t *dev_priv = dev->dev_private;
2776         struct intel_ring_buffer *ring;
2777         int ret, i;
2778
2779         /* Flush everything onto the inactive list. */
2780         for_each_ring(ring, dev_priv, i) {
2781                 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2782                 if (ret)
2783                         return ret;
2784
2785                 ret = intel_ring_idle(ring);
2786                 if (ret)
2787                         return ret;
2788         }
2789
2790         return 0;
2791 }
2792
2793 static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
2794                                         struct drm_i915_gem_object *obj)
2795 {
2796         drm_i915_private_t *dev_priv = dev->dev_private;
2797         uint64_t val;
2798
2799         if (obj) {
2800                 u32 size = obj->gtt_space->size;
2801
2802                 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2803                                  0xfffff000) << 32;
2804                 val |= obj->gtt_offset & 0xfffff000;
2805                 val |= (uint64_t)((obj->stride / 128) - 1) <<
2806                         SANDYBRIDGE_FENCE_PITCH_SHIFT;
2807
2808                 if (obj->tiling_mode == I915_TILING_Y)
2809                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2810                 val |= I965_FENCE_REG_VALID;
2811         } else
2812                 val = 0;
2813
2814         I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
2815         POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
2816 }
2817
2818 static void i965_write_fence_reg(struct drm_device *dev, int reg,
2819                                  struct drm_i915_gem_object *obj)
2820 {
2821         drm_i915_private_t *dev_priv = dev->dev_private;
2822         uint64_t val;
2823
2824         if (obj) {
2825                 u32 size = obj->gtt_space->size;
2826
2827                 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2828                                  0xfffff000) << 32;
2829                 val |= obj->gtt_offset & 0xfffff000;
2830                 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2831                 if (obj->tiling_mode == I915_TILING_Y)
2832                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2833                 val |= I965_FENCE_REG_VALID;
2834         } else
2835                 val = 0;
2836
2837         I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
2838         POSTING_READ(FENCE_REG_965_0 + reg * 8);
2839 }
2840
2841 static void i915_write_fence_reg(struct drm_device *dev, int reg,
2842                                  struct drm_i915_gem_object *obj)
2843 {
2844         drm_i915_private_t *dev_priv = dev->dev_private;
2845         u32 val;
2846
2847         if (obj) {
2848                 u32 size = obj->gtt_space->size;
2849                 int pitch_val;
2850                 int tile_width;
2851
2852                 WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2853                      (size & -size) != size ||
2854                      (obj->gtt_offset & (size - 1)),
2855                      "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2856                      obj->gtt_offset, obj->map_and_fenceable, size);
2857
2858                 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2859                         tile_width = 128;
2860                 else
2861                         tile_width = 512;
2862
2863                 /* Note: pitch better be a power of two tile widths */
2864                 pitch_val = obj->stride / tile_width;
2865                 pitch_val = ffs(pitch_val) - 1;
2866
2867                 val = obj->gtt_offset;
2868                 if (obj->tiling_mode == I915_TILING_Y)
2869                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2870                 val |= I915_FENCE_SIZE_BITS(size);
2871                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2872                 val |= I830_FENCE_REG_VALID;
2873         } else
2874                 val = 0;
2875
2876         if (reg < 8)
2877                 reg = FENCE_REG_830_0 + reg * 4;
2878         else
2879                 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2880
2881         I915_WRITE(reg, val);
2882         POSTING_READ(reg);
2883 }
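
/*
 * Worked example for the pitch encoding above (illustrative): an X-tiled
 * surface with a 2048-byte stride uses 512-byte tiles, so pitch_val =
 * ffs(2048 / 512) - 1 = 2, i.e. the register stores log2 of the pitch
 * expressed in tile widths.
 */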
2884
2885 static void i830_write_fence_reg(struct drm_device *dev, int reg,
2886                                 struct drm_i915_gem_object *obj)
2887 {
2888         drm_i915_private_t *dev_priv = dev->dev_private;
2889         uint32_t val;
2890
2891         if (obj) {
2892                 u32 size = obj->gtt_space->size;
2893                 uint32_t pitch_val;
2894
2895                 WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2896                      (size & -size) != size ||
2897                      (obj->gtt_offset & (size - 1)),
2898                      "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2899                      obj->gtt_offset, size);
2900
2901                 pitch_val = obj->stride / 128;
2902                 pitch_val = ffs(pitch_val) - 1;
2903
2904                 val = obj->gtt_offset;
2905                 if (obj->tiling_mode == I915_TILING_Y)
2906                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2907                 val |= I830_FENCE_SIZE_BITS(size);
2908                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2909                 val |= I830_FENCE_REG_VALID;
2910         } else
2911                 val = 0;
2912
2913         I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2914         POSTING_READ(FENCE_REG_830_0 + reg * 4);
2915 }
2916
2917 static void i915_gem_write_fence(struct drm_device *dev, int reg,
2918                                  struct drm_i915_gem_object *obj)
2919 {
2920         switch (INTEL_INFO(dev)->gen) {
2921         case 7:
2922         case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
2923         case 5:
2924         case 4: i965_write_fence_reg(dev, reg, obj); break;
2925         case 3: i915_write_fence_reg(dev, reg, obj); break;
2926         case 2: i830_write_fence_reg(dev, reg, obj); break;
2927         default: break;
2928         }
2929 }
2930
2931 static inline int fence_number(struct drm_i915_private *dev_priv,
2932                                struct drm_i915_fence_reg *fence)
2933 {
2934         return fence - dev_priv->fence_regs;
2935 }
2936
2937 static void i915_gem_write_fence__ipi(void *data)
2938 {
2939         wbinvd();
2940 }
2941
2942 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2943                                          struct drm_i915_fence_reg *fence,
2944                                          bool enable)
2945 {
2946         struct drm_device *dev = obj->base.dev;
2947         struct drm_i915_private *dev_priv = dev->dev_private;
2948         int fence_reg = fence_number(dev_priv, fence);
2949
2950         /* In order to fully serialize access to the fenced region and
2951          * the update to the fence register we need to take extreme
2952          * measures on SNB+. In theory, the write to the fence register
2953          * flushes all memory transactions before, and coupled with the
2954          * mb() placed around the register write we serialise all memory
2955          * operations with respect to the changes in the tiler. Yet, on
2956          * SNB+ we need to take a step further and emit an explicit wbinvd()
2957          * on each processor in order to manually flush all memory
2958          * transactions before updating the fence register.
2959          */
2960         if (HAS_LLC(obj->base.dev))
2961                 on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
2962         i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);
2963
2964         if (enable) {
2965                 obj->fence_reg = fence_reg;
2966                 fence->obj = obj;
2967                 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2968         } else {
2969                 obj->fence_reg = I915_FENCE_REG_NONE;
2970                 fence->obj = NULL;
2971                 list_del_init(&fence->lru_list);
2972         }
2973 }
2974
2975 static int
2976 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
2977 {
2978         if (obj->last_fenced_seqno) {
2979                 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2980                 if (ret)
2981                         return ret;
2982
2983                 obj->last_fenced_seqno = 0;
2984         }
2985
2986         /* Ensure that all CPU reads are completed before installing a fence
2987          * and all writes before removing the fence.
2988          */
2989         if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2990                 mb();
2991
2992         obj->fenced_gpu_access = false;
2993         return 0;
2994 }
2995
2996 int
2997 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2998 {
2999         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3000         int ret;
3001
3002         ret = i915_gem_object_flush_fence(obj);
3003         if (ret)
3004                 return ret;
3005
3006         if (obj->fence_reg == I915_FENCE_REG_NONE)
3007                 return 0;
3008
3009         i915_gem_object_update_fence(obj,
3010                                      &dev_priv->fence_regs[obj->fence_reg],
3011                                      false);
3012         i915_gem_object_fence_lost(obj);
3013
3014         return 0;
3015 }
3016
3017 static struct drm_i915_fence_reg *
3018 i915_find_fence_reg(struct drm_device *dev)
3019 {
3020         struct drm_i915_private *dev_priv = dev->dev_private;
3021         struct drm_i915_fence_reg *reg, *avail;
3022         int i;
3023
3024         /* First try to find a free reg */
3025         avail = NULL;
3026         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3027                 reg = &dev_priv->fence_regs[i];
3028                 if (!reg->obj)
3029                         return reg;
3030
3031                 if (!reg->pin_count)
3032                         avail = reg;
3033         }
3034
3035         if (avail == NULL)
3036                 return NULL;
3037
3038         /* None available, try to steal one or wait for a user to finish */
3039         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3040                 if (reg->pin_count)
3041                         continue;
3042
3043                 return reg;
3044         }
3045
3046         return NULL;
3047 }
3048
3049 /**
3050  * i915_gem_object_get_fence - set up fencing for an object
3051  * @obj: object to map through a fence reg
3052  *
3053  * When mapping objects through the GTT, userspace wants to be able to write
3054  * to them without having to worry about swizzling if the object is tiled.
3055  * This function walks the fence regs looking for a free one for @obj,
3056  * stealing one if it can't find any.
3057  *
3058  * It then sets up the reg based on the object's properties: address, pitch
3059  * and tiling format.
3060  *
3061  * For an untiled surface, this removes any existing fence.
3062  */
3063 int
3064 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3065 {
3066         struct drm_device *dev = obj->base.dev;
3067         struct drm_i915_private *dev_priv = dev->dev_private;
3068         bool enable = obj->tiling_mode != I915_TILING_NONE;
3069         struct drm_i915_fence_reg *reg;
3070         int ret;
3071
3072         /* Have we updated the tiling parameters on the object and so
3073          * need to serialise the write to the associated fence register?
3074          */
3075         if (obj->fence_dirty) {
3076                 ret = i915_gem_object_flush_fence(obj);
3077                 if (ret)
3078                         return ret;
3079         }
3080
3081         /* Just update our place in the LRU if our fence is getting reused. */
3082         if (obj->fence_reg != I915_FENCE_REG_NONE) {
3083                 reg = &dev_priv->fence_regs[obj->fence_reg];
3084                 if (!obj->fence_dirty) {
3085                         list_move_tail(&reg->lru_list,
3086                                        &dev_priv->mm.fence_list);
3087                         return 0;
3088                 }
3089         } else if (enable) {
3090                 reg = i915_find_fence_reg(dev);
3091                 if (reg == NULL)
3092                         return -EDEADLK;
3093
3094                 if (reg->obj) {
3095                         struct drm_i915_gem_object *old = reg->obj;
3096
3097                         ret = i915_gem_object_flush_fence(old);
3098                         if (ret)
3099                                 return ret;
3100
3101                         i915_gem_object_fence_lost(old);
3102                 }
3103         } else
3104                 return 0;
3105
3106         i915_gem_object_update_fence(obj, reg, enable);
3107         obj->fence_dirty = false;
3108
3109         return 0;
3110 }
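/*
 * Minimal usage sketch (illustrative only, compiled out): a typical caller
 * pins the object into the mappable, fenceable part of the aperture first
 * and then asks for a fence so that CPU access through the GTT is detiled.
 * The helper name below is hypothetical and not taken verbatim from any
 * caller in this driver.
 */
#if 0
static int
example_pin_and_fence(struct drm_i915_gem_object *obj, uint32_t alignment)
{
	int ret;

	/* Bind into the mappable, fenceable part of the aperture. */
	ret = i915_gem_object_pin(obj, alignment, true, false);
	if (ret)
		return ret;

	/* Allocate (or steal) a fence register for the tiled object. */
	ret = i915_gem_object_get_fence(obj);
	if (ret)
		i915_gem_object_unpin(obj);
	return ret;
}
#endif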
3111
3112 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3113                                      struct drm_mm_node *gtt_space,
3114                                      unsigned long cache_level)
3115 {
3116         struct drm_mm_node *other;
3117
3118         /* On non-LLC machines we have to be careful when putting differing
3119          * types of snoopable memory together to avoid the prefetcher
3120          * crossing memory domains and dying.
3121          */
3122         if (HAS_LLC(dev))
3123                 return true;
3124
3125         if (gtt_space == NULL)
3126                 return true;
3127
3128         if (list_empty(&gtt_space->node_list))
3129                 return true;
3130
3131         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3132         if (other->allocated && !other->hole_follows && other->color != cache_level)
3133                 return false;
3134
3135         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3136         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3137                 return false;
3138
3139         return true;
3140 }
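/*
 * Worked example (hedged): on a non-LLC part, if the node immediately
 * preceding @gtt_space is allocated with color I915_CACHE_LLC and no hole
 * separates the two, then asking for I915_CACHE_NONE at @gtt_space fails
 * the check above; the caller has to find (or evict to) a placement with a
 * guard hole between the differently-cached neighbours.
 */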
3141
3142 static void i915_gem_verify_gtt(struct drm_device *dev)
3143 {
3144 #if WATCH_GTT
3145         struct drm_i915_private *dev_priv = dev->dev_private;
3146         struct drm_i915_gem_object *obj;
3147         int err = 0;
3148
3149         list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
3150                 if (obj->gtt_space == NULL) {
3151                         DRM_ERROR("object found on GTT list with no space reserved\n");
3152                         err++;
3153                         continue;
3154                 }
3155
3156                 if (obj->cache_level != obj->gtt_space->color) {
3157                         DRM_ERROR("object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
3158                                obj->gtt_space->start,
3159                                obj->gtt_space->start + obj->gtt_space->size,
3160                                obj->cache_level,
3161                                obj->gtt_space->color);
3162                         err++;
3163                         continue;
3164                 }
3165
3166                 if (!i915_gem_valid_gtt_space(dev,
3167                                               obj->gtt_space,
3168                                               obj->cache_level)) {
3169                         DRM_ERROR("invalid GTT space found at [%08lx, %08lx] - color=%x\n",
3170                                obj->gtt_space->start,
3171                                obj->gtt_space->start + obj->gtt_space->size,
3172                                obj->cache_level);
3173                         err++;
3174                         continue;
3175                 }
3176         }
3177
3178         WARN_ON(err);
3179 #endif
3180 }
3181
3182 /**
3183  * Finds free space in the GTT aperture and binds the object there.
3184  */
3185 static int
3186 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
3187                             unsigned alignment,
3188                             bool map_and_fenceable,
3189                             bool nonblocking)
3190 {
3191         struct drm_device *dev = obj->base.dev;
3192         drm_i915_private_t *dev_priv = dev->dev_private;
3193         struct drm_mm_node *node;
3194         u32 size, fence_size, fence_alignment, unfenced_alignment;
3195         bool mappable, fenceable;
3196         int ret;
3197
3198         if (obj->madv != I915_MADV_WILLNEED) {
3199                 DRM_ERROR("Attempting to bind a purgeable object\n");
3200                 return -EINVAL;
3201         }
3202
3203         fence_size = i915_gem_get_gtt_size(dev,
3204                                            obj->base.size,
3205                                            obj->tiling_mode);
3206         fence_alignment = i915_gem_get_gtt_alignment(dev,
3207                                                      obj->base.size,
3208                                                      obj->tiling_mode);
3209         unfenced_alignment =
3210                 i915_gem_get_unfenced_gtt_alignment(dev,
3211                                                     obj->base.size,
3212                                                     obj->tiling_mode);
3213
3214         if (alignment == 0)
3215                 alignment = map_and_fenceable ? fence_alignment :
3216                                                 unfenced_alignment;
3217         if (map_and_fenceable && alignment & (fence_alignment - 1)) {
3218                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
3219                 return -EINVAL;
3220         }
3221
3222         size = map_and_fenceable ? fence_size : obj->base.size;
3223
3224         /* If the object is bigger than the entire aperture, reject it early
3225          * before evicting everything in a vain attempt to find space.
3226          */
3227         if (obj->base.size >
3228             (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
3229                 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
3230                 return -E2BIG;
3231         }
3232
3233         ret = i915_gem_object_get_pages(obj);
3234         if (ret)
3235                 return ret;
3236
3237         i915_gem_object_pin_pages(obj);
3238
3239         node = malloc(sizeof(*node), DRM_MEM_MM, M_NOWAIT | M_ZERO);
3240         if (node == NULL) {
3241                 i915_gem_object_unpin_pages(obj);
3242                 return -ENOMEM;
3243         }
3244
3245  search_free:
3246         if (map_and_fenceable)
3247                 ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
3248                                                           size, alignment, obj->cache_level,
3249                                                           0, dev_priv->mm.gtt_mappable_end);
3250         else
3251                 ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
3252                                                  size, alignment, obj->cache_level);
3253         if (ret) {
3254                 ret = i915_gem_evict_something(dev, size, alignment,
3255                                                obj->cache_level,
3256                                                map_and_fenceable,
3257                                                nonblocking);
3258                 if (ret == 0)
3259                         goto search_free;
3260
3261                 i915_gem_object_unpin_pages(obj);
3262                 free(node, DRM_MEM_MM);
3263                 return ret;
3264         }
3265         if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
3266                 i915_gem_object_unpin_pages(obj);
3267                 drm_mm_put_block(node);
3268                 return -EINVAL;
3269         }
3270
3271         ret = i915_gem_gtt_prepare_object(obj);
3272         if (ret) {
3273                 i915_gem_object_unpin_pages(obj);
3274                 drm_mm_put_block(node);
3275                 return ret;
3276         }
3277
3278         list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
3279         list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3280
3281         obj->gtt_space = node;
3282         obj->gtt_offset = node->start;
3283
3284         fenceable =
3285                 node->size == fence_size &&
3286                 (node->start & (fence_alignment - 1)) == 0;
3287
3288         mappable =
3289                 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
3290
3291         obj->map_and_fenceable = mappable && fenceable;
3292
3293         i915_gem_object_unpin_pages(obj);
3294         CTR4(KTR_DRM, "object_bind %p %x %x %d", obj, obj->gtt_offset,
3295             obj->base.size, map_and_fenceable);
3296         i915_gem_verify_gtt(dev);
3297         return 0;
3298 }
3299
3300 void
3301 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
3302 {
3303         /* If we don't have a page list set up, then we're not pinned
3304          * to GPU, and we can ignore the cache flush because it'll happen
3305          * again at bind time.
3306          */
3307         if (obj->pages == NULL)
3308                 return;
3309
3310         /* If the GPU is snooping the contents of the CPU cache,
3311          * we do not need to manually clear the CPU cache lines.  However,
3312          * the caches are only snooped when the render cache is
3313          * flushed/invalidated.  As we always have to emit invalidations
3314          * and flushes when moving into and out of the RENDER domain, correct
3315          * snooping behaviour occurs naturally as the result of our domain
3316          * tracking.
3317          */
3318         if (obj->cache_level != I915_CACHE_NONE)
3319                 return;
3320
3321         CTR1(KTR_DRM, "object_clflush %p", obj);
3322
3323         drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
3324 }
3325
3326 /** Flushes the GTT write domain for the object if it's dirty. */
3327 static void
3328 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3329 {
3330         uint32_t old_write_domain;
3331
3332         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3333                 return;
3334
3335         /* No actual flushing is required for the GTT write domain.  Writes
3336          * to it immediately go to main memory as far as we know, so there's
3337          * no chipset flush.  It also doesn't land in render cache.
3338          *
3339          * However, we do have to enforce the order so that all writes through
3340          * the GTT land before any writes to the device, such as updates to
3341          * the GATT itself.
3342          */
3343         wmb();
3344
3345         old_write_domain = obj->base.write_domain;
3346         obj->base.write_domain = 0;
3347
3348         CTR3(KTR_DRM, "object_change_domain flush gtt_write %p %x %x", obj,
3349             obj->base.read_domains, old_write_domain);
3350 }
3351
3352 /** Flushes the CPU write domain for the object if it's dirty. */
3353 static void
3354 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3355 {
3356         uint32_t old_write_domain;
3357
3358         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3359                 return;
3360
3361         i915_gem_clflush_object(obj);
3362         i915_gem_chipset_flush(obj->base.dev);
3363         old_write_domain = obj->base.write_domain;
3364         obj->base.write_domain = 0;
3365
3366         CTR3(KTR_DRM, "object_change_domain flush_cpu_write %p %x %x", obj,
3367             obj->base.read_domains, old_write_domain);
3368 }
3369
3370 /**
3371  * Moves a single object to the GTT read, and possibly write domain.
3372  *
3373  * This function returns when the move is complete, including waiting on
3374  * flushes to occur.
3375  */
3376 int
3377 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3378 {
3379         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
3380         uint32_t old_write_domain, old_read_domains;
3381         int ret;
3382
3383         /* Not valid to be called on unbound objects. */
3384         if (obj->gtt_space == NULL)
3385                 return -EINVAL;
3386
3387         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3388                 return 0;
3389
3390         ret = i915_gem_object_wait_rendering(obj, !write);
3391         if (ret)
3392                 return ret;
3393
3394         i915_gem_object_flush_cpu_write_domain(obj);
3395
3396         old_write_domain = obj->base.write_domain;
3397         old_read_domains = obj->base.read_domains;
3398
3399         /* It should now be out of any other write domains, and we can update
3400          * the domain values for our changes.
3401          */
3402         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3403         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3404         if (write) {
3405                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3406                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3407                 obj->dirty = 1;
3408         }
3409
3410         CTR3(KTR_DRM, "object_change_domain set_to_gtt %p %x %x", obj,
3411             old_read_domains, old_write_domain);
3412
3413         /* And bump the LRU for this access */
3414         if (i915_gem_object_is_inactive(obj))
3415                 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3416
3417         return 0;
3418 }
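/*
 * Hedged usage sketch (compiled out): before the CPU writes through a GTT
 * mapping of a bound object, callers move it into the GTT write domain so
 * that stale CPU-cache contents are flushed and domain tracking stays
 * consistent.  The helper name below is hypothetical.
 */
#if 0
static int
example_prepare_gtt_write(struct drm_i915_gem_object *obj)
{
	/* Requires the object to be bound and struct_mutex to be held. */
	return (i915_gem_object_set_to_gtt_domain(obj, true));
}
#endif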
3419
3420 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3421                                     enum i915_cache_level cache_level)
3422 {
3423         struct drm_device *dev = obj->base.dev;
3424         drm_i915_private_t *dev_priv = dev->dev_private;
3425         int ret;
3426
3427         if (obj->cache_level == cache_level)
3428                 return 0;
3429
3430         if (obj->pin_count) {
3431                 DRM_DEBUG("cannot change the cache level of pinned objects\n");
3432                 return -EBUSY;
3433         }
3434
3435         if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
3436                 ret = i915_gem_object_unbind(obj);
3437                 if (ret)
3438                         return ret;
3439         }
3440
3441         if (obj->gtt_space) {
3442                 ret = i915_gem_object_finish_gpu(obj);
3443                 if (ret)
3444                         return ret;
3445
3446                 i915_gem_object_finish_gtt(obj);
3447
3448                 /* Before SandyBridge, you could not use tiling or fence
3449                  * registers with snooped memory, so relinquish any fences
3450                  * currently pointing to our region in the aperture.
3451                  */
3452                 if (INTEL_INFO(dev)->gen < 6) {
3453                         ret = i915_gem_object_put_fence(obj);
3454                         if (ret)
3455                                 return ret;
3456                 }
3457
3458                 if (obj->has_global_gtt_mapping)
3459                         i915_gem_gtt_bind_object(obj, cache_level);
3460                 if (obj->has_aliasing_ppgtt_mapping)
3461                         i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3462                                                obj, cache_level);
3463
3464                 obj->gtt_space->color = cache_level;
3465         }
3466
3467         if (cache_level == I915_CACHE_NONE) {
3468                 u32 old_read_domains, old_write_domain;
3469
3470                 /* If we're coming from LLC cached, then we haven't
3471                  * actually been tracking whether the data is in the
3472                  * CPU cache or not, since we only allow one bit set
3473                  * in obj->write_domain and have been skipping the clflushes.
3474                  * Just set it to the CPU cache for now.
3475                  */
3476                 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3477                 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
3478
3479                 old_read_domains = obj->base.read_domains;
3480                 old_write_domain = obj->base.write_domain;
3481
3482                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3483                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3484
3485                 CTR3(KTR_DRM, "object_change_domain set_cache_level %p %x %x",
3486                     obj, old_read_domains, old_write_domain);
3487         }
3488
3489         obj->cache_level = cache_level;
3490         i915_gem_verify_gtt(dev);
3491         return 0;
3492 }
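/*
 * Hedged usage sketch (compiled out): scanout buffers must be uncached
 * because the display engine does not snoop the LLC, so callers drop the
 * cache level to I915_CACHE_NONE before flipping to the buffer.  This is
 * what i915_gem_object_pin_to_display_plane() below does; the standalone
 * call is shown only for illustration.
 */
#if 0
static int
example_make_scanout_coherent(struct drm_i915_gem_object *obj)
{
	/* Uncache the PTEs so the non-snooping display engine sees writes. */
	return (i915_gem_object_set_cache_level(obj, I915_CACHE_NONE));
}
#endif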
3493
3494 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3495                                struct drm_file *file)
3496 {
3497         struct drm_i915_gem_caching *args = data;
3498         struct drm_i915_gem_object *obj;
3499         int ret;
3500
3501         ret = i915_mutex_lock_interruptible(dev);
3502         if (ret)
3503                 return ret;
3504
3505         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3506         if (&obj->base == NULL) {
3507                 ret = -ENOENT;
3508                 goto unlock;
3509         }
3510
3511         args->caching = obj->cache_level != I915_CACHE_NONE;
3512
3513         drm_gem_object_unreference(&obj->base);
3514 unlock:
3515         DRM_UNLOCK(dev);
3516         return ret;
3517 }
3518
3519 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3520                                struct drm_file *file)
3521 {
3522         struct drm_i915_gem_caching *args = data;
3523         struct drm_i915_gem_object *obj;
3524         enum i915_cache_level level;
3525         int ret;
3526
3527         switch (args->caching) {
3528         case I915_CACHING_NONE:
3529                 level = I915_CACHE_NONE;
3530                 break;
3531         case I915_CACHING_CACHED:
3532                 level = I915_CACHE_LLC;
3533                 break;
3534         default:
3535                 return -EINVAL;
3536         }
3537
3538         ret = i915_mutex_lock_interruptible(dev);
3539         if (ret)
3540                 return ret;
3541
3542         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3543         if (&obj->base == NULL) {
3544                 ret = -ENOENT;
3545                 goto unlock;
3546         }
3547
3548         ret = i915_gem_object_set_cache_level(obj, level);
3549
3550         drm_gem_object_unreference(&obj->base);
3551 unlock:
3552         DRM_UNLOCK(dev);
3553         return ret;
3554 }
3555
3556 static bool is_pin_display(struct drm_i915_gem_object *obj)
3557 {
3558         /* There are 3 sources that pin objects:
3559          *   1. The display engine (scanouts, sprites, cursors);
3560          *   2. Reservations for execbuffer;
3561          *   3. The user.
3562          *
3563          * We can ignore reservations as we hold the struct_mutex and
3564          * are only called outside of the reservation path.  The user
3565          * can only increment pin_count once, and so if after
3566          * subtracting the potential reference by the user, any pin_count
3567          * remains, it must be due to another use by the display engine.
3568          */
3569         return obj->pin_count - !!obj->user_pin_count;
3570 }
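/*
 * Worked example (hedged): with pin_count == 2 and user_pin_count == 1,
 * subtracting the single pin the user can hold leaves 1, so the remaining
 * pin is attributed to the display engine and the function returns true.
 * With pin_count == 1 and user_pin_count == 1 the difference is 0 and no
 * display pin is assumed.
 */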
3571
3572 /*
3573  * Prepare buffer for display plane (scanout, cursors, etc).
3574  * Can be called from an uninterruptible phase (modesetting) and allows
3575  * any flushes to be pipelined (for pageflips).
3576  */
3577 int
3578 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3579                                      u32 alignment,
3580                                      struct intel_ring_buffer *pipelined)
3581 {
3582         u32 old_read_domains, old_write_domain;
3583         int ret;
3584
3585         if (pipelined != obj->ring) {
3586                 ret = i915_gem_object_sync(obj, pipelined);
3587                 if (ret)
3588                         return ret;
3589         }
3590
3591         /* Mark the pin_display early so that we account for the
3592          * display coherency whilst setting up the cache domains.
3593          */
3594         obj->pin_display = true;
3595
3596         /* The display engine is not coherent with the LLC cache on gen6.  As
3597          * a result, we make sure that the pinning that is about to occur is
3598          * done with uncached PTEs. This is the lowest common denominator for all
3599          * chipsets.
3600          *
3601          * However for gen6+, we could do better by using the GFDT bit instead
3602          * of uncaching, which would allow us to flush all the LLC-cached data
3603          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3604          */
3605         ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
3606         if (ret)
3607                 goto err_unpin_display;
3608
3609         /* As the user may map the buffer once pinned in the display plane
3610          * (e.g. libkms for the bootup splash), we have to ensure that we
3611          * always use map_and_fenceable for all scanout buffers.
3612          */
3613         ret = i915_gem_object_pin(obj, alignment, true, false);
3614         if (ret)
3615                 goto err_unpin_display;
3616
3617         i915_gem_object_flush_cpu_write_domain(obj);
3618
3619         old_write_domain = obj->base.write_domain;
3620         old_read_domains = obj->base.read_domains;
3621
3622         /* It should now be out of any other write domains, and we can update
3623          * the domain values for our changes.
3624          */
3625         obj->base.write_domain = 0;
3626         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3627
3628         CTR3(KTR_DRM, "object_change_domain pin_to_display_plane %p %x %x",
3629             obj, old_read_domains, old_write_domain);
3630
3631         return 0;
3632
3633 err_unpin_display:
3634         obj->pin_display = is_pin_display(obj);
3635         return ret;
3636 }
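/*
 * Hedged usage sketch (compiled out): a pageflip-style caller pins the
 * framebuffer for scanout and later releases it with
 * i915_gem_object_unpin_from_display_plane().  The function name and the
 * alignment below are illustrative, not taken verbatim from the
 * modesetting code.
 */
#if 0
static int
example_pin_scanout(struct drm_i915_gem_object *obj,
		    struct intel_ring_buffer *pipelined)
{
	int ret;

	ret = i915_gem_object_pin_to_display_plane(obj, 4096, pipelined);
	if (ret)
		return ret;

	/* ... scan out from obj->gtt_offset ... */

	i915_gem_object_unpin_from_display_plane(obj);
	return 0;
}
#endif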
3637
3638 void
3639 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3640 {
3641         i915_gem_object_unpin(obj);
3642         obj->pin_display = is_pin_display(obj);
3643 }
3644
3645 int
3646 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3647 {
3648         int ret;
3649
3650         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3651                 return 0;
3652
3653         ret = i915_gem_object_wait_rendering(obj, false);
3654         if (ret)
3655                 return ret;
3656
3657         /* Ensure that we invalidate the GPU's caches and TLBs. */
3658         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3659         return 0;
3660 }
3661
3662 /**
3663  * Moves a single object to the CPU read, and possibly write domain.
3664  *
3665  * This function returns when the move is complete, including waiting on
3666  * flushes to occur.
3667  */
3668 int
3669 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3670 {
3671         uint32_t old_write_domain, old_read_domains;
3672         int ret;
3673
3674         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3675                 return 0;
3676
3677         ret = i915_gem_object_wait_rendering(obj, !write);
3678         if (ret)
3679                 return ret;
3680
3681         i915_gem_object_flush_gtt_write_domain(obj);
3682
3683         old_write_domain = obj->base.write_domain;
3684         old_read_domains = obj->base.read_domains;
3685
3686         /* Flush the CPU cache if it's still invalid. */
3687         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3688                 i915_gem_clflush_object(obj);
3689
3690                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3691         }
3692
3693         /* It should now be out of any other write domains, and we can update
3694          * the domain values for our changes.
3695          */
3696         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3697
3698         /* If we're writing through the CPU, then the GPU read domains will
3699          * need to be invalidated at next use.
3700          */
3701         if (write) {
3702                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3703                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3704         }
3705
3706         CTR3(KTR_DRM, "object_change_domain set_to_cpu %p %x %x", obj,
3707             old_read_domains, old_write_domain);
3708
3709         return 0;
3710 }
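/*
 * Hedged usage sketch (compiled out): the CPU-domain counterpart of the
 * GTT move above, used before reading an object's pages directly with the
 * CPU so that any outstanding GPU or GTT writes are flushed first.
 */
#if 0
static int
example_prepare_cpu_read(struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_set_to_cpu_domain(obj, false));
}
#endif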
3711
3712 /* Throttle our rendering by waiting until the ring has completed our requests
3713  * emitted over 20 msec ago.
3714  *
3715  * Note that if we were to use the current jiffies each time around the loop,
3716  * we wouldn't escape the function with any frames outstanding if the time to
3717  * render a frame was over 20ms.
3718  *
3719  * This should get us reasonable parallelism between CPU and GPU but also
3720  * relatively low latency when blocking on a particular request to finish.
3721  */
3722 static int
3723 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3724 {
3725         struct drm_i915_private *dev_priv = dev->dev_private;
3726         struct drm_i915_file_private *file_priv = file->driver_priv;
3727         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3728         struct drm_i915_gem_request *request;
3729         struct intel_ring_buffer *ring = NULL;
3730         u32 seqno = 0;
3731         int ret;
3732
3733         if (atomic_read(&dev_priv->mm.wedged))
3734                 return -EIO;
3735
3736         mtx_lock(&file_priv->mm.lock);
3737         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3738                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3739                         break;
3740
3741                 ring = request->ring;
3742                 seqno = request->seqno;
3743         }
3744         mtx_unlock(&file_priv->mm.lock);
3745
3746         if (seqno == 0)
3747                 return 0;
3748
3749         ret = __wait_seqno(ring, seqno, true, NULL);
3750         if (ret == 0)
3751                 taskqueue_enqueue_timeout(dev_priv->wq,
3752                     &dev_priv->mm.retire_work, 0);
3753
3754         return ret;
3755 }
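/*
 * Worked example (hedged): with the 20 msec window above, a client that
 * queued requests at t = 0, 5 and 30 msec and calls the throttle ioctl at
 * t = 35 msec only waits for the request emitted at t = 5 msec (the newest
 * one older than the window); the t = 30 msec request is left outstanding,
 * which keeps roughly a frame of work in flight.
 */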
3756
3757 int
3758 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3759                     uint32_t alignment,
3760                     bool map_and_fenceable,
3761                     bool nonblocking)
3762 {
3763         int ret;
3764
3765         if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3766                 return -EBUSY;
3767
3768         if (obj->gtt_space != NULL) {
3769                 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3770                     (map_and_fenceable && !obj->map_and_fenceable)) {
3771                         WARN(obj->pin_count,
3772                              "bo is already pinned with incorrect alignment:"
3773                              " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3774                              " obj->map_and_fenceable=%d\n",
3775                              obj->gtt_offset, alignment,
3776                              map_and_fenceable,
3777                              obj->map_and_fenceable);
3778                         ret = i915_gem_object_unbind(obj);
3779                         if (ret)
3780                                 return ret;
3781                 }
3782         }
3783
3784         if (obj->gtt_space == NULL) {
3785                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3786
3787                 ret = i915_gem_object_bind_to_gtt(obj, alignment,
3788                                                   map_and_fenceable,
3789                                                   nonblocking);
3790                 if (ret)
3791                         return ret;
3792
3793                 if (!dev_priv->mm.aliasing_ppgtt)
3794                         i915_gem_gtt_bind_object(obj, obj->cache_level);
3795         }
3796
3797         if (!obj->has_global_gtt_mapping && map_and_fenceable)
3798                 i915_gem_gtt_bind_object(obj, obj->cache_level);
3799
3800         obj->pin_count++;
3801         obj->pin_mappable |= map_and_fenceable;
3802
3803         return 0;
3804 }
3805
3806 void
3807 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3808 {
3809         BUG_ON(obj->pin_count == 0);
3810         BUG_ON(obj->gtt_space == NULL);
3811
3812         if (--obj->pin_count == 0)
3813                 obj->pin_mappable = false;
3814 }
3815
3816 int
3817 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3818                    struct drm_file *file)
3819 {
3820         struct drm_i915_gem_pin *args = data;
3821         struct drm_i915_gem_object *obj;
3822         int ret;
3823
3824         ret = i915_mutex_lock_interruptible(dev);
3825         if (ret)
3826                 return ret;
3827
3828         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3829         if (&obj->base == NULL) {
3830                 ret = -ENOENT;
3831                 goto unlock;
3832         }
3833
3834         if (obj->madv != I915_MADV_WILLNEED) {
3835                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3836                 ret = -EINVAL;
3837                 goto out;
3838         }
3839
3840         if (obj->pin_filp != NULL && obj->pin_filp != file) {
3841                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3842                           args->handle);
3843                 ret = -EINVAL;
3844                 goto out;
3845         }
3846
3847         if (obj->user_pin_count == 0) {
3848                 ret = i915_gem_object_pin(obj, args->alignment, true, false);
3849                 if (ret)
3850                         goto out;
3851         }
3852
3853         obj->user_pin_count++;
3854         obj->pin_filp = file;
3855
3856         /* XXX - flush the CPU caches for pinned objects
3857          * as the X server doesn't manage domains yet
3858          */
3859         i915_gem_object_flush_cpu_write_domain(obj);
3860         args->offset = obj->gtt_offset;
3861 out:
3862         drm_gem_object_unreference(&obj->base);
3863 unlock:
3864         DRM_UNLOCK(dev);
3865         return ret;
3866 }
3867
3868 int
3869 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3870                      struct drm_file *file)
3871 {
3872         struct drm_i915_gem_pin *args = data;
3873         struct drm_i915_gem_object *obj;
3874         int ret;
3875
3876         ret = i915_mutex_lock_interruptible(dev);
3877         if (ret)
3878                 return ret;
3879
3880         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3881         if (&obj->base == NULL) {
3882                 ret = -ENOENT;
3883                 goto unlock;
3884         }
3885
3886         if (obj->pin_filp != file) {
3887                 DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
3888                           args->handle);
3889                 ret = -EINVAL;
3890                 goto out;
3891         }
3892         obj->user_pin_count--;
3893         if (obj->user_pin_count == 0) {
3894                 obj->pin_filp = NULL;
3895                 i915_gem_object_unpin(obj);
3896         }
3897
3898 out:
3899         drm_gem_object_unreference(&obj->base);
3900 unlock:
3901         DRM_UNLOCK(dev);
3902         return ret;
3903 }
3904
3905 int
3906 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3907                     struct drm_file *file)
3908 {
3909         struct drm_i915_gem_busy *args = data;
3910         struct drm_i915_gem_object *obj;
3911         int ret;
3912
3913         ret = i915_mutex_lock_interruptible(dev);
3914         if (ret)
3915                 return ret;
3916
3917         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3918         if (&obj->base == NULL) {
3919                 ret = -ENOENT;
3920                 goto unlock;
3921         }
3922
3923         /* Count all active objects as busy, even if they are currently not used
3924          * by the gpu. Users of this interface expect objects to eventually
3925          * become non-busy without any further actions, therefore emit any
3926          * necessary flushes here.
3927          */
3928         ret = i915_gem_object_flush_active(obj);
3929
3930         args->busy = obj->active;
3931         if (obj->ring) {
3932                 BUILD_BUG_ON(I915_NUM_RINGS > 16);
3933                 args->busy |= intel_ring_flag(obj->ring) << 16;
3934         }
3935
3936         drm_gem_object_unreference(&obj->base);
3937 unlock:
3938         DRM_UNLOCK(dev);
3939         return ret;
3940 }
3941
3942 int
3943 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3944                         struct drm_file *file_priv)
3945 {
3946         return i915_gem_ring_throttle(dev, file_priv);
3947 }
3948
3949 int
3950 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3951                        struct drm_file *file_priv)
3952 {
3953         struct drm_i915_gem_madvise *args = data;
3954         struct drm_i915_gem_object *obj;
3955         int ret;
3956
3957         switch (args->madv) {
3958         case I915_MADV_DONTNEED:
3959         case I915_MADV_WILLNEED:
3960             break;
3961         default:
3962             return -EINVAL;
3963         }
3964
3965         ret = i915_mutex_lock_interruptible(dev);
3966         if (ret)
3967                 return ret;
3968
3969         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3970         if (&obj->base == NULL) {
3971                 ret = -ENOENT;
3972                 goto unlock;
3973         }
3974
3975         if (obj->pin_count) {
3976                 ret = -EINVAL;
3977                 goto out;
3978         }
3979
3980         if (obj->madv != __I915_MADV_PURGED)
3981                 obj->madv = args->madv;
3982
3983         /* if the object is no longer attached, discard its backing storage */
3984         if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
3985                 i915_gem_object_truncate(obj);
3986
3987         args->retained = obj->madv != __I915_MADV_PURGED;
3988
3989 out:
3990         drm_gem_object_unreference(&obj->base);
3991 unlock:
3992         DRM_UNLOCK(dev);
3993         return ret;
3994 }
3995
3996 void i915_gem_object_init(struct drm_i915_gem_object *obj,
3997                           const struct drm_i915_gem_object_ops *ops)
3998 {
3999         INIT_LIST_HEAD(&obj->mm_list);
4000         INIT_LIST_HEAD(&obj->gtt_list);
4001         INIT_LIST_HEAD(&obj->ring_list);
4002         INIT_LIST_HEAD(&obj->exec_list);
4003
4004         obj->ops = ops;
4005
4006         obj->fence_reg = I915_FENCE_REG_NONE;
4007         obj->madv = I915_MADV_WILLNEED;
4008         /* Avoid an unnecessary call to unbind on the first bind. */
4009         obj->map_and_fenceable = true;
4010
4011         i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4012 }
4013
4014 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4015         .get_pages = i915_gem_object_get_pages_gtt,
4016         .put_pages = i915_gem_object_put_pages_gtt,
4017 };
4018
4019 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4020                                                   size_t size)
4021 {
4022         struct drm_i915_gem_object *obj;
4023
4024         obj = malloc(sizeof(*obj), DRM_I915_GEM, M_WAITOK | M_ZERO);
4025         if (obj == NULL)
4026                 return NULL;
4027
4028         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4029                 free(obj, DRM_I915_GEM);
4030                 return NULL;
4031         }
4032
4033 #ifdef FREEBSD_WIP
4034         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4035         if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4036                 /* 965gm cannot relocate objects above 4GiB. */
4037                 mask &= ~__GFP_HIGHMEM;
4038                 mask |= __GFP_DMA32;
4039         }
4040
4041         mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
4042         mapping_set_gfp_mask(mapping, mask);
4043 #endif /* FREEBSD_WIP */
4044
4045         i915_gem_object_init(obj, &i915_gem_object_ops);
4046
4047         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4048         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4049
4050         if (HAS_LLC(dev)) {
4051                 /* On some devices, we can have the GPU use the LLC (the CPU
4052                  * cache) for about a 10% performance improvement
4053                  * compared to uncached.  Graphics requests other than
4054                  * display scanout are coherent with the CPU in
4055                  * accessing this cache.  This means in this mode we
4056                  * don't need to clflush on the CPU side, and on the
4057                  * GPU side we only need to flush internal caches to
4058                  * get data visible to the CPU.
4059                  *
4060                  * However, we maintain the display planes as UC, and so
4061                  * need to rebind when first used as such.
4062                  */
4063                 obj->cache_level = I915_CACHE_LLC;
4064         } else
4065                 obj->cache_level = I915_CACHE_NONE;
4066
4067         return obj;
4068 }
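/*
 * Hedged usage sketch (compiled out): allocating a GEM object leaves it in
 * the CPU domain with a cache level chosen from HAS_LLC() as described
 * above; callers only override the cache level for special cases such as
 * scanout.  The size below is arbitrary.
 */
#if 0
static struct drm_i915_gem_object *
example_alloc(struct drm_device *dev)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_alloc_object(dev, PAGE_SIZE);
	if (obj == NULL)
		return NULL;
	/* obj->cache_level is I915_CACHE_LLC on LLC parts, NONE otherwise. */
	return obj;
}
#endif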
4069
4070 int i915_gem_init_object(struct drm_gem_object *obj)
4071 {
4072         printf("i915_gem_init_object called\n");
4073
4074         return 0;
4075 }
4076
4077 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4078 {
4079         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4080         struct drm_device *dev = obj->base.dev;
4081         drm_i915_private_t *dev_priv = dev->dev_private;
4082
4083         CTR1(KTR_DRM, "object_destroy_tail %p", obj);
4084
4085         if (obj->phys_obj)
4086                 i915_gem_detach_phys_object(dev, obj);
4087
4088         obj->pin_count = 0;
4089         if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
4090                 bool was_interruptible;
4091
4092                 was_interruptible = dev_priv->mm.interruptible;
4093                 dev_priv->mm.interruptible = false;
4094
4095                 WARN_ON(i915_gem_object_unbind(obj));
4096
4097                 dev_priv->mm.interruptible = was_interruptible;
4098         }
4099
4100         obj->pages_pin_count = 0;
4101         i915_gem_object_put_pages(obj);
4102         i915_gem_object_free_mmap_offset(obj);
4103
4104         BUG_ON(obj->pages);
4105
4106 #ifdef FREEBSD_WIP
4107         if (obj->base.import_attach)
4108                 drm_prime_gem_destroy(&obj->base, NULL);
4109 #endif /* FREEBSD_WIP */
4110
4111         drm_gem_object_release(&obj->base);
4112         i915_gem_info_remove_obj(dev_priv, obj->base.size);
4113
4114         free(obj->bit_17, DRM_I915_GEM);
4115         free(obj, DRM_I915_GEM);
4116 }
4117
4118 int
4119 i915_gem_idle(struct drm_device *dev)
4120 {
4121         drm_i915_private_t *dev_priv = dev->dev_private;
4122         int ret;
4123
4124         DRM_LOCK(dev);
4125
4126         if (dev_priv->mm.suspended) {
4127                 DRM_UNLOCK(dev);
4128                 return 0;
4129         }
4130
4131         ret = i915_gpu_idle(dev);
4132         if (ret) {
4133                 DRM_UNLOCK(dev);
4134                 return ret;
4135         }
4136         i915_gem_retire_requests(dev);
4137
4138         /* Under UMS, be paranoid and evict. */
4139         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4140                 i915_gem_evict_everything(dev);
4141
4142         i915_gem_reset_fences(dev);
4143
4144         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
4145          * We need to replace this with a semaphore, or something.
4146          * And not confound mm.suspended!
4147          */
4148         dev_priv->mm.suspended = 1;
4149         callout_stop(&dev_priv->hangcheck_timer);
4150
4151         i915_kernel_lost_context(dev);
4152         i915_gem_cleanup_ringbuffer(dev);
4153
4154         DRM_UNLOCK(dev);
4155
4156         /* Cancel the retire work handler, which should be idle now. */
4157         taskqueue_cancel_timeout(dev_priv->wq, &dev_priv->mm.retire_work, NULL);
4158
4159         return 0;
4160 }
4161
4162 void i915_gem_l3_remap(struct drm_device *dev)
4163 {
4164         drm_i915_private_t *dev_priv = dev->dev_private;
4165         u32 misccpctl;
4166         int i;
4167
4168         if (!HAS_L3_GPU_CACHE(dev))
4169                 return;
4170
4171         if (!dev_priv->l3_parity.remap_info)
4172                 return;
4173
4174         misccpctl = I915_READ(GEN7_MISCCPCTL);
4175         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
4176         POSTING_READ(GEN7_MISCCPCTL);
4177
4178         for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4179                 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
4180                 if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
4181                         DRM_DEBUG("0x%x was already programmed to %x\n",
4182                                   GEN7_L3LOG_BASE + i, remap);
4183                 if (remap && !dev_priv->l3_parity.remap_info[i/4])
4184                         DRM_DEBUG_DRIVER("Clearing remapped register\n");
4185                 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
4186         }
4187
4188         /* Make sure all the writes land before disabling dop clock gating */
4189         POSTING_READ(GEN7_L3LOG_BASE);
4190
4191         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
4192 }
4193
4194 void i915_gem_init_swizzling(struct drm_device *dev)
4195 {
4196         drm_i915_private_t *dev_priv = dev->dev_private;
4197
4198         if (INTEL_INFO(dev)->gen < 5 ||
4199             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4200                 return;
4201
4202         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4203                                  DISP_TILE_SURFACE_SWIZZLING);
4204
4205         if (IS_GEN5(dev))
4206                 return;
4207
4208         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4209         if (IS_GEN6(dev))
4210                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4211         else
4212                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4213 }
4214
4215 static bool
4216 intel_enable_blt(struct drm_device *dev)
4217 {
4218         if (!HAS_BLT(dev))
4219                 return false;
4220
4221         /* The blitter was dysfunctional on early prototypes */
4222         if (IS_GEN6(dev) && pci_get_revid(dev->dev) < 8) {
4223                 DRM_INFO("BLT not supported on this pre-production hardware;"
4224                          " graphics performance will be degraded.\n");
4225                 return false;
4226         }
4227
4228         return true;
4229 }
4230
4231 int
4232 i915_gem_init_hw(struct drm_device *dev)
4233 {
4234         drm_i915_private_t *dev_priv = dev->dev_private;
4235         int ret;
4236
4237 #ifdef FREEBSD_WIP
4238         if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4239                 return -EIO;
4240 #endif /* FREEBSD_WIP */
4241
4242         if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
4243                 I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
4244
4245         i915_gem_l3_remap(dev);
4246
4247         i915_gem_init_swizzling(dev);
4248
4249         ret = intel_init_render_ring_buffer(dev);
4250         if (ret)
4251                 return ret;
4252
4253         if (HAS_BSD(dev)) {
4254                 ret = intel_init_bsd_ring_buffer(dev);
4255                 if (ret)
4256                         goto cleanup_render_ring;
4257         }
4258
4259         if (intel_enable_blt(dev)) {
4260                 ret = intel_init_blt_ring_buffer(dev);
4261                 if (ret)
4262                         goto cleanup_bsd_ring;
4263         }
4264
4265         dev_priv->next_seqno = 1;
4266
4267         /*
4268          * XXX: There was some w/a described somewhere suggesting loading
4269          * contexts before PPGTT.
4270          */
4271         i915_gem_context_init(dev);
4272         i915_gem_init_ppgtt(dev);
4273
4274         return 0;
4275
4276 cleanup_bsd_ring:
4277         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4278 cleanup_render_ring:
4279         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4280         return ret;
4281 }
4282
4283 static bool
4284 intel_enable_ppgtt(struct drm_device *dev)
4285 {
4286         if (i915_enable_ppgtt >= 0)
4287                 return i915_enable_ppgtt;
4288
4289 #ifdef CONFIG_INTEL_IOMMU
4290         /* Disable ppgtt on SNB if VT-d is on. */
4291         if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
4292                 return false;
4293 #endif
4294
4295         return true;
4296 }
4297
4298 int i915_gem_init(struct drm_device *dev)
4299 {
4300         struct drm_i915_private *dev_priv = dev->dev_private;
4301         unsigned long gtt_size, mappable_size;
4302         int ret;
4303
4304         gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
4305         mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
4306
4307         DRM_LOCK(dev);
4308         if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
4309                 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
4310                  * aperture accordingly when using aliasing ppgtt. */
4311                 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
4312
4313                 i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
4314
4315                 ret = i915_gem_init_aliasing_ppgtt(dev);
4316                 if (ret) {
4317                         DRM_UNLOCK(dev);
4318                         return ret;
4319                 }
4320         } else {
4321                 /* Let GEM Manage all of the aperture.
4322                  *
4323                  * However, leave one page at the end still bound to the scratch
4324                  * page.  There are a number of places where the hardware
4325                  * apparently prefetches past the end of the object, and we've
4326                  * seen multiple hangs with the GPU head pointer stuck in a
4327                  * batchbuffer bound at the last page of the aperture.  One page
4328                  * should be enough to keep any prefetching inside of the
4329                  * aperture.
4330                  */
4331                 i915_gem_init_global_gtt(dev, 0, mappable_size,
4332                                          gtt_size);
4333         }
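/*
 * Worked example (hedged): in the aliasing-PPGTT branch above, the page
 * directory entries are stolen from global GTT PTEs, so the usable GTT
 * shrinks by I915_PPGTT_PD_ENTRIES * PAGE_SIZE bytes; assuming 512 entries
 * and 4 KiB pages, that is 2 MiB carved off the top of the aperture before
 * i915_gem_init_global_gtt() is called.
 */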
4334
4335         ret = i915_gem_init_hw(dev);
4336         DRM_UNLOCK(dev);
4337         if (ret) {
4338                 i915_gem_cleanup_aliasing_ppgtt(dev);
4339                 return ret;
4340         }
4341
4342         /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4343         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4344                 dev_priv->dri1.allow_batchbuffer = 1;
4345         return 0;
4346 }
4347
4348 void
4349 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4350 {
4351         drm_i915_private_t *dev_priv = dev->dev_private;
4352         struct intel_ring_buffer *ring;
4353         int i;
4354
4355         for_each_ring(ring, dev_priv, i)
4356                 intel_cleanup_ring_buffer(ring);
4357 }
4358
4359 int
4360 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4361                        struct drm_file *file_priv)
4362 {
4363         drm_i915_private_t *dev_priv = dev->dev_private;
4364         int ret;
4365
4366         if (drm_core_check_feature(dev, DRIVER_MODESET))
4367                 return 0;
4368
4369         if (atomic_read(&dev_priv->mm.wedged)) {
4370                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4371                 atomic_set(&dev_priv->mm.wedged, 0);
4372         }
4373
4374         DRM_LOCK(dev);
4375         dev_priv->mm.suspended = 0;
4376
4377         ret = i915_gem_init_hw(dev);
4378         if (ret != 0) {
4379                 DRM_UNLOCK(dev);
4380                 return ret;
4381         }
4382
4383         BUG_ON(!list_empty(&dev_priv->mm.active_list));
4384         DRM_UNLOCK(dev);
4385
4386         ret = drm_irq_install(dev);
4387         if (ret)
4388                 goto cleanup_ringbuffer;
4389
4390         return 0;
4391
4392 cleanup_ringbuffer:
4393         DRM_LOCK(dev);
4394         i915_gem_cleanup_ringbuffer(dev);
4395         dev_priv->mm.suspended = 1;
4396         DRM_UNLOCK(dev);
4397
4398         return ret;
4399 }
4400
4401 int
4402 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4403                        struct drm_file *file_priv)
4404 {
4405         if (drm_core_check_feature(dev, DRIVER_MODESET))
4406                 return 0;
4407
4408         drm_irq_uninstall(dev);
4409         return i915_gem_idle(dev);
4410 }
4411
4412 void
4413 i915_gem_lastclose(struct drm_device *dev)
4414 {
4415         int ret;
4416
4417         if (drm_core_check_feature(dev, DRIVER_MODESET))
4418                 return;
4419
4420         ret = i915_gem_idle(dev);
4421         if (ret)
4422                 DRM_ERROR("failed to idle hardware: %d\n", ret);
4423 }
4424
4425 static void
4426 init_ring_lists(struct intel_ring_buffer *ring)
4427 {
4428         INIT_LIST_HEAD(&ring->active_list);
4429         INIT_LIST_HEAD(&ring->request_list);
4430 }
4431
4432 void
4433 i915_gem_load(struct drm_device *dev)
4434 {
4435         int i;
4436         drm_i915_private_t *dev_priv = dev->dev_private;
4437
4438         INIT_LIST_HEAD(&dev_priv->mm.active_list);
4439         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4440         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4441         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4442         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4443         for (i = 0; i < I915_NUM_RINGS; i++)
4444                 init_ring_lists(&dev_priv->ring[i]);
4445         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4446                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4447         TIMEOUT_TASK_INIT(dev_priv->wq, &dev_priv->mm.retire_work, 0,
4448             i915_gem_retire_work_handler, dev_priv);
4449         init_completion(&dev_priv->error_completion);
4450
4451         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4452         if (IS_GEN3(dev)) {
4453                 I915_WRITE(MI_ARB_STATE,
4454                            _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4455         }
4456
4457         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4458
4459         /* Old X drivers will take 0-2 for front, back, depth buffers */
4460         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4461                 dev_priv->fence_reg_start = 3;
4462
4463         if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4464                 dev_priv->num_fence_regs = 16;
4465         else
4466                 dev_priv->num_fence_regs = 8;
4467
4468         /* Initialize fence registers to zero */
4469         i915_gem_reset_fences(dev);
4470
4471         i915_gem_detect_bit_6_swizzle(dev);
4472         DRM_INIT_WAITQUEUE(&dev_priv->pending_flip_queue);
4473
4474         dev_priv->mm.interruptible = true;
4475
4476         dev_priv->mm.inactive_shrinker = EVENTHANDLER_REGISTER(vm_lowmem,
4477             i915_gem_inactive_shrink, dev, EVENTHANDLER_PRI_ANY);
4478 }
4479
4480 /*
4481  * Create a physically contiguous memory object for this object
4482  * e.g. for cursor + overlay regs
4483  */
4484 static int i915_gem_init_phys_object(struct drm_device *dev,
4485                                      int id, int size, int align)
4486 {
4487         drm_i915_private_t *dev_priv = dev->dev_private;
4488         struct drm_i915_gem_phys_object *phys_obj;
4489         int ret;
4490
4491         if (dev_priv->mm.phys_objs[id - 1] || !size)
4492                 return 0;
4493
4494         phys_obj = malloc(sizeof(struct drm_i915_gem_phys_object),
4495             DRM_I915_GEM, M_WAITOK | M_ZERO);
4496         if (!phys_obj)
4497                 return -ENOMEM;
4498
4499         phys_obj->id = id;
4500
4501         phys_obj->handle = drm_pci_alloc(dev, size, align, BUS_SPACE_MAXADDR);
4502         if (!phys_obj->handle) {
4503                 ret = -ENOMEM;
4504                 goto kfree_obj;
4505         }
4506 #ifdef CONFIG_X86
4507         pmap_change_attr((vm_offset_t)phys_obj->handle->vaddr,
4508             size / PAGE_SIZE, PAT_WRITE_COMBINING);
4509 #endif
4510
4511         dev_priv->mm.phys_objs[id - 1] = phys_obj;
4512
4513         return 0;
4514 kfree_obj:
4515         free(phys_obj, DRM_I915_GEM);
4516         return ret;
4517 }
4518
4519 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4520 {
4521         drm_i915_private_t *dev_priv = dev->dev_private;
4522         struct drm_i915_gem_phys_object *phys_obj;
4523
4524         if (!dev_priv->mm.phys_objs[id - 1])
4525                 return;
4526
4527         phys_obj = dev_priv->mm.phys_objs[id - 1];
4528         if (phys_obj->cur_obj) {
4529                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4530         }
4531
4532 #ifdef FREEBSD_WIP
4533 #ifdef CONFIG_X86
4534         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4535 #endif
4536 #endif /* FREEBSD_WIP */
4537
4538         drm_pci_free(dev, phys_obj->handle);
4539         free(phys_obj, DRM_I915_GEM);
4540         dev_priv->mm.phys_objs[id - 1] = NULL;
4541 }
4542
4543 void i915_gem_free_all_phys_object(struct drm_device *dev)
4544 {
4545         int i;
4546
4547         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4548                 i915_gem_free_phys_object(dev, i);
4549 }
4550
4551 void i915_gem_detach_phys_object(struct drm_device *dev,
4552                                  struct drm_i915_gem_object *obj)
4553 {
4554         struct sf_buf *sf;
4555         char *vaddr;
4556         char *dst;
4557         int i;
4558         int page_count;
4559
4560         if (!obj->phys_obj)
4561                 return;
4562         vaddr = obj->phys_obj->handle->vaddr;
4563
4564         page_count = obj->base.size / PAGE_SIZE;
4565         VM_OBJECT_WLOCK(obj->base.vm_obj);
4566         for (i = 0; i < page_count; i++) {
4567                 vm_page_t page = i915_gem_wire_page(obj->base.vm_obj, i, NULL);
4568                 if (page == NULL)
4569                         continue; /* XXX */
4570
4571                 VM_OBJECT_WUNLOCK(obj->base.vm_obj);
4572                 sf = sf_buf_alloc(page, 0);
4573                 if (sf != NULL) {
4574                         dst = (char *)sf_buf_kva(sf);
4575                         memcpy(dst, vaddr + IDX_TO_OFF(i), PAGE_SIZE);
4576                         sf_buf_free(sf);
4577                 }
4578                 drm_clflush_pages(&page, 1);
4579
4580                 VM_OBJECT_WLOCK(obj->base.vm_obj);
4581                 vm_page_reference(page);
4582                 vm_page_lock(page);
4583                 vm_page_dirty(page);
4584                 vm_page_unwire(page, PQ_INACTIVE);
4585                 vm_page_unlock(page);
4586                 atomic_add_long(&i915_gem_wired_pages_cnt, -1);
4587         }
4588         VM_OBJECT_WUNLOCK(obj->base.vm_obj);
4589         i915_gem_chipset_flush(dev);
4590
4591         obj->phys_obj->cur_obj = NULL;
4592         obj->phys_obj = NULL;
4593 }
4594
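/*
 * Bind a GEM object to phys object slot 'id', creating the contiguous
 * buffer on first use, and seed it with the object's current page
 * contents.  Later CPU writes reach it through i915_gem_phys_pwrite()
 * below.  A cursor-style caller might look roughly like this (the slot
 * and alignment values are illustrative only):
 *
 *	ret = i915_gem_attach_phys_object(dev, obj,
 *	    I915_GEM_PHYS_CURSOR_0, PAGE_SIZE);
 */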
4595 int
4596 i915_gem_attach_phys_object(struct drm_device *dev,
4597                             struct drm_i915_gem_object *obj,
4598                             int id,
4599                             int align)
4600 {
4601         drm_i915_private_t *dev_priv = dev->dev_private;
4602         struct sf_buf *sf;
4603         char *dst, *src;
4604         int ret = 0;
4605         int page_count;
4606         int i;
4607
4608         if (id > I915_MAX_PHYS_OBJECT)
4609                 return -EINVAL;
4610
4611         if (obj->phys_obj) {
4612                 if (obj->phys_obj->id == id)
4613                         return 0;
4614                 i915_gem_detach_phys_object(dev, obj);
4615         }
4616
4617         /* create a new object */
4618         if (!dev_priv->mm.phys_objs[id - 1]) {
4619                 ret = i915_gem_init_phys_object(dev, id,
4620                                                 obj->base.size, align);
4621                 if (ret) {
4622                         DRM_ERROR("failed to init phys object %d size: %zu\n",
4623                                   id, obj->base.size);
4624                         return ret;
4625                 }
4626         }
4627
4628         /* bind to the object */
4629         obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4630         obj->phys_obj->cur_obj = obj;
4631
4632         page_count = obj->base.size / PAGE_SIZE;
4633
4634         VM_OBJECT_WLOCK(obj->base.vm_obj);
4635         for (i = 0; i < page_count; i++) {
4636                 vm_page_t page = i915_gem_wire_page(obj->base.vm_obj, i, NULL);
4637                 if (page == NULL) {
4638                         ret = -EIO;
4639                         break;
4640                 }
4641                 VM_OBJECT_WUNLOCK(obj->base.vm_obj);
4642                 sf = sf_buf_alloc(page, 0);
4643                 src = (char *)sf_buf_kva(sf);
4644                 dst = (char *)obj->phys_obj->handle->vaddr + IDX_TO_OFF(i);
4645                 memcpy(dst, src, PAGE_SIZE);
4646                 sf_buf_free(sf);
4647
4648                 VM_OBJECT_WLOCK(obj->base.vm_obj);
4649
4650                 vm_page_reference(page);
4651                 vm_page_lock(page);
4652                 vm_page_unwire(page, PQ_INACTIVE);
4653                 vm_page_unlock(page);
4654                 atomic_add_long(&i915_gem_wired_pages_cnt, -1);
4655         }
4656         VM_OBJECT_WUNLOCK(obj->base.vm_obj);
4657
4658         return ret;
4659 }
4660
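/*
 * pwrite fast path for objects bound to a phys object: attempt a
 * non-faulting copy straight into the contiguous buffer and, if that
 * fails, retry with copy_from_user() after dropping the struct lock.
 * The pwrite ioctl presumably routes here whenever obj->phys_obj is set.
 */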
4661 static int
4662 i915_gem_phys_pwrite(struct drm_device *dev,
4663                      struct drm_i915_gem_object *obj,
4664                      struct drm_i915_gem_pwrite *args,
4665                      struct drm_file *file_priv)
4666 {
4667         void *vaddr = (char *)obj->phys_obj->handle->vaddr + args->offset;
4668         char __user *user_data = to_user_ptr(args->data_ptr);
4669
4670         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4671                 unsigned long unwritten;
4672
4673                 /* Once assigned, the physical object is fixed for the
4674                  * lifetime of the obj, so we can safely drop the lock and
4675                  * continue to access vaddr.
4676                  */
4677                 DRM_UNLOCK(dev);
4678                 unwritten = copy_from_user(vaddr, user_data, args->size);
4679                 DRM_LOCK(dev);
4680                 if (unwritten)
4681                         return -EFAULT;
4682         }
4683
4684         i915_gem_chipset_flush(dev);
4685         return 0;
4686 }
4687
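/*
 * DRM file-close hook for GEM: disassociate every request still owned by
 * the closing file so later request retirement cannot touch the freed
 * file_priv.
 */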
4688 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4689 {
4690         struct drm_i915_file_private *file_priv = file->driver_priv;
4691
4692         /* Clean up our request list when the client is going away, so that
4693          * later retire_requests won't dereference our soon-to-be-gone
4694          * file_priv.
4695          */
4696         mtx_lock(&file_priv->mm.lock);
4697         while (!list_empty(&file_priv->mm.request_list)) {
4698                 struct drm_i915_gem_request *request;
4699
4700                 request = list_first_entry(&file_priv->mm.request_list,
4701                                            struct drm_i915_gem_request,
4702                                            client_list);
4703                 list_del(&request->client_list);
4704                 request->file_priv = NULL;
4705         }
4706         mtx_unlock(&file_priv->mm.lock);
4707 }
4708
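/*
 * vm_lowmem handler registered in i915_gem_load().  It only proceeds if
 * the struct lock can be taken without sleeping, then runs a purge pass
 * followed by a broader shrink pass; if the second pass recovers no more
 * than a hundredth of what the first did, it evicts everything with
 * i915_gem_shrink_all().
 */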
4709 static void
4710 i915_gem_inactive_shrink(void *arg)
4711 {
4712         struct drm_device *dev = arg;
4713         struct drm_i915_private *dev_priv = dev->dev_private;
4714         int pass1, pass2;
4715
4716         if (!sx_try_xlock(&dev->dev_struct_lock)) {
4717                 return;
4718         }
4719
4720         CTR0(KTR_DRM, "gem_lowmem");
4721
4722         pass1 = i915_gem_purge(dev_priv, -1);
4723         pass2 = __i915_gem_shrink(dev_priv, -1, false);
4724
4725         if (pass2 <= pass1 / 100)
4726                 i915_gem_shrink_all(dev_priv);
4727
4728         DRM_UNLOCK(dev);
4729 }
4730
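/*
 * Grab and wire the page at 'pindex' of the object's VM object, paging it
 * in through the pager if it is not fully valid and zero-filling it
 * otherwise; *fresh (when non-NULL) reports whether data was actually
 * read in.  Returns NULL, with the page freed, if the pager fails.  On
 * success the page is left wired and i915_gem_wired_pages_cnt is bumped;
 * callers unwire and decrement it themselves, as the detach/attach loops
 * above do.
 */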
4731 static vm_page_t
4732 i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex, bool *fresh)
4733 {
4734         vm_page_t page;
4735         int rv;
4736
4737         VM_OBJECT_ASSERT_WLOCKED(object);
4738         page = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
4739             VM_ALLOC_WIRED);
4740         if (page->valid != VM_PAGE_BITS_ALL) {
4741                 vm_page_xbusy(page);
4742                 if (vm_pager_has_page(object, pindex, NULL, NULL)) {
4743                         rv = vm_pager_get_pages(object, &page, 1, NULL, NULL);
4744                         if (rv != VM_PAGER_OK) {
4745                                 vm_page_lock(page);
4746                                 vm_page_unwire(page, PQ_NONE);
4747                                 vm_page_free(page);
4748                                 vm_page_unlock(page);
4749                                 return (NULL);
4750                         }
4751                         if (fresh != NULL)
4752                                 *fresh = true;
4753                 } else {
4754                         pmap_zero_page(page);
4755                         page->valid = VM_PAGE_BITS_ALL;
4756                         page->dirty = 0;
4757                         if (fresh != NULL)
4758                                 *fresh = false;
4759                 }
4760                 vm_page_xunbusy(page);
4761         } else if (fresh != NULL)
4762                 *fresh = false;
4763         atomic_add_long(&i915_gem_wired_pages_cnt, 1);
4764         return (page);
4765 }