1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  * Copyright (c) 2011 The FreeBSD Foundation
27  * All rights reserved.
28  *
29  * This software was developed by Konstantin Belousov under sponsorship from
30  * the FreeBSD Foundation.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  *
41  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
42  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51  * SUCH DAMAGE.
52  */
53
54 #include <sys/cdefs.h>
55 __FBSDID("$FreeBSD$");
56
57 #include <dev/drm2/drmP.h>
58 #include <dev/drm2/i915/i915_drm.h>
59 #include <dev/drm2/i915/i915_drv.h>
60 #include <dev/drm2/i915/intel_drv.h>
61
62 #include <sys/resourcevar.h>
63 #include <sys/sched.h>
64 #include <sys/sf_buf.h>
65
66 #include <vm/vm.h>
67 #include <vm/vm_pageout.h>
68
69 #include <machine/md_var.h>
70
71 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
72 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
73 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
74                                                     unsigned alignment,
75                                                     bool map_and_fenceable,
76                                                     bool nonblocking);
77 static int i915_gem_phys_pwrite(struct drm_device *dev,
78                                 struct drm_i915_gem_object *obj,
79                                 struct drm_i915_gem_pwrite *args,
80                                 struct drm_file *file);
81
82 static void i915_gem_write_fence(struct drm_device *dev, int reg,
83                                  struct drm_i915_gem_object *obj);
84 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
85                                          struct drm_i915_fence_reg *fence,
86                                          bool enable);
87
88 static void i915_gem_inactive_shrink(void *);
89 static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
90 static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
91 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
92
93 static int i915_gem_object_get_pages_range(struct drm_i915_gem_object *obj,
94     off_t start, off_t end);
95
96 static vm_page_t i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex,
97     bool *fresh);
98
99 MALLOC_DEFINE(DRM_I915_GEM, "i915gem", "Allocations from i915 gem");
100 long i915_gem_wired_pages_cnt;
101
102 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
103 {
104         if (obj->tiling_mode)
105                 i915_gem_release_mmap(obj);
106
107         /* As we do not have an associated fence register, we will force
108          * a tiling change if we ever need to acquire one.
109          */
110         obj->fence_dirty = false;
111         obj->fence_reg = I915_FENCE_REG_NONE;
112 }
113
114 /* some bookkeeping */
115 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
116                                   size_t size)
117 {
118         dev_priv->mm.object_count++;
119         dev_priv->mm.object_memory += size;
120 }
121
122 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
123                                      size_t size)
124 {
125         dev_priv->mm.object_count--;
126         dev_priv->mm.object_memory -= size;
127 }
128
129 static int
130 i915_gem_wait_for_error(struct drm_device *dev)
131 {
132         struct drm_i915_private *dev_priv = dev->dev_private;
133         struct completion *x = &dev_priv->error_completion;
134         int ret;
135
136         if (!atomic_read(&dev_priv->mm.wedged))
137                 return 0;
138
139         /*
140          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
141          * userspace. If it takes that long something really bad is going on and
142          * we should simply try to bail out and fail as gracefully as possible.
143          */
144         ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
145         if (ret == 0) {
146                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
147                 return -EIO;
148         } else if (ret < 0) {
149                 return ret;
150         }
151
152         if (atomic_read(&dev_priv->mm.wedged)) {
153                 /* GPU is hung, bump the completion count to account for
154                  * the token we just consumed so that we never hit zero and
155                  * end up waiting upon a subsequent completion event that
156                  * will never happen.
157                  */
158                 mtx_lock(&x->lock);
159                 x->done++;
160                 mtx_unlock(&x->lock);
161         }
162         return 0;
163 }
164
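/*
 * Take the device struct lock, giving a pending GPU reset up to ten
 * seconds to complete first.  Returns -EINTR if acquiring the sx lock
 * is interrupted by a signal.
 */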
165 int i915_mutex_lock_interruptible(struct drm_device *dev)
166 {
167         int ret;
168
169         ret = i915_gem_wait_for_error(dev);
170         if (ret)
171                 return ret;
172
173         /*
174          * The lock is meant to be interruptible, and it is now that
175          * dev_struct_lock is an sx lock taken with sx_xlock_sig().
176          */
177         ret = sx_xlock_sig(&dev->dev_struct_lock);
178         if (ret)
179                 return -EINTR;
180
181         WARN_ON(i915_verify_lists(dev));
182         return 0;
183 }
184
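/* An object is inactive when it is bound into the GTT but is no longer
 * being used by the GPU. */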
185 static inline bool
186 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
187 {
188         return obj->gtt_space && !obj->active;
189 }
190
191 int
192 i915_gem_init_ioctl(struct drm_device *dev, void *data,
193                     struct drm_file *file)
194 {
195         struct drm_i915_gem_init *args = data;
196
197         if (drm_core_check_feature(dev, DRIVER_MODESET))
198                 return -ENODEV;
199
200         if (args->gtt_start >= args->gtt_end ||
201             (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
202                 return -EINVAL;
203
204         /* GEM with user mode setting was never supported on ilk and later. */
205         if (INTEL_INFO(dev)->gen >= 5)
206                 return -ENODEV;
207
208         /*
209          * XXXKIB. The second-time initialization should be guarded
210          * against.
211          */
212         DRM_LOCK(dev);
213         i915_gem_init_global_gtt(dev, args->gtt_start,
214                                  args->gtt_end, args->gtt_end);
215         DRM_UNLOCK(dev);
216
217         return 0;
218 }
219
220 int
221 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
222                             struct drm_file *file)
223 {
224         struct drm_i915_private *dev_priv = dev->dev_private;
225         struct drm_i915_gem_get_aperture *args = data;
226         struct drm_i915_gem_object *obj;
227         size_t pinned;
228
229         pinned = 0;
230         DRM_LOCK(dev);
231         list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
232                 if (obj->pin_count)
233                         pinned += obj->gtt_space->size;
234         DRM_UNLOCK(dev);
235
236         args->aper_size = dev_priv->mm.gtt_total;
237         args->aper_available_size = args->aper_size - pinned;
238
239         return 0;
240 }
241
242 static int
243 i915_gem_create(struct drm_file *file,
244                 struct drm_device *dev,
245                 uint64_t size,
246                 uint32_t *handle_p)
247 {
248         struct drm_i915_gem_object *obj;
249         int ret;
250         u32 handle;
251
252         size = roundup(size, PAGE_SIZE);
253         if (size == 0)
254                 return -EINVAL;
255
256         /* Allocate the new object */
257         obj = i915_gem_alloc_object(dev, size);
258         if (obj == NULL)
259                 return -ENOMEM;
260
261         ret = drm_gem_handle_create(file, &obj->base, &handle);
262         if (ret) {
263                 drm_gem_object_release(&obj->base);
264                 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
265                 free(obj, DRM_I915_GEM);
266                 return ret;
267         }
268
269         /* drop reference from allocate - handle holds it now */
270         drm_gem_object_unreference(&obj->base);
271         CTR2(KTR_DRM, "object_create %p %x", obj, size);
272
273         *handle_p = handle;
274         return 0;
275 }
276
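/*
 * "Dumb" buffers are simple linear buffers for unaccelerated scanout;
 * the pitch is the row size in whole bytes per pixel, rounded up to a
 * 64-byte boundary.
 */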
277 int
278 i915_gem_dumb_create(struct drm_file *file,
279                      struct drm_device *dev,
280                      struct drm_mode_create_dumb *args)
281 {
282         /* have to work out size/pitch and return them */
283         args->pitch = roundup2(args->width * ((args->bpp + 7) / 8), 64);
284         args->size = args->pitch * args->height;
285         return i915_gem_create(file, dev,
286                                args->size, &args->handle);
287 }
288
289 int i915_gem_dumb_destroy(struct drm_file *file,
290                           struct drm_device *dev,
291                           uint32_t handle)
292 {
293         return drm_gem_handle_delete(file, handle);
294 }
295
296 /**
297  * Creates a new mm object and returns a handle to it.
298  */
299 int
300 i915_gem_create_ioctl(struct drm_device *dev, void *data,
301                       struct drm_file *file)
302 {
303         struct drm_i915_gem_create *args = data;
304
305         return i915_gem_create(file, dev,
306                                args->size, &args->handle);
307 }
308
309 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
310 {
311         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
312
313         return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
314                 obj->tiling_mode != I915_TILING_NONE;
315 }
316
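/*
 * Copy helpers for bit-17 swizzled objects: data is moved in chunks
 * that never cross a 64-byte cacheline, and XORing the GPU offset with
 * 64 selects the swizzled half of each 128-byte span.
 */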
317 static inline int
318 __copy_to_user_swizzled(char __user *cpu_vaddr,
319                         const char *gpu_vaddr, int gpu_offset,
320                         int length)
321 {
322         int ret, cpu_offset = 0;
323
324         while (length > 0) {
325                 int cacheline_end = roundup2(gpu_offset + 1, 64);
326                 int this_length = min(cacheline_end - gpu_offset, length);
327                 int swizzled_gpu_offset = gpu_offset ^ 64;
328
329                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
330                                      gpu_vaddr + swizzled_gpu_offset,
331                                      this_length);
332                 if (ret)
333                         return ret + length;
334
335                 cpu_offset += this_length;
336                 gpu_offset += this_length;
337                 length -= this_length;
338         }
339
340         return 0;
341 }
342
343 static inline int
344 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
345                           const char __user *cpu_vaddr,
346                           int length)
347 {
348         int ret, cpu_offset = 0;
349
350         while (length > 0) {
351                 int cacheline_end = roundup2(gpu_offset + 1, 64);
352                 int this_length = min(cacheline_end - gpu_offset, length);
353                 int swizzled_gpu_offset = gpu_offset ^ 64;
354
355                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
356                                        cpu_vaddr + cpu_offset,
357                                        this_length);
358                 if (ret)
359                         return ret + length;
360
361                 cpu_offset += this_length;
362                 gpu_offset += this_length;
363                 length -= this_length;
364         }
365
366         return 0;
367 }
368
369 /* Per-page copy function for the shmem pread fastpath.
370  * Flushes invalid cachelines before reading the target if
371  * needs_clflush is set. */
372 static int
373 shmem_pread_fast(vm_page_t page, int shmem_page_offset, int page_length,
374                  char __user *user_data,
375                  bool page_do_bit17_swizzling, bool needs_clflush)
376 {
377         char *vaddr;
378         struct sf_buf *sf;
379         int ret;
380
381         if (unlikely(page_do_bit17_swizzling))
382                 return -EINVAL;
383
384         sched_pin();
385         sf = sf_buf_alloc(page, SFB_NOWAIT | SFB_CPUPRIVATE);
386         if (sf == NULL) {
387                 sched_unpin();
388                 return (-EFAULT);
389         }
390         vaddr = (char *)sf_buf_kva(sf);
391         if (needs_clflush)
392                 drm_clflush_virt_range(vaddr + shmem_page_offset,
393                                        page_length);
394         ret = __copy_to_user_inatomic(user_data,
395                                       vaddr + shmem_page_offset,
396                                       page_length);
397         sf_buf_free(sf);
398         sched_unpin();
399
400         return ret ? -EFAULT : 0;
401 }
402
403 static void
404 shmem_clflush_swizzled_range(char *addr, unsigned long length,
405                              bool swizzled)
406 {
407         if (unlikely(swizzled)) {
408                 unsigned long start = (unsigned long) addr;
409                 unsigned long end = (unsigned long) addr + length;
410
411                 /* For swizzling simply ensure that we always flush both
412                  * channels. Lame, but simple and it works. Swizzled
413                  * pwrite/pread is far from a hotpath - current userspace
414                  * doesn't use it at all. */
415                 start = round_down(start, 128);
416                 end = round_up(end, 128);
417
418                 drm_clflush_virt_range((void *)start, end - start);
419         } else {
420                 drm_clflush_virt_range(addr, length);
421         }
422
423 }
424
425 /* Only difference to the fast-path function is that this can handle bit17
426  * and uses non-atomic copy and kmap functions. */
427 static int
428 shmem_pread_slow(vm_page_t page, int shmem_page_offset, int page_length,
429                  char __user *user_data,
430                  bool page_do_bit17_swizzling, bool needs_clflush)
431 {
432         char *vaddr;
433         struct sf_buf *sf;
434         int ret;
435
436         sf = sf_buf_alloc(page, 0);
437         vaddr = (char *)sf_buf_kva(sf);
438         if (needs_clflush)
439                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
440                                              page_length,
441                                              page_do_bit17_swizzling);
442
443         if (page_do_bit17_swizzling)
444                 ret = __copy_to_user_swizzled(user_data,
445                                               vaddr, shmem_page_offset,
446                                               page_length);
447         else
448                 ret = __copy_to_user(user_data,
449                                      vaddr + shmem_page_offset,
450                                      page_length);
451         sf_buf_free(sf);
452
453         return ret ? -EFAULT : 0;
454 }
455
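/*
 * Copy object contents out to userspace via the CPU.  Each page is
 * first tried through the non-sleeping sf_buf fast path; if that
 * fails, the DRM lock is dropped and the sleepable slow path is used.
 */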
456 static int
457 i915_gem_shmem_pread(struct drm_device *dev,
458                      struct drm_i915_gem_object *obj,
459                      struct drm_i915_gem_pread *args,
460                      struct drm_file *file)
461 {
462         char __user *user_data;
463         ssize_t remain;
464         off_t offset;
465         int shmem_page_offset, page_length, ret = 0;
466         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
467         int hit_slowpath = 0;
468         int prefaulted = 0;
469         int needs_clflush = 0;
470
471         user_data = to_user_ptr(args->data_ptr);
472         remain = args->size;
473
474         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
475
476         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
477                 /* If we're not in the cpu read domain, set ourself into the gtt
478                  * read domain and manually flush cachelines (if required). This
479                  * optimizes for the case when the gpu will dirty the data
480                  * anyway again before the next pread happens. */
481                 if (obj->cache_level == I915_CACHE_NONE)
482                         needs_clflush = 1;
483                 if (obj->gtt_space) {
484                         ret = i915_gem_object_set_to_gtt_domain(obj, false);
485                         if (ret)
486                                 return ret;
487                 }
488         }
489
490         ret = i915_gem_object_get_pages(obj);
491         if (ret)
492                 return ret;
493
494         i915_gem_object_pin_pages(obj);
495
496         offset = args->offset;
497
498         VM_OBJECT_WLOCK(obj->base.vm_obj);
499         for (vm_page_t page = vm_page_find_least(obj->base.vm_obj,
500             OFF_TO_IDX(offset));; page = vm_page_next(page)) {
501                 VM_OBJECT_WUNLOCK(obj->base.vm_obj);
502
503                 if (remain <= 0)
504                         break;
505
506                 /* Operation in this page
507                  *
508                  * shmem_page_offset = offset within page in shmem file
509                  * page_length = bytes to copy for this page
510                  */
511                 shmem_page_offset = offset_in_page(offset);
512                 page_length = remain;
513                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
514                         page_length = PAGE_SIZE - shmem_page_offset;
515
516                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
517                         (page_to_phys(page) & (1 << 17)) != 0;
518
519                 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
520                                        user_data, page_do_bit17_swizzling,
521                                        needs_clflush);
522                 if (ret == 0)
523                         goto next_page;
524
525                 hit_slowpath = 1;
526                 DRM_UNLOCK(dev);
527
528                 if (!prefaulted) {
529                         ret = fault_in_multipages_writeable(user_data, remain);
530                         /* Userspace is tricking us, but we've already clobbered
531                          * its pages with the prefault and promised to write the
532                          * data up to the first fault. Hence ignore any errors
533                          * and just continue. */
534                         (void)ret;
535                         prefaulted = 1;
536                 }
537
538                 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
539                                        user_data, page_do_bit17_swizzling,
540                                        needs_clflush);
541
542                 DRM_LOCK(dev);
543
544 next_page:
545                 vm_page_reference(page);
546
547                 if (ret)
548                         goto out;
549
550                 remain -= page_length;
551                 user_data += page_length;
552                 offset += page_length;
553                 VM_OBJECT_WLOCK(obj->base.vm_obj);
554         }
555
556 out:
557         i915_gem_object_unpin_pages(obj);
558
559         if (hit_slowpath) {
560                 /* Fixup: Kill any reinstated backing storage pages */
561                 if (obj->madv == __I915_MADV_PURGED)
562                         i915_gem_object_truncate(obj);
563         }
564
565         return ret;
566 }
567
568 /**
569  * Reads data from the object referenced by handle.
570  *
571  * On error, the contents of *data are undefined.
572  */
573 int
574 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
575                      struct drm_file *file)
576 {
577         struct drm_i915_gem_pread *args = data;
578         struct drm_i915_gem_object *obj;
579         int ret = 0;
580
581         if (args->size == 0)
582                 return 0;
583
584         if (!useracc(to_user_ptr(args->data_ptr), args->size, VM_PROT_WRITE))
585                 return -EFAULT;
586
587         ret = i915_mutex_lock_interruptible(dev);
588         if (ret)
589                 return ret;
590
591         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
592         if (&obj->base == NULL) {
593                 ret = -ENOENT;
594                 goto unlock;
595         }
596
597         /* Bounds check source.  */
598         if (args->offset > obj->base.size ||
599             args->size > obj->base.size - args->offset) {
600                 ret = -EINVAL;
601                 goto out;
602         }
603
604 #ifdef FREEBSD_WIP
605         /* prime objects have no backing filp to GEM pread/pwrite
606          * pages from.
607          */
608         if (!obj->base.filp) {
609                 ret = -EINVAL;
610                 goto out;
611         }
612 #endif /* FREEBSD_WIP */
613
614         CTR3(KTR_DRM, "pread %p %jx %jx", obj, args->offset, args->size);
615
616         ret = i915_gem_shmem_pread(dev, obj, args, file);
617
618 out:
619         drm_gem_object_unreference(&obj->base);
620 unlock:
621         DRM_UNLOCK(dev);
622         return ret;
623 }
624
625 /* This is the fast write path which cannot handle
626  * page faults in the source data
627  */
628
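/*
 * The aperture range being written is mapped write-combining with
 * pmap_mapdev_attr() for the duration of the copy and unmapped again
 * afterwards.
 */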
629 static inline int
630 fast_user_write(vm_paddr_t mapping_addr,
631                 off_t page_base, int page_offset,
632                 char __user *user_data,
633                 int length)
634 {
635         void __iomem *vaddr_atomic;
636         void *vaddr;
637         unsigned long unwritten;
638
639         vaddr_atomic = pmap_mapdev_attr(mapping_addr + page_base,
640             length, PAT_WRITE_COMBINING);
641         /* We can use the cpu mem copy function because this is X86. */
642         vaddr = (char __force*)vaddr_atomic + page_offset;
643         unwritten = __copy_from_user_inatomic_nocache(vaddr,
644                                                       user_data, length);
645         pmap_unmapdev((vm_offset_t)vaddr_atomic, length);
646         return unwritten;
647 }
648
649 /**
650  * This is the fast pwrite path, where we copy the data directly from the
651  * user into the GTT, uncached.
652  */
653 static int
654 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
655                          struct drm_i915_gem_object *obj,
656                          struct drm_i915_gem_pwrite *args,
657                          struct drm_file *file)
658 {
659         drm_i915_private_t *dev_priv = dev->dev_private;
660         ssize_t remain;
661         off_t offset, page_base;
662         char __user *user_data;
663         int page_offset, page_length, ret;
664
665         ret = i915_gem_object_pin(obj, 0, true, true);
666         if (ret)
667                 goto out;
668
669         ret = i915_gem_object_set_to_gtt_domain(obj, true);
670         if (ret)
671                 goto out_unpin;
672
673         ret = i915_gem_object_put_fence(obj);
674         if (ret)
675                 goto out_unpin;
676
677         user_data = to_user_ptr(args->data_ptr);
678         remain = args->size;
679
680         offset = obj->gtt_offset + args->offset;
681
682         while (remain > 0) {
683                 /* Operation in this page
684                  *
685                  * page_base = page offset within aperture
686                  * page_offset = offset within page
687                  * page_length = bytes to copy for this page
688                  */
689                 page_base = offset & ~PAGE_MASK;
690                 page_offset = offset_in_page(offset);
691                 page_length = remain;
692                 if ((page_offset + remain) > PAGE_SIZE)
693                         page_length = PAGE_SIZE - page_offset;
694
695                 /* If we get a fault while copying data, then (presumably) our
696                  * source page isn't available.  Return the error and we'll
697                  * retry in the slow path.
698                  */
699                 if (fast_user_write(dev_priv->mm.gtt_base_addr, page_base,
700                                     page_offset, user_data, page_length)) {
701                         ret = -EFAULT;
702                         goto out_unpin;
703                 }
704
705                 remain -= page_length;
706                 user_data += page_length;
707                 offset += page_length;
708         }
709
710 out_unpin:
711         i915_gem_object_unpin(obj);
712 out:
713         return ret;
714 }
715
716 /* Per-page copy function for the shmem pwrite fastpath.
717  * Flushes invalid cachelines before writing to the target if
718  * needs_clflush_before is set and flushes out any written cachelines after
719  * writing if needs_clflush is set. */
720 static int
721 shmem_pwrite_fast(vm_page_t page, int shmem_page_offset, int page_length,
722                   char __user *user_data,
723                   bool page_do_bit17_swizzling,
724                   bool needs_clflush_before,
725                   bool needs_clflush_after)
726 {
727         char *vaddr;
728         struct sf_buf *sf;
729         int ret;
730
731         if (unlikely(page_do_bit17_swizzling))
732                 return -EINVAL;
733
734         sched_pin();
735         sf = sf_buf_alloc(page, SFB_NOWAIT | SFB_CPUPRIVATE);
736         if (sf == NULL) {
737                 sched_unpin();
738                 return (-EFAULT);
739         }
740         vaddr = (char *)sf_buf_kva(sf);
741         if (needs_clflush_before)
742                 drm_clflush_virt_range(vaddr + shmem_page_offset,
743                                        page_length);
744         ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
745                                                 user_data,
746                                                 page_length);
747         if (needs_clflush_after)
748                 drm_clflush_virt_range(vaddr + shmem_page_offset,
749                                        page_length);
750         sf_buf_free(sf);
751         sched_unpin();
752
753         return ret ? -EFAULT : 0;
754 }
755
756 /* Only difference to the fast-path function is that this can handle bit17
757  * and uses non-atomic copy and kmap functions. */
758 static int
759 shmem_pwrite_slow(vm_page_t page, int shmem_page_offset, int page_length,
760                   char __user *user_data,
761                   bool page_do_bit17_swizzling,
762                   bool needs_clflush_before,
763                   bool needs_clflush_after)
764 {
765         char *vaddr;
766         struct sf_buf *sf;
767         int ret;
768
769         sf = sf_buf_alloc(page, 0);
770         vaddr = (char *)sf_buf_kva(sf);
771         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
772                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
773                                              page_length,
774                                              page_do_bit17_swizzling);
775         if (page_do_bit17_swizzling)
776                 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
777                                                 user_data,
778                                                 page_length);
779         else
780                 ret = __copy_from_user(vaddr + shmem_page_offset,
781                                        user_data,
782                                        page_length);
783         if (needs_clflush_after)
784                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
785                                              page_length,
786                                              page_do_bit17_swizzling);
787         sf_buf_free(sf);
788
789         return ret ? -EFAULT : 0;
790 }
791
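/*
 * CPU write path mirroring i915_gem_shmem_pread(): a per-page sf_buf
 * fast path with the lock held, and a sleepable slow path with the DRM
 * lock dropped.  Uncached objects get partially overwritten cachelines
 * flushed before the copy and, when needs_clflush_after is set, the
 * written range flushed afterwards.
 */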
792 static int
793 i915_gem_shmem_pwrite(struct drm_device *dev,
794                       struct drm_i915_gem_object *obj,
795                       struct drm_i915_gem_pwrite *args,
796                       struct drm_file *file)
797 {
798         ssize_t remain;
799         off_t offset;
800         char __user *user_data;
801         int shmem_page_offset, page_length, ret = 0;
802         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
803         int hit_slowpath = 0;
804         int needs_clflush_after = 0;
805         int needs_clflush_before = 0;
806
807         user_data = to_user_ptr(args->data_ptr);
808         remain = args->size;
809
810         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
811
812         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
813                 /* If we're not in the cpu write domain, set ourself into the gtt
814                  * write domain and manually flush cachelines (if required). This
815                  * optimizes for the case when the gpu will use the data
816                  * right away and we therefore have to clflush anyway. */
817                 if (obj->cache_level == I915_CACHE_NONE)
818                         needs_clflush_after = 1;
819                 if (obj->gtt_space) {
820                         ret = i915_gem_object_set_to_gtt_domain(obj, true);
821                         if (ret)
822                                 return ret;
823                 }
824         }
825         /* The same trick applies to invalidating partially written cachelines
826          * before writing.  */
827         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
828             && obj->cache_level == I915_CACHE_NONE)
829                 needs_clflush_before = 1;
830
831         ret = i915_gem_object_get_pages(obj);
832         if (ret)
833                 return ret;
834
835         i915_gem_object_pin_pages(obj);
836
837         offset = args->offset;
838         obj->dirty = 1;
839
840         VM_OBJECT_WLOCK(obj->base.vm_obj);
841         for (vm_page_t page = vm_page_find_least(obj->base.vm_obj,
842             OFF_TO_IDX(offset));; page = vm_page_next(page)) {
843                 VM_OBJECT_WUNLOCK(obj->base.vm_obj);
844                 int partial_cacheline_write;
845
846                 if (remain <= 0)
847                         break;
848
849                 /* Operation in this page
850                  *
851                  * shmem_page_offset = offset within page in shmem file
852                  * page_length = bytes to copy for this page
853                  */
854                 shmem_page_offset = offset_in_page(offset);
855
856                 page_length = remain;
857                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
858                         page_length = PAGE_SIZE - shmem_page_offset;
859
860                 /* If we don't overwrite a cacheline completely we need to be
861                  * careful to have up-to-date data by first clflushing. Don't
862                  * overcomplicate things and flush the entire page. */
863                 partial_cacheline_write = needs_clflush_before &&
864                         ((shmem_page_offset | page_length)
865                                 & (cpu_clflush_line_size - 1));
866
867                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
868                         (page_to_phys(page) & (1 << 17)) != 0;
869
870                 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
871                                         user_data, page_do_bit17_swizzling,
872                                         partial_cacheline_write,
873                                         needs_clflush_after);
874                 if (ret == 0)
875                         goto next_page;
876
877                 hit_slowpath = 1;
878                 DRM_UNLOCK(dev);
879                 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
880                                         user_data, page_do_bit17_swizzling,
881                                         partial_cacheline_write,
882                                         needs_clflush_after);
883
884                 DRM_LOCK(dev);
885
886 next_page:
887                 vm_page_dirty(page);
888                 vm_page_reference(page);
889
890                 if (ret)
891                         goto out;
892
893                 remain -= page_length;
894                 user_data += page_length;
895                 offset += page_length;
896                 VM_OBJECT_WLOCK(obj->base.vm_obj);
897         }
898
899 out:
900         i915_gem_object_unpin_pages(obj);
901
902         if (hit_slowpath) {
903                 /* Fixup: Kill any reinstated backing storage pages */
904                 if (obj->madv == __I915_MADV_PURGED)
905                         i915_gem_object_truncate(obj);
906                 /* and flush dirty cachelines in case the object isn't in the cpu write
907                  * domain anymore. */
908                 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
909                         i915_gem_clflush_object(obj);
910                         i915_gem_chipset_flush(dev);
911                 }
912         }
913
914         if (needs_clflush_after)
915                 i915_gem_chipset_flush(dev);
916
917         return ret;
918 }
919
920 /**
921  * Writes data to the object referenced by handle.
922  *
923  * On error, the contents of the buffer that were to be modified are undefined.
924  */
925 int
926 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
927                       struct drm_file *file)
928 {
929         struct drm_i915_gem_pwrite *args = data;
930         struct drm_i915_gem_object *obj;
931         int ret;
932
933         if (args->size == 0)
934                 return 0;
935
936         if (!useracc(to_user_ptr(args->data_ptr), args->size, VM_PROT_READ))
937                 return -EFAULT;
938
939         ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
940                                            args->size);
941         if (ret)
942                 return -EFAULT;
943
944         ret = i915_mutex_lock_interruptible(dev);
945         if (ret)
946                 return ret;
947
948         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
949         if (&obj->base == NULL) {
950                 ret = -ENOENT;
951                 goto unlock;
952         }
953
954         /* Bounds check destination. */
955         if (args->offset > obj->base.size ||
956             args->size > obj->base.size - args->offset) {
957                 ret = -EINVAL;
958                 goto out;
959         }
960
961 #ifdef FREEBSD_WIP
962         /* prime objects have no backing filp to GEM pread/pwrite
963          * pages from.
964          */
965         if (!obj->base.filp) {
966                 ret = -EINVAL;
967                 goto out;
968         }
969 #endif /* FREEBSD_WIP */
970
971         CTR3(KTR_DRM, "pwrite %p %jx %jx", obj, args->offset, args->size);
972
973         ret = -EFAULT;
974         /* We can only do the GTT pwrite on untiled buffers, as otherwise
975          * it would end up going through the fenced access, and we'll get
976          * different detiling behavior between reading and writing.
977          * pread/pwrite currently are reading and writing from the CPU
978          * perspective, requiring manual detiling by the client.
979          */
980         if (obj->phys_obj) {
981                 ret = i915_gem_phys_pwrite(dev, obj, args, file);
982                 goto out;
983         }
984
985         if (obj->cache_level == I915_CACHE_NONE &&
986             obj->tiling_mode == I915_TILING_NONE &&
987             obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
988                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
989                 /* Note that the gtt paths might fail with non-page-backed user
990                  * pointers (e.g. gtt mappings when moving data between
991                  * textures). Fallback to the shmem path in that case. */
992         }
993
994         if (ret == -EFAULT || ret == -ENOSPC)
995                 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
996
997 out:
998         drm_gem_object_unreference(&obj->base);
999 unlock:
1000         DRM_UNLOCK(dev);
1001         return ret;
1002 }
1003
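/*
 * Check whether the GPU is wedged.  Returns -EAGAIN while a reset is
 * still pending so that interruptible callers can back off and retry,
 * and -EIO when the reset failed or the caller cannot handle -EAGAIN.
 */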
1004 int
1005 i915_gem_check_wedge(struct drm_i915_private *dev_priv,
1006                      bool interruptible)
1007 {
1008         if (atomic_read(&dev_priv->mm.wedged)) {
1009                 struct completion *x = &dev_priv->error_completion;
1010                 bool recovery_complete;
1011
1012                 /* Give the error handler a chance to run. */
1013                 mtx_lock(&x->lock);
1014                 recovery_complete = x->done > 0;
1015                 mtx_unlock(&x->lock);
1016
1017                 /* Non-interruptible callers can't handle -EAGAIN, hence return
1018                  * -EIO unconditionally for these. */
1019                 if (!interruptible)
1020                         return -EIO;
1021
1022                 /* Recovery complete, but still wedged means reset failure. */
1023                 if (recovery_complete)
1024                         return -EIO;
1025
1026                 return -EAGAIN;
1027         }
1028
1029         return 0;
1030 }
1031
1032 /*
1033  * Compare seqno against outstanding lazy request. Emit a request if they are
1034  * equal.
1035  */
1036 static int
1037 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
1038 {
1039         int ret;
1040
1041         DRM_LOCK_ASSERT(ring->dev);
1042
1043         ret = 0;
1044         if (seqno == ring->outstanding_lazy_request)
1045                 ret = i915_add_request(ring, NULL, NULL);
1046
1047         return ret;
1048 }
1049
1050 /**
1051  * __wait_seqno - wait until execution of seqno has finished
1052  * @ring: the ring expected to report seqno
1053  * @seqno: duh!
1054  * @interruptible: do an interruptible wait (normally yes)
1055  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1056  *
1057  * Returns 0 if the seqno was found within the allotted time. Else returns the
1058  * errno with the remaining time filled in the timeout argument.
1059  */
1060 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1061                         bool interruptible, struct timespec *timeout)
1062 {
1063         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1064         struct timespec before, now, wait_time={1,0};
1065         sbintime_t timeout_sbt;
1066         long end;
1067         bool wait_forever = true;
1068         int ret, flags;
1069
1070         if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1071                 return 0;
1072
1073         CTR2(KTR_DRM, "request_wait_begin %s %d", ring->name, seqno);
1074
1075         if (timeout != NULL) {
1076                 wait_time = *timeout;
1077                 wait_forever = false;
1078         }
1079
1080         timeout_sbt = tstosbt(wait_time);
1081
1082         if (WARN_ON(!ring->irq_get(ring)))
1083                 return -ENODEV;
1084
1085         /* Record current time in case interrupted by signal, or wedged */
1086         getrawmonotonic(&before);
1087
1088 #define EXIT_COND \
1089         (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
1090         atomic_read(&dev_priv->mm.wedged))
1091         flags = interruptible ? PCATCH : 0;
1092         mtx_lock(&dev_priv->irq_lock);
1093         do {
1094                 if (EXIT_COND) {
1095                         end = 1;
1096                 } else {
1097                         ret = -msleep_sbt(&ring->irq_queue, &dev_priv->irq_lock, flags,
1098                             "915gwr", timeout_sbt, 0, 0);
1099
1100                         /*
1101                          * NOTE Linux<->FreeBSD: Convert msleep_sbt() return
1102                          * value to something close to wait_event*_timeout()
1103                          * functions used on Linux.
1104                          *
1105                          * >0 -> condition is true (end = time remaining)
1106                          * =0 -> sleep timed out
1107                          * <0 -> error (interrupted)
1108                          *
1109                          * We fake the remaining time by returning 1. We
1110                          * compute a proper value later.
1111                          */
1112                         if (EXIT_COND)
1113                                 /* We fake a remaining time of 1 tick. */
1114                                 end = 1;
1115                         else if (ret == -EINTR || ret == -ERESTART)
1116                                 /* Interrupted. */
1117                                 end = -ERESTARTSYS;
1118                         else
1119                                 /* Timeout. */
1120                                 end = 0;
1121                 }
1122
1123                 ret = i915_gem_check_wedge(dev_priv, interruptible);
1124                 if (ret)
1125                         end = ret;
1126         } while (end == 0 && wait_forever);
1127         mtx_unlock(&dev_priv->irq_lock);
1128
1129         getrawmonotonic(&now);
1130
1131         ring->irq_put(ring);
1132         CTR3(KTR_DRM, "request_wait_end %s %d %d", ring->name, seqno, end);
1133 #undef EXIT_COND
1134
1135         if (timeout) {
1136                 timespecsub(&now, &before);
1137                 timespecsub(timeout, &now);
1138         }
1139
1140         switch (end) {
1141         case -EIO:
1142         case -EAGAIN: /* Wedged */
1143         case -ERESTARTSYS: /* Signal */
1144         case -ETIMEDOUT: /* Timeout */
1145                 return (int)end;
1146         case 0: /* Timeout */
1147                 return -ETIMEDOUT;
1148         default: /* Completed */
1149                 WARN_ON(end < 0); /* We're not aware of other errors */
1150                 return 0;
1151         }
1152 }
1153
1154 /**
1155  * Waits for a sequence number to be signaled, and cleans up the
1156  * request and object lists appropriately for that event.
1157  */
1158 int
1159 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1160 {
1161         struct drm_device *dev = ring->dev;
1162         struct drm_i915_private *dev_priv = dev->dev_private;
1163         bool interruptible = dev_priv->mm.interruptible;
1164         int ret;
1165
1166         DRM_LOCK_ASSERT(dev);
1167         BUG_ON(seqno == 0);
1168
1169         ret = i915_gem_check_wedge(dev_priv, interruptible);
1170         if (ret)
1171                 return ret;
1172
1173         ret = i915_gem_check_olr(ring, seqno);
1174         if (ret)
1175                 return ret;
1176
1177         return __wait_seqno(ring, seqno, interruptible, NULL);
1178 }
1179
1180 /**
1181  * Ensures that all rendering to the object has completed and the object is
1182  * safe to unbind from the GTT or access from the CPU.
1183  */
1184 static __must_check int
1185 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1186                                bool readonly)
1187 {
1188         struct intel_ring_buffer *ring = obj->ring;
1189         u32 seqno;
1190         int ret;
1191
1192         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1193         if (seqno == 0)
1194                 return 0;
1195
1196         ret = i915_wait_seqno(ring, seqno);
1197         if (ret)
1198                 return ret;
1199
1200         i915_gem_retire_requests_ring(ring);
1201
1202         /* Manually manage the write flush as we may have not yet
1203          * retired the buffer.
1204          */
1205         if (obj->last_write_seqno &&
1206             i915_seqno_passed(seqno, obj->last_write_seqno)) {
1207                 obj->last_write_seqno = 0;
1208                 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1209         }
1210
1211         return 0;
1212 }
1213
1214 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1215  * as the object state may change during this call.
1216  */
1217 static __must_check int
1218 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1219                                             bool readonly)
1220 {
1221         struct drm_device *dev = obj->base.dev;
1222         struct drm_i915_private *dev_priv = dev->dev_private;
1223         struct intel_ring_buffer *ring = obj->ring;
1224         u32 seqno;
1225         int ret;
1226
1227         DRM_LOCK_ASSERT(dev);
1228         BUG_ON(!dev_priv->mm.interruptible);
1229
1230         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1231         if (seqno == 0)
1232                 return 0;
1233
1234         ret = i915_gem_check_wedge(dev_priv, true);
1235         if (ret)
1236                 return ret;
1237
1238         ret = i915_gem_check_olr(ring, seqno);
1239         if (ret)
1240                 return ret;
1241
1242         DRM_UNLOCK(dev);
1243         ret = __wait_seqno(ring, seqno, true, NULL);
1244         DRM_LOCK(dev);
1245
1246         i915_gem_retire_requests_ring(ring);
1247
1248         /* Manually manage the write flush as we may have not yet
1249          * retired the buffer.
1250          */
1251         if (ret == 0 &&
1252             obj->last_write_seqno &&
1253             i915_seqno_passed(seqno, obj->last_write_seqno)) {
1254                 obj->last_write_seqno = 0;
1255                 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1256         }
1257
1258         return ret;
1259 }
1260
1261 /**
1262  * Called when user space prepares to use an object with the CPU, either
1263  * through the mmap ioctl's mapping or a GTT mapping.
1264  */
1265 int
1266 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1267                           struct drm_file *file)
1268 {
1269         struct drm_i915_gem_set_domain *args = data;
1270         struct drm_i915_gem_object *obj;
1271         uint32_t read_domains = args->read_domains;
1272         uint32_t write_domain = args->write_domain;
1273         int ret;
1274
1275         /* Only handle setting domains to types used by the CPU. */
1276         if (write_domain & I915_GEM_GPU_DOMAINS)
1277                 return -EINVAL;
1278
1279         if (read_domains & I915_GEM_GPU_DOMAINS)
1280                 return -EINVAL;
1281
1282         /* Having something in the write domain implies it's in the read
1283          * domain, and only that read domain.  Enforce that in the request.
1284          */
1285         if (write_domain != 0 && read_domains != write_domain)
1286                 return -EINVAL;
1287
1288         ret = i915_mutex_lock_interruptible(dev);
1289         if (ret)
1290                 return ret;
1291
1292         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1293         if (&obj->base == NULL) {
1294                 ret = -ENOENT;
1295                 goto unlock;
1296         }
1297
1298         /* Try to flush the object off the GPU without holding the lock.
1299          * We will repeat the flush holding the lock in the normal manner
1300          * to catch cases where we are gazumped.
1301          */
1302         ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
1303         if (ret)
1304                 goto unref;
1305
1306         if (read_domains & I915_GEM_DOMAIN_GTT) {
1307                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1308
1309                 /* Silently promote "you're not bound, there was nothing to do"
1310                  * to success, since the client was just asking us to
1311                  * make sure everything was done.
1312                  */
1313                 if (ret == -EINVAL)
1314                         ret = 0;
1315         } else {
1316                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1317         }
1318
1319 unref:
1320         drm_gem_object_unreference(&obj->base);
1321 unlock:
1322         DRM_UNLOCK(dev);
1323         return ret;
1324 }
1325
1326 /**
1327  * Called when user space has done writes to this buffer
1328  */
1329 int
1330 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1331                          struct drm_file *file)
1332 {
1333         struct drm_i915_gem_sw_finish *args = data;
1334         struct drm_i915_gem_object *obj;
1335         int ret = 0;
1336
1337         ret = i915_mutex_lock_interruptible(dev);
1338         if (ret)
1339                 return ret;
1340
1341         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1342         if (&obj->base == NULL) {
1343                 ret = -ENOENT;
1344                 goto unlock;
1345         }
1346
1347         /* Pinned buffers may be scanout, so flush the cache */
1348         if (obj->pin_count)
1349                 i915_gem_object_flush_cpu_write_domain(obj);
1350
1351         drm_gem_object_unreference(&obj->base);
1352 unlock:
1353         DRM_UNLOCK(dev);
1354         return ret;
1355 }
1356
1357 /**
1358  * Maps the contents of an object, returning the address it is mapped
1359  * into.
1360  *
1361  * While the mapping holds a reference on the contents of the object, it doesn't
1362  * imply a ref on the object itself.
1363  */
1364 int
1365 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1366                     struct drm_file *file)
1367 {
1368         struct drm_i915_gem_mmap *args = data;
1369         struct drm_gem_object *obj;
1370         struct proc *p;
1371         vm_map_t map;
1372         vm_offset_t addr;
1373         vm_size_t size;
1374         int error, rv;
1375
1376         obj = drm_gem_object_lookup(dev, file, args->handle);
1377         if (obj == NULL)
1378                 return -ENOENT;
1379
1380 #ifdef FREEBSD_WIP
1381         /* prime objects have no backing filp to GEM mmap
1382          * pages from.
1383          */
1384         if (!obj->filp) {
1385                 drm_gem_object_unreference_unlocked(obj);
1386                 return -EINVAL;
1387         }
1388 #endif /* FREEBSD_WIP */
1389
1390         error = 0;
1391         if (args->size == 0)
1392                 goto out;
1393         p = curproc;
1394         map = &p->p_vmspace->vm_map;
1395         size = round_page(args->size);
1396         PROC_LOCK(p);
1397         if (map->size + size > lim_cur_proc(p, RLIMIT_VMEM)) {
1398                 PROC_UNLOCK(p);
1399                 error = -ENOMEM;
1400                 goto out;
1401         }
1402         PROC_UNLOCK(p);
1403
1404         addr = 0;
1405         vm_object_reference(obj->vm_obj);
1406         rv = vm_map_find(map, obj->vm_obj, args->offset, &addr, args->size, 0,
1407             VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1408             VM_PROT_READ | VM_PROT_WRITE, MAP_INHERIT_SHARE);
1409         if (rv != KERN_SUCCESS) {
1410                 vm_object_deallocate(obj->vm_obj);
1411                 error = -vm_mmap_to_errno(rv);
1412         } else {
1413                 args->addr_ptr = (uint64_t)addr;
1414         }
1415 out:
1416         drm_gem_object_unreference_unlocked(obj);
1417         return (error);
1418 }
1419
1420 static int
1421 i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
1422     vm_ooffset_t foff, struct ucred *cred, u_short *color)
1423 {
1424
1425         /*
1426          * NOTE Linux<->FreeBSD: drm_gem_mmap_single() takes care of
1427          * calling drm_gem_object_reference(). That's why we don't
1428          * do this here. i915_gem_pager_dtor(), below, will call
1429          * drm_gem_object_unreference().
1430          *
1431          * On Linux, drm_gem_vm_open() references the object because
1432          * it's called when the mapping is copied. drm_gem_vm_open() is not
1433          * called when the mapping is created. So the possible sequences
1434          * are:
1435          *     1. drm_gem_mmap():     ref++
1436          *     2. drm_gem_vm_close(): ref--
1437          *
1438          *     1. drm_gem_mmap():     ref++
1439          *     2. drm_gem_vm_open():  ref++ (for the copied vma)
1440          *     3. drm_gem_vm_close(): ref-- (for the copied vma)
1441          *     4. drm_gem_vm_close(): ref-- (for the initial vma)
1442          *
1443          * On FreeBSD, i915_gem_pager_ctor() is called once during the
1444          * creation of the mapping. No callback is called when the
1445          * mapping is shared during a fork(). i915_gem_pager_dtor() is
1446          * called when the last reference to the mapping is dropped. So
1447          * the only sequence is:
1448          *     1. drm_gem_mmap_single(): ref++
1449          *     2. i915_gem_pager_ctor(): <noop>
1450          *     3. i915_gem_pager_dtor(): ref--
1451          */
1452
1453         *color = 0; /* XXXKIB */
1454         return (0);
1455 }
1456
1457 /**
1458  * i915_gem_fault - fault a page into the GTT
1459  * vma: VMA in question
1460  * vmf: fault info
1461  *
1462  * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
1463  * from userspace.  The fault handler takes care of binding the object to
1464  * the GTT (if needed), allocating and programming a fence register (again,
1465  * only if needed based on whether the old reg is still valid or the object
1466  * is tiled) and inserting a new PTE into the faulting process.
1467  *
1468  * Note that the faulting process may involve evicting existing objects
1469  * from the GTT and/or fence registers to make room.  So performance may
1470  * suffer if the GTT working set is large or there are few fence registers
1471  * left.
1472  */
1473
1474 int i915_intr_pf;
1475
1476 static int
1477 i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
1478     vm_page_t *mres)
1479 {
1480         struct drm_gem_object *gem_obj = vm_obj->handle;
1481         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
1482         struct drm_device *dev = obj->base.dev;
1483         drm_i915_private_t *dev_priv = dev->dev_private;
1484         vm_page_t page;
1485         int ret = 0;
1486 #ifdef FREEBSD_WIP
1487         bool write = (prot & VM_PROT_WRITE) != 0;
1488 #else
1489         bool write = true;
1490 #endif /* FREEBSD_WIP */
1491         bool pinned;
1492
1493         vm_object_pip_add(vm_obj, 1);
1494
1495         /*
1496          * Remove the placeholder page inserted by vm_fault() from the
1497          * object before dropping the object lock. If
1498          * i915_gem_release_mmap() is active in parallel on this gem
1499          * object, then it owns the drm device sx and might find the
1500          * placeholder already. Then, since the page is busy,
1501          * i915_gem_release_mmap() sleeps waiting for the busy state
1502          * of the page to be cleared. We would then be unable to acquire
1503          * the drm device lock until i915_gem_release_mmap() is able to
1504          * make progress.
1505          */
1506         if (*mres != NULL) {
1507                 vm_page_lock(*mres);
1508                 vm_page_remove(*mres);
1509                 vm_page_unlock(*mres);
1510         }
1511         VM_OBJECT_WUNLOCK(vm_obj);
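        /*
         * Everything from here on may be retried: whenever a busy page is
         * found, the page cannot be inserted into the object, or taking the
         * DRM lock is interrupted, the locks held at that point are dropped
         * and control jumps back to the retry label below.
         */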
1512 retry:
1513         ret = 0;
1514         pinned = 0;
1515         page = NULL;
1516
1517         if (i915_intr_pf) {
1518                 ret = i915_mutex_lock_interruptible(dev);
1519                 if (ret != 0)
1520                         goto out;
1521         } else
1522                 DRM_LOCK(dev);
1523
1524         /*
1525          * Since the object lock was dropped, another thread might have
1526          * faulted on the same GTT address and instantiated the
1527          * mapping for the page.  Recheck.
1528          */
1529         VM_OBJECT_WLOCK(vm_obj);
1530         page = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
1531         if (page != NULL) {
1532                 if (vm_page_busied(page)) {
1533                         DRM_UNLOCK(dev);
1534                         vm_page_lock(page);
1535                         VM_OBJECT_WUNLOCK(vm_obj);
1536                         vm_page_busy_sleep(page, "915pee");
1537                         goto retry;
1538                 }
1539                 goto have_page;
1540         } else
1541                 VM_OBJECT_WUNLOCK(vm_obj);
1542
1543         /* Now bind it into the GTT if needed */
1544         ret = i915_gem_object_pin(obj, 0, true, false);
1545         if (ret)
1546                 goto unlock;
1547         pinned = 1;
1548
1549         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1550         if (ret)
1551                 goto unpin;
1552
1553         ret = i915_gem_object_get_fence(obj);
1554         if (ret)
1555                 goto unpin;
1556
1557         obj->fault_mappable = true;
1558
1559         VM_OBJECT_WLOCK(vm_obj);
1560         page = PHYS_TO_VM_PAGE(dev_priv->mm.gtt_base_addr + obj->gtt_offset + offset);
1561         if (page == NULL) {
1562                 VM_OBJECT_WUNLOCK(vm_obj);
1563                 ret = -EFAULT;
1564                 goto unpin;
1565         }
1566         KASSERT((page->flags & PG_FICTITIOUS) != 0,
1567             ("physical address %#jx not fictitious",
1568             (uintmax_t)(dev_priv->mm.gtt_base_addr + obj->gtt_offset + offset)));
1569         KASSERT(page->wire_count == 1, ("wire_count not 1 %p", page));
1572
1573         if (vm_page_busied(page)) {
1574                 i915_gem_object_unpin(obj);
1575                 DRM_UNLOCK(dev);
1576                 vm_page_lock(page);
1577                 VM_OBJECT_WUNLOCK(vm_obj);
1578                 vm_page_busy_sleep(page, "915pbs");
1579                 goto retry;
1580         }
1581         if (vm_page_insert(page, vm_obj, OFF_TO_IDX(offset))) {
1582                 i915_gem_object_unpin(obj);
1583                 DRM_UNLOCK(dev);
1584                 VM_OBJECT_WUNLOCK(vm_obj);
1585                 VM_WAIT;
1586                 goto retry;
1587         }
1588         page->valid = VM_PAGE_BITS_ALL;
1589 have_page:
1590         vm_page_xbusy(page);
1591
1592         CTR4(KTR_DRM, "fault %p %jx %x phys %x", gem_obj, offset, prot,
1593             page->phys_addr);
1594         if (pinned) {
1595                 /*
1596                  * We may not have pinned the object if the page was
1597                  * found by the call to vm_page_lookup().
1598                  */
1599                 i915_gem_object_unpin(obj);
1600         }
1601         DRM_UNLOCK(dev);
1602         if (*mres != NULL) {
1603                 KASSERT(*mres != page, ("losing %p %p", *mres, page));
1604                 vm_page_lock(*mres);
1605                 vm_page_free(*mres);
1606                 vm_page_unlock(*mres);
1607         }
1608         *mres = page;
1609         vm_object_pip_wakeup(vm_obj);
1610         return (VM_PAGER_OK);
1611
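        /*
         * Error exit path: the KASSERT below ensures we only get here with a
         * non-zero error.  Transient errors (-EAGAIN, -EIO, -EINTR, and
         * -ERESTARTSYS converted to -EINTR) cause the fault to be retried
         * after yielding; anything else is reported to the VM system as
         * VM_PAGER_ERROR.
         */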
1612 unpin:
1613         i915_gem_object_unpin(obj);
1614 unlock:
1615         DRM_UNLOCK(dev);
1616 out:
1617         KASSERT(ret != 0, ("i915_gem_pager_fault: wrong return"));
1618         CTR4(KTR_DRM, "fault_fail %p %jx %x err %d", gem_obj, offset, prot,
1619             -ret);
1620         if (ret == -ERESTARTSYS) {
1621                 /*
1622                  * NOTE Linux<->FreeBSD: Convert Linux' -ERESTARTSYS to
1623                  * the more common -EINTR, so the page fault is retried.
1624                  */
1625                 ret = -EINTR;
1626         }
1627         if (ret == -EAGAIN || ret == -EIO || ret == -EINTR) {
1628                 kern_yield(PRI_USER);
1629                 goto retry;
1630         }
1631         VM_OBJECT_WLOCK(vm_obj);
1632         vm_object_pip_wakeup(vm_obj);
1633         return (VM_PAGER_ERROR);
1634 }
1635
1636 static void
1637 i915_gem_pager_dtor(void *handle)
1638 {
1639         struct drm_gem_object *obj = handle;
1640         struct drm_device *dev = obj->dev;
1641
1642         DRM_LOCK(dev);
1643         drm_gem_object_unreference(obj);
1644         DRM_UNLOCK(dev);
1645 }
1646
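/*
 * Pager operations for GEM GTT mappings.  drm_gem_mmap_single() hands this
 * table to the cdev pager when it sets up the VM object backing a GTT mmap,
 * so page faults on such a mapping end up in i915_gem_pager_fault() above.
 */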
1647 struct cdev_pager_ops i915_gem_pager_ops = {
1648         .cdev_pg_fault  = i915_gem_pager_fault,
1649         .cdev_pg_ctor   = i915_gem_pager_ctor,
1650         .cdev_pg_dtor   = i915_gem_pager_dtor
1651 };
1652
1653 /**
1654  * i915_gem_release_mmap - remove physical page mappings
1655  * @obj: obj in question
1656  *
1657  * Preserve the reservation of the mmapping with the DRM core code, but
1658  * relinquish ownership of the pages back to the system.
1659  *
1660  * It is vital that we remove the page mapping if we have mapped a tiled
1661  * object through the GTT and then lose the fence register due to
1662  * resource pressure. Similarly if the object has been moved out of the
1663  * aperture, then pages mapped into userspace must be revoked. Removing the
1664  * mapping will then trigger a page fault on the next user access, allowing
1665  * fixup by i915_gem_fault().
1666  */
1667 void
1668 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1669 {
1670         vm_object_t devobj;
1671         vm_page_t page;
1672         int i, page_count;
1673
1674         if (!obj->fault_mappable)
1675                 return;
1676
1677         CTR3(KTR_DRM, "release_mmap %p %x %x", obj, obj->gtt_offset,
1678             OFF_TO_IDX(obj->base.size));
1679         devobj = cdev_pager_lookup(obj);
1680         if (devobj != NULL) {
1681                 page_count = OFF_TO_IDX(obj->base.size);
1682
1683                 VM_OBJECT_WLOCK(devobj);
1684 retry:
1685                 for (i = 0; i < page_count; i++) {
1686                         page = vm_page_lookup(devobj, i);
1687                         if (page == NULL)
1688                                 continue;
1689                         if (vm_page_sleep_if_busy(page, "915unm"))
1690                                 goto retry;
1691                         cdev_pager_free_page(devobj, page);
1692                 }
1693                 VM_OBJECT_WUNLOCK(devobj);
1694                 vm_object_deallocate(devobj);
1695         }
1696
1697         obj->fault_mappable = false;
1698 }
1699
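/*
 * i915_gem_get_gtt_size - size of the GTT region needed for an object
 *
 * On gen4+ hardware, and for untiled objects on any generation, the GTT
 * size is simply the object size.  Older chips require a power-of-two
 * fence region for tiled objects, with a 1MB minimum on gen3 and 512KB on
 * gen2.  For example, a 300KB tiled object maps to a 512KB region on gen2
 * and a 1MB region on gen3.
 */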
1700 static uint32_t
1701 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1702 {
1703         uint32_t gtt_size;
1704
1705         if (INTEL_INFO(dev)->gen >= 4 ||
1706             tiling_mode == I915_TILING_NONE)
1707                 return size;
1708
1709         /* Previous chips need a power-of-two fence region when tiling */
1710         if (INTEL_INFO(dev)->gen == 3)
1711                 gtt_size = 1024*1024;
1712         else
1713                 gtt_size = 512*1024;
1714
1715         while (gtt_size < size)
1716                 gtt_size <<= 1;
1717
1718         return gtt_size;
1719 }
1720
1721 /**
1722  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1723  * @obj: object to check
1724  *
1725  * Return the required GTT alignment for an object, taking into account
1726  * potential fence register mapping.
1727  */
1728 static uint32_t
1729 i915_gem_get_gtt_alignment(struct drm_device *dev,
1730                            uint32_t size,
1731                            int tiling_mode)
1732 {
1733         /*
1734          * Minimum alignment is 4k (GTT page size), but might be greater
1735          * if a fence register is needed for the object.
1736          */
1737         if (INTEL_INFO(dev)->gen >= 4 ||
1738             tiling_mode == I915_TILING_NONE)
1739                 return 4096;
1740
1741         /*
1742          * Previous chips need to be aligned to the size of the smallest
1743          * fence register that can contain the object.
1744          */
1745         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1746 }
1747
1748 /**
1749  * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1750  *                                       unfenced object
1751  * @dev: the device
1752  * @size: size of the object
1753  * @tiling_mode: tiling mode of the object
1754  *
1755  * Return the required GTT alignment for an object, only taking into account
1756  * unfenced tiled surface requirements.
1757  */
1758 uint32_t
1759 i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1760                                     uint32_t size,
1761                                     int tiling_mode)
1762 {
1763         /*
1764          * Minimum alignment is 4k (GTT page size) for sane hw.
1765          */
1766         if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
1767             tiling_mode == I915_TILING_NONE)
1768                 return 4096;
1769
1770         /* Previous hardware however needs to be aligned to a power-of-two
1771          * tile height. The simplest method for determining this is to reuse
1772          * the power-of-two object size.
1773          */
1774         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1775 }
1776
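/*
 * Reserve a fake mmap offset for the object, retrying after progressively
 * more aggressive reclaim (purging purgeable objects, then shrinking
 * everything) if drm_gem_create_mmap_offset() reports that the offset
 * space is exhausted (-ENOSPC).
 */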
1777 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1778 {
1779         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1780         int ret;
1781
1782         if (obj->base.on_map)
1783                 return 0;
1784
1785         dev_priv->mm.shrinker_no_lock_stealing = true;
1786
1787         ret = drm_gem_create_mmap_offset(&obj->base);
1788         if (ret != -ENOSPC)
1789                 goto out;
1790
1791         /* Badly fragmented mmap space? The only way we can recover
1792          * space is by destroying unwanted objects. We can't randomly release
1793          * mmap_offsets as userspace expects them to be persistent for the
1794          * lifetime of the objects. The closest we can do is to release the
1795          * offsets on purgeable objects by truncating them and marking them
1796          * purged, preventing userspace from ever using those objects again.
1797          */
1798         i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1799         ret = drm_gem_create_mmap_offset(&obj->base);
1800         if (ret != -ENOSPC)
1801                 goto out;
1802
1803         i915_gem_shrink_all(dev_priv);
1804         ret = drm_gem_create_mmap_offset(&obj->base);
1805 out:
1806         dev_priv->mm.shrinker_no_lock_stealing = false;
1807
1808         return ret;
1809 }
1810
1811 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1812 {
1813         if (!obj->base.on_map)
1814                 return;
1815
1816         drm_gem_free_mmap_offset(&obj->base);
1817 }
1818
1819 int
1820 i915_gem_mmap_gtt(struct drm_file *file,
1821                   struct drm_device *dev,
1822                   uint32_t handle,
1823                   uint64_t *offset)
1824 {
1825         struct drm_i915_private *dev_priv = dev->dev_private;
1826         struct drm_i915_gem_object *obj;
1827         int ret;
1828
1829         ret = i915_mutex_lock_interruptible(dev);
1830         if (ret)
1831                 return ret;
1832
1833         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1834         if (&obj->base == NULL) {
1835                 ret = -ENOENT;
1836                 goto unlock;
1837         }
1838
1839         if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1840                 ret = -E2BIG;
1841                 goto out;
1842         }
1843
1844         if (obj->madv != I915_MADV_WILLNEED) {
1845                 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1846                 ret = -EINVAL;
1847                 goto out;
1848         }
1849
1850         ret = i915_gem_object_create_mmap_offset(obj);
1851         if (ret)
1852                 goto out;
1853
1854         *offset = DRM_GEM_MAPPING_OFF(obj->base.map_list.key) |
1855             DRM_GEM_MAPPING_KEY;
1856
1857 out:
1858         drm_gem_object_unreference(&obj->base);
1859 unlock:
1860         DRM_UNLOCK(dev);
1861         return ret;
1862 }
1863
1864 /**
1865  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1866  * @dev: DRM device
1867  * @data: GTT mapping ioctl data
1868  * @file: GEM object info
1869  *
1870  * Simply returns the fake offset to userspace so it can mmap it.
1871  * The mmap call will end up in drm_gem_mmap(), which will set things
1872  * up so we can get faults in the handler above.
1873  *
1874  * The fault handler will take care of binding the object into the GTT
1875  * (since it may have been evicted to make room for something), allocating
1876  * a fence register, and mapping the appropriate aperture address into
1877  * userspace.
1878  */
1879 int
1880 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1881                         struct drm_file *file)
1882 {
1883         struct drm_i915_gem_mmap_gtt *args = data;
1884
1885         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1886 }
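
/*
 * Illustrative userspace sketch (not part of this file): after the ioctl
 * above fills in the fake offset, the caller maps the object by passing
 * that offset to mmap(2) on the DRM device fd, e.g.:
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, arg.offset);
 *
 * Faults on the resulting mapping are then serviced by
 * i915_gem_pager_fault() above.
 */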
1887
1888 /* Immediately discard the backing storage */
1889 static void
1890 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1891 {
1892         vm_object_t vm_obj;
1893
1894         vm_obj = obj->base.vm_obj;
1895         VM_OBJECT_WLOCK(vm_obj);
1896         vm_object_page_remove(vm_obj, 0, 0, false);
1897         VM_OBJECT_WUNLOCK(vm_obj);
1898         i915_gem_object_free_mmap_offset(obj);
1899
1900         obj->madv = __I915_MADV_PURGED;
1901 }
1902
1903 static inline int
1904 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1905 {
1906         return obj->madv == I915_MADV_DONTNEED;
1907 }
1908
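/*
 * Unwire the backing pages of the object in the pindex range [si, ei),
 * dropping the driver's wired-page accounting for every page whose wire
 * count reaches zero.  The backing VM object must be locked by the caller.
 */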
1909 static void
1910 i915_gem_object_put_pages_range_locked(struct drm_i915_gem_object *obj,
1911     vm_pindex_t si, vm_pindex_t ei)
1912 {
1913         vm_object_t vm_obj;
1914         vm_page_t page;
1915         vm_pindex_t i;
1916
1917         vm_obj = obj->base.vm_obj;
1918         VM_OBJECT_ASSERT_LOCKED(vm_obj);
1919         for (i = si,  page = vm_page_lookup(vm_obj, i); i < ei;
1920             page = vm_page_next(page), i++) {
1921                 KASSERT(page->pindex == i, ("pindex %jx %jx",
1922                     (uintmax_t)page->pindex, (uintmax_t)i));
1923                 vm_page_lock(page);
1924                 vm_page_unwire(page, PQ_INACTIVE);
1925                 if (page->wire_count == 0)
1926                         atomic_add_long(&i915_gem_wired_pages_cnt, -1);
1927                 vm_page_unlock(page);
1928         }
1929 }
1930
1931 #define GEM_PARANOID_CHECK_GTT 0
1932 #if GEM_PARANOID_CHECK_GTT
1933 static void
1934 i915_gem_assert_pages_not_mapped(struct drm_device *dev, vm_page_t *ma,
1935     int page_count)
1936 {
1937         struct drm_i915_private *dev_priv;
1938         vm_paddr_t pa;
1939         unsigned long start, end;
1940         u_int i;
1941         int j;
1942
1943         dev_priv = dev->dev_private;
1944         start = OFF_TO_IDX(dev_priv->mm.gtt_start);
1945         end = OFF_TO_IDX(dev_priv->mm.gtt_end);
1946         for (i = start; i < end; i++) {
1947                 pa = intel_gtt_read_pte_paddr(i);
1948                 for (j = 0; j < page_count; j++) {
1949                         if (pa == VM_PAGE_TO_PHYS(ma[j])) {
1950                                 panic("Page %p in GTT pte index %d pte %x",
1951                                     ma[j], i, intel_gtt_read_pte(i));
1952                         }
1953                 }
1954         }
1955 }
1956 #endif
1957
1958 static void
1959 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1960 {
1961         int page_count = obj->base.size / PAGE_SIZE;
1962         int ret, i;
1963
1964         BUG_ON(obj->madv == __I915_MADV_PURGED);
1965
1966         ret = i915_gem_object_set_to_cpu_domain(obj, true);
1967         if (ret) {
1968                 /* In the event of a disaster, abandon all caches and
1969                  * hope for the best.
1970                  */
1971                 WARN_ON(ret != -EIO);
1972                 i915_gem_clflush_object(obj);
1973                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1974         }
1975
1976         if (i915_gem_object_needs_bit17_swizzle(obj))
1977                 i915_gem_object_save_bit_17_swizzle(obj);
1978
1979         if (obj->madv == I915_MADV_DONTNEED)
1980                 obj->dirty = 0;
1981
1982         VM_OBJECT_WLOCK(obj->base.vm_obj);
1983 #if GEM_PARANOID_CHECK_GTT
1984         i915_gem_assert_pages_not_mapped(obj->base.dev, obj->pages, page_count);
1985 #endif
1986         for (i = 0; i < page_count; i++) {
1987                 vm_page_t page = obj->pages[i];
1988
1989                 if (obj->dirty)
1990                         vm_page_dirty(page);
1991
1992                 if (obj->madv == I915_MADV_WILLNEED)
1993                         vm_page_reference(page);
1994
1995                 vm_page_lock(page);
1996                 vm_page_unwire(obj->pages[i], PQ_ACTIVE);
1997                 vm_page_unlock(page);
1998                 atomic_add_long(&i915_gem_wired_pages_cnt, -1);
1999         }
2000         VM_OBJECT_WUNLOCK(obj->base.vm_obj);
2001         obj->dirty = 0;
2002
2003         free(obj->pages, DRM_I915_GEM);
2004         obj->pages = NULL;
2005 }
2006
2007 static int
2008 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
2009 {
2010         const struct drm_i915_gem_object_ops *ops = obj->ops;
2011
2012         if (obj->pages == NULL)
2013                 return 0;
2014
2015         BUG_ON(obj->gtt_space);
2016
2017         if (obj->pages_pin_count)
2018                 return -EBUSY;
2019
2020         /* ->put_pages might need to allocate memory for the bit17 swizzle
2021          * array, hence protect them from being reaped by removing them from gtt
2022          * lists early. */
2023         list_del(&obj->gtt_list);
2024
2025         ops->put_pages(obj);
2026         obj->pages = NULL;
2027
2028         if (i915_gem_object_is_purgeable(obj))
2029                 i915_gem_object_truncate(obj);
2030
2031         return 0;
2032 }
2033
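/*
 * Walk the unbound list and then the inactive list, releasing the backing
 * pages of eligible objects (inactive objects are unbound before their
 * pages are dropped) until roughly `target' pages have been reclaimed.
 * With purgeable_only set, only objects marked I915_MADV_DONTNEED are
 * touched; a target of -1 means "reclaim everything eligible".
 */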
2034 static long
2035 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
2036                   bool purgeable_only)
2037 {
2038         struct drm_i915_gem_object *obj, *next;
2039         long count = 0;
2040
2041         list_for_each_entry_safe(obj, next,
2042                                  &dev_priv->mm.unbound_list,
2043                                  gtt_list) {
2044                 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
2045                     i915_gem_object_put_pages(obj) == 0) {
2046                         count += obj->base.size >> PAGE_SHIFT;
2047                         if (target != -1 && count >= target)
2048                                 return count;
2049                 }
2050         }
2051
2052         list_for_each_entry_safe(obj, next,
2053                                  &dev_priv->mm.inactive_list,
2054                                  mm_list) {
2055                 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
2056                     i915_gem_object_unbind(obj) == 0 &&
2057                     i915_gem_object_put_pages(obj) == 0) {
2058                         count += obj->base.size >> PAGE_SHIFT;
2059                         if (target != -1 && count >= target)
2060                                 return count;
2061                 }
2062         }
2063
2064         return count;
2065 }
2066
2067 static long
2068 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
2069 {
2070         return __i915_gem_shrink(dev_priv, target, true);
2071 }
2072
2073 static void
2074 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
2075 {
2076         struct drm_i915_gem_object *obj, *next;
2077
2078         i915_gem_evict_everything(dev_priv->dev);
2079
2080         list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
2081                 i915_gem_object_put_pages(obj);
2082 }
2083
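/*
 * Wire in the backing pages covering the byte range [start, end), applying
 * the bit-17 swizzle fixup to pages that were freshly instantiated.  On
 * failure, every page wired so far is released again.
 */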
2084 static int
2085 i915_gem_object_get_pages_range(struct drm_i915_gem_object *obj,
2086     off_t start, off_t end)
2087 {
2088         vm_object_t vm_obj;
2089         vm_page_t page;
2090         vm_pindex_t si, ei, i;
2091         bool need_swizzle, fresh;
2092
2093         need_swizzle = i915_gem_object_needs_bit17_swizzle(obj) != 0;
2094         vm_obj = obj->base.vm_obj;
2095         si = OFF_TO_IDX(trunc_page(start));
2096         ei = OFF_TO_IDX(round_page(end));
2097         VM_OBJECT_WLOCK(vm_obj);
2098         for (i = si; i < ei; i++) {
2099                 page = i915_gem_wire_page(vm_obj, i, &fresh);
2100                 if (page == NULL)
2101                         goto failed;
2102                 if (need_swizzle && fresh)
2103                         i915_gem_object_do_bit_17_swizzle_page(obj, page);
2104         }
2105         VM_OBJECT_WUNLOCK(vm_obj);
2106         return (0);
2107 failed:
2108         i915_gem_object_put_pages_range_locked(obj, si, i);
2109         VM_OBJECT_WUNLOCK(vm_obj);
2110         return (-EIO);
2111 }
2112
2113 static int
2114 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2115 {
2116         vm_object_t vm_obj;
2117         vm_page_t page;
2118         vm_pindex_t i, page_count;
2119         int res;
2120
2121         /* Assert that the object is not currently in any GPU domain. As it
2122          * wasn't in the GTT, there shouldn't be any way it could have been in
2123          * a GPU cache
2124          */
2125         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2126         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2127         KASSERT(obj->pages == NULL, ("Obj already has pages"));
2128
2129         page_count = OFF_TO_IDX(obj->base.size);
2130         obj->pages = malloc(page_count * sizeof(vm_page_t), DRM_I915_GEM,
2131             M_WAITOK);
2132         res = i915_gem_object_get_pages_range(obj, 0, obj->base.size);
2133         if (res != 0) {
2134                 free(obj->pages, DRM_I915_GEM);
2135                 obj->pages = NULL;
2136                 return (res);
2137         }
2138         vm_obj = obj->base.vm_obj;
2139         VM_OBJECT_WLOCK(vm_obj);
2140         for (i = 0, page = vm_page_lookup(vm_obj, 0); i < page_count;
2141             i++, page = vm_page_next(page)) {
2142                 KASSERT(page->pindex == i, ("pindex %jx %jx",
2143                     (uintmax_t)page->pindex, (uintmax_t)i));
2144                 obj->pages[i] = page;
2145         }
2146         VM_OBJECT_WUNLOCK(vm_obj);
2147         return (0);
2148 }
2149
2150 /* Ensure that the associated pages are gathered from the backing storage
2151  * and pinned into our object. i915_gem_object_get_pages() may be called
2152  * multiple times before they are released by a single call to
2153  * i915_gem_object_put_pages() - once the pages are no longer referenced
2154  * either as a result of memory pressure (reaping pages under the shrinker)
2155  * or as the object is itself released.
2156  */
2157 int
2158 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2159 {
2160         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2161         const struct drm_i915_gem_object_ops *ops = obj->ops;
2162         int ret;
2163
2164         if (obj->pages)
2165                 return 0;
2166
2167         BUG_ON(obj->pages_pin_count);
2168
2169         ret = ops->get_pages(obj);
2170         if (ret)
2171                 return ret;
2172
2173         list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
2174         return 0;
2175 }
2176
2177 void
2178 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
2179                                struct intel_ring_buffer *ring)
2180 {
2181         struct drm_device *dev = obj->base.dev;
2182         struct drm_i915_private *dev_priv = dev->dev_private;
2183         u32 seqno = intel_ring_get_seqno(ring);
2184
2185         BUG_ON(ring == NULL);
2186         obj->ring = ring;
2187
2188         /* Add a reference if we're newly entering the active list. */
2189         if (!obj->active) {
2190                 drm_gem_object_reference(&obj->base);
2191                 obj->active = 1;
2192         }
2193
2194         /* Move from whatever list we were on to the tail of execution. */
2195         list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
2196         list_move_tail(&obj->ring_list, &ring->active_list);
2197
2198         obj->last_read_seqno = seqno;
2199
2200         if (obj->fenced_gpu_access) {
2201                 obj->last_fenced_seqno = seqno;
2202
2203                 /* Bump MRU to take account of the delayed flush */
2204                 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2205                         struct drm_i915_fence_reg *reg;
2206
2207                         reg = &dev_priv->fence_regs[obj->fence_reg];
2208                         list_move_tail(&reg->lru_list,
2209                                        &dev_priv->mm.fence_list);
2210                 }
2211         }
2212 }
2213
2214 static void
2215 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2216 {
2217         struct drm_device *dev = obj->base.dev;
2218         struct drm_i915_private *dev_priv = dev->dev_private;
2219
2220         BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
2221         BUG_ON(!obj->active);
2222
2223         list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2224
2225         list_del_init(&obj->ring_list);
2226         obj->ring = NULL;
2227
2228         obj->last_read_seqno = 0;
2229         obj->last_write_seqno = 0;
2230         obj->base.write_domain = 0;
2231
2232         obj->last_fenced_seqno = 0;
2233         obj->fenced_gpu_access = false;
2234
2235         obj->active = 0;
2236         drm_gem_object_unreference(&obj->base);
2237
2238         WARN_ON(i915_verify_lists(dev));
2239 }
2240
2241 static int
2242 i915_gem_handle_seqno_wrap(struct drm_device *dev)
2243 {
2244         struct drm_i915_private *dev_priv = dev->dev_private;
2245         struct intel_ring_buffer *ring;
2246         int ret, i, j;
2247
2248         /* The hardware uses various monotonic 32-bit counters; if we
2249          * detect that they are about to wrap around, we need to idle the
2250          * GPU and reset those counters.
2251          */
2252         ret = 0;
2253         for_each_ring(ring, dev_priv, i) {
2254                 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
2255                         ret |= ring->sync_seqno[j] != 0;
2256         }
2257         if (ret == 0)
2258                 return ret;
2259
2260         ret = i915_gpu_idle(dev);
2261         if (ret)
2262                 return ret;
2263
2264         i915_gem_retire_requests(dev);
2265         for_each_ring(ring, dev_priv, i) {
2266                 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
2267                         ring->sync_seqno[j] = 0;
2268         }
2269
2270         return 0;
2271 }
2272
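/*
 * Hand out the next global seqno, reserving 0 as "no seqno".  When the
 * 32-bit counter wraps, i915_gem_handle_seqno_wrap() above idles the GPU
 * and clears the per-ring semaphore bookkeeping before numbering restarts
 * at 1.
 */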
2273 int
2274 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2275 {
2276         struct drm_i915_private *dev_priv = dev->dev_private;
2277
2278         /* reserve 0 for non-seqno */
2279         if (dev_priv->next_seqno == 0) {
2280                 int ret = i915_gem_handle_seqno_wrap(dev);
2281                 if (ret)
2282                         return ret;
2283
2284                 dev_priv->next_seqno = 1;
2285         }
2286
2287         *seqno = dev_priv->next_seqno++;
2288         return 0;
2289 }
2290
2291 int
2292 i915_add_request(struct intel_ring_buffer *ring,
2293                  struct drm_file *file,
2294                  u32 *out_seqno)
2295 {
2296         drm_i915_private_t *dev_priv = ring->dev->dev_private;
2297         struct drm_i915_gem_request *request;
2298         u32 request_ring_position;
2299         int was_empty;
2300         int ret;
2301
2302         /*
2303          * Emit any outstanding flushes - execbuf can fail to emit the flush
2304          * after having emitted the batchbuffer command. Hence we need to fix
2305          * things up similar to emitting the lazy request. The difference here
2306          * is that the flush _must_ happen before the next request, no matter
2307          * what.
2308          */
2309         ret = intel_ring_flush_all_caches(ring);
2310         if (ret)
2311                 return ret;
2312
2313         request = malloc(sizeof(*request), DRM_I915_GEM, M_NOWAIT);
2314         if (request == NULL)
2315                 return -ENOMEM;
2316
2318         /* Record the position of the start of the request so that
2319          * should we detect the updated seqno part-way through the
2320          * GPU processing the request, we never over-estimate the
2321          * position of the head.
2322          */
2323         request_ring_position = intel_ring_get_tail(ring);
2324
2325         ret = ring->add_request(ring);
2326         if (ret) {
2327                 free(request, DRM_I915_GEM);
2328                 return ret;
2329         }
2330
2331         request->seqno = intel_ring_get_seqno(ring);
2332         request->ring = ring;
2333         request->tail = request_ring_position;
2334         request->emitted_jiffies = jiffies;
2335         was_empty = list_empty(&ring->request_list);
2336         list_add_tail(&request->list, &ring->request_list);
2337         request->file_priv = NULL;
2338
2339         if (file) {
2340                 struct drm_i915_file_private *file_priv = file->driver_priv;
2341
2342                 mtx_lock(&file_priv->mm.lock);
2343                 request->file_priv = file_priv;
2344                 list_add_tail(&request->client_list,
2345                               &file_priv->mm.request_list);
2346                 mtx_unlock(&file_priv->mm.lock);
2347         }
2348
2349         CTR2(KTR_DRM, "request_add %s %d", ring->name, request->seqno);
2350         ring->outstanding_lazy_request = 0;
2351
2352         if (!dev_priv->mm.suspended) {
2353                 if (i915_enable_hangcheck) {
2354                         callout_schedule(&dev_priv->hangcheck_timer,
2355                             DRM_I915_HANGCHECK_PERIOD);
2356                 }
2357                 if (was_empty) {
2358                         taskqueue_enqueue_timeout(dev_priv->wq,
2359                             &dev_priv->mm.retire_work, hz);
2360                         intel_mark_busy(dev_priv->dev);
2361                 }
2362         }
2363
2364         if (out_seqno)
2365                 *out_seqno = request->seqno;
2366         return 0;
2367 }
2368
2369 static inline void
2370 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2371 {
2372         struct drm_i915_file_private *file_priv = request->file_priv;
2373
2374         if (!file_priv)
2375                 return;
2376
2377         mtx_lock(&file_priv->mm.lock);
2378         if (request->file_priv) {
2379                 list_del(&request->client_list);
2380                 request->file_priv = NULL;
2381         }
2382         mtx_unlock(&file_priv->mm.lock);
2383 }
2384
2385 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2386                                       struct intel_ring_buffer *ring)
2387 {
2388         if (ring->dev != NULL)
2389                 DRM_LOCK_ASSERT(ring->dev);
2390
2391         while (!list_empty(&ring->request_list)) {
2392                 struct drm_i915_gem_request *request;
2393
2394                 request = list_first_entry(&ring->request_list,
2395                                            struct drm_i915_gem_request,
2396                                            list);
2397
2398                 list_del(&request->list);
2399                 i915_gem_request_remove_from_client(request);
2400                 free(request, DRM_I915_GEM);
2401         }
2402
2403         while (!list_empty(&ring->active_list)) {
2404                 struct drm_i915_gem_object *obj;
2405
2406                 obj = list_first_entry(&ring->active_list,
2407                                        struct drm_i915_gem_object,
2408                                        ring_list);
2409
2410                 i915_gem_object_move_to_inactive(obj);
2411         }
2412 }
2413
2414 static void i915_gem_reset_fences(struct drm_device *dev)
2415 {
2416         struct drm_i915_private *dev_priv = dev->dev_private;
2417         int i;
2418
2419         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2420                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2421
2422                 i915_gem_write_fence(dev, i, NULL);
2423
2424                 if (reg->obj)
2425                         i915_gem_object_fence_lost(reg->obj);
2426
2427                 reg->pin_count = 0;
2428                 reg->obj = NULL;
2429                 INIT_LIST_HEAD(&reg->lru_list);
2430         }
2431
2432         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
2433 }
2434
2435 void i915_gem_reset(struct drm_device *dev)
2436 {
2437         struct drm_i915_private *dev_priv = dev->dev_private;
2438         struct drm_i915_gem_object *obj;
2439         struct intel_ring_buffer *ring;
2440         int i;
2441
2442         for_each_ring(ring, dev_priv, i)
2443                 i915_gem_reset_ring_lists(dev_priv, ring);
2444
2445         /* Move everything out of the GPU domains to ensure we do any
2446          * necessary invalidation upon reuse.
2447          */
2448         list_for_each_entry(obj,
2449                             &dev_priv->mm.inactive_list,
2450                             mm_list)
2451         {
2452                 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
2453         }
2454
2455         /* The fence registers are invalidated so clear them out */
2456         i915_gem_reset_fences(dev);
2457 }
2458
2459 /**
2460  * This function clears the request list as sequence numbers are passed.
2461  */
2462 void
2463 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2464 {
2465         uint32_t seqno;
2466
2467         if (list_empty(&ring->request_list))
2468                 return;
2469
2470         WARN_ON(i915_verify_lists(ring->dev));
2471
2472         seqno = ring->get_seqno(ring, true);
2473         CTR2(KTR_DRM, "retire_request_ring %s %d", ring->name, seqno);
2474
2475         while (!list_empty(&ring->request_list)) {
2476                 struct drm_i915_gem_request *request;
2477
2478                 request = list_first_entry(&ring->request_list,
2479                                            struct drm_i915_gem_request,
2480                                            list);
2481
2482                 if (!i915_seqno_passed(seqno, request->seqno))
2483                         break;
2484
2485                 CTR2(KTR_DRM, "retire_request_seqno_passed %s %d",
2486                     ring->name, seqno);
2487                 /* We know the GPU must have read the request to have
2488                  * sent us the seqno + interrupt, so use the position
2489                  * of tail of the request to update the last known position
2490                  * of the GPU head.
2491                  */
2492                 ring->last_retired_head = request->tail;
2493
2494                 list_del(&request->list);
2495                 i915_gem_request_remove_from_client(request);
2496                 free(request, DRM_I915_GEM);
2497         }
2498
2499         /* Move any buffers on the active list that are no longer referenced
2500          * by the ringbuffer to the flushing/inactive lists as appropriate.
2501          */
2502         while (!list_empty(&ring->active_list)) {
2503                 struct drm_i915_gem_object *obj;
2504
2505                 obj = list_first_entry(&ring->active_list,
2506                                       struct drm_i915_gem_object,
2507                                       ring_list);
2508
2509                 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2510                         break;
2511
2512                 i915_gem_object_move_to_inactive(obj);
2513         }
2514
2515         if (unlikely(ring->trace_irq_seqno &&
2516                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2517                 ring->irq_put(ring);
2518                 ring->trace_irq_seqno = 0;
2519         }
2520
2521         WARN_ON(i915_verify_lists(ring->dev));
2522 }
2523
2524 void
2525 i915_gem_retire_requests(struct drm_device *dev)
2526 {
2527         drm_i915_private_t *dev_priv = dev->dev_private;
2528         struct intel_ring_buffer *ring;
2529         int i;
2530
2531         for_each_ring(ring, dev_priv, i)
2532                 i915_gem_retire_requests_ring(ring);
2533 }
2534
2535 static void
2536 i915_gem_retire_work_handler(void *arg, int pending)
2537 {
2538         drm_i915_private_t *dev_priv;
2539         struct drm_device *dev;
2540         struct intel_ring_buffer *ring;
2541         bool idle;
2542         int i;
2543
2544         dev_priv = arg;
2545         dev = dev_priv->dev;
2546
2547         /* Come back later if the device is busy... */
2548         if (!sx_try_xlock(&dev->dev_struct_lock)) {
2549                 taskqueue_enqueue_timeout(dev_priv->wq,
2550                     &dev_priv->mm.retire_work, hz);
2551                 return;
2552         }
2553
2554         CTR0(KTR_DRM, "retire_task");
2555
2556         i915_gem_retire_requests(dev);
2557
2558         /* Send a periodic flush down the ring so we don't hold onto GEM
2559          * objects indefinitely.
2560          */
2561         idle = true;
2562         for_each_ring(ring, dev_priv, i) {
2563                 if (ring->gpu_caches_dirty)
2564                         i915_add_request(ring, NULL, NULL);
2565
2566                 idle &= list_empty(&ring->request_list);
2567         }
2568
2569         if (!dev_priv->mm.suspended && !idle)
2570                 taskqueue_enqueue_timeout(dev_priv->wq,
2571                     &dev_priv->mm.retire_work, hz);
2572         if (idle)
2573                 intel_mark_idle(dev);
2574
2575         DRM_UNLOCK(dev);
2576 }
2577
2578 /**
2579  * Ensures that an object will eventually get non-busy by flushing any required
2580  * write domains, emitting any outstanding lazy request and retiring any
2581  * completed requests.
2582  */
2583 static int
2584 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2585 {
2586         int ret;
2587
2588         if (obj->active) {
2589                 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2590                 if (ret)
2591                         return ret;
2592
2593                 i915_gem_retire_requests_ring(obj->ring);
2594         }
2595
2596         return 0;
2597 }
2598
2599 /**
2600  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2601  * @DRM_IOCTL_ARGS: standard ioctl arguments
2602  *
2603  * Returns 0 if successful, else an error is returned with the remaining time in
2604  * the timeout parameter.
2605  *  -ETIME: object is still busy after timeout
2606  *  -ERESTARTSYS: signal interrupted the wait
2607  *  -ENOENT: object doesn't exist
2608  * Also possible, but rare:
2609  *  -EAGAIN: GPU wedged
2610  *  -ENOMEM: damn
2611  *  -ENODEV: Internal IRQ fail
2612  *  -E?: The add request failed
2613  *
2614  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2615  * non-zero timeout parameter the wait ioctl will wait for the given number of
2616  * nanoseconds on an object becoming unbusy. Since the wait itself does so
2617  * without holding struct_mutex the object may become re-busied before this
2618  * function completes. A similar but shorter race condition exists in the busy
2619  * ioctl.
2620  */
2621 int
2622 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2623 {
2624         struct drm_i915_gem_wait *args = data;
2625         struct drm_i915_gem_object *obj;
2626         struct intel_ring_buffer *ring = NULL;
2627         struct timespec timeout_stack, *timeout = NULL;
2628         u32 seqno = 0;
2629         int ret = 0;
2630
2631         if (args->timeout_ns >= 0) {
2632                 timeout_stack.tv_sec = args->timeout_ns / 1000000000;
2633                 timeout_stack.tv_nsec = args->timeout_ns % 1000000000;
2634                 timeout = &timeout_stack;
2635         }
2636
2637         ret = i915_mutex_lock_interruptible(dev);
2638         if (ret)
2639                 return ret;
2640
2641         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2642         if (&obj->base == NULL) {
2643                 DRM_UNLOCK(dev);
2644                 return -ENOENT;
2645         }
2646
2647         /* Need to make sure the object gets inactive eventually. */
2648         ret = i915_gem_object_flush_active(obj);
2649         if (ret)
2650                 goto out;
2651
2652         if (obj->active) {
2653                 seqno = obj->last_read_seqno;
2654                 ring = obj->ring;
2655         }
2656
2657         if (seqno == 0)
2658                  goto out;
2659
2660         /* Do this after the OLR check to make sure we make forward progress
2661          * polling on this IOCTL with a 0 timeout (like the busy ioctl).
2662          */
2663         if (!args->timeout_ns) {
2664                 ret = -ETIMEDOUT;
2665                 goto out;
2666         }
2667
2668         drm_gem_object_unreference(&obj->base);
2669         DRM_UNLOCK(dev);
2670
2671         ret = __wait_seqno(ring, seqno, true, timeout);
2672         if (timeout) {
2673                 args->timeout_ns = timeout->tv_sec * 1000000000 + timeout->tv_nsec;
2674         }
2675         return ret;
2676
2677 out:
2678         drm_gem_object_unreference(&obj->base);
2679         DRM_UNLOCK(dev);
2680         return ret;
2681 }
2682
2683 /**
2684  * i915_gem_object_sync - sync an object to a ring.
2685  *
2686  * @obj: object which may be in use on another ring.
2687  * @to: ring we wish to use the object on. May be NULL.
2688  *
2689  * This code is meant to abstract object synchronization with the GPU.
2690  * Calling with NULL implies synchronizing the object with the CPU
2691  * rather than a particular GPU ring.
2692  *
2693  * Returns 0 if successful, else propagates up the lower layer error.
2694  */
2695 int
2696 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2697                      struct intel_ring_buffer *to)
2698 {
2699         struct intel_ring_buffer *from = obj->ring;
2700         u32 seqno;
2701         int ret, idx;
2702
2703         if (from == NULL || to == from)
2704                 return 0;
2705
2706         if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2707                 return i915_gem_object_wait_rendering(obj, false);
2708
2709         idx = intel_ring_sync_index(from, to);
2710
2711         seqno = obj->last_read_seqno;
2712         if (seqno <= from->sync_seqno[idx])
2713                 return 0;
2714
2715         ret = i915_gem_check_olr(obj->ring, seqno);
2716         if (ret)
2717                 return ret;
2718
2719         ret = to->sync_to(to, from, seqno);
2720         if (!ret)
2721                 /* We use last_read_seqno because sync_to()
2722                  * might have just caused seqno wrap under
2723                  * the radar.
2724                  */
2725                 from->sync_seqno[idx] = obj->last_read_seqno;
2726
2727         return ret;
2728 }
2729
2730 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2731 {
2732         u32 old_write_domain, old_read_domains;
2733
2734         /* Act as a barrier for all accesses through the GTT */
2735         mb();
2736
2737         /* Force a pagefault for domain tracking on next user access */
2738         i915_gem_release_mmap(obj);
2739
2740         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2741                 return;
2742
2743         old_read_domains = obj->base.read_domains;
2744         old_write_domain = obj->base.write_domain;
2745
2746         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2747         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2748
2749         CTR3(KTR_DRM, "object_change_domain finish gtt %p %x %x",
2750             obj, old_read_domains, old_write_domain);
2751 }
2752
2753 /**
2754  * Unbinds an object from the GTT aperture.
2755  */
2756 int
2757 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2758 {
2759         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2760         int ret = 0;
2761
2762         if (obj->gtt_space == NULL)
2763                 return 0;
2764
2765         if (obj->pin_count)
2766                 return -EBUSY;
2767
2768         BUG_ON(obj->pages == NULL);
2769
2770         ret = i915_gem_object_finish_gpu(obj);
2771         if (ret)
2772                 return ret;
2773         /* Continue on if we fail due to EIO; the GPU is hung, so we
2774          * should be safe, and we need to clean up or else we might
2775          * cause memory corruption through use-after-free.
2776          */
2777
2778         i915_gem_object_finish_gtt(obj);
2779
2780         /* release the fence reg _after_ flushing */
2781         ret = i915_gem_object_put_fence(obj);
2782         if (ret)
2783                 return ret;
2784
2785         if (obj->has_global_gtt_mapping)
2786                 i915_gem_gtt_unbind_object(obj);
2787         if (obj->has_aliasing_ppgtt_mapping) {
2788                 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2789                 obj->has_aliasing_ppgtt_mapping = 0;
2790         }
2791         i915_gem_gtt_finish_object(obj);
2792
2793         list_del(&obj->mm_list);
2794         list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
2795         /* Avoid an unnecessary call to unbind on rebind. */
2796         obj->map_and_fenceable = true;
2797
2798         drm_mm_put_block(obj->gtt_space);
2799         obj->gtt_space = NULL;
2800         obj->gtt_offset = 0;
2801
2802         return 0;
2803 }
2804
2805 int i915_gpu_idle(struct drm_device *dev)
2806 {
2807         drm_i915_private_t *dev_priv = dev->dev_private;
2808         struct intel_ring_buffer *ring;
2809         int ret, i;
2810
2811         /* Flush everything onto the inactive list. */
2812         for_each_ring(ring, dev_priv, i) {
2813                 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2814                 if (ret)
2815                         return ret;
2816
2817                 ret = intel_ring_idle(ring);
2818                 if (ret)
2819                         return ret;
2820         }
2821
2822         return 0;
2823 }
2824
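/*
 * The per-generation fence register writers below encode an object's GTT
 * range, stride and tiling mode into the hardware fence format; passing a
 * NULL object clears the register.  i915_gem_write_fence() selects the
 * variant that matches the running hardware.
 */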
2825 static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
2826                                         struct drm_i915_gem_object *obj)
2827 {
2828         drm_i915_private_t *dev_priv = dev->dev_private;
2829         uint64_t val;
2830
2831         if (obj) {
2832                 u32 size = obj->gtt_space->size;
2833
2834                 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2835                                  0xfffff000) << 32;
2836                 val |= obj->gtt_offset & 0xfffff000;
2837                 val |= (uint64_t)((obj->stride / 128) - 1) <<
2838                         SANDYBRIDGE_FENCE_PITCH_SHIFT;
2839
2840                 if (obj->tiling_mode == I915_TILING_Y)
2841                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2842                 val |= I965_FENCE_REG_VALID;
2843         } else
2844                 val = 0;
2845
2846         I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
2847         POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
2848 }
2849
2850 static void i965_write_fence_reg(struct drm_device *dev, int reg,
2851                                  struct drm_i915_gem_object *obj)
2852 {
2853         drm_i915_private_t *dev_priv = dev->dev_private;
2854         uint64_t val;
2855
2856         if (obj) {
2857                 u32 size = obj->gtt_space->size;
2858
2859                 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2860                                  0xfffff000) << 32;
2861                 val |= obj->gtt_offset & 0xfffff000;
2862                 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2863                 if (obj->tiling_mode == I915_TILING_Y)
2864                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2865                 val |= I965_FENCE_REG_VALID;
2866         } else
2867                 val = 0;
2868
2869         I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
2870         POSTING_READ(FENCE_REG_965_0 + reg * 8);
2871 }
2872
2873 static void i915_write_fence_reg(struct drm_device *dev, int reg,
2874                                  struct drm_i915_gem_object *obj)
2875 {
2876         drm_i915_private_t *dev_priv = dev->dev_private;
2877         u32 val;
2878
2879         if (obj) {
2880                 u32 size = obj->gtt_space->size;
2881                 int pitch_val;
2882                 int tile_width;
2883
2884                 WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2885                      (size & -size) != size ||
2886                      (obj->gtt_offset & (size - 1)),
2887                      "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2888                      obj->gtt_offset, obj->map_and_fenceable, size);
2889
2890                 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2891                         tile_width = 128;
2892                 else
2893                         tile_width = 512;
2894
2895                 /* Note: pitch better be a power of two tile widths */
2896                 pitch_val = obj->stride / tile_width;
2897                 pitch_val = ffs(pitch_val) - 1;
2898
2899                 val = obj->gtt_offset;
2900                 if (obj->tiling_mode == I915_TILING_Y)
2901                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2902                 val |= I915_FENCE_SIZE_BITS(size);
2903                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2904                 val |= I830_FENCE_REG_VALID;
2905         } else
2906                 val = 0;
2907
2908         if (reg < 8)
2909                 reg = FENCE_REG_830_0 + reg * 4;
2910         else
2911                 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2912
2913         I915_WRITE(reg, val);
2914         POSTING_READ(reg);
2915 }
2916
2917 static void i830_write_fence_reg(struct drm_device *dev, int reg,
2918                                 struct drm_i915_gem_object *obj)
2919 {
2920         drm_i915_private_t *dev_priv = dev->dev_private;
2921         uint32_t val;
2922
2923         if (obj) {
2924                 u32 size = obj->gtt_space->size;
2925                 uint32_t pitch_val;
2926
2927                 WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2928                      (size & -size) != size ||
2929                      (obj->gtt_offset & (size - 1)),
2930                      "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2931                      obj->gtt_offset, size);
2932
2933                 pitch_val = obj->stride / 128;
2934                 pitch_val = ffs(pitch_val) - 1;
2935
2936                 val = obj->gtt_offset;
2937                 if (obj->tiling_mode == I915_TILING_Y)
2938                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2939                 val |= I830_FENCE_SIZE_BITS(size);
2940                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2941                 val |= I830_FENCE_REG_VALID;
2942         } else
2943                 val = 0;
2944
2945         I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2946         POSTING_READ(FENCE_REG_830_0 + reg * 4);
2947 }
2948
2949 static void i915_gem_write_fence(struct drm_device *dev, int reg,
2950                                  struct drm_i915_gem_object *obj)
2951 {
2952         switch (INTEL_INFO(dev)->gen) {
2953         case 7:
2954         case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
2955         case 5:
2956         case 4: i965_write_fence_reg(dev, reg, obj); break;
2957         case 3: i915_write_fence_reg(dev, reg, obj); break;
2958         case 2: i830_write_fence_reg(dev, reg, obj); break;
2959         default: break;
2960         }
2961 }
2962
2963 static inline int fence_number(struct drm_i915_private *dev_priv,
2964                                struct drm_i915_fence_reg *fence)
2965 {
2966         return fence - dev_priv->fence_regs;
2967 }
2968
2969 static void i915_gem_write_fence__ipi(void *data)
2970 {
2971         wbinvd();
2972 }
2973
2974 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2975                                          struct drm_i915_fence_reg *fence,
2976                                          bool enable)
2977 {
2978         struct drm_device *dev = obj->base.dev;
2979         struct drm_i915_private *dev_priv = dev->dev_private;
2980         int fence_reg = fence_number(dev_priv, fence);
2981
2982         /* In order to fully serialize access to the fenced region and
2983          * the update to the fence register we need to take extreme
2984          * measures on SNB+. In theory, the write to the fence register
2985          * flushes all memory transactions before, and coupled with the
2986          * mb() placed around the register write we serialise all memory
2987          * operations with respect to the changes in the tiler. Yet, on
2988          * SNB+ we need to take a step further and emit an explicit wbinvd()
2989          * on each processor in order to manually flush all memory
2990          * transactions before updating the fence register.
2991          */
2992         if (HAS_LLC(obj->base.dev))
2993                 on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
2994         i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);
2995
2996         if (enable) {
2997                 obj->fence_reg = fence_reg;
2998                 fence->obj = obj;
2999                 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
3000         } else {
3001                 obj->fence_reg = I915_FENCE_REG_NONE;
3002                 fence->obj = NULL;
3003                 list_del_init(&fence->lru_list);
3004         }
3005 }
3006
3007 static int
3008 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
3009 {
3010         if (obj->last_fenced_seqno) {
3011                 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
3012                 if (ret)
3013                         return ret;
3014
3015                 obj->last_fenced_seqno = 0;
3016         }
3017
3018         /* Ensure that all CPU reads are completed before installing a fence
3019          * and all writes before removing the fence.
3020          */
3021         if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
3022                 mb();
3023
3024         obj->fenced_gpu_access = false;
3025         return 0;
3026 }
3027
3028 int
3029 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3030 {
3031         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3032         int ret;
3033
3034         ret = i915_gem_object_flush_fence(obj);
3035         if (ret)
3036                 return ret;
3037
3038         if (obj->fence_reg == I915_FENCE_REG_NONE)
3039                 return 0;
3040
3041         i915_gem_object_update_fence(obj,
3042                                      &dev_priv->fence_regs[obj->fence_reg],
3043                                      false);
3044         i915_gem_object_fence_lost(obj);
3045
3046         return 0;
3047 }
3048
3049 static struct drm_i915_fence_reg *
3050 i915_find_fence_reg(struct drm_device *dev)
3051 {
3052         struct drm_i915_private *dev_priv = dev->dev_private;
3053         struct drm_i915_fence_reg *reg, *avail;
3054         int i;
3055
3056         /* First try to find a free reg */
3057         avail = NULL;
3058         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3059                 reg = &dev_priv->fence_regs[i];
3060                 if (!reg->obj)
3061                         return reg;
3062
3063                 if (!reg->pin_count)
3064                         avail = reg;
3065         }
3066
3067         if (avail == NULL)
3068                 return NULL;
3069
3070         /* None available, try to steal one or wait for a user to finish */
3071         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3072                 if (reg->pin_count)
3073                         continue;
3074
3075                 return reg;
3076         }
3077
3078         return NULL;
3079 }
3080
3081 /**
3082  * i915_gem_object_get_fence - set up fencing for an object
3083  * @obj: object to map through a fence reg
3084  *
3085  * When mapping objects through the GTT, userspace wants to be able to write
3086  * to them without having to worry about swizzling if the object is tiled.
3087  * This function walks the fence regs looking for a free one for @obj,
3088  * stealing one if it can't find any.
3089  *
3090  * It then sets up the reg based on the object's properties: address, pitch
3091  * and tiling format.
3092  *
3093  * For an untiled surface, this removes any existing fence.
3094  */
3095 int
3096 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3097 {
3098         struct drm_device *dev = obj->base.dev;
3099         struct drm_i915_private *dev_priv = dev->dev_private;
3100         bool enable = obj->tiling_mode != I915_TILING_NONE;
3101         struct drm_i915_fence_reg *reg;
3102         int ret;
3103
3104         /* Have we updated the tiling parameters on the object and so
3105          * need to serialise the write to the associated fence register?
3106          */
3107         if (obj->fence_dirty) {
3108                 ret = i915_gem_object_flush_fence(obj);
3109                 if (ret)
3110                         return ret;
3111         }
3112
3113         /* Just update our place in the LRU if our fence is getting reused. */
3114         if (obj->fence_reg != I915_FENCE_REG_NONE) {
3115                 reg = &dev_priv->fence_regs[obj->fence_reg];
3116                 if (!obj->fence_dirty) {
3117                         list_move_tail(&reg->lru_list,
3118                                        &dev_priv->mm.fence_list);
3119                         return 0;
3120                 }
3121         } else if (enable) {
3122                 reg = i915_find_fence_reg(dev);
3123                 if (reg == NULL)
3124                         return -EDEADLK;
3125
3126                 if (reg->obj) {
3127                         struct drm_i915_gem_object *old = reg->obj;
3128
3129                         ret = i915_gem_object_flush_fence(old);
3130                         if (ret)
3131                                 return ret;
3132
3133                         i915_gem_object_fence_lost(old);
3134                 }
3135         } else
3136                 return 0;
3137
3138         i915_gem_object_update_fence(obj, reg, enable);
3139         obj->fence_dirty = false;
3140
3141         return 0;
3142 }
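
/*
 * Usage sketch (hypothetical caller, for exposition only): a tiled object
 * that will be accessed by the CPU through the aperture is typically
 * pinned map_and_fenceable and then given a fence so the hardware
 * detiles those accesses:
 *
 *	ret = i915_gem_object_pin(obj, 0, true, false);
 *	if (ret == 0)
 *		ret = i915_gem_object_get_fence(obj);
 *
 * Both calls assume the caller holds the struct_mutex (DRM_LOCK).
 */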
3143
3144 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3145                                      struct drm_mm_node *gtt_space,
3146                                      unsigned long cache_level)
3147 {
3148         struct drm_mm_node *other;
3149
3150         /* On non-LLC machines we have to be careful when putting differing
3151          * types of snoopable memory together to avoid the prefetcher
3152          * crossing memory domains and dying.
3153          */
3154         if (HAS_LLC(dev))
3155                 return true;
3156
3157         if (gtt_space == NULL)
3158                 return true;
3159
3160         if (list_empty(&gtt_space->node_list))
3161                 return true;
3162
3163         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3164         if (other->allocated && !other->hole_follows && other->color != cache_level)
3165                 return false;
3166
3167         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3168         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3169                 return false;
3170
3171         return true;
3172 }
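
/*
 * Worked example of the check above on non-LLC hardware: a candidate node
 * is rejected when it would sit directly against an allocated neighbour of
 * a different cache colour with no guard hole between them:
 *
 *	[ neighbour, I915_CACHE_LLC ][ candidate, I915_CACHE_NONE ]  -> false
 *	[ neighbour, I915_CACHE_LLC ]  hole  [ candidate, any ]      -> true
 *
 * The hole_follows flag on the earlier of the two nodes records the gap.
 */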
3173
3174 static void i915_gem_verify_gtt(struct drm_device *dev)
3175 {
3176 #if WATCH_GTT
3177         struct drm_i915_private *dev_priv = dev->dev_private;
3178         struct drm_i915_gem_object *obj;
3179         int err = 0;
3180
3181         list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
3182                 if (obj->gtt_space == NULL) {
3183                         DRM_ERROR("object found on GTT list with no space reserved\n");
3184                         err++;
3185                         continue;
3186                 }
3187
3188                 if (obj->cache_level != obj->gtt_space->color) {
3189                         DRM_ERROR("object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
3190                                obj->gtt_space->start,
3191                                obj->gtt_space->start + obj->gtt_space->size,
3192                                obj->cache_level,
3193                                obj->gtt_space->color);
3194                         err++;
3195                         continue;
3196                 }
3197
3198                 if (!i915_gem_valid_gtt_space(dev,
3199                                               obj->gtt_space,
3200                                               obj->cache_level)) {
3201                         DRM_ERROR("invalid GTT space found at [%08lx, %08lx] - color=%x\n",
3202                                obj->gtt_space->start,
3203                                obj->gtt_space->start + obj->gtt_space->size,
3204                                obj->cache_level);
3205                         err++;
3206                         continue;
3207                 }
3208         }
3209
3210         WARN_ON(err);
3211 #endif
3212 }
3213
3214 /**
3215  * Finds free space in the GTT aperture and binds the object there.
3216  */
3217 static int
3218 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
3219                             unsigned alignment,
3220                             bool map_and_fenceable,
3221                             bool nonblocking)
3222 {
3223         struct drm_device *dev = obj->base.dev;
3224         drm_i915_private_t *dev_priv = dev->dev_private;
3225         struct drm_mm_node *node;
3226         u32 size, fence_size, fence_alignment, unfenced_alignment;
3227         bool mappable, fenceable;
3228         int ret;
3229
3230         if (obj->madv != I915_MADV_WILLNEED) {
3231                 DRM_ERROR("Attempting to bind a purgeable object\n");
3232                 return -EINVAL;
3233         }
3234
3235         fence_size = i915_gem_get_gtt_size(dev,
3236                                            obj->base.size,
3237                                            obj->tiling_mode);
3238         fence_alignment = i915_gem_get_gtt_alignment(dev,
3239                                                      obj->base.size,
3240                                                      obj->tiling_mode);
3241         unfenced_alignment =
3242                 i915_gem_get_unfenced_gtt_alignment(dev,
3243                                                     obj->base.size,
3244                                                     obj->tiling_mode);
3245
3246         if (alignment == 0)
3247                 alignment = map_and_fenceable ? fence_alignment :
3248                                                 unfenced_alignment;
3249         if (map_and_fenceable && alignment & (fence_alignment - 1)) {
3250                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
3251                 return -EINVAL;
3252         }
3253
3254         size = map_and_fenceable ? fence_size : obj->base.size;
3255
3256         /* If the object is bigger than the entire aperture, reject it early
3257          * before evicting everything in a vain attempt to find space.
3258          */
3259         if (obj->base.size >
3260             (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
3261                 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
3262                 return -E2BIG;
3263         }
3264
3265         ret = i915_gem_object_get_pages(obj);
3266         if (ret)
3267                 return ret;
3268
3269         i915_gem_object_pin_pages(obj);
3270
3271         node = malloc(sizeof(*node), DRM_MEM_MM, M_NOWAIT | M_ZERO);
3272         if (node == NULL) {
3273                 i915_gem_object_unpin_pages(obj);
3274                 return -ENOMEM;
3275         }
3276
3277  search_free:
3278         if (map_and_fenceable)
3279                 ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
3280                                                           size, alignment, obj->cache_level,
3281                                                           0, dev_priv->mm.gtt_mappable_end);
3282         else
3283                 ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
3284                                                  size, alignment, obj->cache_level);
3285         if (ret) {
3286                 ret = i915_gem_evict_something(dev, size, alignment,
3287                                                obj->cache_level,
3288                                                map_and_fenceable,
3289                                                nonblocking);
3290                 if (ret == 0)
3291                         goto search_free;
3292
3293                 i915_gem_object_unpin_pages(obj);
3294                 free(node, DRM_MEM_MM);
3295                 return ret;
3296         }
3297         if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
3298                 i915_gem_object_unpin_pages(obj);
3299                 drm_mm_put_block(node);
3300                 return -EINVAL;
3301         }
3302
3303         ret = i915_gem_gtt_prepare_object(obj);
3304         if (ret) {
3305                 i915_gem_object_unpin_pages(obj);
3306                 drm_mm_put_block(node);
3307                 return ret;
3308         }
3309
3310         list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
3311         list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3312
3313         obj->gtt_space = node;
3314         obj->gtt_offset = node->start;
3315
3316         fenceable =
3317                 node->size == fence_size &&
3318                 (node->start & (fence_alignment - 1)) == 0;
3319
3320         mappable =
3321                 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
3322
3323         obj->map_and_fenceable = mappable && fenceable;
3324
3325         i915_gem_object_unpin_pages(obj);
3326         CTR4(KTR_DRM, "object_bind %p %x %x %d", obj, obj->gtt_offset,
3327             obj->base.size, map_and_fenceable);
3328         i915_gem_verify_gtt(dev);
3329         return 0;
3330 }
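
/*
 * Flow of the binding path above, for orientation: acquire and temporarily
 * pin the backing pages, carve a drm_mm node out of the GTT (restricted to
 * the mappable range when map_and_fenceable), retry once after evicting on
 * failure, program the GTT entries, then publish gtt_space/gtt_offset and
 * recompute map_and_fenceable from the node actually obtained.
 */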
3331
3332 void
3333 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
3334 {
3335         /* If we don't have a page list set up, then we're not pinned
3336          * to GPU, and we can ignore the cache flush because it'll happen
3337          * again at bind time.
3338          */
3339         if (obj->pages == NULL)
3340                 return;
3341
3342         /* If the GPU is snooping the contents of the CPU cache,
3343          * we do not need to manually clear the CPU cache lines.  However,
3344          * the caches are only snooped when the render cache is
3345          * flushed/invalidated.  As we always have to emit invalidations
3346          * and flushes when moving into and out of the RENDER domain, correct
3347          * snooping behaviour occurs naturally as the result of our domain
3348          * tracking.
3349          */
3350         if (obj->cache_level != I915_CACHE_NONE)
3351                 return;
3352
3353         CTR1(KTR_DRM, "object_clflush %p", obj);
3354
3355         drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
3356 }
3357
3358 /** Flushes the GTT write domain for the object if it's dirty. */
3359 static void
3360 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3361 {
3362         uint32_t old_write_domain;
3363
3364         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3365                 return;
3366
3367         /* No actual flushing is required for the GTT write domain.  Writes
3368          * to it immediately go to main memory as far as we know, so there's
3369          * no chipset flush.  It also doesn't land in render cache.
3370          *
3371          * However, we do have to enforce the order so that all writes through
3372          * the GTT land before any writes to the device, such as updates to
3373          * the GATT itself.
3374          */
3375         wmb();
3376
3377         old_write_domain = obj->base.write_domain;
3378         obj->base.write_domain = 0;
3379
3380         CTR3(KTR_DRM, "object_change_domain flush gtt_write %p %x %x", obj,
3381             obj->base.read_domains, old_write_domain);
3382 }
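
/*
 * Ordering sketch for the wmb() above, with placeholder names: a write
 * through a GTT mapping must be visible before any later write that tells
 * the device to look at it:
 *
 *	*(gtt_vaddr + offset) = data;     (lands in main memory)
 *	wmb();                            (orders it ahead of what follows)
 *	I915_WRITE(reg, kick);            (device may now sample the data)
 *
 * gtt_vaddr, offset, data, reg and kick are illustrative only.
 */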
3383
3384 /** Flushes the CPU write domain for the object if it's dirty. */
3385 static void
3386 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3387 {
3388         uint32_t old_write_domain;
3389
3390         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3391                 return;
3392
3393         i915_gem_clflush_object(obj);
3394         i915_gem_chipset_flush(obj->base.dev);
3395         old_write_domain = obj->base.write_domain;
3396         obj->base.write_domain = 0;
3397
3398         CTR3(KTR_DRM, "object_change_domain flush_cpu_write %p %x %x", obj,
3399             obj->base.read_domains, old_write_domain);
3400 }
3401
3402 /**
3403  * Moves a single object to the GTT read, and possibly write domain.
3404  *
3405  * This function returns when the move is complete, including waiting on
3406  * flushes to occur.
3407  */
3408 int
3409 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3410 {
3411         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
3412         uint32_t old_write_domain, old_read_domains;
3413         int ret;
3414
3415         /* Not valid to be called on unbound objects. */
3416         if (obj->gtt_space == NULL)
3417                 return -EINVAL;
3418
3419         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3420                 return 0;
3421
3422         ret = i915_gem_object_wait_rendering(obj, !write);
3423         if (ret)
3424                 return ret;
3425
3426         i915_gem_object_flush_cpu_write_domain(obj);
3427
3428         old_write_domain = obj->base.write_domain;
3429         old_read_domains = obj->base.read_domains;
3430
3431         /* It should now be out of any other write domains, and we can update
3432          * the domain values for our changes.
3433          */
3434         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3435         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3436         if (write) {
3437                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3438                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3439                 obj->dirty = 1;
3440         }
3441
3442         CTR3(KTR_DRM, "object_change_domain set_to_gtt %p %x %x", obj,
3443             old_read_domains, old_write_domain);
3444
3445         /* And bump the LRU for this access */
3446         if (i915_gem_object_is_inactive(obj))
3447                 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3448
3449         return 0;
3450 }
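
/*
 * Typical use, sketched: paths that are about to write through the
 * aperture (a GTT pwrite or a GTT mmap fault, for instance) move the
 * object into the GTT domain first so that CPU caches are flushed and the
 * object is marked dirty:
 *
 *	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *	if (ret == 0)
 *		... write through the mappable aperture ...
 */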
3451
3452 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3453                                     enum i915_cache_level cache_level)
3454 {
3455         struct drm_device *dev = obj->base.dev;
3456         drm_i915_private_t *dev_priv = dev->dev_private;
3457         int ret;
3458
3459         if (obj->cache_level == cache_level)
3460                 return 0;
3461
3462         if (obj->pin_count) {
3463                 DRM_DEBUG("cannot change the cache level of pinned objects\n");
3464                 return -EBUSY;
3465         }
3466
3467         if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
3468                 ret = i915_gem_object_unbind(obj);
3469                 if (ret)
3470                         return ret;
3471         }
3472
3473         if (obj->gtt_space) {
3474                 ret = i915_gem_object_finish_gpu(obj);
3475                 if (ret)
3476                         return ret;
3477
3478                 i915_gem_object_finish_gtt(obj);
3479
3480                 /* Before SandyBridge, you could not use tiling or fence
3481                  * registers with snooped memory, so relinquish any fences
3482                  * currently pointing to our region in the aperture.
3483                  */
3484                 if (INTEL_INFO(dev)->gen < 6) {
3485                         ret = i915_gem_object_put_fence(obj);
3486                         if (ret)
3487                                 return ret;
3488                 }
3489
3490                 if (obj->has_global_gtt_mapping)
3491                         i915_gem_gtt_bind_object(obj, cache_level);
3492                 if (obj->has_aliasing_ppgtt_mapping)
3493                         i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3494                                                obj, cache_level);
3495
3496                 obj->gtt_space->color = cache_level;
3497         }
3498
3499         if (cache_level == I915_CACHE_NONE) {
3500                 u32 old_read_domains, old_write_domain;
3501
3502                 /* If we're coming from LLC cached, then we haven't
3503                  * actually been tracking whether the data is in the
3504                  * CPU cache or not, since we only allow one bit set
3505                  * in obj->write_domain and have been skipping the clflushes.
3506                  * Just set it to the CPU cache for now.
3507                  */
3508                 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3509                 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
3510
3511                 old_read_domains = obj->base.read_domains;
3512                 old_write_domain = obj->base.write_domain;
3513
3514                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3515                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3516
3517                 CTR3(KTR_DRM, "object_change_domain set_cache_level %p %x %x",
3518                     obj, old_read_domains, old_write_domain);
3519         }
3520
3521         obj->cache_level = cache_level;
3522         i915_gem_verify_gtt(dev);
3523         return 0;
3524 }
3525
3526 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3527                                struct drm_file *file)
3528 {
3529         struct drm_i915_gem_caching *args = data;
3530         struct drm_i915_gem_object *obj;
3531         int ret;
3532
3533         ret = i915_mutex_lock_interruptible(dev);
3534         if (ret)
3535                 return ret;
3536
3537         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3538         if (&obj->base == NULL) {
3539                 ret = -ENOENT;
3540                 goto unlock;
3541         }
3542
3543         args->caching = obj->cache_level != I915_CACHE_NONE;
3544
3545         drm_gem_object_unreference(&obj->base);
3546 unlock:
3547         DRM_UNLOCK(dev);
3548         return ret;
3549 }
3550
3551 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3552                                struct drm_file *file)
3553 {
3554         struct drm_i915_gem_caching *args = data;
3555         struct drm_i915_gem_object *obj;
3556         enum i915_cache_level level;
3557         int ret;
3558
3559         switch (args->caching) {
3560         case I915_CACHING_NONE:
3561                 level = I915_CACHE_NONE;
3562                 break;
3563         case I915_CACHING_CACHED:
3564                 level = I915_CACHE_LLC;
3565                 break;
3566         default:
3567                 return -EINVAL;
3568         }
3569
3570         ret = i915_mutex_lock_interruptible(dev);
3571         if (ret)
3572                 return ret;
3573
3574         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3575         if (&obj->base == NULL) {
3576                 ret = -ENOENT;
3577                 goto unlock;
3578         }
3579
3580         ret = i915_gem_object_set_cache_level(obj, level);
3581
3582         drm_gem_object_unreference(&obj->base);
3583 unlock:
3584         DRM_UNLOCK(dev);
3585         return ret;
3586 }
3587
3588 static bool is_pin_display(struct drm_i915_gem_object *obj)
3589 {
3590         /* There are 3 sources that pin objects:
3591          *   1. The display engine (scanouts, sprites, cursors);
3592          *   2. Reservations for execbuffer;
3593          *   3. The user.
3594          *
3595          * We can ignore reservations as we hold the struct_mutex and
3596          * are only called outside of the reservation path.  The user
3597          * can only increment pin_count once, and so if after
3598          * subtracting the potential reference by the user, any pin_count
3599          * remains, it must be due to another use by the display engine.
3600          */
3601         return obj->pin_count - !!obj->user_pin_count;
3602 }
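
/*
 * Worked example: pin_count == 2 with user_pin_count == 1 yields 1, so at
 * least one pin must belong to the display engine; pin_count == 1 with
 * user_pin_count == 1 yields 0 and the object is considered unpinned from
 * the display's point of view.
 */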
3603
3604 /*
3605  * Prepare buffer for display plane (scanout, cursors, etc).
3606  * Can be called from an uninterruptible phase (modesetting) and allows
3607  * any flushes to be pipelined (for pageflips).
3608  */
3609 int
3610 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3611                                      u32 alignment,
3612                                      struct intel_ring_buffer *pipelined)
3613 {
3614         u32 old_read_domains, old_write_domain;
3615         int ret;
3616
3617         if (pipelined != obj->ring) {
3618                 ret = i915_gem_object_sync(obj, pipelined);
3619                 if (ret)
3620                         return ret;
3621         }
3622
3623         /* Mark the pin_display early so that we account for the
3624          * display coherency whilst setting up the cache domains.
3625          */
3626         obj->pin_display = true;
3627
3628         /* The display engine is not coherent with the LLC cache on gen6.  As
3629          * a result, we make sure that the pinning that is about to occur is
3630          * done with uncached PTEs. This is lowest common denominator for all
3631          * done with uncached PTEs. This is the lowest common denominator for all
3632          *
3633          * However for gen6+, we could do better by using the GFDT bit instead
3634          * of uncaching, which would allow us to flush all the LLC-cached data
3635          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3636          */
3637         ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
3638         if (ret)
3639                 goto err_unpin_display;
3640
3641         /* As the user may map the buffer once pinned in the display plane
3642          * (e.g. libkms for the bootup splash), we have to ensure that we
3643          * always use map_and_fenceable for all scanout buffers.
3644          */
3645         ret = i915_gem_object_pin(obj, alignment, true, false);
3646         if (ret)
3647                 goto err_unpin_display;
3648
3649         i915_gem_object_flush_cpu_write_domain(obj);
3650
3651         old_write_domain = obj->base.write_domain;
3652         old_read_domains = obj->base.read_domains;
3653
3654         /* It should now be out of any other write domains, and we can update
3655          * the domain values for our changes.
3656          */
3657         obj->base.write_domain = 0;
3658         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3659
3660         CTR3(KTR_DRM, "object_change_domain pin_to_display_plane %p %x %x",
3661             obj, old_read_domains, old_write_domain);
3662
3663         return 0;
3664
3665 err_unpin_display:
3666         obj->pin_display = is_pin_display(obj);
3667         return ret;
3668 }
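
/*
 * The sequence above, condensed: optionally sync against the pipelined
 * ring, mark pin_display, force uncached (I915_CACHE_NONE) PTEs, pin the
 * object map_and_fenceable, then flush any pending CPU writes so scanout
 * reads coherent data. The matching teardown is
 * i915_gem_object_unpin_from_display_plane() below.
 */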
3669
3670 void
3671 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3672 {
3673         i915_gem_object_unpin(obj);
3674         obj->pin_display = is_pin_display(obj);
3675 }
3676
3677 int
3678 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3679 {
3680         int ret;
3681
3682         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3683                 return 0;
3684
3685         ret = i915_gem_object_wait_rendering(obj, false);
3686         if (ret)
3687                 return ret;
3688
3689         /* Ensure that we invalidate the GPU's caches and TLBs. */
3690         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3691         return 0;
3692 }
3693
3694 /**
3695  * Moves a single object to the CPU read, and possibly write domain.
3696  *
3697  * This function returns when the move is complete, including waiting on
3698  * flushes to occur.
3699  */
3700 int
3701 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3702 {
3703         uint32_t old_write_domain, old_read_domains;
3704         int ret;
3705
3706         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3707                 return 0;
3708
3709         ret = i915_gem_object_wait_rendering(obj, !write);
3710         if (ret)
3711                 return ret;
3712
3713         i915_gem_object_flush_gtt_write_domain(obj);
3714
3715         old_write_domain = obj->base.write_domain;
3716         old_read_domains = obj->base.read_domains;
3717
3718         /* Flush the CPU cache if it's still invalid. */
3719         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3720                 i915_gem_clflush_object(obj);
3721
3722                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3723         }
3724
3725         /* It should now be out of any other write domains, and we can update
3726          * the domain values for our changes.
3727          */
3728         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3729
3730         /* If we're writing through the CPU, then the GPU read domains will
3731          * need to be invalidated at next use.
3732          */
3733         if (write) {
3734                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3735                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3736         }
3737
3738         CTR3(KTR_DRM, "object_change_domain set_to_cpu %p %x %x", obj,
3739             old_read_domains, old_write_domain);
3740
3741         return 0;
3742 }
3743
3744 /* Throttle our rendering by waiting until the ring has completed our requests
3745  * emitted over 20 msec ago.
3746  *
3747  * Note that if we were to use the current jiffies each time around the loop,
3748  * we wouldn't escape the function with any frames outstanding if the time to
3749  * render a frame was over 20ms.
3750  *
3751  * This should get us reasonable parallelism between CPU and GPU but also
3752  * relatively low latency when blocking on a particular request to finish.
3753  */
3754 static int
3755 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3756 {
3757         struct drm_i915_private *dev_priv = dev->dev_private;
3758         struct drm_i915_file_private *file_priv = file->driver_priv;
3759         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3760         struct drm_i915_gem_request *request;
3761         struct intel_ring_buffer *ring = NULL;
3762         u32 seqno = 0;
3763         int ret;
3764
3765         if (atomic_read(&dev_priv->mm.wedged))
3766                 return -EIO;
3767
3768         mtx_lock(&file_priv->mm.lock);
3769         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3770                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3771                         break;
3772
3773                 ring = request->ring;
3774                 seqno = request->seqno;
3775         }
3776         mtx_unlock(&file_priv->mm.lock);
3777
3778         if (seqno == 0)
3779                 return 0;
3780
3781         ret = __wait_seqno(ring, seqno, true, NULL);
3782         if (ret == 0)
3783                 taskqueue_enqueue_timeout(dev_priv->wq,
3784                     &dev_priv->mm.retire_work, 0);
3785
3786         return ret;
3787 }
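
/*
 * Worked example of the 20 ms window above: with requests emitted at
 * t-30 ms, t-25 ms and t-5 ms, recent_enough is t-20 ms, so the scan stops
 * at the t-5 ms request and the caller waits on the seqno of the t-25 ms
 * one, i.e. on the newest request that already falls outside the window.
 */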
3788
3789 int
3790 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3791                     uint32_t alignment,
3792                     bool map_and_fenceable,
3793                     bool nonblocking)
3794 {
3795         int ret;
3796
3797         if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3798                 return -EBUSY;
3799
3800         if (obj->gtt_space != NULL) {
3801                 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3802                     (map_and_fenceable && !obj->map_and_fenceable)) {
3803                         WARN(obj->pin_count,
3804                              "bo is already pinned with incorrect alignment:"
3805                              " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3806                              " obj->map_and_fenceable=%d\n",
3807                              obj->gtt_offset, alignment,
3808                              map_and_fenceable,
3809                              obj->map_and_fenceable);
3810                         ret = i915_gem_object_unbind(obj);
3811                         if (ret)
3812                                 return ret;
3813                 }
3814         }
3815
3816         if (obj->gtt_space == NULL) {
3817                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3818
3819                 ret = i915_gem_object_bind_to_gtt(obj, alignment,
3820                                                   map_and_fenceable,
3821                                                   nonblocking);
3822                 if (ret)
3823                         return ret;
3824
3825                 if (!dev_priv->mm.aliasing_ppgtt)
3826                         i915_gem_gtt_bind_object(obj, obj->cache_level);
3827         }
3828
3829         if (!obj->has_global_gtt_mapping && map_and_fenceable)
3830                 i915_gem_gtt_bind_object(obj, obj->cache_level);
3831
3832         obj->pin_count++;
3833         obj->pin_mappable |= map_and_fenceable;
3834
3835         return 0;
3836 }
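
/*
 * Pin/unpin sketch (hypothetical caller): pins nest, so every successful
 * call here must be balanced by i915_gem_object_unpin() under the same
 * struct_mutex:
 *
 *	ret = i915_gem_object_pin(obj, 4096, true, false);
 *	if (ret == 0) {
 *		... access the object at obj->gtt_offset ...
 *		i915_gem_object_unpin(obj);
 *	}
 *
 * The 4096 byte alignment is an example value only.
 */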
3837
3838 void
3839 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3840 {
3841         BUG_ON(obj->pin_count == 0);
3842         BUG_ON(obj->gtt_space == NULL);
3843
3844         if (--obj->pin_count == 0)
3845                 obj->pin_mappable = false;
3846 }
3847
3848 int
3849 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3850                    struct drm_file *file)
3851 {
3852         struct drm_i915_gem_pin *args = data;
3853         struct drm_i915_gem_object *obj;
3854         int ret;
3855
3856         ret = i915_mutex_lock_interruptible(dev);
3857         if (ret)
3858                 return ret;
3859
3860         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3861         if (&obj->base == NULL) {
3862                 ret = -ENOENT;
3863                 goto unlock;
3864         }
3865
3866         if (obj->madv != I915_MADV_WILLNEED) {
3867                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3868                 ret = -EINVAL;
3869                 goto out;
3870         }
3871
3872         if (obj->pin_filp != NULL && obj->pin_filp != file) {
3873                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3874                           args->handle);
3875                 ret = -EINVAL;
3876                 goto out;
3877         }
3878
3879         if (obj->user_pin_count == 0) {
3880                 ret = i915_gem_object_pin(obj, args->alignment, true, false);
3881                 if (ret)
3882                         goto out;
3883         }
3884
3885         obj->user_pin_count++;
3886         obj->pin_filp = file;
3887
3888         /* XXX - flush the CPU caches for pinned objects
3889          * as the X server doesn't manage domains yet
3890          */
3891         i915_gem_object_flush_cpu_write_domain(obj);
3892         args->offset = obj->gtt_offset;
3893 out:
3894         drm_gem_object_unreference(&obj->base);
3895 unlock:
3896         DRM_UNLOCK(dev);
3897         return ret;
3898 }
3899
3900 int
3901 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3902                      struct drm_file *file)
3903 {
3904         struct drm_i915_gem_pin *args = data;
3905         struct drm_i915_gem_object *obj;
3906         int ret;
3907
3908         ret = i915_mutex_lock_interruptible(dev);
3909         if (ret)
3910                 return ret;
3911
3912         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3913         if (&obj->base == NULL) {
3914                 ret = -ENOENT;
3915                 goto unlock;
3916         }
3917
3918         if (obj->pin_filp != file) {
3919                 DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
3920                           args->handle);
3921                 ret = -EINVAL;
3922                 goto out;
3923         }
3924         obj->user_pin_count--;
3925         if (obj->user_pin_count == 0) {
3926                 obj->pin_filp = NULL;
3927                 i915_gem_object_unpin(obj);
3928         }
3929
3930 out:
3931         drm_gem_object_unreference(&obj->base);
3932 unlock:
3933         DRM_UNLOCK(dev);
3934         return ret;
3935 }
3936
3937 int
3938 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3939                     struct drm_file *file)
3940 {
3941         struct drm_i915_gem_busy *args = data;
3942         struct drm_i915_gem_object *obj;
3943         int ret;
3944
3945         ret = i915_mutex_lock_interruptible(dev);
3946         if (ret)
3947                 return ret;
3948
3949         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3950         if (&obj->base == NULL) {
3951                 ret = -ENOENT;
3952                 goto unlock;
3953         }
3954
3955         /* Count all active objects as busy, even if they are currently not used
3956          * by the gpu. Users of this interface expect objects to eventually
3957          * become non-busy without any further actions, therefore emit any
3958          * necessary flushes here.
3959          */
3960         ret = i915_gem_object_flush_active(obj);
3961
3962         args->busy = obj->active;
3963         if (obj->ring) {
3964                 BUILD_BUG_ON(I915_NUM_RINGS > 16);
3965                 args->busy |= intel_ring_flag(obj->ring) << 16;
3966         }
3967
3968         drm_gem_object_unreference(&obj->base);
3969 unlock:
3970         DRM_UNLOCK(dev);
3971         return ret;
3972 }
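
/*
 * Layout of args->busy as filled in above: bit 0 reports whether the
 * object is still active, and intel_ring_flag() of the last ring to use it
 * is shifted into the upper 16 bits. A hypothetical userspace decode:
 *
 *	still_busy = args.busy & 1;
 *	ring_flag  = args.busy >> 16;
 */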
3973
3974 int
3975 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3976                         struct drm_file *file_priv)
3977 {
3978         return i915_gem_ring_throttle(dev, file_priv);
3979 }
3980
3981 int
3982 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3983                        struct drm_file *file_priv)
3984 {
3985         struct drm_i915_gem_madvise *args = data;
3986         struct drm_i915_gem_object *obj;
3987         int ret;
3988
3989         switch (args->madv) {
3990         case I915_MADV_DONTNEED:
3991         case I915_MADV_WILLNEED:
3992             break;
3993         default:
3994             return -EINVAL;
3995         }
3996
3997         ret = i915_mutex_lock_interruptible(dev);
3998         if (ret)
3999                 return ret;
4000
4001         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
4002         if (&obj->base == NULL) {
4003                 ret = -ENOENT;
4004                 goto unlock;
4005         }
4006
4007         if (obj->pin_count) {
4008                 ret = -EINVAL;
4009                 goto out;
4010         }
4011
4012         if (obj->madv != __I915_MADV_PURGED)
4013                 obj->madv = args->madv;
4014
4015         /* if the object is no longer attached, discard its backing storage */
4016         if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
4017                 i915_gem_object_truncate(obj);
4018
4019         args->retained = obj->madv != __I915_MADV_PURGED;
4020
4021 out:
4022         drm_gem_object_unreference(&obj->base);
4023 unlock:
4024         DRM_UNLOCK(dev);
4025         return ret;
4026 }
4027
4028 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4029                           const struct drm_i915_gem_object_ops *ops)
4030 {
4031         INIT_LIST_HEAD(&obj->mm_list);
4032         INIT_LIST_HEAD(&obj->gtt_list);
4033         INIT_LIST_HEAD(&obj->ring_list);
4034         INIT_LIST_HEAD(&obj->exec_list);
4035
4036         obj->ops = ops;
4037
4038         obj->fence_reg = I915_FENCE_REG_NONE;
4039         obj->madv = I915_MADV_WILLNEED;
4040         /* Avoid an unnecessary call to unbind on the first bind. */
4041         obj->map_and_fenceable = true;
4042
4043         i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4044 }
4045
4046 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4047         .get_pages = i915_gem_object_get_pages_gtt,
4048         .put_pages = i915_gem_object_put_pages_gtt,
4049 };
4050
4051 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4052                                                   size_t size)
4053 {
4054         struct drm_i915_gem_object *obj;
4055
4056         obj = malloc(sizeof(*obj), DRM_I915_GEM, M_WAITOK | M_ZERO);
4057         if (obj == NULL)
4058                 return NULL;
4059
4060         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4061                 free(obj, DRM_I915_GEM);
4062                 return NULL;
4063         }
4064
4065 #ifdef FREEBSD_WIP
4066         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4067         if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4068                 /* 965gm cannot relocate objects above 4GiB. */
4069                 mask &= ~__GFP_HIGHMEM;
4070                 mask |= __GFP_DMA32;
4071         }
4072
4073         mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
4074         mapping_set_gfp_mask(mapping, mask);
4075 #endif /* FREEBSD_WIP */
4076
4077         i915_gem_object_init(obj, &i915_gem_object_ops);
4078
4079         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4080         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4081
4082         if (HAS_LLC(dev)) {
4083                 /* On some devices, we can have the GPU use the LLC (the CPU
4084                  * cache) for about a 10% performance improvement
4085                  * compared to uncached.  Graphics requests other than
4086                  * display scanout are coherent with the CPU in
4087                  * accessing this cache.  This means in this mode we
4088                  * don't need to clflush on the CPU side, and on the
4089                  * GPU side we only need to flush internal caches to
4090                  * get data visible to the CPU.
4091                  *
4092                  * However, we maintain the display planes as UC, and so
4093                  * need to rebind when first used as such.
4094                  */
4095                 obj->cache_level = I915_CACHE_LLC;
4096         } else
4097                 obj->cache_level = I915_CACHE_NONE;
4098
4099         return obj;
4100 }
4101
4102 int i915_gem_init_object(struct drm_gem_object *obj)
4103 {
4104         printf("i915_gem_init_object called\n");
4105
4106         return 0;
4107 }
4108
4109 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4110 {
4111         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4112         struct drm_device *dev = obj->base.dev;
4113         drm_i915_private_t *dev_priv = dev->dev_private;
4114
4115         CTR1(KTR_DRM, "object_destroy_tail %p", obj);
4116
4117         if (obj->phys_obj)
4118                 i915_gem_detach_phys_object(dev, obj);
4119
4120         obj->pin_count = 0;
4121         if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
4122                 bool was_interruptible;
4123
4124                 was_interruptible = dev_priv->mm.interruptible;
4125                 dev_priv->mm.interruptible = false;
4126
4127                 WARN_ON(i915_gem_object_unbind(obj));
4128
4129                 dev_priv->mm.interruptible = was_interruptible;
4130         }
4131
4132         obj->pages_pin_count = 0;
4133         i915_gem_object_put_pages(obj);
4134         i915_gem_object_free_mmap_offset(obj);
4135
4136         BUG_ON(obj->pages);
4137
4138 #ifdef FREEBSD_WIP
4139         if (obj->base.import_attach)
4140                 drm_prime_gem_destroy(&obj->base, NULL);
4141 #endif /* FREEBSD_WIP */
4142
4143         drm_gem_object_release(&obj->base);
4144         i915_gem_info_remove_obj(dev_priv, obj->base.size);
4145
4146         free(obj->bit_17, DRM_I915_GEM);
4147         free(obj, DRM_I915_GEM);
4148 }
4149
4150 int
4151 i915_gem_idle(struct drm_device *dev)
4152 {
4153         drm_i915_private_t *dev_priv = dev->dev_private;
4154         int ret;
4155
4156         DRM_LOCK(dev);
4157
4158         if (dev_priv->mm.suspended) {
4159                 DRM_UNLOCK(dev);
4160                 return 0;
4161         }
4162
4163         ret = i915_gpu_idle(dev);
4164         if (ret) {
4165                 DRM_UNLOCK(dev);
4166                 return ret;
4167         }
4168         i915_gem_retire_requests(dev);
4169
4170         /* Under UMS, be paranoid and evict. */
4171         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4172                 i915_gem_evict_everything(dev);
4173
4174         i915_gem_reset_fences(dev);
4175
4176         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
4177          * We need to replace this with a semaphore, or something.
4178          * And not confound mm.suspended!
4179          */
4180         dev_priv->mm.suspended = 1;
4181         callout_stop(&dev_priv->hangcheck_timer);
4182
4183         i915_kernel_lost_context(dev);
4184         i915_gem_cleanup_ringbuffer(dev);
4185
4186         DRM_UNLOCK(dev);
4187
4188         /* Cancel the retire work handler, which should be idle now. */
4189         taskqueue_cancel_timeout(dev_priv->wq, &dev_priv->mm.retire_work, NULL);
4190
4191         return 0;
4192 }
4193
4194 void i915_gem_l3_remap(struct drm_device *dev)
4195 {
4196         drm_i915_private_t *dev_priv = dev->dev_private;
4197         u32 misccpctl;
4198         int i;
4199
4200         if (!HAS_L3_GPU_CACHE(dev))
4201                 return;
4202
4203         if (!dev_priv->l3_parity.remap_info)
4204                 return;
4205
4206         misccpctl = I915_READ(GEN7_MISCCPCTL);
4207         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
4208         POSTING_READ(GEN7_MISCCPCTL);
4209
4210         for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4211                 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
4212                 if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
4213                         DRM_DEBUG("0x%x was already programmed to %x\n",
4214                                   GEN7_L3LOG_BASE + i, remap);
4215                 if (remap && !dev_priv->l3_parity.remap_info[i/4])
4216                         DRM_DEBUG_DRIVER("Clearing remapped register\n");
4217                 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
4218         }
4219
4220         /* Make sure all the writes land before disabling dop clock gating */
4221         POSTING_READ(GEN7_L3LOG_BASE);
4222
4223         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
4224 }
4225
4226 void i915_gem_init_swizzling(struct drm_device *dev)
4227 {
4228         drm_i915_private_t *dev_priv = dev->dev_private;
4229
4230         if (INTEL_INFO(dev)->gen < 5 ||
4231             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4232                 return;
4233
4234         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4235                                  DISP_TILE_SURFACE_SWIZZLING);
4236
4237         if (IS_GEN5(dev))
4238                 return;
4239
4240         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4241         if (IS_GEN6(dev))
4242                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4243         else
4244                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4245 }
4246
4247 static bool
4248 intel_enable_blt(struct drm_device *dev)
4249 {
4250         if (!HAS_BLT(dev))
4251                 return false;
4252
4253         /* The blitter was dysfunctional on early prototypes */
4254         if (IS_GEN6(dev) && pci_get_revid(dev->dev) < 8) {
4255                 DRM_INFO("BLT not supported on this pre-production hardware;"
4256                          " graphics performance will be degraded.\n");
4257                 return false;
4258         }
4259
4260         return true;
4261 }
4262
4263 int
4264 i915_gem_init_hw(struct drm_device *dev)
4265 {
4266         drm_i915_private_t *dev_priv = dev->dev_private;
4267         int ret;
4268
4269 #ifdef FREEBSD_WIP
4270         if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4271                 return -EIO;
4272 #endif /* FREEBSD_WIP */
4273
4274         if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
4275                 I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
4276
4277         i915_gem_l3_remap(dev);
4278
4279         i915_gem_init_swizzling(dev);
4280
4281         ret = intel_init_render_ring_buffer(dev);
4282         if (ret)
4283                 return ret;
4284
4285         if (HAS_BSD(dev)) {
4286                 ret = intel_init_bsd_ring_buffer(dev);
4287                 if (ret)
4288                         goto cleanup_render_ring;
4289         }
4290
4291         if (intel_enable_blt(dev)) {
4292                 ret = intel_init_blt_ring_buffer(dev);
4293                 if (ret)
4294                         goto cleanup_bsd_ring;
4295         }
4296
4297         dev_priv->next_seqno = 1;
4298
4299         /*
4300          * XXX: There was some w/a described somewhere suggesting loading
4301          * contexts before PPGTT.
4302          */
4303         i915_gem_context_init(dev);
4304         i915_gem_init_ppgtt(dev);
4305
4306         return 0;
4307
4308 cleanup_bsd_ring:
4309         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4310 cleanup_render_ring:
4311         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4312         return ret;
4313 }
4314
4315 static bool
4316 intel_enable_ppgtt(struct drm_device *dev)
4317 {
4318         if (i915_enable_ppgtt >= 0)
4319                 return i915_enable_ppgtt;
4320
4321 #ifdef CONFIG_INTEL_IOMMU
4322         /* Disable ppgtt on SNB if VT-d is on. */
4323         if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
4324                 return false;
4325 #endif
4326
4327         return true;
4328 }
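
/*
 * i915_enable_ppgtt behaves as a tri-state tunable here: a negative value
 * (the usual default) means auto-detect via the checks above, zero forces
 * aliasing PPGTT off, and any positive value forces it on.
 */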
4329
4330 int i915_gem_init(struct drm_device *dev)
4331 {
4332         struct drm_i915_private *dev_priv = dev->dev_private;
4333         unsigned long gtt_size, mappable_size;
4334         int ret;
4335
4336         gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
4337         mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
4338
4339         DRM_LOCK(dev);
4340         if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
4341                 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
4342                  * aperture accordingly when using aliasing ppgtt. */
4343                 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
4344
4345                 i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
4346
4347                 ret = i915_gem_init_aliasing_ppgtt(dev);
4348                 if (ret) {
4349                         DRM_UNLOCK(dev);
4350                         return ret;
4351                 }
4352         } else {
4353                 /* Let GEM Manage all of the aperture.
4354                  *
4355                  * However, leave one page at the end still bound to the scratch
4356                  * page.  There are a number of places where the hardware
4357                  * apparently prefetches past the end of the object, and we've
4358                  * seen multiple hangs with the GPU head pointer stuck in a
4359                  * batchbuffer bound at the last page of the aperture.  One page
4360                  * should be enough to keep any prefetching inside of the
4361                  * aperture.
4362                  */
4363                 i915_gem_init_global_gtt(dev, 0, mappable_size,
4364                                          gtt_size);
4365         }
4366
4367         ret = i915_gem_init_hw(dev);
4368         DRM_UNLOCK(dev);
4369         if (ret) {
4370                 i915_gem_cleanup_aliasing_ppgtt(dev);
4371                 return ret;
4372         }
4373
4374         /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4375         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4376                 dev_priv->dri1.allow_batchbuffer = 1;
4377         return 0;
4378 }
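
/*
 * Sizing example for the aliasing-PPGTT branch above: with the gen6-style
 * page directory, I915_PPGTT_PD_ENTRIES is 512, so 512 * PAGE_SIZE (2 MiB
 * with 4 KiB pages) of entries at the top of the GTT are reserved for the
 * page directory and the range handed to GEM shrinks by that amount.
 */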
4379
4380 void
4381 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4382 {
4383         drm_i915_private_t *dev_priv = dev->dev_private;
4384         struct intel_ring_buffer *ring;
4385         int i;
4386
4387         for_each_ring(ring, dev_priv, i)
4388                 intel_cleanup_ring_buffer(ring);
4389 }
4390
4391 int
4392 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4393                        struct drm_file *file_priv)
4394 {
4395         drm_i915_private_t *dev_priv = dev->dev_private;
4396         int ret;
4397
4398         if (drm_core_check_feature(dev, DRIVER_MODESET))
4399                 return 0;
4400
4401         if (atomic_read(&dev_priv->mm.wedged)) {
4402                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4403                 atomic_set(&dev_priv->mm.wedged, 0);
4404         }
4405
4406         DRM_LOCK(dev);
4407         dev_priv->mm.suspended = 0;
4408
4409         ret = i915_gem_init_hw(dev);
4410         if (ret != 0) {
4411                 DRM_UNLOCK(dev);
4412                 return ret;
4413         }
4414
4415         BUG_ON(!list_empty(&dev_priv->mm.active_list));
4416         DRM_UNLOCK(dev);
4417
4418         ret = drm_irq_install(dev);
4419         if (ret)
4420                 goto cleanup_ringbuffer;
4421
4422         return 0;
4423
4424 cleanup_ringbuffer:
4425         DRM_LOCK(dev);
4426         i915_gem_cleanup_ringbuffer(dev);
4427         dev_priv->mm.suspended = 1;
4428         DRM_UNLOCK(dev);
4429
4430         return ret;
4431 }
4432
4433 int
4434 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4435                        struct drm_file *file_priv)
4436 {
4437         if (drm_core_check_feature(dev, DRIVER_MODESET))
4438                 return 0;
4439
4440         drm_irq_uninstall(dev);
4441         return i915_gem_idle(dev);
4442 }
4443
4444 void
4445 i915_gem_lastclose(struct drm_device *dev)
4446 {
4447         int ret;
4448
4449         if (drm_core_check_feature(dev, DRIVER_MODESET))
4450                 return;
4451
4452         ret = i915_gem_idle(dev);
4453         if (ret)
4454                 DRM_ERROR("failed to idle hardware: %d\n", ret);
4455 }
4456
4457 static void
4458 init_ring_lists(struct intel_ring_buffer *ring)
4459 {
4460         INIT_LIST_HEAD(&ring->active_list);
4461         INIT_LIST_HEAD(&ring->request_list);
4462 }
4463
4464 void
4465 i915_gem_load(struct drm_device *dev)
4466 {
4467         int i;
4468         drm_i915_private_t *dev_priv = dev->dev_private;
4469
4470         INIT_LIST_HEAD(&dev_priv->mm.active_list);
4471         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4472         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4473         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4474         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4475         for (i = 0; i < I915_NUM_RINGS; i++)
4476                 init_ring_lists(&dev_priv->ring[i]);
4477         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4478                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4479         TIMEOUT_TASK_INIT(dev_priv->wq, &dev_priv->mm.retire_work, 0,
4480             i915_gem_retire_work_handler, dev_priv);
4481         init_completion(&dev_priv->error_completion);
4482
4483         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4484         if (IS_GEN3(dev)) {
4485                 I915_WRITE(MI_ARB_STATE,
4486                            _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4487         }
4488
4489         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4490
4491         /* Old X drivers will take 0-2 for front, back, depth buffers */
4492         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4493                 dev_priv->fence_reg_start = 3;
4494
4495         if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4496                 dev_priv->num_fence_regs = 16;
4497         else
4498                 dev_priv->num_fence_regs = 8;
4499
4500         /* Initialize fence registers to zero */
4501         i915_gem_reset_fences(dev);
4502
4503         i915_gem_detect_bit_6_swizzle(dev);
4504         DRM_INIT_WAITQUEUE(&dev_priv->pending_flip_queue);
4505
4506         dev_priv->mm.interruptible = true;
4507
4508         dev_priv->mm.inactive_shrinker = EVENTHANDLER_REGISTER(vm_lowmem,
4509             i915_gem_inactive_shrink, dev, EVENTHANDLER_PRI_ANY);
4510 }
4511
4512 /*
4513  * Create a physically contiguous memory object for this object
4514  * e.g. for cursor + overlay regs
4515  */
4516 static int i915_gem_init_phys_object(struct drm_device *dev,
4517                                      int id, int size, int align)
4518 {
4519         drm_i915_private_t *dev_priv = dev->dev_private;
4520         struct drm_i915_gem_phys_object *phys_obj;
4521         int ret;
4522
4523         if (dev_priv->mm.phys_objs[id - 1] || !size)
4524                 return 0;
4525
4526         phys_obj = malloc(sizeof(struct drm_i915_gem_phys_object),
4527             DRM_I915_GEM, M_WAITOK | M_ZERO);
4528         if (!phys_obj)
4529                 return -ENOMEM;
4530
4531         phys_obj->id = id;
4532
4533         phys_obj->handle = drm_pci_alloc(dev, size, align, BUS_SPACE_MAXADDR);
4534         if (!phys_obj->handle) {
4535                 ret = -ENOMEM;
4536                 goto kfree_obj;
4537         }
4538 #ifdef CONFIG_X86
4539         pmap_change_attr((vm_offset_t)phys_obj->handle->vaddr,
4540             size / PAGE_SIZE, PAT_WRITE_COMBINING);
4541 #endif
4542
4543         dev_priv->mm.phys_objs[id - 1] = phys_obj;
4544
4545         return 0;
4546 kfree_obj:
4547         free(phys_obj, DRM_I915_GEM);
4548         return ret;
4549 }
4550
4551 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4552 {
4553         drm_i915_private_t *dev_priv = dev->dev_private;
4554         struct drm_i915_gem_phys_object *phys_obj;
4555
4556         if (!dev_priv->mm.phys_objs[id - 1])
4557                 return;
4558
4559         phys_obj = dev_priv->mm.phys_objs[id - 1];
4560         if (phys_obj->cur_obj) {
4561                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4562         }
4563
4564 #ifdef FREEBSD_WIP
4565 #ifdef CONFIG_X86
4566         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4567 #endif
4568 #endif /* FREEBSD_WIP */
4569
4570         drm_pci_free(dev, phys_obj->handle);
4571         free(phys_obj, DRM_I915_GEM);
4572         dev_priv->mm.phys_objs[id - 1] = NULL;
4573 }
4574
4575 void i915_gem_free_all_phys_object(struct drm_device *dev)
4576 {
4577         int i;
4578
4579         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4580                 i915_gem_free_phys_object(dev, i);
4581 }
4582
4583 void i915_gem_detach_phys_object(struct drm_device *dev,
4584                                  struct drm_i915_gem_object *obj)
4585 {
4586         struct sf_buf *sf;
4587         char *vaddr;
4588         char *dst;
4589         int i;
4590         int page_count;
4591
4592         if (!obj->phys_obj)
4593                 return;
4594         vaddr = obj->phys_obj->handle->vaddr;
4595
4596         page_count = obj->base.size / PAGE_SIZE;
4597         VM_OBJECT_WLOCK(obj->base.vm_obj);
4598         for (i = 0; i < page_count; i++) {
4599                 vm_page_t page = i915_gem_wire_page(obj->base.vm_obj, i, NULL);
4600                 if (page == NULL)
4601                         continue; /* XXX */
4602
4603                 VM_OBJECT_WUNLOCK(obj->base.vm_obj);
4604                 sf = sf_buf_alloc(page, 0);
4605                 if (sf != NULL) {
4606                         dst = (char *)sf_buf_kva(sf);
4607                         memcpy(dst, vaddr + IDX_TO_OFF(i), PAGE_SIZE);
4608                         sf_buf_free(sf);
4609                 }
4610                 drm_clflush_pages(&page, 1);
4611
4612                 VM_OBJECT_WLOCK(obj->base.vm_obj);
4613                 vm_page_reference(page);
4614                 vm_page_lock(page);
4615                 vm_page_dirty(page);
4616                 vm_page_unwire(page, PQ_INACTIVE);
4617                 vm_page_unlock(page);
4618                 atomic_add_long(&i915_gem_wired_pages_cnt, -1);
4619         }
4620         VM_OBJECT_WUNLOCK(obj->base.vm_obj);
4621         i915_gem_chipset_flush(dev);
4622
4623         obj->phys_obj->cur_obj = NULL;
4624         obj->phys_obj = NULL;
4625 }
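/*
 * Pattern note with a minimal sketch: the copy-back loop above relies on
 * the sf_buf(9) interface to get a temporary kernel mapping of each page
 * while the VM object lock is dropped.  The general idiom is:
 *
 *      struct sf_buf *sf;
 *      char *kva;
 *
 *      sf = sf_buf_alloc(page, 0);        -- may sleep for a mapping
 *      kva = (char *)sf_buf_kva(sf);      -- valid until sf_buf_free()
 *      ... copy to or from kva ...
 *      sf_buf_free(sf);                   -- release the mapping
 *
 * Because sf_buf_alloc() may sleep, the object lock is released around
 * it; wiring the page first (via i915_gem_wire_page()) is what keeps the
 * page from being reclaimed while the lock is dropped.
 */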
4626
4627 int
4628 i915_gem_attach_phys_object(struct drm_device *dev,
4629                             struct drm_i915_gem_object *obj,
4630                             int id,
4631                             int align)
4632 {
4633         drm_i915_private_t *dev_priv = dev->dev_private;
4634         struct sf_buf *sf;
4635         char *dst, *src;
4636         int ret = 0;
4637         int page_count;
4638         int i;
4639
4640         if (id > I915_MAX_PHYS_OBJECT)
4641                 return -EINVAL;
4642
4643         if (obj->phys_obj) {
4644                 if (obj->phys_obj->id == id)
4645                         return 0;
4646                 i915_gem_detach_phys_object(dev, obj);
4647         }
4648
4649         /* create a new object */
4650         if (!dev_priv->mm.phys_objs[id - 1]) {
4651                 ret = i915_gem_init_phys_object(dev, id,
4652                                                 obj->base.size, align);
4653                 if (ret) {
4654                         DRM_ERROR("failed to init phys object %d size: %zu\n",
4655                                   id, obj->base.size);
4656                         return ret;
4657                 }
4658         }
4659
4660         /* bind to the object */
4661         obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4662         obj->phys_obj->cur_obj = obj;
4663
4664         page_count = obj->base.size / PAGE_SIZE;
4665
4666         VM_OBJECT_WLOCK(obj->base.vm_obj);
4667         for (i = 0; i < page_count; i++) {
4668                 vm_page_t page = i915_gem_wire_page(obj->base.vm_obj, i, NULL);
4669                 if (page == NULL) {
4670                         ret = -EIO;
4671                         break;
4672                 }
4673                 VM_OBJECT_WUNLOCK(obj->base.vm_obj);
4674                 sf = sf_buf_alloc(page, 0);
4675                 src = (char *)sf_buf_kva(sf);
4676                 dst = (char *)obj->phys_obj->handle->vaddr + IDX_TO_OFF(i);
4677                 memcpy(dst, src, PAGE_SIZE);
4678                 sf_buf_free(sf);
4679
4680                 VM_OBJECT_WLOCK(obj->base.vm_obj);
4681
4682                 vm_page_reference(page);
4683                 vm_page_lock(page);
4684                 vm_page_unwire(page, PQ_INACTIVE);
4685                 vm_page_unlock(page);
4686                 atomic_add_long(&i915_gem_wired_pages_cnt, -1);
4687         }
4688         VM_OBJECT_WUNLOCK(obj->base.vm_obj);
4689
4690         return ret;
4691 }
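/*
 * Hypothetical call site (an assumption, not taken from this file): the
 * display code attaches cursor buffer objects to phys objects on
 * hardware whose cursor registers want a physical address, along the
 * lines of:
 *
 *      ret = i915_gem_attach_phys_object(dev, obj,
 *          (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
 *          align);
 *      if (ret != 0)
 *              return (ret);
 *
 * The pipe-based id selection and the align value are assumptions about
 * the caller.  What the function itself guarantees is visible above:
 * ids beyond I915_MAX_PHYS_OBJECT are rejected, re-attaching to the same
 * id is a no-op, and attaching to a different id detaches first.
 */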
4692
4693 static int
4694 i915_gem_phys_pwrite(struct drm_device *dev,
4695                      struct drm_i915_gem_object *obj,
4696                      struct drm_i915_gem_pwrite *args,
4697                      struct drm_file *file_priv)
4698 {
4699         void *vaddr = (char *)obj->phys_obj->handle->vaddr + args->offset;
4700         char __user *user_data = to_user_ptr(args->data_ptr);
4701
4702         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4703                 unsigned long unwritten;
4704
4705                 /* The physical object once assigned is fixed for the lifetime
4706                  * of the obj, so we can safely drop the lock and continue
4707                  * to access vaddr.
4708                  */
4709                 DRM_UNLOCK(dev);
4710                 unwritten = copy_from_user(vaddr, user_data, args->size);
4711                 DRM_LOCK(dev);
4712                 if (unwritten)
4713                         return -EFAULT;
4714         }
4715
4716         i915_gem_chipset_flush(dev);
4717         return 0;
4718 }
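/*
 * Note on the fast path/slow path split above: the "inatomic" copy is
 * attempted with the DRM lock still held and is expected to fail rather
 * than fault user pages in.  Only when it fails does the code drop the
 * lock and fall back to a regular copy_from_user(), which may sleep;
 * the in-function comment explains why touching vaddr without the lock
 * is safe.  The final i915_gem_chipset_flush() pushes the CPU writes out
 * so the GPU, which reads phys-object memory directly rather than
 * through the GTT, observes them.
 */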
4719
4720 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4721 {
4722         struct drm_i915_file_private *file_priv = file->driver_priv;
4723
4724         /* Clean up our request list when the client is going away, so that
4725          * later retire_requests won't dereference our soon-to-be-gone
4726          * file_priv.
4727          */
4728         mtx_lock(&file_priv->mm.lock);
4729         while (!list_empty(&file_priv->mm.request_list)) {
4730                 struct drm_i915_gem_request *request;
4731
4732                 request = list_first_entry(&file_priv->mm.request_list,
4733                                            struct drm_i915_gem_request,
4734                                            client_list);
4735                 list_del(&request->client_list);
4736                 request->file_priv = NULL;
4737         }
4738         mtx_unlock(&file_priv->mm.lock);
4739 }
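/*
 * Note: requests are only unlinked here, not freed.  Clearing
 * request->file_priv is the important part -- the retire path (earlier
 * in this file, outside this excerpt) checks that pointer before
 * touching the per-file request list, so a request that outlives its
 * file descriptor is retired without any client-side bookkeeping.
 */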
4740
4741 static void
4742 i915_gem_inactive_shrink(void *arg)
4743 {
4744         struct drm_device *dev = arg;
4745         struct drm_i915_private *dev_priv = dev->dev_private;
4746         int pass1, pass2;
4747
4748         if (!sx_try_xlock(&dev->dev_struct_lock)) {
4749                 return;
4750         }
4751
4752         CTR0(KTR_DRM, "gem_lowmem");
4753
4754         pass1 = i915_gem_purge(dev_priv, -1);
4755         pass2 = __i915_gem_shrink(dev_priv, -1, false);
4756
4757         if (pass2 <= pass1 / 100)
4758                 i915_gem_shrink_all(dev_priv);
4759
4760         DRM_UNLOCK(dev);
4761 }
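/*
 * Note on the heuristic above: pass1 is the number of pages recovered by
 * purging objects userspace has marked purgeable, pass2 the number
 * recovered by the broader shrink that also unbinds inactive objects.
 * If the second pass yields no more than 1% of the first, the handler
 * assumes the cheap targets are exhausted and escalates to
 * i915_gem_shrink_all().  The sx_try_xlock() up front keeps the
 * low-memory broadcast from blocking (or deadlocking) on a thread that
 * already holds the DRM lock; if the lock is busy the handler simply
 * skips this invocation.
 */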
4762
4763 static vm_page_t
4764 i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex, bool *fresh)
4765 {
4766         vm_page_t page;
4767         int rv;
4768
4769         VM_OBJECT_ASSERT_WLOCKED(object);
4770         page = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
4771         if (page->valid != VM_PAGE_BITS_ALL) {
4772                 if (vm_pager_has_page(object, pindex, NULL, NULL)) {
4773                         rv = vm_pager_get_pages(object, &page, 1, NULL, NULL);
4774                         if (rv != VM_PAGER_OK) {
4775                                 vm_page_lock(page);
4776                                 vm_page_free(page);
4777                                 vm_page_unlock(page);
4778                                 return (NULL);
4779                         }
4780                         if (fresh != NULL)
4781                                 *fresh = true;
4782                 } else {
4783                         pmap_zero_page(page);
4784                         page->valid = VM_PAGE_BITS_ALL;
4785                         page->dirty = 0;
4786                         if (fresh != NULL)
4787                                 *fresh = false;
4788                 }
4789         } else if (fresh != NULL) {
4790                 *fresh = false;
4791         }
4792         vm_page_lock(page);
4793         vm_page_wire(page);
4794         vm_page_unlock(page);
4795         vm_page_xunbusy(page);
4796         atomic_add_long(&i915_gem_wired_pages_cnt, 1);
4797         return (page);
4798 }
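/*
 * Caller-side sketch, derived from the loops in the attach/detach code
 * above: a page returned by i915_gem_wire_page() comes back wired and
 * unbusied, and the caller undoes both the wiring and the statistics
 * bump when finished, e.g.:
 *
 *      VM_OBJECT_WLOCK(obj->base.vm_obj);
 *      page = i915_gem_wire_page(obj->base.vm_obj, i, NULL);
 *      ... use the page (the lock may be dropped while it is wired) ...
 *      vm_page_lock(page);
 *      vm_page_unwire(page, PQ_INACTIVE);
 *      vm_page_unlock(page);
 *      atomic_add_long(&i915_gem_wired_pages_cnt, -1);
 *      VM_OBJECT_WUNLOCK(obj->base.vm_obj);
 *
 * The object write lock must be held across the call itself (see the
 * assertion at the top), and the zero-fill branch marks a page with no
 * pager backing as fully valid and clean so nothing stale is read and
 * nothing is written back needlessly.
 */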