1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
27  *
28  * Copyright (c) 2011 The FreeBSD Foundation
29  * All rights reserved.
30  *
31  * This software was developed by Konstantin Belousov under sponsorship from
32  * the FreeBSD Foundation.
33  *
34  * Redistribution and use in source and binary forms, with or without
35  * modification, are permitted provided that the following conditions
36  * are met:
37  * 1. Redistributions of source code must retain the above copyright
38  *    notice, this list of conditions and the following disclaimer.
39  * 2. Redistributions in binary form must reproduce the above copyright
40  *    notice, this list of conditions and the following disclaimer in the
41  *    documentation and/or other materials provided with the distribution.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
44  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
47  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53  * SUCH DAMAGE.
54  */
55
56 #include <sys/cdefs.h>
57 __FBSDID("$FreeBSD$");
58
59 #include <dev/drm2/drmP.h>
60 #include <dev/drm2/i915/i915_drm.h>
61 #include <dev/drm2/i915/i915_drv.h>
62 #include <dev/drm2/i915/intel_drv.h>
63
64 #include <sys/resourcevar.h>
65 #include <sys/sched.h>
66 #include <sys/sf_buf.h>
67
68 #include <vm/vm.h>
69 #include <vm/vm_pageout.h>
70
71 #include <machine/md_var.h>
72
73 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
74 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
75 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
76                                                     unsigned alignment,
77                                                     bool map_and_fenceable,
78                                                     bool nonblocking);
79 static int i915_gem_phys_pwrite(struct drm_device *dev,
80                                 struct drm_i915_gem_object *obj,
81                                 struct drm_i915_gem_pwrite *args,
82                                 struct drm_file *file);
83
84 static void i915_gem_write_fence(struct drm_device *dev, int reg,
85                                  struct drm_i915_gem_object *obj);
86 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
87                                          struct drm_i915_fence_reg *fence,
88                                          bool enable);
89
90 static void i915_gem_inactive_shrink(void *);
91 static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
92 static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
93 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
94
95 static int i915_gem_object_get_pages_range(struct drm_i915_gem_object *obj,
96     off_t start, off_t end);
97
98 static vm_page_t i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex,
99     bool *fresh);
100
101 MALLOC_DEFINE(DRM_I915_GEM, "i915gem", "Allocations from i915 gem");
102 long i915_gem_wired_pages_cnt;
103
104 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
105 {
106         if (obj->tiling_mode)
107                 i915_gem_release_mmap(obj);
108
109         /* As we do not have an associated fence register, we will force
110          * a tiling change if we ever need to acquire one.
111          */
112         obj->fence_dirty = false;
113         obj->fence_reg = I915_FENCE_REG_NONE;
114 }
115
116 /* some bookkeeping */
117 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
118                                   size_t size)
119 {
120         dev_priv->mm.object_count++;
121         dev_priv->mm.object_memory += size;
122 }
123
124 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
125                                      size_t size)
126 {
127         dev_priv->mm.object_count--;
128         dev_priv->mm.object_memory -= size;
129 }
130
131 static int
132 i915_gem_wait_for_error(struct drm_device *dev)
133 {
134         struct drm_i915_private *dev_priv = dev->dev_private;
135         struct completion *x = &dev_priv->error_completion;
136         int ret;
137
138         if (!atomic_read(&dev_priv->mm.wedged))
139                 return 0;
140
141         /*
142          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
143          * userspace. If it takes that long something really bad is going on and
144          * we should simply try to bail out and fail as gracefully as possible.
145          */
146         ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
147         if (ret == 0) {
148                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
149                 return -EIO;
150         } else if (ret < 0) {
151                 return ret;
152         }
153
154         if (atomic_read(&dev_priv->mm.wedged)) {
155                 /* GPU is hung, bump the completion count to account for
156                  * the token we just consumed so that we never hit zero and
157                  * end up waiting upon a subsequent completion event that
158                  * will never happen.
159                  */
160                 mtx_lock(&x->lock);
161                 x->done++;
162                 mtx_unlock(&x->lock);
163         }
164         return 0;
165 }
166
167 int i915_mutex_lock_interruptible(struct drm_device *dev)
168 {
169         int ret;
170
171         ret = i915_gem_wait_for_error(dev);
172         if (ret)
173                 return ret;
174
175         /*
176          * The wait shall be interruptible, and it can be: dev_struct_lock is
177          * an sx lock, so sx_xlock_sig() aborts with an error if a signal arrives.
178          */
179         ret = sx_xlock_sig(&dev->dev_struct_lock);
180         if (ret)
181                 return -EINTR;
182
183         WARN_ON(i915_verify_lists(dev));
184         return 0;
185 }
186
187 static inline bool
188 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
189 {
190         return obj->gtt_space && !obj->active;
191 }
192
193 int
194 i915_gem_init_ioctl(struct drm_device *dev, void *data,
195                     struct drm_file *file)
196 {
197         struct drm_i915_gem_init *args = data;
198
199         if (drm_core_check_feature(dev, DRIVER_MODESET))
200                 return -ENODEV;
201
202         if (args->gtt_start >= args->gtt_end ||
203             (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
204                 return -EINVAL;
205
206         /* GEM with user mode setting was never supported on ilk and later. */
207         if (INTEL_INFO(dev)->gen >= 5)
208                 return -ENODEV;
209
210         /*
211          * XXXKIB. The second-time initialization should be guarded
212          * against.
213          */
214         DRM_LOCK(dev);
215         i915_gem_init_global_gtt(dev, args->gtt_start,
216                                  args->gtt_end, args->gtt_end);
217         DRM_UNLOCK(dev);
218
219         return 0;
220 }
221
222 int
223 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
224                             struct drm_file *file)
225 {
226         struct drm_i915_private *dev_priv = dev->dev_private;
227         struct drm_i915_gem_get_aperture *args = data;
228         struct drm_i915_gem_object *obj;
229         size_t pinned;
230
231         pinned = 0;
232         DRM_LOCK(dev);
233         list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
234                 if (obj->pin_count)
235                         pinned += obj->gtt_space->size;
236         DRM_UNLOCK(dev);
237
238         args->aper_size = dev_priv->mm.gtt_total;
239         args->aper_available_size = args->aper_size - pinned;
240
241         return 0;
242 }
243
244 static int
245 i915_gem_create(struct drm_file *file,
246                 struct drm_device *dev,
247                 uint64_t size,
248                 uint32_t *handle_p)
249 {
250         struct drm_i915_gem_object *obj;
251         int ret;
252         u32 handle;
253
254         size = roundup(size, PAGE_SIZE);
255         if (size == 0)
256                 return -EINVAL;
257
258         /* Allocate the new object */
259         obj = i915_gem_alloc_object(dev, size);
260         if (obj == NULL)
261                 return -ENOMEM;
262
263         ret = drm_gem_handle_create(file, &obj->base, &handle);
264         if (ret) {
265                 drm_gem_object_release(&obj->base);
266                 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
267                 free(obj, DRM_I915_GEM);
268                 return ret;
269         }
270
271         /* drop reference from allocate - handle holds it now */
272         drm_gem_object_unreference(&obj->base);
273         CTR2(KTR_DRM, "object_create %p %x", obj, size);
274
275         *handle_p = handle;
276         return 0;
277 }
278
279 int
280 i915_gem_dumb_create(struct drm_file *file,
281                      struct drm_device *dev,
282                      struct drm_mode_create_dumb *args)
283 {
284         /* have to work out size/pitch and return them */
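        /*
         * Illustrative example with hypothetical values (not taken from the
         * original source): for width = 1366 and bpp = 32, the bytes per pixel
         * are (32 + 7) / 8 = 4, the raw row size is 1366 * 4 = 5464 bytes, and
         * roundup2(5464, 64) = 5504, so pitch = 5504 and size = 5504 * height.
         */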
285         args->pitch = roundup2(args->width * ((args->bpp + 7) / 8), 64);
286         args->size = args->pitch * args->height;
287         return i915_gem_create(file, dev,
288                                args->size, &args->handle);
289 }
290
291 int i915_gem_dumb_destroy(struct drm_file *file,
292                           struct drm_device *dev,
293                           uint32_t handle)
294 {
295         return drm_gem_handle_delete(file, handle);
296 }
297
298 /**
299  * Creates a new mm object and returns a handle to it.
300  */
301 int
302 i915_gem_create_ioctl(struct drm_device *dev, void *data,
303                       struct drm_file *file)
304 {
305         struct drm_i915_gem_create *args = data;
306
307         return i915_gem_create(file, dev,
308                                args->size, &args->handle);
309 }
310
311 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
312 {
313         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
314
315         return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
316                 obj->tiling_mode != I915_TILING_NONE;
317 }
318
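/*
 * A note on the swizzled copy helpers below: XORing gpu_offset with 64 flips
 * bit 6 of the offset, which swaps each 64-byte cacheline with its partner in
 * the same 128-byte pair.  Clamping every chunk at the next cacheline boundary
 * (roundup2(gpu_offset + 1, 64)) keeps each copy within a single cacheline, so
 * one swizzled offset is valid for the whole chunk.
 */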
319 static inline int
320 __copy_to_user_swizzled(char __user *cpu_vaddr,
321                         const char *gpu_vaddr, int gpu_offset,
322                         int length)
323 {
324         int ret, cpu_offset = 0;
325
326         while (length > 0) {
327                 int cacheline_end = roundup2(gpu_offset + 1, 64);
328                 int this_length = min(cacheline_end - gpu_offset, length);
329                 int swizzled_gpu_offset = gpu_offset ^ 64;
330
331                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
332                                      gpu_vaddr + swizzled_gpu_offset,
333                                      this_length);
334                 if (ret)
335                         return ret + length;
336
337                 cpu_offset += this_length;
338                 gpu_offset += this_length;
339                 length -= this_length;
340         }
341
342         return 0;
343 }
344
345 static inline int
346 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
347                           const char __user *cpu_vaddr,
348                           int length)
349 {
350         int ret, cpu_offset = 0;
351
352         while (length > 0) {
353                 int cacheline_end = roundup2(gpu_offset + 1, 64);
354                 int this_length = min(cacheline_end - gpu_offset, length);
355                 int swizzled_gpu_offset = gpu_offset ^ 64;
356
357                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
358                                        cpu_vaddr + cpu_offset,
359                                        this_length);
360                 if (ret)
361                         return ret + length;
362
363                 cpu_offset += this_length;
364                 gpu_offset += this_length;
365                 length -= this_length;
366         }
367
368         return 0;
369 }
370
371 /* Per-page copy function for the shmem pread fastpath.
372  * Flushes invalid cachelines before reading the target if
373  * needs_clflush is set. */
374 static int
375 shmem_pread_fast(vm_page_t page, int shmem_page_offset, int page_length,
376                  char __user *user_data,
377                  bool page_do_bit17_swizzling, bool needs_clflush)
378 {
379         char *vaddr;
380         struct sf_buf *sf;
381         int ret;
382
383         if (unlikely(page_do_bit17_swizzling))
384                 return -EINVAL;
385
386         sched_pin();
387         sf = sf_buf_alloc(page, SFB_NOWAIT | SFB_CPUPRIVATE);
388         if (sf == NULL) {
389                 sched_unpin();
390                 return (-EFAULT);
391         }
392         vaddr = (char *)sf_buf_kva(sf);
393         if (needs_clflush)
394                 drm_clflush_virt_range(vaddr + shmem_page_offset,
395                                        page_length);
396         ret = __copy_to_user_inatomic(user_data,
397                                       vaddr + shmem_page_offset,
398                                       page_length);
399         sf_buf_free(sf);
400         sched_unpin();
401
402         return ret ? -EFAULT : 0;
403 }
404
405 static void
406 shmem_clflush_swizzled_range(char *addr, unsigned long length,
407                              bool swizzled)
408 {
409         if (unlikely(swizzled)) {
410                 unsigned long start = (unsigned long) addr;
411                 unsigned long end = (unsigned long) addr + length;
412
413                 /* For swizzling simply ensure that we always flush both
414                  * channels. Lame, but simple and it works. Swizzled
415                  * pwrite/pread is far from a hotpath - current userspace
416                  * doesn't use it at all. */
417                 start = round_down(start, 128);
418                 end = round_up(end, 128);
419
420                 drm_clflush_virt_range((void *)start, end - start);
421         } else {
422                 drm_clflush_virt_range(addr, length);
423         }
424
425 }
426
427 /* The only difference from the fast-path function is that this one can handle
428  * bit17 swizzling and uses non-atomic copy and kmap functions. */
429 static int
430 shmem_pread_slow(vm_page_t page, int shmem_page_offset, int page_length,
431                  char __user *user_data,
432                  bool page_do_bit17_swizzling, bool needs_clflush)
433 {
434         char *vaddr;
435         struct sf_buf *sf;
436         int ret;
437
438         sf = sf_buf_alloc(page, 0);
439         vaddr = (char *)sf_buf_kva(sf);
440         if (needs_clflush)
441                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
442                                              page_length,
443                                              page_do_bit17_swizzling);
444
445         if (page_do_bit17_swizzling)
446                 ret = __copy_to_user_swizzled(user_data,
447                                               vaddr, shmem_page_offset,
448                                               page_length);
449         else
450                 ret = __copy_to_user(user_data,
451                                      vaddr + shmem_page_offset,
452                                      page_length);
453         sf_buf_free(sf);
454
455         return ret ? -EFAULT : 0;
456 }
457
458 static int
459 i915_gem_shmem_pread(struct drm_device *dev,
460                      struct drm_i915_gem_object *obj,
461                      struct drm_i915_gem_pread *args,
462                      struct drm_file *file)
463 {
464         char __user *user_data;
465         ssize_t remain;
466         off_t offset;
467         int shmem_page_offset, page_length, ret = 0;
468         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
469         int hit_slowpath = 0;
470         int prefaulted = 0;
471         int needs_clflush = 0;
472
473         user_data = to_user_ptr(args->data_ptr);
474         remain = args->size;
475
476         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
477
478         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
479                 /* If we're not in the cpu read domain, set ourself into the gtt
480                  * read domain and manually flush cachelines (if required). This
481                  * optimizes for the case when the gpu will dirty the data
482                  * anyway again before the next pread happens. */
483                 if (obj->cache_level == I915_CACHE_NONE)
484                         needs_clflush = 1;
485                 if (obj->gtt_space) {
486                         ret = i915_gem_object_set_to_gtt_domain(obj, false);
487                         if (ret)
488                                 return ret;
489                 }
490         }
491
492         ret = i915_gem_object_get_pages(obj);
493         if (ret)
494                 return ret;
495
496         i915_gem_object_pin_pages(obj);
497
498         offset = args->offset;
499
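        /*
         * The VM object lock is held only while stepping to the next resident
         * page; it is dropped before each copy because copying to user space
         * may fault and sleep.  The backing pages were populated and pinned
         * above (i915_gem_object_get_pages() / i915_gem_object_pin_pages()),
         * which keeps them from being released while the lock is dropped.
         */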
500         VM_OBJECT_WLOCK(obj->base.vm_obj);
501         for (vm_page_t page = vm_page_find_least(obj->base.vm_obj,
502             OFF_TO_IDX(offset));; page = vm_page_next(page)) {
503                 VM_OBJECT_WUNLOCK(obj->base.vm_obj);
504
505                 if (remain <= 0)
506                         break;
507
508                 /* Operation in this page
509                  *
510                  * shmem_page_offset = offset within page in shmem file
511                  * page_length = bytes to copy for this page
512                  */
513                 shmem_page_offset = offset_in_page(offset);
514                 page_length = remain;
515                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
516                         page_length = PAGE_SIZE - shmem_page_offset;
517
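                /*
                 * Whether this particular page needs the bit-17 swizzle fixup
                 * depends on bit 17 of the page's physical address, so the
                 * check has to be redone for every page rather than once per
                 * object.
                 */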
518                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
519                         (page_to_phys(page) & (1 << 17)) != 0;
520
521                 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
522                                        user_data, page_do_bit17_swizzling,
523                                        needs_clflush);
524                 if (ret == 0)
525                         goto next_page;
526
527                 hit_slowpath = 1;
528                 DRM_UNLOCK(dev);
529
530                 if (!prefaulted) {
531                         ret = fault_in_multipages_writeable(user_data, remain);
532                         /* Userspace is tricking us, but we've already clobbered
533                          * its pages with the prefault and promised to write the
534                          * data up to the first fault. Hence ignore any errors
535                          * and just continue. */
536                         (void)ret;
537                         prefaulted = 1;
538                 }
539
540                 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
541                                        user_data, page_do_bit17_swizzling,
542                                        needs_clflush);
543
544                 DRM_LOCK(dev);
545
546 next_page:
547                 vm_page_reference(page);
548
549                 if (ret)
550                         goto out;
551
552                 remain -= page_length;
553                 user_data += page_length;
554                 offset += page_length;
555                 VM_OBJECT_WLOCK(obj->base.vm_obj);
556         }
557
558 out:
559         i915_gem_object_unpin_pages(obj);
560
561         if (hit_slowpath) {
562                 /* Fixup: Kill any reinstated backing storage pages */
563                 if (obj->madv == __I915_MADV_PURGED)
564                         i915_gem_object_truncate(obj);
565         }
566
567         return ret;
568 }
569
570 /**
571  * Reads data from the object referenced by handle.
572  *
573  * On error, the contents of *data are undefined.
574  */
575 int
576 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
577                      struct drm_file *file)
578 {
579         struct drm_i915_gem_pread *args = data;
580         struct drm_i915_gem_object *obj;
581         int ret = 0;
582
583         if (args->size == 0)
584                 return 0;
585
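        /*
         * pread copies data out to the user buffer, so the buffer must be
         * writable in the caller's address space; useracc() checks this up
         * front so we can fail with -EFAULT before taking the device lock.
         */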
586         if (!useracc(to_user_ptr(args->data_ptr), args->size, VM_PROT_WRITE))
587                 return -EFAULT;
588
589         ret = i915_mutex_lock_interruptible(dev);
590         if (ret)
591                 return ret;
592
593         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
594         if (&obj->base == NULL) {
595                 ret = -ENOENT;
596                 goto unlock;
597         }
598
599         /* Bounds check source.  */
600         if (args->offset > obj->base.size ||
601             args->size > obj->base.size - args->offset) {
602                 ret = -EINVAL;
603                 goto out;
604         }
605
606 #ifdef FREEBSD_WIP
607         /* prime objects have no backing filp to GEM pread/pwrite
608          * pages from.
609          */
610         if (!obj->base.filp) {
611                 ret = -EINVAL;
612                 goto out;
613         }
614 #endif /* FREEBSD_WIP */
615
616         CTR3(KTR_DRM, "pread %p %jx %jx", obj, args->offset, args->size);
617
618         ret = i915_gem_shmem_pread(dev, obj, args, file);
619
620 out:
621         drm_gem_object_unreference(&obj->base);
622 unlock:
623         DRM_UNLOCK(dev);
624         return ret;
625 }
626
627 /* This is the fast write path which cannot handle
628  * page faults in the source data
629  */
630
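/*
 * fast_user_write() maps the relevant aperture range write-combining with
 * pmap_mapdev_attr() just for this copy, performs an atomic (non-sleeping)
 * uncached copy from user space, and unmaps the range again.  A non-zero
 * return means the copy could not complete; the caller turns that into
 * -EFAULT and falls back to the shmem pwrite path.
 */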
631 static inline int
632 fast_user_write(vm_paddr_t mapping_addr,
633                 off_t page_base, int page_offset,
634                 char __user *user_data,
635                 int length)
636 {
637         void __iomem *vaddr_atomic;
638         void *vaddr;
639         unsigned long unwritten;
640
641         vaddr_atomic = pmap_mapdev_attr(mapping_addr + page_base,
642             length, PAT_WRITE_COMBINING);
643         /* We can use the cpu mem copy function because this is X86. */
644         vaddr = (char __force*)vaddr_atomic + page_offset;
645         unwritten = __copy_from_user_inatomic_nocache(vaddr,
646                                                       user_data, length);
647         pmap_unmapdev((vm_offset_t)vaddr_atomic, length);
648         return unwritten;
649 }
650
651 /**
652  * This is the fast pwrite path, where we copy the data directly from the
653  * user into the GTT, uncached.
654  */
655 static int
656 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
657                          struct drm_i915_gem_object *obj,
658                          struct drm_i915_gem_pwrite *args,
659                          struct drm_file *file)
660 {
661         drm_i915_private_t *dev_priv = dev->dev_private;
662         ssize_t remain;
663         off_t offset, page_base;
664         char __user *user_data;
665         int page_offset, page_length, ret;
666
667         ret = i915_gem_object_pin(obj, 0, true, true);
668         if (ret)
669                 goto out;
670
671         ret = i915_gem_object_set_to_gtt_domain(obj, true);
672         if (ret)
673                 goto out_unpin;
674
675         ret = i915_gem_object_put_fence(obj);
676         if (ret)
677                 goto out_unpin;
678
679         user_data = to_user_ptr(args->data_ptr);
680         remain = args->size;
681
682         offset = obj->gtt_offset + args->offset;
683
684         while (remain > 0) {
685                 /* Operation in this page
686                  *
687                  * page_base = page offset within aperture
688                  * page_offset = offset within page
689                  * page_length = bytes to copy for this page
690                  */
691                 page_base = offset & ~PAGE_MASK;
692                 page_offset = offset_in_page(offset);
693                 page_length = remain;
694                 if ((page_offset + remain) > PAGE_SIZE)
695                         page_length = PAGE_SIZE - page_offset;
696
697                 /* If we get a fault while copying data, then (presumably) our
698                  * source page isn't available.  Return the error and we'll
699                  * retry in the slow path.
700                  */
701                 if (fast_user_write(dev_priv->mm.gtt_base_addr, page_base,
702                                     page_offset, user_data, page_length)) {
703                         ret = -EFAULT;
704                         goto out_unpin;
705                 }
706
707                 remain -= page_length;
708                 user_data += page_length;
709                 offset += page_length;
710         }
711
712 out_unpin:
713         i915_gem_object_unpin(obj);
714 out:
715         return ret;
716 }
717
718 /* Per-page copy function for the shmem pwrite fastpath.
719  * Flushes invalid cachelines before writing to the target if
720  * needs_clflush_before is set and flushes out any written cachelines after
721  * writing if needs_clflush is set. */
722 static int
723 shmem_pwrite_fast(vm_page_t page, int shmem_page_offset, int page_length,
724                   char __user *user_data,
725                   bool page_do_bit17_swizzling,
726                   bool needs_clflush_before,
727                   bool needs_clflush_after)
728 {
729         char *vaddr;
730         struct sf_buf *sf;
731         int ret;
732
733         if (unlikely(page_do_bit17_swizzling))
734                 return -EINVAL;
735
736         sched_pin();
737         sf = sf_buf_alloc(page, SFB_NOWAIT | SFB_CPUPRIVATE);
738         if (sf == NULL) {
739                 sched_unpin();
740                 return (-EFAULT);
741         }
742         vaddr = (char *)sf_buf_kva(sf);
743         if (needs_clflush_before)
744                 drm_clflush_virt_range(vaddr + shmem_page_offset,
745                                        page_length);
746         ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
747                                                 user_data,
748                                                 page_length);
749         if (needs_clflush_after)
750                 drm_clflush_virt_range(vaddr + shmem_page_offset,
751                                        page_length);
752         sf_buf_free(sf);
753         sched_unpin();
754
755         return ret ? -EFAULT : 0;
756 }
757
758 /* The only difference from the fast-path function is that this one can handle
759  * bit17 swizzling and uses non-atomic copy and kmap functions. */
760 static int
761 shmem_pwrite_slow(vm_page_t page, int shmem_page_offset, int page_length,
762                   char __user *user_data,
763                   bool page_do_bit17_swizzling,
764                   bool needs_clflush_before,
765                   bool needs_clflush_after)
766 {
767         char *vaddr;
768         struct sf_buf *sf;
769         int ret;
770
771         sf = sf_buf_alloc(page, 0);
772         vaddr = (char *)sf_buf_kva(sf);
773         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
774                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
775                                              page_length,
776                                              page_do_bit17_swizzling);
777         if (page_do_bit17_swizzling)
778                 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
779                                                 user_data,
780                                                 page_length);
781         else
782                 ret = __copy_from_user(vaddr + shmem_page_offset,
783                                        user_data,
784                                        page_length);
785         if (needs_clflush_after)
786                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
787                                              page_length,
788                                              page_do_bit17_swizzling);
789         sf_buf_free(sf);
790
791         return ret ? -EFAULT : 0;
792 }
793
794 static int
795 i915_gem_shmem_pwrite(struct drm_device *dev,
796                       struct drm_i915_gem_object *obj,
797                       struct drm_i915_gem_pwrite *args,
798                       struct drm_file *file)
799 {
800         ssize_t remain;
801         off_t offset;
802         char __user *user_data;
803         int shmem_page_offset, page_length, ret = 0;
804         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
805         int hit_slowpath = 0;
806         int needs_clflush_after = 0;
807         int needs_clflush_before = 0;
808
809         user_data = to_user_ptr(args->data_ptr);
810         remain = args->size;
811
812         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
813
814         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
815                 /* If we're not in the cpu write domain, set ourself into the gtt
816                  * write domain and manually flush cachelines (if required). This
817                  * optimizes for the case when the gpu will use the data
818                  * right away and we therefore have to clflush anyway. */
819                 if (obj->cache_level == I915_CACHE_NONE)
820                         needs_clflush_after = 1;
821                 if (obj->gtt_space) {
822                         ret = i915_gem_object_set_to_gtt_domain(obj, true);
823                         if (ret)
824                                 return ret;
825                 }
826         }
827         /* The same trick applies to invalidating partially written cachelines
828          * before writing. */
829         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
830             && obj->cache_level == I915_CACHE_NONE)
831                 needs_clflush_before = 1;
832
833         ret = i915_gem_object_get_pages(obj);
834         if (ret)
835                 return ret;
836
837         i915_gem_object_pin_pages(obj);
838
839         offset = args->offset;
840         obj->dirty = 1;
841
842         VM_OBJECT_WLOCK(obj->base.vm_obj);
843         for (vm_page_t page = vm_page_find_least(obj->base.vm_obj,
844             OFF_TO_IDX(offset));; page = vm_page_next(page)) {
845                 VM_OBJECT_WUNLOCK(obj->base.vm_obj);
846                 int partial_cacheline_write;
847
848                 if (remain <= 0)
849                         break;
850
851                 /* Operation in this page
852                  *
853                  * shmem_page_offset = offset within page in shmem file
854                  * page_length = bytes to copy for this page
855                  */
856                 shmem_page_offset = offset_in_page(offset);
857
858                 page_length = remain;
859                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
860                         page_length = PAGE_SIZE - shmem_page_offset;
861
862                 /* If we don't overwrite a cacheline completely we need to be
863                  * careful to have up-to-date data by first clflushing. Don't
864                  * overcomplicate things and flush the entire range being written. */
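                /*
                 * (shmem_page_offset | page_length) has a bit set below the
                 * cacheline size whenever the start offset or the length is
                 * not cacheline aligned, i.e. the write only partially covers
                 * a cacheline at one end or the other.
                 */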
865                 partial_cacheline_write = needs_clflush_before &&
866                         ((shmem_page_offset | page_length)
867                                 & (cpu_clflush_line_size - 1));
868
869                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
870                         (page_to_phys(page) & (1 << 17)) != 0;
871
872                 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
873                                         user_data, page_do_bit17_swizzling,
874                                         partial_cacheline_write,
875                                         needs_clflush_after);
876                 if (ret == 0)
877                         goto next_page;
878
879                 hit_slowpath = 1;
880                 DRM_UNLOCK(dev);
881                 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
882                                         user_data, page_do_bit17_swizzling,
883                                         partial_cacheline_write,
884                                         needs_clflush_after);
885
886                 DRM_LOCK(dev);
887
888 next_page:
889                 vm_page_dirty(page);
890                 vm_page_reference(page);
891
892                 if (ret)
893                         goto out;
894
895                 remain -= page_length;
896                 user_data += page_length;
897                 offset += page_length;
898                 VM_OBJECT_WLOCK(obj->base.vm_obj);
899         }
900
901 out:
902         i915_gem_object_unpin_pages(obj);
903
904         if (hit_slowpath) {
905                 /* Fixup: Kill any reinstated backing storage pages */
906                 if (obj->madv == __I915_MADV_PURGED)
907                         i915_gem_object_truncate(obj);
908                 /* and flush dirty cachelines in case the object isn't in the cpu write
909                  * domain anymore. */
910                 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
911                         i915_gem_clflush_object(obj);
912                         i915_gem_chipset_flush(dev);
913                 }
914         }
915
916         if (needs_clflush_after)
917                 i915_gem_chipset_flush(dev);
918
919         return ret;
920 }
921
922 /**
923  * Writes data to the object referenced by handle.
924  *
925  * On error, the contents of the buffer that were to be modified are undefined.
926  */
927 int
928 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
929                       struct drm_file *file)
930 {
931         struct drm_i915_gem_pwrite *args = data;
932         struct drm_i915_gem_object *obj;
933         int ret;
934
935         if (args->size == 0)
936                 return 0;
937
938         if (!useracc(to_user_ptr(args->data_ptr), args->size, VM_PROT_READ))
939                 return -EFAULT;
940
941         ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
942                                            args->size);
943         if (ret)
944                 return -EFAULT;
945
946         ret = i915_mutex_lock_interruptible(dev);
947         if (ret)
948                 return ret;
949
950         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
951         if (&obj->base == NULL) {
952                 ret = -ENOENT;
953                 goto unlock;
954         }
955
956         /* Bounds check destination. */
957         if (args->offset > obj->base.size ||
958             args->size > obj->base.size - args->offset) {
959                 ret = -EINVAL;
960                 goto out;
961         }
962
963 #ifdef FREEBSD_WIP
964         /* prime objects have no backing filp to GEM pread/pwrite
965          * pages from.
966          */
967         if (!obj->base.filp) {
968                 ret = -EINVAL;
969                 goto out;
970         }
971 #endif /* FREEBSD_WIP */
972
973         CTR3(KTR_DRM, "pwrite %p %jx %jx", obj, args->offset, args->size);
974
975         ret = -EFAULT;
976         /* We can only do the GTT pwrite on untiled buffers, as otherwise
977          * it would end up going through the fenced access, and we'll get
978          * different detiling behavior between reading and writing.
979          * pread/pwrite currently are reading and writing from the CPU
980          * perspective, requiring manual detiling by the client.
981          */
982         if (obj->phys_obj) {
983                 ret = i915_gem_phys_pwrite(dev, obj, args, file);
984                 goto out;
985         }
986
987         if (obj->cache_level == I915_CACHE_NONE &&
988             obj->tiling_mode == I915_TILING_NONE &&
989             obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
990                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
991                 /* Note that the gtt paths might fail with non-page-backed user
992                  * pointers (e.g. gtt mappings when moving data between
993                  * textures). Fallback to the shmem path in that case. */
994         }
995
996         if (ret == -EFAULT || ret == -ENOSPC)
997                 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
998
999 out:
1000         drm_gem_object_unreference(&obj->base);
1001 unlock:
1002         DRM_UNLOCK(dev);
1003         return ret;
1004 }
1005
1006 int
1007 i915_gem_check_wedge(struct drm_i915_private *dev_priv,
1008                      bool interruptible)
1009 {
1010         if (atomic_read(&dev_priv->mm.wedged)) {
1011                 struct completion *x = &dev_priv->error_completion;
1012                 bool recovery_complete;
1013
1014                 /* Give the error handler a chance to run. */
1015                 mtx_lock(&x->lock);
1016                 recovery_complete = x->done > 0;
1017                 mtx_unlock(&x->lock);
1018
1019                 /* Non-interruptible callers can't handle -EAGAIN, hence return
1020                  * -EIO unconditionally for these. */
1021                 if (!interruptible)
1022                         return -EIO;
1023
1024                 /* Recovery complete, but still wedged means reset failure. */
1025                 if (recovery_complete)
1026                         return -EIO;
1027
1028                 return -EAGAIN;
1029         }
1030
1031         return 0;
1032 }
1033
1034 /*
1035  * Compare seqno against outstanding lazy request. Emit a request if they are
1036  * equal.
1037  */
1038 static int
1039 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
1040 {
1041         int ret;
1042
1043         DRM_LOCK_ASSERT(ring->dev);
1044
1045         ret = 0;
1046         if (seqno == ring->outstanding_lazy_request)
1047                 ret = i915_add_request(ring, NULL, NULL);
1048
1049         return ret;
1050 }
1051
1052 /**
1053  * __wait_seqno - wait until execution of seqno has finished
1054  * @ring: the ring expected to report seqno
1055  * @seqno: the sequence number to wait for
1056  * @interruptible: do an interruptible wait (normally yes)
1057  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1058  *
1059  * Returns 0 if the seqno was found within the allotted time. Else returns the
1060  * errno with remaining time filled in timeout argument.
1061  */
1062 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1063                         bool interruptible, struct timespec *timeout)
1064 {
1065         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1066         struct timespec before, now, wait_time={1,0};
1067         sbintime_t timeout_sbt;
1068         long end;
1069         bool wait_forever = true;
1070         int ret, flags;
1071
1072         if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1073                 return 0;
1074
1075         CTR2(KTR_DRM, "request_wait_begin %s %d", ring->name, seqno);
1076
1077         if (timeout != NULL) {
1078                 wait_time = *timeout;
1079                 wait_forever = false;
1080         }
1081
1082         timeout_sbt = tstosbt(wait_time);
1083
1084         if (WARN_ON(!ring->irq_get(ring)))
1085                 return -ENODEV;
1086
1087         /* Record current time in case interrupted by signal, or wedged */
1088         getrawmonotonic(&before);
1089
1090 #define EXIT_COND \
1091         (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
1092         atomic_read(&dev_priv->mm.wedged))
1093         flags = interruptible ? PCATCH : 0;
1094         mtx_lock(&dev_priv->irq_lock);
1095         do {
1096                 if (EXIT_COND) {
1097                         end = 1;
1098                 } else {
1099                         ret = -msleep_sbt(&ring->irq_queue, &dev_priv->irq_lock, flags,
1100                             "915gwr", timeout_sbt, 0, 0);
1101
1102                         /*
1103                          * NOTE Linux<->FreeBSD: Convert msleep_sbt() return
1104                          * value to something close to wait_event*_timeout()
1105                          * functions used on Linux.
1106                          *
1107                          * >0 -> condition is true (end = time remaining)
1108                          * =0 -> sleep timed out
1109                          * <0 -> error (interrupted)
1110                          *
1111                          * We fake the remaining time by returning 1. We
1112                          * compute a proper value later.
1113                          */
1114                         if (EXIT_COND)
1115                                 /* We fake a remaining time of 1 tick. */
1116                                 end = 1;
1117                         else if (ret == -EINTR || ret == -ERESTART)
1118                                 /* Interrupted. */
1119                                 end = -ERESTARTSYS;
1120                         else
1121                                 /* Timeout. */
1122                                 end = 0;
1123                 }
1124
1125                 ret = i915_gem_check_wedge(dev_priv, interruptible);
1126                 if (ret)
1127                         end = ret;
1128         } while (end == 0 && wait_forever);
1129         mtx_unlock(&dev_priv->irq_lock);
1130
1131         getrawmonotonic(&now);
1132
1133         ring->irq_put(ring);
1134         CTR3(KTR_DRM, "request_wait_end %s %d %d", ring->name, seqno, end);
1135 #undef EXIT_COND
1136
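        /*
         * timespecsub() here is the two-argument form (a -= b): 'now' becomes
         * the elapsed wait time, and *timeout is reduced by it so the caller
         * gets back the time remaining.
         */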
1137         if (timeout) {
1138                 timespecsub(&now, &before);
1139                 timespecsub(timeout, &now);
1140         }
1141
1142         switch (end) {
1143         case -EIO:
1144         case -EAGAIN: /* Wedged */
1145         case -ERESTARTSYS: /* Signal */
1146         case -ETIMEDOUT: /* Timeout */
1147                 return (int)end;
1148         case 0: /* Timeout */
1149                 return -ETIMEDOUT;
1150         default: /* Completed */
1151                 WARN_ON(end < 0); /* We're not aware of other errors */
1152                 return 0;
1153         }
1154 }
1155
1156 /**
1157  * Waits for a sequence number to be signaled, and cleans up the
1158  * request and object lists appropriately for that event.
1159  */
1160 int
1161 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1162 {
1163         struct drm_device *dev = ring->dev;
1164         struct drm_i915_private *dev_priv = dev->dev_private;
1165         bool interruptible = dev_priv->mm.interruptible;
1166         int ret;
1167
1168         DRM_LOCK_ASSERT(dev);
1169         BUG_ON(seqno == 0);
1170
1171         ret = i915_gem_check_wedge(dev_priv, interruptible);
1172         if (ret)
1173                 return ret;
1174
1175         ret = i915_gem_check_olr(ring, seqno);
1176         if (ret)
1177                 return ret;
1178
1179         return __wait_seqno(ring, seqno, interruptible, NULL);
1180 }
1181
1182 /**
1183  * Ensures that all rendering to the object has completed and the object is
1184  * safe to unbind from the GTT or access from the CPU.
1185  */
1186 static __must_check int
1187 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1188                                bool readonly)
1189 {
1190         struct intel_ring_buffer *ring = obj->ring;
1191         u32 seqno;
1192         int ret;
1193
1194         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1195         if (seqno == 0)
1196                 return 0;
1197
1198         ret = i915_wait_seqno(ring, seqno);
1199         if (ret)
1200                 return ret;
1201
1202         i915_gem_retire_requests_ring(ring);
1203
1204         /* Manually manage the write flush as we may have not yet
1205          * retired the buffer.
1206          */
1207         if (obj->last_write_seqno &&
1208             i915_seqno_passed(seqno, obj->last_write_seqno)) {
1209                 obj->last_write_seqno = 0;
1210                 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1211         }
1212
1213         return 0;
1214 }
1215
1216 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1217  * as the object state may change during this call.
1218  */
1219 static __must_check int
1220 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1221                                             bool readonly)
1222 {
1223         struct drm_device *dev = obj->base.dev;
1224         struct drm_i915_private *dev_priv = dev->dev_private;
1225         struct intel_ring_buffer *ring = obj->ring;
1226         u32 seqno;
1227         int ret;
1228
1229         DRM_LOCK_ASSERT(dev);
1230         BUG_ON(!dev_priv->mm.interruptible);
1231
1232         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1233         if (seqno == 0)
1234                 return 0;
1235
1236         ret = i915_gem_check_wedge(dev_priv, true);
1237         if (ret)
1238                 return ret;
1239
1240         ret = i915_gem_check_olr(ring, seqno);
1241         if (ret)
1242                 return ret;
1243
1244         DRM_UNLOCK(dev);
1245         ret = __wait_seqno(ring, seqno, true, NULL);
1246         DRM_LOCK(dev);
1247
1248         i915_gem_retire_requests_ring(ring);
1249
1250         /* Manually manage the write flush as we may have not yet
1251          * retired the buffer.
1252          */
1253         if (ret == 0 &&
1254             obj->last_write_seqno &&
1255             i915_seqno_passed(seqno, obj->last_write_seqno)) {
1256                 obj->last_write_seqno = 0;
1257                 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1258         }
1259
1260         return ret;
1261 }
1262
1263 /**
1264  * Called when user space prepares to use an object with the CPU, either
1265  * through the mmap ioctl's mapping or a GTT mapping.
1266  */
1267 int
1268 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1269                           struct drm_file *file)
1270 {
1271         struct drm_i915_gem_set_domain *args = data;
1272         struct drm_i915_gem_object *obj;
1273         uint32_t read_domains = args->read_domains;
1274         uint32_t write_domain = args->write_domain;
1275         int ret;
1276
1277         /* Only handle setting domains to types used by the CPU. */
1278         if (write_domain & I915_GEM_GPU_DOMAINS)
1279                 return -EINVAL;
1280
1281         if (read_domains & I915_GEM_GPU_DOMAINS)
1282                 return -EINVAL;
1283
1284         /* Having something in the write domain implies it's in the read
1285          * domain, and only that read domain.  Enforce that in the request.
1286          */
1287         if (write_domain != 0 && read_domains != write_domain)
1288                 return -EINVAL;
1289
1290         ret = i915_mutex_lock_interruptible(dev);
1291         if (ret)
1292                 return ret;
1293
1294         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1295         if (&obj->base == NULL) {
1296                 ret = -ENOENT;
1297                 goto unlock;
1298         }
1299
1300         /* Try to flush the object off the GPU without holding the lock.
1301          * We will repeat the flush holding the lock in the normal manner
1302          * to catch cases where we are gazumped.
1303          */
1304         ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
1305         if (ret)
1306                 goto unref;
1307
1308         if (read_domains & I915_GEM_DOMAIN_GTT) {
1309                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1310
1311                 /* Silently promote "you're not bound, there was nothing to do"
1312                  * to success, since the client was just asking us to
1313                  * make sure everything was done.
1314                  */
1315                 if (ret == -EINVAL)
1316                         ret = 0;
1317         } else {
1318                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1319         }
1320
1321 unref:
1322         drm_gem_object_unreference(&obj->base);
1323 unlock:
1324         DRM_UNLOCK(dev);
1325         return ret;
1326 }
1327
1328 /**
1329  * Called when user space has done writes to this buffer
1330  */
1331 int
1332 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1333                          struct drm_file *file)
1334 {
1335         struct drm_i915_gem_sw_finish *args = data;
1336         struct drm_i915_gem_object *obj;
1337         int ret = 0;
1338
1339         ret = i915_mutex_lock_interruptible(dev);
1340         if (ret)
1341                 return ret;
1342
1343         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1344         if (&obj->base == NULL) {
1345                 ret = -ENOENT;
1346                 goto unlock;
1347         }
1348
1349         /* Pinned buffers may be scanout, so flush the cache */
1350         if (obj->pin_count)
1351                 i915_gem_object_flush_cpu_write_domain(obj);
1352
1353         drm_gem_object_unreference(&obj->base);
1354 unlock:
1355         DRM_UNLOCK(dev);
1356         return ret;
1357 }
1358
1359 /**
1360  * Maps the contents of an object, returning the address it is mapped
1361  * into.
1362  *
1363  * While the mapping holds a reference on the contents of the object, it doesn't
1364  * imply a ref on the object itself.
1365  */
1366 int
1367 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1368                     struct drm_file *file)
1369 {
1370         struct drm_i915_gem_mmap *args = data;
1371         struct drm_gem_object *obj;
1372         struct proc *p;
1373         vm_map_t map;
1374         vm_offset_t addr;
1375         vm_size_t size;
1376         int error, rv;
1377
1378         obj = drm_gem_object_lookup(dev, file, args->handle);
1379         if (obj == NULL)
1380                 return -ENOENT;
1381
1382 #ifdef FREEBSD_WIP
1383         /* prime objects have no backing filp to GEM mmap
1384          * pages from.
1385          */
1386         if (!obj->filp) {
1387                 drm_gem_object_unreference_unlocked(obj);
1388                 return -EINVAL;
1389         }
1390 #endif /* FREEBSD_WIP */
1391
1392         error = 0;
1393         if (args->size == 0)
1394                 goto out;
1395         p = curproc;
1396         map = &p->p_vmspace->vm_map;
1397         size = round_page(args->size);
1398         PROC_LOCK(p);
1399         if (map->size + size > lim_cur_proc(p, RLIMIT_VMEM)) {
1400                 PROC_UNLOCK(p);
1401                 error = -ENOMEM;
1402                 goto out;
1403         }
1404         PROC_UNLOCK(p);
1405
1406         addr = 0;
1407         vm_object_reference(obj->vm_obj);
1408         rv = vm_map_find(map, obj->vm_obj, args->offset, &addr, args->size, 0,
1409             VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1410             VM_PROT_READ | VM_PROT_WRITE, MAP_INHERIT_SHARE);
1411         if (rv != KERN_SUCCESS) {
1412                 vm_object_deallocate(obj->vm_obj);
1413                 error = -vm_mmap_to_errno(rv);
1414         } else {
1415                 args->addr_ptr = (uint64_t)addr;
1416         }
1417 out:
1418         drm_gem_object_unreference_unlocked(obj);
1419         return (error);
1420 }
1421
1422 static int
1423 i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
1424     vm_ooffset_t foff, struct ucred *cred, u_short *color)
1425 {
1426
1427         /*
1428          * NOTE Linux<->FreeBSD: drm_gem_mmap_single() takes care of
1429          * calling drm_gem_object_reference(). That's why we don't
1430          * do this here. i915_gem_pager_dtor(), below, will call
1431          * drm_gem_object_unreference().
1432          *
1433          * On Linux, drm_gem_vm_open() references the object because
1434          * it's called when the mapping is copied. drm_gem_vm_open() is not
1435          * called when the mapping is created. So the possible sequences
1436          * are:
1437          *     1. drm_gem_mmap():     ref++
1438          *     2. drm_gem_vm_close(): ref--
1439          *
1440          *     1. drm_gem_mmap():     ref++
1441          *     2. drm_gem_vm_open():  ref++ (for the copied vma)
1442          *     3. drm_gem_vm_close(): ref-- (for the copied vma)
1443          *     4. drm_gem_vm_close(): ref-- (for the initial vma)
1444          *
1445          * On FreeBSD, i915_gem_pager_ctor() is called once during the
1446          * creation of the mapping. No callback is called when the
1447          * mapping is shared during a fork(). i915_gem_pager_dtor() is
1448          * called when the last reference to the mapping is dropped. So
1449          * the only sequence is:
1450          *     1. drm_gem_mmap_single(): ref++
1451          *     2. i915_gem_pager_ctor(): <noop>
1452          *     3. i915_gem_pager_dtor(): ref--
1453          */
1454
1455         *color = 0; /* XXXKIB */
1456         return (0);
1457 }
1458
1459 /**
1460  * i915_gem_fault - fault a page into the GTT
1461  * vma: VMA in question
1462  * vmf: fault info
1463  *
1464  * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
1465  * from userspace.  The fault handler takes care of binding the object to
1466  * the GTT (if needed), allocating and programming a fence register (again,
1467  * only if needed based on whether the old reg is still valid or the object
1468  * is tiled) and inserting a new PTE into the faulting process.
1469  *
1470  * Note that the faulting process may involve evicting existing objects
1471  * from the GTT and/or fence registers to make room.  So performance may
1472  * suffer if the GTT working set is large or there are few fence registers
1473  * left.
1474  */
1475
1476 int i915_intr_pf;
1477
1478 static int
1479 i915_gem_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type,
1480     vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
1481 {
1482         struct drm_gem_object *gem_obj = vm_obj->handle;
1483         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
1484         struct drm_device *dev = obj->base.dev;
1485         drm_i915_private_t *dev_priv = dev->dev_private;
1486         vm_page_t page;
1487         int ret = 0;
1488         bool write = (max_prot & VM_PROT_WRITE) != 0;
1489         bool pinned;
1490
1491         VM_OBJECT_WUNLOCK(vm_obj);
1492 retry:
1493         ret = 0;
1494         pinned = 0;
1495         page = NULL;
1496
1497         if (i915_intr_pf) {
1498                 ret = i915_mutex_lock_interruptible(dev);
1499                 if (ret != 0)
1500                         goto out;
1501         } else
1502                 DRM_LOCK(dev);
1503
1504         /*
1505          * Since the object lock was dropped, another thread might have
1506          * faulted on the same GTT address and instantiated the
1507          * mapping for the page.  Recheck.
1508          */
1509         VM_OBJECT_WLOCK(vm_obj);
1510         page = vm_page_lookup(vm_obj, pidx);
1511         if (page != NULL) {
1512                 if (vm_page_busied(page)) {
1513                         DRM_UNLOCK(dev);
1514                         vm_page_lock(page);
1515                         VM_OBJECT_WUNLOCK(vm_obj);
1516                         vm_page_busy_sleep(page, "915pee", false);
1517                         goto retry;
1518                 }
1519                 goto have_page;
1520         } else
1521                 VM_OBJECT_WUNLOCK(vm_obj);
1522
1523         /* Now bind it into the GTT if needed */
1524         ret = i915_gem_object_pin(obj, 0, true, false);
1525         if (ret)
1526                 goto unlock;
1527         pinned = 1;
1528
1529         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1530         if (ret)
1531                 goto unpin;
1532
1533         ret = i915_gem_object_get_fence(obj);
1534         if (ret)
1535                 goto unpin;
1536
1537         obj->fault_mappable = true;
1538
1539         page = PHYS_TO_VM_PAGE(dev_priv->mm.gtt_base_addr + obj->gtt_offset +
1540             IDX_TO_OFF(pidx));
1541         if (page == NULL) {
1542                 ret = -EFAULT;
1543                 goto unpin;
1544         }
1545         KASSERT((page->flags & PG_FICTITIOUS) != 0,
1546             ("physical address %#jx not fictitious, page %p",
1547             (uintmax_t)(dev_priv->mm.gtt_base_addr + obj->gtt_offset +
1548             IDX_TO_OFF(pidx)), page));
1549         KASSERT(page->wire_count == 1, ("wire_count not 1 %p", page));
1550
1551         VM_OBJECT_WLOCK(vm_obj);
1552         if (vm_page_busied(page)) {
1553                 i915_gem_object_unpin(obj);
1554                 DRM_UNLOCK(dev);
1555                 vm_page_lock(page);
1556                 VM_OBJECT_WUNLOCK(vm_obj);
1557                 vm_page_busy_sleep(page, "915pbs", false);
1558                 goto retry;
1559         }
1560         if (vm_page_insert(page, vm_obj, pidx)) {
1561                 i915_gem_object_unpin(obj);
1562                 DRM_UNLOCK(dev);
1563                 VM_OBJECT_WUNLOCK(vm_obj);
1564                 vm_wait(vm_obj);
1565                 goto retry;
1566         }
1567         page->valid = VM_PAGE_BITS_ALL;
1568 have_page:
1569         vm_page_xbusy(page);
1570
1571         CTR4(KTR_DRM, "fault %p %jx %x phys %x", gem_obj, pidx, fault_type,
1572             page->phys_addr);
1573         if (pinned) {
1574                 /*
1575                  * We may have not pinned the object if the page was
1576                  * found by the call to vm_page_lookup().
1577                  */
1578                 i915_gem_object_unpin(obj);
1579         }
1580         DRM_UNLOCK(dev);
1581         *first = *last = pidx;
1582         return (VM_PAGER_OK);
1583
1584 unpin:
1585         i915_gem_object_unpin(obj);
1586 unlock:
1587         DRM_UNLOCK(dev);
1588 out:
1589         KASSERT(ret != 0, ("i915_gem_pager_populate: wrong return"));
1590         CTR4(KTR_DRM, "fault_fail %p %jx %x err %d", gem_obj, pidx, fault_type,
1591             -ret);
1592         if (ret == -ERESTARTSYS) {
1593                 /*
1594                  * NOTE Linux<->FreeBSD: Convert Linux' -ERESTARTSYS to
1595                  * the more common -EINTR, so the page fault is retried.
1596                  */
1597                 ret = -EINTR;
1598         }
1599         if (ret == -EAGAIN || ret == -EIO || ret == -EINTR) {
1600                 kern_yield(PRI_USER);
1601                 goto retry;
1602         }
1603         VM_OBJECT_WLOCK(vm_obj);
1604         return (VM_PAGER_ERROR);
1605 }
1606
1607 static void
1608 i915_gem_pager_dtor(void *handle)
1609 {
1610         struct drm_gem_object *obj = handle;
1611         struct drm_device *dev = obj->dev;
1612
1613         DRM_LOCK(dev);
1614         drm_gem_object_unreference(obj);
1615         DRM_UNLOCK(dev);
1616 }
1617
1618 struct cdev_pager_ops i915_gem_pager_ops = {
1619         .cdev_pg_populate       = i915_gem_pager_populate,
1620         .cdev_pg_ctor           = i915_gem_pager_ctor,
1621         .cdev_pg_dtor           = i915_gem_pager_dtor,
1622 };
1623
1624 /**
1625  * i915_gem_release_mmap - remove physical page mappings
1626  * @obj: obj in question
1627  *
1628  * Preserve the reservation of the mmapping with the DRM core code, but
1629  * relinquish ownership of the pages back to the system.
1630  *
1631  * It is vital that we remove the page mapping if we have mapped a tiled
1632  * object through the GTT and then lose the fence register due to
1633  * resource pressure. Similarly if the object has been moved out of the
1634  * aperture, then pages mapped into userspace must be revoked. Removing the
1635  * mapping will then trigger a page fault on the next user access, allowing
1636  * fixup by i915_gem_fault().
1637  */
1638 void
1639 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1640 {
1641         vm_object_t devobj;
1642         vm_page_t page;
1643         int i, page_count;
1644
1645         if (!obj->fault_mappable)
1646                 return;
1647
1648         CTR3(KTR_DRM, "release_mmap %p %x %x", obj, obj->gtt_offset,
1649             OFF_TO_IDX(obj->base.size));
1650         devobj = cdev_pager_lookup(obj);
1651         if (devobj != NULL) {
1652                 page_count = OFF_TO_IDX(obj->base.size);
1653
1654                 VM_OBJECT_WLOCK(devobj);
1655 retry:
1656                 for (i = 0; i < page_count; i++) {
1657                         page = vm_page_lookup(devobj, i);
1658                         if (page == NULL)
1659                                 continue;
1660                         if (vm_page_sleep_if_busy(page, "915unm"))
1661                                 goto retry;
1662                         cdev_pager_free_page(devobj, page);
1663                 }
1664                 VM_OBJECT_WUNLOCK(devobj);
1665                 vm_object_deallocate(devobj);
1666         }
1667
1668         obj->fault_mappable = false;
1669 }
1670
1671 static uint32_t
1672 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1673 {
1674         uint32_t gtt_size;
1675
1676         if (INTEL_INFO(dev)->gen >= 4 ||
1677             tiling_mode == I915_TILING_NONE)
1678                 return size;
1679
1680         /* Previous chips need a power-of-two fence region when tiling */
1681         if (INTEL_INFO(dev)->gen == 3)
1682                 gtt_size = 1024*1024;
1683         else
1684                 gtt_size = 512*1024;
1685
1686         while (gtt_size < size)
1687                 gtt_size <<= 1;
1688
1689         return gtt_size;
1690 }
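
/*
 * Worked example (illustrative only, derived from the rounding code above
 * rather than from any datasheet): a 600 KiB X-tiled object on a gen2 part
 * starts from the 512 KiB minimum and doubles once, giving a 1 MiB fence
 * region, while on gen3 the 1 MiB minimum already covers it.  On gen4+, or
 * for untiled objects, the object size is returned unchanged.
 */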
1691
1692 /**
1693  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1694  * @obj: object to check
1695  *
1696  * Return the required GTT alignment for an object, taking into account
1697  * potential fence register mapping.
1698  */
1699 static uint32_t
1700 i915_gem_get_gtt_alignment(struct drm_device *dev,
1701                            uint32_t size,
1702                            int tiling_mode)
1703 {
1704         /*
1705          * Minimum alignment is 4k (GTT page size), but might be greater
1706          * if a fence register is needed for the object.
1707          */
1708         if (INTEL_INFO(dev)->gen >= 4 ||
1709             tiling_mode == I915_TILING_NONE)
1710                 return 4096;
1711
1712         /*
1713          * Previous chips need to be aligned to the size of the smallest
1714          * fence register that can contain the object.
1715          */
1716         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1717 }
1718
1719 /**
1720  * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1721  *                                       unfenced object
1722  * @dev: the device
1723  * @size: size of the object
1724  * @tiling_mode: tiling mode of the object
1725  *
1726  * Return the required GTT alignment for an object, only taking into account
1727  * unfenced tiled surface requirements.
1728  */
1729 uint32_t
1730 i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1731                                     uint32_t size,
1732                                     int tiling_mode)
1733 {
1734         /*
1735          * Minimum alignment is 4k (GTT page size) for sane hw.
1736          */
1737         if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
1738             tiling_mode == I915_TILING_NONE)
1739                 return 4096;
1740
1741         /* Previous hardware however needs to be aligned to a power-of-two
1742          * tile height. The simplest method for determining this is to reuse
1743          * the power-of-two tile object size.
1744          */
1745         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1746 }
1747
1748 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1749 {
1750         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1751         int ret;
1752
1753         if (obj->base.on_map)
1754                 return 0;
1755
1756         dev_priv->mm.shrinker_no_lock_stealing = true;
1757
1758         ret = drm_gem_create_mmap_offset(&obj->base);
1759         if (ret != -ENOSPC)
1760                 goto out;
1761
1762         /* Badly fragmented mmap space? The only way we can recover
1763          * space is by destroying unwanted objects. We can't randomly release
1764          * mmap_offsets as userspace expects them to be persistent for the
1765          * lifetime of the objects. The closest we can do is to release the
1766          * offsets on purgeable objects by truncating them and marking them purged,
1767          * which prevents userspace from ever using that object again.
1768          */
1769         i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1770         ret = drm_gem_create_mmap_offset(&obj->base);
1771         if (ret != -ENOSPC)
1772                 goto out;
1773
1774         i915_gem_shrink_all(dev_priv);
1775         ret = drm_gem_create_mmap_offset(&obj->base);
1776 out:
1777         dev_priv->mm.shrinker_no_lock_stealing = false;
1778
1779         return ret;
1780 }
1781
1782 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1783 {
1784         if (!obj->base.on_map)
1785                 return;
1786
1787         drm_gem_free_mmap_offset(&obj->base);
1788 }
1789
1790 int
1791 i915_gem_mmap_gtt(struct drm_file *file,
1792                   struct drm_device *dev,
1793                   uint32_t handle,
1794                   uint64_t *offset)
1795 {
1796         struct drm_i915_private *dev_priv = dev->dev_private;
1797         struct drm_i915_gem_object *obj;
1798         int ret;
1799
1800         ret = i915_mutex_lock_interruptible(dev);
1801         if (ret)
1802                 return ret;
1803
1804         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1805         if (&obj->base == NULL) {
1806                 ret = -ENOENT;
1807                 goto unlock;
1808         }
1809
1810         if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1811                 ret = -E2BIG;
1812                 goto out;
1813         }
1814
1815         if (obj->madv != I915_MADV_WILLNEED) {
1816                 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1817                 ret = -EINVAL;
1818                 goto out;
1819         }
1820
1821         ret = i915_gem_object_create_mmap_offset(obj);
1822         if (ret)
1823                 goto out;
1824
1825         *offset = DRM_GEM_MAPPING_OFF(obj->base.map_list.key) |
1826             DRM_GEM_MAPPING_KEY;
1827
1828 out:
1829         drm_gem_object_unreference(&obj->base);
1830 unlock:
1831         DRM_UNLOCK(dev);
1832         return ret;
1833 }
1834
1835 /**
1836  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1837  * @dev: DRM device
1838  * @data: GTT mapping ioctl data
1839  * @file: GEM object info
1840  *
1841  * Simply returns the fake offset to userspace so it can mmap it.
1842  * The mmap call will end up in drm_gem_mmap(), which will set things
1843  * up so we can get faults in the handler above.
1844  *
1845  * The fault handler will take care of binding the object into the GTT
1846  * (since it may have been evicted to make room for something), allocating
1847  * a fence register, and mapping the appropriate aperture address into
1848  * userspace.
1849  */
1850 int
1851 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1852                         struct drm_file *file)
1853 {
1854         struct drm_i915_gem_mmap_gtt *args = data;
1855
1856         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1857 }
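
/*
 * Illustrative userspace flow (a sketch under assumptions, not part of the
 * driver): assuming "fd" is an open DRM device node and "bo_handle" and
 * "bo_size" describe an existing GEM object, the fake offset returned by
 * DRM_IOCTL_I915_GEM_MMAP_GTT is only meaningful as an mmap() offset on fd:
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = bo_handle };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, arg.offset);
 *
 * Touching "ptr" then faults through the pager populate handler above,
 * which binds the object into the GTT on demand.
 */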
1858
1859 /* Immediately discard the backing storage */
1860 static void
1861 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1862 {
1863         vm_object_t vm_obj;
1864
1865         vm_obj = obj->base.vm_obj;
1866         VM_OBJECT_WLOCK(vm_obj);
1867         vm_object_page_remove(vm_obj, 0, 0, false);
1868         VM_OBJECT_WUNLOCK(vm_obj);
1869         i915_gem_object_free_mmap_offset(obj);
1870
1871         obj->madv = __I915_MADV_PURGED;
1872 }
1873
1874 static inline int
1875 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1876 {
1877         return obj->madv == I915_MADV_DONTNEED;
1878 }
1879
1880 static void
1881 i915_gem_object_put_pages_range_locked(struct drm_i915_gem_object *obj,
1882     vm_pindex_t si, vm_pindex_t ei)
1883 {
1884         vm_object_t vm_obj;
1885         vm_page_t page;
1886         vm_pindex_t i;
1887
1888         vm_obj = obj->base.vm_obj;
1889         VM_OBJECT_ASSERT_LOCKED(vm_obj);
1890         for (i = si,  page = vm_page_lookup(vm_obj, i); i < ei;
1891             page = vm_page_next(page), i++) {
1892                 KASSERT(page->pindex == i, ("pindex %jx %jx",
1893                     (uintmax_t)page->pindex, (uintmax_t)i));
1894                 vm_page_lock(page);
1895                 if (vm_page_unwire(page, PQ_INACTIVE))
1896                         atomic_add_long(&i915_gem_wired_pages_cnt, -1);
1897                 vm_page_unlock(page);
1898         }
1899 }
1900
1901 #define GEM_PARANOID_CHECK_GTT 0
1902 #if GEM_PARANOID_CHECK_GTT
1903 static void
1904 i915_gem_assert_pages_not_mapped(struct drm_device *dev, vm_page_t *ma,
1905     int page_count)
1906 {
1907         struct drm_i915_private *dev_priv;
1908         vm_paddr_t pa;
1909         unsigned long start, end;
1910         u_int i;
1911         int j;
1912
1913         dev_priv = dev->dev_private;
1914         start = OFF_TO_IDX(dev_priv->mm.gtt_start);
1915         end = OFF_TO_IDX(dev_priv->mm.gtt_end);
1916         for (i = start; i < end; i++) {
1917                 pa = intel_gtt_read_pte_paddr(i);
1918                 for (j = 0; j < page_count; j++) {
1919                         if (pa == VM_PAGE_TO_PHYS(ma[j])) {
1920                                 panic("Page %p in GTT pte index %d pte %x",
1921                                     ma[j], i, intel_gtt_read_pte(i));
1922                         }
1923                 }
1924         }
1925 }
1926 #endif
1927
1928 static void
1929 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1930 {
1931         int page_count = obj->base.size / PAGE_SIZE;
1932         int ret, i;
1933
1934         BUG_ON(obj->madv == __I915_MADV_PURGED);
1935
1936         ret = i915_gem_object_set_to_cpu_domain(obj, true);
1937         if (ret) {
1938                 /* In the event of a disaster, abandon all caches and
1939                  * hope for the best.
1940                  */
1941                 WARN_ON(ret != -EIO);
1942                 i915_gem_clflush_object(obj);
1943                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1944         }
1945
1946         if (i915_gem_object_needs_bit17_swizzle(obj))
1947                 i915_gem_object_save_bit_17_swizzle(obj);
1948
1949         if (obj->madv == I915_MADV_DONTNEED)
1950                 obj->dirty = 0;
1951
1952         VM_OBJECT_WLOCK(obj->base.vm_obj);
1953 #if GEM_PARANOID_CHECK_GTT
1954         i915_gem_assert_pages_not_mapped(obj->base.dev, obj->pages, page_count);
1955 #endif
1956         for (i = 0; i < page_count; i++) {
1957                 vm_page_t page = obj->pages[i];
1958
1959                 if (obj->dirty)
1960                         vm_page_dirty(page);
1961
1962                 if (obj->madv == I915_MADV_WILLNEED)
1963                         vm_page_reference(page);
1964
1965                 vm_page_lock(page);
1966                 vm_page_unwire(obj->pages[i], PQ_ACTIVE);
1967                 vm_page_unlock(page);
1968                 atomic_add_long(&i915_gem_wired_pages_cnt, -1);
1969         }
1970         VM_OBJECT_WUNLOCK(obj->base.vm_obj);
1971         obj->dirty = 0;
1972
1973         free(obj->pages, DRM_I915_GEM);
1974         obj->pages = NULL;
1975 }
1976
1977 static int
1978 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1979 {
1980         const struct drm_i915_gem_object_ops *ops = obj->ops;
1981
1982         if (obj->pages == NULL)
1983                 return 0;
1984
1985         BUG_ON(obj->gtt_space);
1986
1987         if (obj->pages_pin_count)
1988                 return -EBUSY;
1989
1990         /* ->put_pages might need to allocate memory for the bit17 swizzle
1991          * array, hence protect them from being reaped by removing them from gtt
1992          * lists early. */
1993         list_del(&obj->gtt_list);
1994
1995         ops->put_pages(obj);
1996         obj->pages = NULL;
1997
1998         if (i915_gem_object_is_purgeable(obj))
1999                 i915_gem_object_truncate(obj);
2000
2001         return 0;
2002 }
2003
2004 static long
2005 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
2006                   bool purgeable_only)
2007 {
2008         struct drm_i915_gem_object *obj, *next;
2009         long count = 0;
2010
2011         list_for_each_entry_safe(obj, next,
2012                                  &dev_priv->mm.unbound_list,
2013                                  gtt_list) {
2014                 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
2015                     i915_gem_object_put_pages(obj) == 0) {
2016                         count += obj->base.size >> PAGE_SHIFT;
2017                         if (target != -1 && count >= target)
2018                                 return count;
2019                 }
2020         }
2021
2022         list_for_each_entry_safe(obj, next,
2023                                  &dev_priv->mm.inactive_list,
2024                                  mm_list) {
2025                 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
2026                     i915_gem_object_unbind(obj) == 0 &&
2027                     i915_gem_object_put_pages(obj) == 0) {
2028                         count += obj->base.size >> PAGE_SHIFT;
2029                         if (target != -1 && count >= target)
2030                                 return count;
2031                 }
2032         }
2033
2034         return count;
2035 }
2036
2037 static long
2038 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
2039 {
2040         return __i915_gem_shrink(dev_priv, target, true);
2041 }
2042
2043 static void
2044 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
2045 {
2046         struct drm_i915_gem_object *obj, *next;
2047
2048         i915_gem_evict_everything(dev_priv->dev);
2049
2050         list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
2051                 i915_gem_object_put_pages(obj);
2052 }
2053
2054 static int
2055 i915_gem_object_get_pages_range(struct drm_i915_gem_object *obj,
2056     off_t start, off_t end)
2057 {
2058         vm_object_t vm_obj;
2059         vm_page_t page;
2060         vm_pindex_t si, ei, i;
2061         bool need_swizzle, fresh;
2062
2063         need_swizzle = i915_gem_object_needs_bit17_swizzle(obj) != 0;
2064         vm_obj = obj->base.vm_obj;
2065         si = OFF_TO_IDX(trunc_page(start));
2066         ei = OFF_TO_IDX(round_page(end));
2067         VM_OBJECT_WLOCK(vm_obj);
2068         for (i = si; i < ei; i++) {
2069                 page = i915_gem_wire_page(vm_obj, i, &fresh);
2070                 if (page == NULL)
2071                         goto failed;
2072                 if (need_swizzle && fresh)
2073                         i915_gem_object_do_bit_17_swizzle_page(obj, page);
2074         }
2075         VM_OBJECT_WUNLOCK(vm_obj);
2076         return (0);
2077 failed:
2078         i915_gem_object_put_pages_range_locked(obj, si, i);
2079         VM_OBJECT_WUNLOCK(vm_obj);
2080         return (-EIO);
2081 }
2082
2083 static int
2084 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2085 {
2086         vm_object_t vm_obj;
2087         vm_page_t page;
2088         vm_pindex_t i, page_count;
2089         int res;
2090
2091         /* Assert that the object is not currently in any GPU domain. As it
2092          * wasn't in the GTT, there shouldn't be any way it could have been in
2093          * a GPU cache
2094          */
2095         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2096         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2097         KASSERT(obj->pages == NULL, ("Obj already has pages"));
2098
2099         page_count = OFF_TO_IDX(obj->base.size);
2100         obj->pages = malloc(page_count * sizeof(vm_page_t), DRM_I915_GEM,
2101             M_WAITOK);
2102         res = i915_gem_object_get_pages_range(obj, 0, obj->base.size);
2103         if (res != 0) {
2104                 free(obj->pages, DRM_I915_GEM);
2105                 obj->pages = NULL;
2106                 return (res);
2107         }
2108         vm_obj = obj->base.vm_obj;
2109         VM_OBJECT_WLOCK(vm_obj);
2110         for (i = 0, page = vm_page_lookup(vm_obj, 0); i < page_count;
2111             i++, page = vm_page_next(page)) {
2112                 KASSERT(page->pindex == i, ("pindex %jx %jx",
2113                     (uintmax_t)page->pindex, (uintmax_t)i));
2114                 obj->pages[i] = page;
2115         }
2116         VM_OBJECT_WUNLOCK(vm_obj);
2117         return (0);
2118 }
2119
2120 /* Ensure that the associated pages are gathered from the backing storage
2121  * and pinned into our object. i915_gem_object_get_pages() may be called
2122  * multiple times before they are released by a single call to
2123  * i915_gem_object_put_pages() - once the pages are no longer referenced
2124  * either as a result of memory pressure (reaping pages under the shrinker)
2125  * or as the object is itself released.
2126  */
2127 int
2128 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2129 {
2130         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2131         const struct drm_i915_gem_object_ops *ops = obj->ops;
2132         int ret;
2133
2134         if (obj->pages)
2135                 return 0;
2136
2137         BUG_ON(obj->pages_pin_count);
2138
2139         ret = ops->get_pages(obj);
2140         if (ret)
2141                 return ret;
2142
2143         list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
2144         return 0;
2145 }
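
/*
 * Illustrative pairing (sketch): a typical caller does
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret == 0) {
 *		... use obj->pages[] ...
 *	}
 *
 * and the backing pages are only dropped later by a single call to
 * i915_gem_object_put_pages(), e.g. from the shrinker or at object
 * teardown, once obj->pages_pin_count has returned to zero and the
 * object is no longer bound into the GTT.
 */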
2146
2147 void
2148 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
2149                                struct intel_ring_buffer *ring)
2150 {
2151         struct drm_device *dev = obj->base.dev;
2152         struct drm_i915_private *dev_priv = dev->dev_private;
2153         u32 seqno = intel_ring_get_seqno(ring);
2154
2155         BUG_ON(ring == NULL);
2156         obj->ring = ring;
2157
2158         /* Add a reference if we're newly entering the active list. */
2159         if (!obj->active) {
2160                 drm_gem_object_reference(&obj->base);
2161                 obj->active = 1;
2162         }
2163
2164         /* Move from whatever list we were on to the tail of execution. */
2165         list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
2166         list_move_tail(&obj->ring_list, &ring->active_list);
2167
2168         obj->last_read_seqno = seqno;
2169
2170         if (obj->fenced_gpu_access) {
2171                 obj->last_fenced_seqno = seqno;
2172
2173                 /* Bump MRU to take account of the delayed flush */
2174                 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2175                         struct drm_i915_fence_reg *reg;
2176
2177                         reg = &dev_priv->fence_regs[obj->fence_reg];
2178                         list_move_tail(&reg->lru_list,
2179                                        &dev_priv->mm.fence_list);
2180                 }
2181         }
2182 }
2183
2184 static void
2185 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2186 {
2187         struct drm_device *dev = obj->base.dev;
2188         struct drm_i915_private *dev_priv = dev->dev_private;
2189
2190         BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
2191         BUG_ON(!obj->active);
2192
2193         list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2194
2195         list_del_init(&obj->ring_list);
2196         obj->ring = NULL;
2197
2198         obj->last_read_seqno = 0;
2199         obj->last_write_seqno = 0;
2200         obj->base.write_domain = 0;
2201
2202         obj->last_fenced_seqno = 0;
2203         obj->fenced_gpu_access = false;
2204
2205         obj->active = 0;
2206         drm_gem_object_unreference(&obj->base);
2207
2208         WARN_ON(i915_verify_lists(dev));
2209 }
2210
2211 static int
2212 i915_gem_handle_seqno_wrap(struct drm_device *dev)
2213 {
2214         struct drm_i915_private *dev_priv = dev->dev_private;
2215         struct intel_ring_buffer *ring;
2216         int ret, i, j;
2217
2218         /* The hardware uses various monotonic 32-bit counters; if we
2219          * detect that they will wrap around, we need to idle the GPU
2220          * and reset those counters.
2221          */
2222         ret = 0;
2223         for_each_ring(ring, dev_priv, i) {
2224                 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
2225                         ret |= ring->sync_seqno[j] != 0;
2226         }
2227         if (ret == 0)
2228                 return ret;
2229
2230         ret = i915_gpu_idle(dev);
2231         if (ret)
2232                 return ret;
2233
2234         i915_gem_retire_requests(dev);
2235         for_each_ring(ring, dev_priv, i) {
2236                 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
2237                         ring->sync_seqno[j] = 0;
2238         }
2239
2240         return 0;
2241 }
2242
2243 int
2244 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2245 {
2246         struct drm_i915_private *dev_priv = dev->dev_private;
2247
2248         /* reserve 0 for non-seqno */
2249         if (dev_priv->next_seqno == 0) {
2250                 int ret = i915_gem_handle_seqno_wrap(dev);
2251                 if (ret)
2252                         return ret;
2253
2254                 dev_priv->next_seqno = 1;
2255         }
2256
2257         *seqno = dev_priv->next_seqno++;
2258         return 0;
2259 }
2260
2261 int
2262 i915_add_request(struct intel_ring_buffer *ring,
2263                  struct drm_file *file,
2264                  u32 *out_seqno)
2265 {
2266         drm_i915_private_t *dev_priv = ring->dev->dev_private;
2267         struct drm_i915_gem_request *request;
2268         u32 request_ring_position;
2269         int was_empty;
2270         int ret;
2271
2272         /*
2273          * Emit any outstanding flushes - execbuf can fail to emit the flush
2274          * after having emitted the batchbuffer command. Hence we need to fix
2275          * things up similar to emitting the lazy request. The difference here
2276          * is that the flush _must_ happen before the next request, no matter
2277          * what.
2278          */
2279         ret = intel_ring_flush_all_caches(ring);
2280         if (ret)
2281                 return ret;
2282
2283         request = malloc(sizeof(*request), DRM_I915_GEM, M_NOWAIT);
2284         if (request == NULL)
2285                 return -ENOMEM;
2286
2287
2288         /* Record the position of the start of the request so that
2289          * should we detect the updated seqno part-way through the
2290          * GPU processing the request, we never over-estimate the
2291          * position of the head.
2292          */
2293         request_ring_position = intel_ring_get_tail(ring);
2294
2295         ret = ring->add_request(ring);
2296         if (ret) {
2297                 free(request, DRM_I915_GEM);
2298                 return ret;
2299         }
2300
2301         request->seqno = intel_ring_get_seqno(ring);
2302         request->ring = ring;
2303         request->tail = request_ring_position;
2304         request->emitted_jiffies = jiffies;
2305         was_empty = list_empty(&ring->request_list);
2306         list_add_tail(&request->list, &ring->request_list);
2307         request->file_priv = NULL;
2308
2309         if (file) {
2310                 struct drm_i915_file_private *file_priv = file->driver_priv;
2311
2312                 mtx_lock(&file_priv->mm.lock);
2313                 request->file_priv = file_priv;
2314                 list_add_tail(&request->client_list,
2315                               &file_priv->mm.request_list);
2316                 mtx_unlock(&file_priv->mm.lock);
2317         }
2318
2319         CTR2(KTR_DRM, "request_add %s %d", ring->name, request->seqno);
2320         ring->outstanding_lazy_request = 0;
2321
2322         if (!dev_priv->mm.suspended) {
2323                 if (i915_enable_hangcheck) {
2324                         callout_schedule(&dev_priv->hangcheck_timer,
2325                             DRM_I915_HANGCHECK_PERIOD);
2326                 }
2327                 if (was_empty) {
2328                         taskqueue_enqueue_timeout(dev_priv->wq,
2329                             &dev_priv->mm.retire_work, hz);
2330                         intel_mark_busy(dev_priv->dev);
2331                 }
2332         }
2333
2334         if (out_seqno)
2335                 *out_seqno = request->seqno;
2336         return 0;
2337 }
2338
2339 static inline void
2340 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2341 {
2342         struct drm_i915_file_private *file_priv = request->file_priv;
2343
2344         if (!file_priv)
2345                 return;
2346
2347         mtx_lock(&file_priv->mm.lock);
2348         if (request->file_priv) {
2349                 list_del(&request->client_list);
2350                 request->file_priv = NULL;
2351         }
2352         mtx_unlock(&file_priv->mm.lock);
2353 }
2354
2355 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2356                                       struct intel_ring_buffer *ring)
2357 {
2358         if (ring->dev != NULL)
2359                 DRM_LOCK_ASSERT(ring->dev);
2360
2361         while (!list_empty(&ring->request_list)) {
2362                 struct drm_i915_gem_request *request;
2363
2364                 request = list_first_entry(&ring->request_list,
2365                                            struct drm_i915_gem_request,
2366                                            list);
2367
2368                 list_del(&request->list);
2369                 i915_gem_request_remove_from_client(request);
2370                 free(request, DRM_I915_GEM);
2371         }
2372
2373         while (!list_empty(&ring->active_list)) {
2374                 struct drm_i915_gem_object *obj;
2375
2376                 obj = list_first_entry(&ring->active_list,
2377                                        struct drm_i915_gem_object,
2378                                        ring_list);
2379
2380                 i915_gem_object_move_to_inactive(obj);
2381         }
2382 }
2383
2384 static void i915_gem_reset_fences(struct drm_device *dev)
2385 {
2386         struct drm_i915_private *dev_priv = dev->dev_private;
2387         int i;
2388
2389         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2390                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2391
2392                 i915_gem_write_fence(dev, i, NULL);
2393
2394                 if (reg->obj)
2395                         i915_gem_object_fence_lost(reg->obj);
2396
2397                 reg->pin_count = 0;
2398                 reg->obj = NULL;
2399                 INIT_LIST_HEAD(&reg->lru_list);
2400         }
2401
2402         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
2403 }
2404
2405 void i915_gem_reset(struct drm_device *dev)
2406 {
2407         struct drm_i915_private *dev_priv = dev->dev_private;
2408         struct drm_i915_gem_object *obj;
2409         struct intel_ring_buffer *ring;
2410         int i;
2411
2412         for_each_ring(ring, dev_priv, i)
2413                 i915_gem_reset_ring_lists(dev_priv, ring);
2414
2415         /* Move everything out of the GPU domains to ensure we do any
2416          * necessary invalidation upon reuse.
2417          */
2418         list_for_each_entry(obj,
2419                             &dev_priv->mm.inactive_list,
2420                             mm_list)
2421         {
2422                 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
2423         }
2424
2425         /* The fence registers are invalidated so clear them out */
2426         i915_gem_reset_fences(dev);
2427 }
2428
2429 /**
2430  * This function clears the request list as sequence numbers are passed.
2431  */
2432 void
2433 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2434 {
2435         uint32_t seqno;
2436
2437         if (list_empty(&ring->request_list))
2438                 return;
2439
2440         WARN_ON(i915_verify_lists(ring->dev));
2441
2442         seqno = ring->get_seqno(ring, true);
2443         CTR2(KTR_DRM, "retire_request_ring %s %d", ring->name, seqno);
2444
2445         while (!list_empty(&ring->request_list)) {
2446                 struct drm_i915_gem_request *request;
2447
2448                 request = list_first_entry(&ring->request_list,
2449                                            struct drm_i915_gem_request,
2450                                            list);
2451
2452                 if (!i915_seqno_passed(seqno, request->seqno))
2453                         break;
2454
2455                 CTR2(KTR_DRM, "retire_request_seqno_passed %s %d",
2456                     ring->name, seqno);
2457                 /* We know the GPU must have read the request to have
2458                  * sent us the seqno + interrupt, so use the position
2459          * of the tail of the request to update the last known position
2460                  * of the GPU head.
2461                  */
2462                 ring->last_retired_head = request->tail;
2463
2464                 list_del(&request->list);
2465                 i915_gem_request_remove_from_client(request);
2466                 free(request, DRM_I915_GEM);
2467         }
2468
2469         /* Move any buffers on the active list that are no longer referenced
2470          * by the ringbuffer to the flushing/inactive lists as appropriate.
2471          */
2472         while (!list_empty(&ring->active_list)) {
2473                 struct drm_i915_gem_object *obj;
2474
2475                 obj = list_first_entry(&ring->active_list,
2476                                       struct drm_i915_gem_object,
2477                                       ring_list);
2478
2479                 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2480                         break;
2481
2482                 i915_gem_object_move_to_inactive(obj);
2483         }
2484
2485         if (unlikely(ring->trace_irq_seqno &&
2486                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2487                 ring->irq_put(ring);
2488                 ring->trace_irq_seqno = 0;
2489         }
2490
2491         WARN_ON(i915_verify_lists(ring->dev));
2492 }
2493
2494 void
2495 i915_gem_retire_requests(struct drm_device *dev)
2496 {
2497         drm_i915_private_t *dev_priv = dev->dev_private;
2498         struct intel_ring_buffer *ring;
2499         int i;
2500
2501         for_each_ring(ring, dev_priv, i)
2502                 i915_gem_retire_requests_ring(ring);
2503 }
2504
2505 static void
2506 i915_gem_retire_work_handler(void *arg, int pending)
2507 {
2508         drm_i915_private_t *dev_priv;
2509         struct drm_device *dev;
2510         struct intel_ring_buffer *ring;
2511         bool idle;
2512         int i;
2513
2514         dev_priv = arg;
2515         dev = dev_priv->dev;
2516
2517         /* Come back later if the device is busy... */
2518         if (!sx_try_xlock(&dev->dev_struct_lock)) {
2519                 taskqueue_enqueue_timeout(dev_priv->wq,
2520                     &dev_priv->mm.retire_work, hz);
2521                 return;
2522         }
2523
2524         CTR0(KTR_DRM, "retire_task");
2525
2526         i915_gem_retire_requests(dev);
2527
2528         /* Send a periodic flush down the ring so we don't hold onto GEM
2529          * objects indefinitely.
2530          */
2531         idle = true;
2532         for_each_ring(ring, dev_priv, i) {
2533                 if (ring->gpu_caches_dirty)
2534                         i915_add_request(ring, NULL, NULL);
2535
2536                 idle &= list_empty(&ring->request_list);
2537         }
2538
2539         if (!dev_priv->mm.suspended && !idle)
2540                 taskqueue_enqueue_timeout(dev_priv->wq,
2541                     &dev_priv->mm.retire_work, hz);
2542         if (idle)
2543                 intel_mark_idle(dev);
2544
2545         DRM_UNLOCK(dev);
2546 }
2547
2548 /**
2549  * Ensures that an object will eventually get non-busy by flushing any required
2550  * write domains, emitting any outstanding lazy request and retiring and
2551  * write domains, emitting any outstanding lazy request and retiring any
2552  */
2553 static int
2554 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2555 {
2556         int ret;
2557
2558         if (obj->active) {
2559                 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2560                 if (ret)
2561                         return ret;
2562
2563                 i915_gem_retire_requests_ring(obj->ring);
2564         }
2565
2566         return 0;
2567 }
2568
2569 /**
2570  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2571  * @DRM_IOCTL_ARGS: standard ioctl arguments
2572  *
2573  * Returns 0 if successful, else an error is returned with the remaining time in
2574  * the timeout parameter.
2575  *  -ETIME: object is still busy after timeout
2576  *  -ERESTARTSYS: signal interrupted the wait
2577  *  -ENOENT: object doesn't exist
2578  * Also possible, but rare:
2579  *  -EAGAIN: GPU wedged
2580  *  -ENOMEM: damn
2581  *  -ENODEV: Internal IRQ fail
2582  *  -E?: The add request failed
2583  *
2584  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2585  * non-zero timeout parameter the wait ioctl will wait for the given number of
2586  * nanoseconds on an object becoming unbusy. Since the wait itself does so
2587  * without holding struct_mutex the object may become re-busied before this
2588  * function completes. A similar but shorter race condition exists in the busy
2589  * ioctl.
2590  */
2591 int
2592 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2593 {
2594         struct drm_i915_gem_wait *args = data;
2595         struct drm_i915_gem_object *obj;
2596         struct intel_ring_buffer *ring = NULL;
2597         struct timespec timeout_stack, *timeout = NULL;
2598         u32 seqno = 0;
2599         int ret = 0;
2600
2601         if (args->timeout_ns >= 0) {
2602                 timeout_stack.tv_sec = args->timeout_ns / 1000000000;
2603                 timeout_stack.tv_nsec = args->timeout_ns % 1000000000;
2604                 timeout = &timeout_stack;
2605         }
2606
2607         ret = i915_mutex_lock_interruptible(dev);
2608         if (ret)
2609                 return ret;
2610
2611         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2612         if (&obj->base == NULL) {
2613                 DRM_UNLOCK(dev);
2614                 return -ENOENT;
2615         }
2616
2617         /* Need to make sure the object gets inactive eventually. */
2618         ret = i915_gem_object_flush_active(obj);
2619         if (ret)
2620                 goto out;
2621
2622         if (obj->active) {
2623                 seqno = obj->last_read_seqno;
2624                 ring = obj->ring;
2625         }
2626
2627         if (seqno == 0)
2628                  goto out;
2629
2630         /* Do this after OLR check to make sure we make forward progress polling
2631          * on this IOCTL with a 0 timeout (like busy ioctl)
2632          */
2633         if (!args->timeout_ns) {
2634                 ret = -ETIMEDOUT;
2635                 goto out;
2636         }
2637
2638         drm_gem_object_unreference(&obj->base);
2639         DRM_UNLOCK(dev);
2640
2641         ret = __wait_seqno(ring, seqno, true, timeout);
2642         if (timeout) {
2643                 args->timeout_ns = timeout->tv_sec * 1000000000 + timeout->tv_nsec;
2644         }
2645         return ret;
2646
2647 out:
2648         drm_gem_object_unreference(&obj->base);
2649         DRM_UNLOCK(dev);
2650         return ret;
2651 }
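
/*
 * Illustrative userspace usage (a sketch under assumptions): with "fd" an
 * open DRM device node and "bo_handle" an existing GEM object, a caller can
 * block for up to one millisecond waiting for the object to go idle:
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = bo_handle,
 *		.timeout_ns = 1000000,
 *	};
 *
 *	ret = ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *
 * A timeout_ns of 0 turns this into a non-blocking busy check; otherwise the
 * driver writes the remaining time back into wait.timeout_ns.
 */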
2652
2653 /**
2654  * i915_gem_object_sync - sync an object to a ring.
2655  *
2656  * @obj: object which may be in use on another ring.
2657  * @to: ring we wish to use the object on. May be NULL.
2658  *
2659  * This code is meant to abstract object synchronization with the GPU.
2660  * Calling with NULL implies synchronizing the object with the CPU
2661  * rather than a particular GPU ring.
2662  *
2663  * Returns 0 if successful, else propagates up the lower layer error.
2664  */
2665 int
2666 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2667                      struct intel_ring_buffer *to)
2668 {
2669         struct intel_ring_buffer *from = obj->ring;
2670         u32 seqno;
2671         int ret, idx;
2672
2673         if (from == NULL || to == from)
2674                 return 0;
2675
2676         if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2677                 return i915_gem_object_wait_rendering(obj, false);
2678
2679         idx = intel_ring_sync_index(from, to);
2680
2681         seqno = obj->last_read_seqno;
2682         if (seqno <= from->sync_seqno[idx])
2683                 return 0;
2684
2685         ret = i915_gem_check_olr(obj->ring, seqno);
2686         if (ret)
2687                 return ret;
2688
2689         ret = to->sync_to(to, from, seqno);
2690         if (!ret)
2691                 /* We use last_read_seqno because sync_to()
2692                  * might have just caused seqno wrap under
2693                  * the radar.
2694                  */
2695                 from->sync_seqno[idx] = obj->last_read_seqno;
2696
2697         return ret;
2698 }
2699
2700 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2701 {
2702         u32 old_write_domain, old_read_domains;
2703
2704         /* Act as a barrier for all accesses through the GTT */
2705         mb();
2706
2707         /* Force a pagefault for domain tracking on next user access */
2708         i915_gem_release_mmap(obj);
2709
2710         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2711                 return;
2712
2713         old_read_domains = obj->base.read_domains;
2714         old_write_domain = obj->base.write_domain;
2715
2716         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2717         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2718
2719         CTR3(KTR_DRM, "object_change_domain finish gtt %p %x %x",
2720             obj, old_read_domains, old_write_domain);
2721 }
2722
2723 /**
2724  * Unbinds an object from the GTT aperture.
2725  */
2726 int
2727 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2728 {
2729         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2730         int ret = 0;
2731
2732         if (obj->gtt_space == NULL)
2733                 return 0;
2734
2735         if (obj->pin_count)
2736                 return -EBUSY;
2737
2738         BUG_ON(obj->pages == NULL);
2739
2740         ret = i915_gem_object_finish_gpu(obj);
2741         if (ret)
2742                 return ret;
2743         /* Continue on if we fail due to EIO, the GPU is hung so we
2744          * should be safe and we need to clean up or else we might
2745          * cause memory corruption through use-after-free.
2746          */
2747
2748         i915_gem_object_finish_gtt(obj);
2749
2750         /* release the fence reg _after_ flushing */
2751         ret = i915_gem_object_put_fence(obj);
2752         if (ret)
2753                 return ret;
2754
2755         if (obj->has_global_gtt_mapping)
2756                 i915_gem_gtt_unbind_object(obj);
2757         if (obj->has_aliasing_ppgtt_mapping) {
2758                 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2759                 obj->has_aliasing_ppgtt_mapping = 0;
2760         }
2761         i915_gem_gtt_finish_object(obj);
2762
2763         list_del(&obj->mm_list);
2764         list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
2765         /* Avoid an unnecessary call to unbind on rebind. */
2766         obj->map_and_fenceable = true;
2767
2768         drm_mm_put_block(obj->gtt_space);
2769         obj->gtt_space = NULL;
2770         obj->gtt_offset = 0;
2771
2772         return 0;
2773 }
2774
2775 int i915_gpu_idle(struct drm_device *dev)
2776 {
2777         drm_i915_private_t *dev_priv = dev->dev_private;
2778         struct intel_ring_buffer *ring;
2779         int ret, i;
2780
2781         /* Flush everything onto the inactive list. */
2782         for_each_ring(ring, dev_priv, i) {
2783                 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2784                 if (ret)
2785                         return ret;
2786
2787                 ret = intel_ring_idle(ring);
2788                 if (ret)
2789                         return ret;
2790         }
2791
2792         return 0;
2793 }
2794
2795 static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
2796                                         struct drm_i915_gem_object *obj)
2797 {
2798         drm_i915_private_t *dev_priv = dev->dev_private;
2799         uint64_t val;
2800
2801         if (obj) {
2802                 u32 size = obj->gtt_space->size;
2803
2804                 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2805                                  0xfffff000) << 32;
2806                 val |= obj->gtt_offset & 0xfffff000;
2807                 val |= (uint64_t)((obj->stride / 128) - 1) <<
2808                         SANDYBRIDGE_FENCE_PITCH_SHIFT;
2809
2810                 if (obj->tiling_mode == I915_TILING_Y)
2811                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2812                 val |= I965_FENCE_REG_VALID;
2813         } else
2814                 val = 0;
2815
2816         I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
2817         POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
2818 }
2819
2820 static void i965_write_fence_reg(struct drm_device *dev, int reg,
2821                                  struct drm_i915_gem_object *obj)
2822 {
2823         drm_i915_private_t *dev_priv = dev->dev_private;
2824         uint64_t val;
2825
2826         if (obj) {
2827                 u32 size = obj->gtt_space->size;
2828
2829                 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2830                                  0xfffff000) << 32;
2831                 val |= obj->gtt_offset & 0xfffff000;
2832                 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2833                 if (obj->tiling_mode == I915_TILING_Y)
2834                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2835                 val |= I965_FENCE_REG_VALID;
2836         } else
2837                 val = 0;
2838
2839         I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
2840         POSTING_READ(FENCE_REG_965_0 + reg * 8);
2841 }
2842
2843 static void i915_write_fence_reg(struct drm_device *dev, int reg,
2844                                  struct drm_i915_gem_object *obj)
2845 {
2846         drm_i915_private_t *dev_priv = dev->dev_private;
2847         u32 val;
2848
2849         if (obj) {
2850                 u32 size = obj->gtt_space->size;
2851                 int pitch_val;
2852                 int tile_width;
2853
2854                 WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2855                      (size & -size) != size ||
2856                      (obj->gtt_offset & (size - 1)),
2857                      "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2858                      obj->gtt_offset, obj->map_and_fenceable, size);
2859
2860                 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2861                         tile_width = 128;
2862                 else
2863                         tile_width = 512;
2864
2865                 /* Note: pitch better be a power of two tile widths */
2866                 pitch_val = obj->stride / tile_width;
2867                 pitch_val = ffs(pitch_val) - 1;
2868
2869                 val = obj->gtt_offset;
2870                 if (obj->tiling_mode == I915_TILING_Y)
2871                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2872                 val |= I915_FENCE_SIZE_BITS(size);
2873                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2874                 val |= I830_FENCE_REG_VALID;
2875         } else
2876                 val = 0;
2877
2878         if (reg < 8)
2879                 reg = FENCE_REG_830_0 + reg * 4;
2880         else
2881                 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2882
2883         I915_WRITE(reg, val);
2884         POSTING_READ(reg);
2885 }
2886
2887 static void i830_write_fence_reg(struct drm_device *dev, int reg,
2888                                 struct drm_i915_gem_object *obj)
2889 {
2890         drm_i915_private_t *dev_priv = dev->dev_private;
2891         uint32_t val;
2892
2893         if (obj) {
2894                 u32 size = obj->gtt_space->size;
2895                 uint32_t pitch_val;
2896
2897                 WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2898                      (size & -size) != size ||
2899                      (obj->gtt_offset & (size - 1)),
2900                      "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2901                      obj->gtt_offset, size);
2902
2903                 pitch_val = obj->stride / 128;
2904                 pitch_val = ffs(pitch_val) - 1;
2905
2906                 val = obj->gtt_offset;
2907                 if (obj->tiling_mode == I915_TILING_Y)
2908                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2909                 val |= I830_FENCE_SIZE_BITS(size);
2910                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2911                 val |= I830_FENCE_REG_VALID;
2912         } else
2913                 val = 0;
2914
2915         I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2916         POSTING_READ(FENCE_REG_830_0 + reg * 4);
2917 }
2918
2919 static void i915_gem_write_fence(struct drm_device *dev, int reg,
2920                                  struct drm_i915_gem_object *obj)
2921 {
2922         switch (INTEL_INFO(dev)->gen) {
2923         case 7:
2924         case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
2925         case 5:
2926         case 4: i965_write_fence_reg(dev, reg, obj); break;
2927         case 3: i915_write_fence_reg(dev, reg, obj); break;
2928         case 2: i830_write_fence_reg(dev, reg, obj); break;
2929         default: break;
2930         }
2931 }
2932
2933 static inline int fence_number(struct drm_i915_private *dev_priv,
2934                                struct drm_i915_fence_reg *fence)
2935 {
2936         return fence - dev_priv->fence_regs;
2937 }
2938
2939 static void i915_gem_write_fence__ipi(void *data)
2940 {
2941         wbinvd();
2942 }
2943
2944 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2945                                          struct drm_i915_fence_reg *fence,
2946                                          bool enable)
2947 {
2948         struct drm_device *dev = obj->base.dev;
2949         struct drm_i915_private *dev_priv = dev->dev_private;
2950         int fence_reg = fence_number(dev_priv, fence);
2951
2952         /* In order to fully serialize access to the fenced region and
2953          * the update to the fence register we need to take extreme
2954          * measures on SNB+. In theory, the write to the fence register
2955          * flushes all memory transactions before, and coupled with the
2956          * mb() placed around the register write we serialise all memory
2957          * operations with respect to the changes in the tiler. Yet, on
2958          * SNB+ we need to take a step further and emit an explicit wbinvd()
2959          * on each processor in order to manually flush all memory
2960          * transactions before updating the fence register.
2961          */
2962         if (HAS_LLC(obj->base.dev))
2963                 on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
2964         i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);
2965
2966         if (enable) {
2967                 obj->fence_reg = fence_reg;
2968                 fence->obj = obj;
2969                 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2970         } else {
2971                 obj->fence_reg = I915_FENCE_REG_NONE;
2972                 fence->obj = NULL;
2973                 list_del_init(&fence->lru_list);
2974         }
2975 }
2976
2977 static int
2978 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
2979 {
2980         if (obj->last_fenced_seqno) {
2981                 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2982                 if (ret)
2983                         return ret;
2984
2985                 obj->last_fenced_seqno = 0;
2986         }
2987
2988         /* Ensure that all CPU reads are completed before installing a fence
2989          * and all writes before removing the fence.
2990          */
2991         if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2992                 mb();
2993
2994         obj->fenced_gpu_access = false;
2995         return 0;
2996 }
2997
2998 int
2999 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3000 {
3001         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3002         int ret;
3003
3004         ret = i915_gem_object_flush_fence(obj);
3005         if (ret)
3006                 return ret;
3007
3008         if (obj->fence_reg == I915_FENCE_REG_NONE)
3009                 return 0;
3010
3011         i915_gem_object_update_fence(obj,
3012                                      &dev_priv->fence_regs[obj->fence_reg],
3013                                      false);
3014         i915_gem_object_fence_lost(obj);
3015
3016         return 0;
3017 }
3018
3019 static struct drm_i915_fence_reg *
3020 i915_find_fence_reg(struct drm_device *dev)
3021 {
3022         struct drm_i915_private *dev_priv = dev->dev_private;
3023         struct drm_i915_fence_reg *reg, *avail;
3024         int i;
3025
3026         /* First try to find a free reg */
3027         avail = NULL;
3028         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3029                 reg = &dev_priv->fence_regs[i];
3030                 if (!reg->obj)
3031                         return reg;
3032
3033                 if (!reg->pin_count)
3034                         avail = reg;
3035         }
3036
3037         if (avail == NULL)
3038                 return NULL;
3039
3040         /* None available, try to steal one or wait for a user to finish */
3041         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3042                 if (reg->pin_count)
3043                         continue;
3044
3045                 return reg;
3046         }
3047
3048         return NULL;
3049 }
3050
3051 /**
3052  * i915_gem_object_get_fence - set up fencing for an object
3053  * @obj: object to map through a fence reg
3054  *
3055  * When mapping objects through the GTT, userspace wants to be able to write
3056  * to them without having to worry about swizzling if the object is tiled.
3057  * This function walks the fence regs looking for a free one for @obj,
3058  * stealing one if it can't find any.
3059  *
3060  * It then sets up the reg based on the object's properties: address, pitch
3061  * and tiling format.
3062  *
3063  * For an untiled surface, this removes any existing fence.
3064  */
3065 int
3066 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3067 {
3068         struct drm_device *dev = obj->base.dev;
3069         struct drm_i915_private *dev_priv = dev->dev_private;
3070         bool enable = obj->tiling_mode != I915_TILING_NONE;
3071         struct drm_i915_fence_reg *reg;
3072         int ret;
3073
3074         /* Have we updated the tiling parameters on the object, and so
3075          * need to serialise the write to the associated fence register?
3076          */
3077         if (obj->fence_dirty) {
3078                 ret = i915_gem_object_flush_fence(obj);
3079                 if (ret)
3080                         return ret;
3081         }
3082
3083         /* Just update our place in the LRU if our fence is getting reused. */
3084         if (obj->fence_reg != I915_FENCE_REG_NONE) {
3085                 reg = &dev_priv->fence_regs[obj->fence_reg];
3086                 if (!obj->fence_dirty) {
3087                         list_move_tail(&reg->lru_list,
3088                                        &dev_priv->mm.fence_list);
3089                         return 0;
3090                 }
3091         } else if (enable) {
3092                 reg = i915_find_fence_reg(dev);
3093                 if (reg == NULL)
3094                         return -EDEADLK;
3095
3096                 if (reg->obj) {
3097                         struct drm_i915_gem_object *old = reg->obj;
3098
3099                         ret = i915_gem_object_flush_fence(old);
3100                         if (ret)
3101                                 return ret;
3102
3103                         i915_gem_object_fence_lost(old);
3104                 }
3105         } else
3106                 return 0;
3107
3108         i915_gem_object_update_fence(obj, reg, enable);
3109         obj->fence_dirty = false;
3110
3111         return 0;
3112 }
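/*
 * Illustrative caller sketch (not part of the driver; assumes the caller
 * already holds the DRM lock and "obj" refers to a tiled object): pin the
 * object into the mappable aperture, then reserve a fence register for it.
 * A return value of -EDEADLK from i915_gem_object_get_fence() means every
 * fence register is currently pinned.
 *
 *	ret = i915_gem_object_pin(obj, 0, true, false);
 *	if (ret == 0) {
 *		ret = i915_gem_object_get_fence(obj);
 *		if (ret)
 *			i915_gem_object_unpin(obj);
 *	}
 */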
3113
3114 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3115                                      struct drm_mm_node *gtt_space,
3116                                      unsigned long cache_level)
3117 {
3118         struct drm_mm_node *other;
3119
3120         /* On non-LLC machines we have to be careful when putting differing
3121          * types of snoopable memory together to avoid the prefetcher
3122          * crossing memory domains and dying.
3123          */
3124         if (HAS_LLC(dev))
3125                 return true;
3126
3127         if (gtt_space == NULL)
3128                 return true;
3129
3130         if (list_empty(&gtt_space->node_list))
3131                 return true;
3132
3133         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3134         if (other->allocated && !other->hole_follows && other->color != cache_level)
3135                 return false;
3136
3137         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3138         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3139                 return false;
3140
3141         return true;
3142 }
3143
3144 static void i915_gem_verify_gtt(struct drm_device *dev)
3145 {
3146 #if WATCH_GTT
3147         struct drm_i915_private *dev_priv = dev->dev_private;
3148         struct drm_i915_gem_object *obj;
3149         int err = 0;
3150
3151         list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
3152                 if (obj->gtt_space == NULL) {
3153                         DRM_ERROR("object found on GTT list with no space reserved\n");
3154                         err++;
3155                         continue;
3156                 }
3157
3158                 if (obj->cache_level != obj->gtt_space->color) {
3159                         DRM_ERROR("object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
3160                                obj->gtt_space->start,
3161                                obj->gtt_space->start + obj->gtt_space->size,
3162                                obj->cache_level,
3163                                obj->gtt_space->color);
3164                         err++;
3165                         continue;
3166                 }
3167
3168                 if (!i915_gem_valid_gtt_space(dev,
3169                                               obj->gtt_space,
3170                                               obj->cache_level)) {
3171                         DRM_ERROR("invalid GTT space found at [%08lx, %08lx] - color=%x\n",
3172                                obj->gtt_space->start,
3173                                obj->gtt_space->start + obj->gtt_space->size,
3174                                obj->cache_level);
3175                         err++;
3176                         continue;
3177                 }
3178         }
3179
3180         WARN_ON(err);
3181 #endif
3182 }
3183
3184 /**
3185  * Finds free space in the GTT aperture and binds the object there.
3186  */
3187 static int
3188 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
3189                             unsigned alignment,
3190                             bool map_and_fenceable,
3191                             bool nonblocking)
3192 {
3193         struct drm_device *dev = obj->base.dev;
3194         drm_i915_private_t *dev_priv = dev->dev_private;
3195         struct drm_mm_node *node;
3196         u32 size, fence_size, fence_alignment, unfenced_alignment;
3197         bool mappable, fenceable;
3198         int ret;
3199
3200         if (obj->madv != I915_MADV_WILLNEED) {
3201                 DRM_ERROR("Attempting to bind a purgeable object\n");
3202                 return -EINVAL;
3203         }
3204
3205         fence_size = i915_gem_get_gtt_size(dev,
3206                                            obj->base.size,
3207                                            obj->tiling_mode);
3208         fence_alignment = i915_gem_get_gtt_alignment(dev,
3209                                                      obj->base.size,
3210                                                      obj->tiling_mode);
3211         unfenced_alignment =
3212                 i915_gem_get_unfenced_gtt_alignment(dev,
3213                                                     obj->base.size,
3214                                                     obj->tiling_mode);
3215
3216         if (alignment == 0)
3217                 alignment = map_and_fenceable ? fence_alignment :
3218                                                 unfenced_alignment;
3219         if (map_and_fenceable && alignment & (fence_alignment - 1)) {
3220                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
3221                 return -EINVAL;
3222         }
3223
3224         size = map_and_fenceable ? fence_size : obj->base.size;
3225
3226         /* If the object is bigger than the entire aperture, reject it early
3227          * before evicting everything in a vain attempt to find space.
3228          */
3229         if (obj->base.size >
3230             (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
3231                 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
3232                 return -E2BIG;
3233         }
3234
3235         ret = i915_gem_object_get_pages(obj);
3236         if (ret)
3237                 return ret;
3238
3239         i915_gem_object_pin_pages(obj);
3240
3241         node = malloc(sizeof(*node), DRM_MEM_MM, M_NOWAIT | M_ZERO);
3242         if (node == NULL) {
3243                 i915_gem_object_unpin_pages(obj);
3244                 return -ENOMEM;
3245         }
3246
3247  search_free:
3248         if (map_and_fenceable)
3249                 ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
3250                                                           size, alignment, obj->cache_level,
3251                                                           0, dev_priv->mm.gtt_mappable_end);
3252         else
3253                 ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
3254                                                  size, alignment, obj->cache_level);
3255         if (ret) {
3256                 ret = i915_gem_evict_something(dev, size, alignment,
3257                                                obj->cache_level,
3258                                                map_and_fenceable,
3259                                                nonblocking);
3260                 if (ret == 0)
3261                         goto search_free;
3262
3263                 i915_gem_object_unpin_pages(obj);
3264                 free(node, DRM_MEM_MM);
3265                 return ret;
3266         }
3267         if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
3268                 i915_gem_object_unpin_pages(obj);
3269                 drm_mm_put_block(node);
3270                 return -EINVAL;
3271         }
3272
3273         ret = i915_gem_gtt_prepare_object(obj);
3274         if (ret) {
3275                 i915_gem_object_unpin_pages(obj);
3276                 drm_mm_put_block(node);
3277                 return ret;
3278         }
3279
3280         list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
3281         list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3282
3283         obj->gtt_space = node;
3284         obj->gtt_offset = node->start;
3285
3286         fenceable =
3287                 node->size == fence_size &&
3288                 (node->start & (fence_alignment - 1)) == 0;
3289
3290         mappable =
3291                 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
3292
3293         obj->map_and_fenceable = mappable && fenceable;
3294
3295         i915_gem_object_unpin_pages(obj);
3296         CTR4(KTR_DRM, "object_bind %p %x %x %d", obj, obj->gtt_offset,
3297             obj->base.size, map_and_fenceable);
3298         i915_gem_verify_gtt(dev);
3299         return 0;
3300 }
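/*
 * Worked example of the fenceable/mappable classification above, with purely
 * illustrative numbers: if fence_size and fence_alignment are both 1MiB and
 * the allocated node is 1MiB long starting at GTT offset 0x300000, the object
 * is "fenceable" (the size matches and 0x300000 is 1MiB aligned).  If, in
 * addition, gtt_mappable_end is 256MiB, then 0x300000 + 1MiB <= 256MiB and the
 * object is also "mappable", so obj->map_and_fenceable ends up true.
 */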
3301
3302 void
3303 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
3304 {
3305         /* If we don't have a page list set up, then we're not pinned
3306          * to GPU, and we can ignore the cache flush because it'll happen
3307          * again at bind time.
3308          */
3309         if (obj->pages == NULL)
3310                 return;
3311
3312         /* If the GPU is snooping the contents of the CPU cache,
3313          * we do not need to manually clear the CPU cache lines.  However,
3314          * the caches are only snooped when the render cache is
3315          * flushed/invalidated.  As we always have to emit invalidations
3316          * and flushes when moving into and out of the RENDER domain, correct
3317          * snooping behaviour occurs naturally as the result of our domain
3318          * tracking.
3319          */
3320         if (obj->cache_level != I915_CACHE_NONE)
3321                 return;
3322
3323         CTR1(KTR_DRM, "object_clflush %p", obj);
3324
3325         drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
3326 }
3327
3328 /** Flushes the GTT write domain for the object if it's dirty. */
3329 static void
3330 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3331 {
3332         uint32_t old_write_domain;
3333
3334         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3335                 return;
3336
3337         /* No actual flushing is required for the GTT write domain.  Writes
3338          * to it immediately go to main memory as far as we know, so there's
3339          * no chipset flush.  It also doesn't land in render cache.
3340          *
3341          * However, we do have to enforce the order so that all writes through
3342          * the GTT land before any writes to the device, such as updates to
3343          * the GATT itself.
3344          */
3345         wmb();
3346
3347         old_write_domain = obj->base.write_domain;
3348         obj->base.write_domain = 0;
3349
3350         CTR3(KTR_DRM, "object_change_domain flush gtt_write %p %x %x", obj,
3351             obj->base.read_domains, old_write_domain);
3352 }
3353
3354 /** Flushes the CPU write domain for the object if it's dirty. */
3355 static void
3356 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3357 {
3358         uint32_t old_write_domain;
3359
3360         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3361                 return;
3362
3363         i915_gem_clflush_object(obj);
3364         i915_gem_chipset_flush(obj->base.dev);
3365         old_write_domain = obj->base.write_domain;
3366         obj->base.write_domain = 0;
3367
3368         CTR3(KTR_DRM, "object_change_domain flush_cpu_write %p %x %x", obj,
3369             obj->base.read_domains, old_write_domain);
3370 }
3371
3372 /**
3373  * Moves a single object to the GTT read, and possibly write domain.
3374  *
3375  * This function returns when the move is complete, including waiting on
3376  * flushes to occur.
3377  */
3378 int
3379 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3380 {
3381         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
3382         uint32_t old_write_domain, old_read_domains;
3383         int ret;
3384
3385         /* Not valid to be called on unbound objects. */
3386         if (obj->gtt_space == NULL)
3387                 return -EINVAL;
3388
3389         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3390                 return 0;
3391
3392         ret = i915_gem_object_wait_rendering(obj, !write);
3393         if (ret)
3394                 return ret;
3395
3396         i915_gem_object_flush_cpu_write_domain(obj);
3397
3398         old_write_domain = obj->base.write_domain;
3399         old_read_domains = obj->base.read_domains;
3400
3401         /* It should now be out of any other write domains, and we can update
3402          * the domain values for our changes.
3403          */
3404         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3405         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3406         if (write) {
3407                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3408                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3409                 obj->dirty = 1;
3410         }
3411
3412         CTR3(KTR_DRM, "object_change_domain set_to_gtt %p %x %x", obj,
3413             old_read_domains, old_write_domain);
3414
3415         /* And bump the LRU for this access */
3416         if (i915_gem_object_is_inactive(obj))
3417                 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3418
3419         return 0;
3420 }
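/*
 * Minimal usage sketch (illustrative; assumes the object is bound and the
 * caller holds the DRM lock): before writing through a GTT mapping, move the
 * object into the GTT write domain so that any CPU-domain dirt is flushed and
 * the object is marked dirty.
 *
 *	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *	if (ret)
 *		return ret;
 *	... write through the aperture mapping at obj->gtt_offset ...
 */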
3421
3422 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3423                                     enum i915_cache_level cache_level)
3424 {
3425         struct drm_device *dev = obj->base.dev;
3426         drm_i915_private_t *dev_priv = dev->dev_private;
3427         int ret;
3428
3429         if (obj->cache_level == cache_level)
3430                 return 0;
3431
3432         if (obj->pin_count) {
3433                 DRM_DEBUG("can not change the cache level of pinned objects\n");
3434                 return -EBUSY;
3435         }
3436
3437         if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
3438                 ret = i915_gem_object_unbind(obj);
3439                 if (ret)
3440                         return ret;
3441         }
3442
3443         if (obj->gtt_space) {
3444                 ret = i915_gem_object_finish_gpu(obj);
3445                 if (ret)
3446                         return ret;
3447
3448                 i915_gem_object_finish_gtt(obj);
3449
3450                 /* Before SandyBridge, you could not use tiling or fence
3451                  * registers with snooped memory, so relinquish any fences
3452                  * currently pointing to our region in the aperture.
3453                  */
3454                 if (INTEL_INFO(dev)->gen < 6) {
3455                         ret = i915_gem_object_put_fence(obj);
3456                         if (ret)
3457                                 return ret;
3458                 }
3459
3460                 if (obj->has_global_gtt_mapping)
3461                         i915_gem_gtt_bind_object(obj, cache_level);
3462                 if (obj->has_aliasing_ppgtt_mapping)
3463                         i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3464                                                obj, cache_level);
3465
3466                 obj->gtt_space->color = cache_level;
3467         }
3468
3469         if (cache_level == I915_CACHE_NONE) {
3470                 u32 old_read_domains, old_write_domain;
3471
3472                 /* If we're coming from LLC cached, then we haven't
3473                  * actually been tracking whether the data is in the
3474                  * CPU cache or not, since we only allow one bit set
3475                  * in obj->write_domain and have been skipping the clflushes.
3476                  * Just set it to the CPU cache for now.
3477                  */
3478                 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3479                 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
3480
3481                 old_read_domains = obj->base.read_domains;
3482                 old_write_domain = obj->base.write_domain;
3483
3484                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3485                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3486
3487                 CTR3(KTR_DRM, "object_change_domain set_cache_level %p %x %x",
3488                     obj, old_read_domains, old_write_domain);
3489         }
3490
3491         obj->cache_level = cache_level;
3492         i915_gem_verify_gtt(dev);
3493         return 0;
3494 }
3495
3496 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3497                                struct drm_file *file)
3498 {
3499         struct drm_i915_gem_caching *args = data;
3500         struct drm_i915_gem_object *obj;
3501         int ret;
3502
3503         ret = i915_mutex_lock_interruptible(dev);
3504         if (ret)
3505                 return ret;
3506
3507         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3508         if (&obj->base == NULL) {
3509                 ret = -ENOENT;
3510                 goto unlock;
3511         }
3512
3513         args->caching = obj->cache_level != I915_CACHE_NONE;
3514
3515         drm_gem_object_unreference(&obj->base);
3516 unlock:
3517         DRM_UNLOCK(dev);
3518         return ret;
3519 }
3520
3521 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3522                                struct drm_file *file)
3523 {
3524         struct drm_i915_gem_caching *args = data;
3525         struct drm_i915_gem_object *obj;
3526         enum i915_cache_level level;
3527         int ret;
3528
3529         switch (args->caching) {
3530         case I915_CACHING_NONE:
3531                 level = I915_CACHE_NONE;
3532                 break;
3533         case I915_CACHING_CACHED:
3534                 level = I915_CACHE_LLC;
3535                 break;
3536         default:
3537                 return -EINVAL;
3538         }
3539
3540         ret = i915_mutex_lock_interruptible(dev);
3541         if (ret)
3542                 return ret;
3543
3544         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3545         if (&obj->base == NULL) {
3546                 ret = -ENOENT;
3547                 goto unlock;
3548         }
3549
3550         ret = i915_gem_object_set_cache_level(obj, level);
3551
3552         drm_gem_object_unreference(&obj->base);
3553 unlock:
3554         DRM_UNLOCK(dev);
3555         return ret;
3556 }
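/*
 * Illustrative userspace sketch of the caching interface above, assuming
 * libdrm's drmIoctl() and the DRM_IOCTL_I915_GEM_SET_CACHING wrapper from
 * i915_drm.h (a minimal example, not a complete program):
 *
 *	struct drm_i915_gem_caching arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.handle = handle;
 *	arg.caching = I915_CACHING_CACHED;
 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 */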
3557
3558 static bool is_pin_display(struct drm_i915_gem_object *obj)
3559 {
3560         /* There are 3 sources that pin objects:
3561          *   1. The display engine (scanouts, sprites, cursors);
3562          *   2. Reservations for execbuffer;
3563          *   3. The user.
3564          *
3565          * We can ignore reservations as we hold the struct_mutex and
3566          * are only called outside of the reservation path.  The user
3567          * can only increment pin_count once, and so if after
3568          * subtracting the potential reference by the user, any pin_count
3569          * remains, it must be due to another use by the display engine.
3570          */
3571         return obj->pin_count - !!obj->user_pin_count;
3572 }
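/*
 * Worked example of the accounting above: with pin_count == 2 and
 * user_pin_count == 1 the expression evaluates to 2 - 1 = 1, i.e. at least
 * one pin the user cannot account for, so the object is still considered a
 * display pin.  With pin_count == 1 and user_pin_count == 1 it evaluates to
 * 0 and the display is not holding the object.
 */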
3573
3574 /*
3575  * Prepare buffer for display plane (scanout, cursors, etc).
3576  * Can be called from an uninterruptible phase (modesetting) and allows
3577  * any flushes to be pipelined (for pageflips).
3578  */
3579 int
3580 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3581                                      u32 alignment,
3582                                      struct intel_ring_buffer *pipelined)
3583 {
3584         u32 old_read_domains, old_write_domain;
3585         int ret;
3586
3587         if (pipelined != obj->ring) {
3588                 ret = i915_gem_object_sync(obj, pipelined);
3589                 if (ret)
3590                         return ret;
3591         }
3592
3593         /* Mark the pin_display early so that we account for the
3594          * display coherency whilst setting up the cache domains.
3595          */
3596         obj->pin_display = true;
3597
3598         /* The display engine is not coherent with the LLC cache on gen6.  As
3599          * a result, we make sure that the pinning that is about to occur is
3600          * done with uncached PTEs. This is lowest common denominator for all
3601          * done with uncached PTEs. This is the lowest common denominator for all
3602          *
3603          * However for gen6+, we could do better by using the GFDT bit instead
3604          * of uncaching, which would allow us to flush all the LLC-cached data
3605          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3606          */
3607         ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
3608         if (ret)
3609                 goto err_unpin_display;
3610
3611         /* As the user may map the buffer once pinned in the display plane
3612          * (e.g. libkms for the bootup splash), we have to ensure that we
3613          * always use map_and_fenceable for all scanout buffers.
3614          */
3615         ret = i915_gem_object_pin(obj, alignment, true, false);
3616         if (ret)
3617                 goto err_unpin_display;
3618
3619         i915_gem_object_flush_cpu_write_domain(obj);
3620
3621         old_write_domain = obj->base.write_domain;
3622         old_read_domains = obj->base.read_domains;
3623
3624         /* It should now be out of any other write domains, and we can update
3625          * the domain values for our changes.
3626          */
3627         obj->base.write_domain = 0;
3628         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3629
3630         CTR3(KTR_DRM, "object_change_domain pin_to_display_plane %p %x %x",
3631             obj, old_read_domains, old_write_domain);
3632
3633         return 0;
3634
3635 err_unpin_display:
3636         obj->pin_display = is_pin_display(obj);
3637         return ret;
3638 }
3639
3640 void
3641 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3642 {
3643         i915_gem_object_unpin(obj);
3644         obj->pin_display = is_pin_display(obj);
3645 }
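/*
 * Minimal sketch of how a modesetting caller might pair the two helpers
 * above (illustrative only): pin the framebuffer object for scanout when the
 * plane is set up, and release the display pin when it is torn down.
 *
 *	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
 *	if (ret)
 *		return ret;
 *	... program the plane registers with obj->gtt_offset ...
 *	i915_gem_object_unpin_from_display_plane(obj);
 */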
3646
3647 int
3648 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3649 {
3650         int ret;
3651
3652         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3653                 return 0;
3654
3655         ret = i915_gem_object_wait_rendering(obj, false);
3656         if (ret)
3657                 return ret;
3658
3659         /* Ensure that we invalidate the GPU's caches and TLBs. */
3660         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3661         return 0;
3662 }
3663
3664 /**
3665  * Moves a single object to the CPU read, and possibly write domain.
3666  *
3667  * This function returns when the move is complete, including waiting on
3668  * flushes to occur.
3669  */
3670 int
3671 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3672 {
3673         uint32_t old_write_domain, old_read_domains;
3674         int ret;
3675
3676         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3677                 return 0;
3678
3679         ret = i915_gem_object_wait_rendering(obj, !write);
3680         if (ret)
3681                 return ret;
3682
3683         i915_gem_object_flush_gtt_write_domain(obj);
3684
3685         old_write_domain = obj->base.write_domain;
3686         old_read_domains = obj->base.read_domains;
3687
3688         /* Flush the CPU cache if it's still invalid. */
3689         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3690                 i915_gem_clflush_object(obj);
3691
3692                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3693         }
3694
3695         /* It should now be out of any other write domains, and we can update
3696          * the domain values for our changes.
3697          */
3698         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3699
3700         /* If we're writing through the CPU, then the GPU read domains will
3701          * need to be invalidated at next use.
3702          */
3703         if (write) {
3704                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3705                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3706         }
3707
3708         CTR3(KTR_DRM, "object_change_domain set_to_cpu %p %x %x", obj,
3709             old_read_domains, old_write_domain);
3710
3711         return 0;
3712 }
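/*
 * Minimal usage sketch, mirroring the GTT case above (illustrative): before
 * the CPU reads an object's backing pages, move the object into the CPU read
 * domain so the required clflush has been performed; pass write == true if
 * the CPU will also modify the pages.
 *
 *	ret = i915_gem_object_set_to_cpu_domain(obj, false);
 *	if (ret)
 *		return ret;
 *	... CPU reads of the object's pages now see coherent data ...
 */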
3713
3714 /* Throttle our rendering by waiting until the ring has completed our requests
3715  * emitted over 20 msec ago.
3716  *
3717  * Note that if we were to use the current jiffies each time around the loop,
3718  * we wouldn't escape the function with any frames outstanding if the time to
3719  * render a frame was over 20ms.
3720  *
3721  * This should get us reasonable parallelism between CPU and GPU but also
3722  * relatively low latency when blocking on a particular request to finish.
3723  */
3724 static int
3725 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3726 {
3727         struct drm_i915_private *dev_priv = dev->dev_private;
3728         struct drm_i915_file_private *file_priv = file->driver_priv;
3729         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3730         struct drm_i915_gem_request *request;
3731         struct intel_ring_buffer *ring = NULL;
3732         u32 seqno = 0;
3733         int ret;
3734
3735         if (atomic_read(&dev_priv->mm.wedged))
3736                 return -EIO;
3737
3738         mtx_lock(&file_priv->mm.lock);
3739         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3740                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3741                         break;
3742
3743                 ring = request->ring;
3744                 seqno = request->seqno;
3745         }
3746         mtx_unlock(&file_priv->mm.lock);
3747
3748         if (seqno == 0)
3749                 return 0;
3750
3751         ret = __wait_seqno(ring, seqno, true, NULL);
3752         if (ret == 0)
3753                 taskqueue_enqueue_timeout(dev_priv->wq,
3754                     &dev_priv->mm.retire_work, 0);
3755
3756         return ret;
3757 }
3758
3759 int
3760 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3761                     uint32_t alignment,
3762                     bool map_and_fenceable,
3763                     bool nonblocking)
3764 {
3765         int ret;
3766
3767         if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3768                 return -EBUSY;
3769
3770         if (obj->gtt_space != NULL) {
3771                 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3772                     (map_and_fenceable && !obj->map_and_fenceable)) {
3773                         WARN(obj->pin_count,
3774                              "bo is already pinned with incorrect alignment:"
3775                              " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3776                              " obj->map_and_fenceable=%d\n",
3777                              obj->gtt_offset, alignment,
3778                              map_and_fenceable,
3779                              obj->map_and_fenceable);
3780                         ret = i915_gem_object_unbind(obj);
3781                         if (ret)
3782                                 return ret;
3783                 }
3784         }
3785
3786         if (obj->gtt_space == NULL) {
3787                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3788
3789                 ret = i915_gem_object_bind_to_gtt(obj, alignment,
3790                                                   map_and_fenceable,
3791                                                   nonblocking);
3792                 if (ret)
3793                         return ret;
3794
3795                 if (!dev_priv->mm.aliasing_ppgtt)
3796                         i915_gem_gtt_bind_object(obj, obj->cache_level);
3797         }
3798
3799         if (!obj->has_global_gtt_mapping && map_and_fenceable)
3800                 i915_gem_gtt_bind_object(obj, obj->cache_level);
3801
3802         obj->pin_count++;
3803         obj->pin_mappable |= map_and_fenceable;
3804
3805         return 0;
3806 }
3807
3808 void
3809 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3810 {
3811         BUG_ON(obj->pin_count == 0);
3812         BUG_ON(obj->gtt_space == NULL);
3813
3814         if (--obj->pin_count == 0)
3815                 obj->pin_mappable = false;
3816 }
3817
3818 int
3819 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3820                    struct drm_file *file)
3821 {
3822         struct drm_i915_gem_pin *args = data;
3823         struct drm_i915_gem_object *obj;
3824         int ret;
3825
3826         ret = i915_mutex_lock_interruptible(dev);
3827         if (ret)
3828                 return ret;
3829
3830         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3831         if (&obj->base == NULL) {
3832                 ret = -ENOENT;
3833                 goto unlock;
3834         }
3835
3836         if (obj->madv != I915_MADV_WILLNEED) {
3837                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3838                 ret = -EINVAL;
3839                 goto out;
3840         }
3841
3842         if (obj->pin_filp != NULL && obj->pin_filp != file) {
3843                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3844                           args->handle);
3845                 ret = -EINVAL;
3846                 goto out;
3847         }
3848
3849         if (obj->user_pin_count == 0) {
3850                 ret = i915_gem_object_pin(obj, args->alignment, true, false);
3851                 if (ret)
3852                         goto out;
3853         }
3854
3855         obj->user_pin_count++;
3856         obj->pin_filp = file;
3857
3858         /* XXX - flush the CPU caches for pinned objects
3859          * as the X server doesn't manage domains yet
3860          */
3861         i915_gem_object_flush_cpu_write_domain(obj);
3862         args->offset = obj->gtt_offset;
3863 out:
3864         drm_gem_object_unreference(&obj->base);
3865 unlock:
3866         DRM_UNLOCK(dev);
3867         return ret;
3868 }
3869
3870 int
3871 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3872                      struct drm_file *file)
3873 {
3874         struct drm_i915_gem_pin *args = data;
3875         struct drm_i915_gem_object *obj;
3876         int ret;
3877
3878         ret = i915_mutex_lock_interruptible(dev);
3879         if (ret)
3880                 return ret;
3881
3882         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3883         if (&obj->base == NULL) {
3884                 ret = -ENOENT;
3885                 goto unlock;
3886         }
3887
3888         if (obj->pin_filp != file) {
3889                 DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
3890                           args->handle);
3891                 ret = -EINVAL;
3892                 goto out;
3893         }
3894         obj->user_pin_count--;
3895         if (obj->user_pin_count == 0) {
3896                 obj->pin_filp = NULL;
3897                 i915_gem_object_unpin(obj);
3898         }
3899
3900 out:
3901         drm_gem_object_unreference(&obj->base);
3902 unlock:
3903         DRM_UNLOCK(dev);
3904         return ret;
3905 }
3906
3907 int
3908 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3909                     struct drm_file *file)
3910 {
3911         struct drm_i915_gem_busy *args = data;
3912         struct drm_i915_gem_object *obj;
3913         int ret;
3914
3915         ret = i915_mutex_lock_interruptible(dev);
3916         if (ret)
3917                 return ret;
3918
3919         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3920         if (&obj->base == NULL) {
3921                 ret = -ENOENT;
3922                 goto unlock;
3923         }
3924
3925         /* Count all active objects as busy, even if they are currently not used
3926          * by the GPU. Users of this interface expect objects to eventually
3927          * become non-busy without any further actions, therefore emit any
3928          * necessary flushes here.
3929          */
3930         ret = i915_gem_object_flush_active(obj);
3931
3932         args->busy = obj->active;
3933         if (obj->ring) {
3934                 BUILD_BUG_ON(I915_NUM_RINGS > 16);
3935                 args->busy |= intel_ring_flag(obj->ring) << 16;
3936         }
3937
3938         drm_gem_object_unreference(&obj->base);
3939 unlock:
3940         DRM_UNLOCK(dev);
3941         return ret;
3942 }
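/*
 * Sketch of how the value returned above might be decoded (illustrative;
 * "args" is the struct drm_i915_gem_busy handed to the ioctl): the low 16
 * bits mirror obj->active and the upper 16 bits carry intel_ring_flag() of
 * the ring last seen using the object.
 *
 *	int still_busy = (args.busy & 0xffff) != 0;
 *	unsigned int ring_mask = args.busy >> 16;
 */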
3943
3944 int
3945 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3946                         struct drm_file *file_priv)
3947 {
3948         return i915_gem_ring_throttle(dev, file_priv);
3949 }
3950
3951 int
3952 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3953                        struct drm_file *file_priv)
3954 {
3955         struct drm_i915_gem_madvise *args = data;
3956         struct drm_i915_gem_object *obj;
3957         int ret;
3958
3959         switch (args->madv) {
3960         case I915_MADV_DONTNEED:
3961         case I915_MADV_WILLNEED:
3962             break;
3963         default:
3964             return -EINVAL;
3965         }
3966
3967         ret = i915_mutex_lock_interruptible(dev);
3968         if (ret)
3969                 return ret;
3970
3971         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3972         if (&obj->base == NULL) {
3973                 ret = -ENOENT;
3974                 goto unlock;
3975         }
3976
3977         if (obj->pin_count) {
3978                 ret = -EINVAL;
3979                 goto out;
3980         }
3981
3982         if (obj->madv != __I915_MADV_PURGED)
3983                 obj->madv = args->madv;
3984
3985         /* if the object is no longer attached, discard its backing storage */
3986         if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
3987                 i915_gem_object_truncate(obj);
3988
3989         args->retained = obj->madv != __I915_MADV_PURGED;
3990
3991 out:
3992         drm_gem_object_unreference(&obj->base);
3993 unlock:
3994         DRM_UNLOCK(dev);
3995         return ret;
3996 }
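/*
 * Illustrative userspace use of the madvise interface (a sketch, assuming
 * the DRM_IOCTL_I915_GEM_MADVISE wrapper from i915_drm.h): a buffer cache can
 * mark idle buffers I915_MADV_DONTNEED and mark them I915_MADV_WILLNEED again
 * before reuse, checking "retained" to see whether the backing pages survived.
 *
 *	struct drm_i915_gem_madvise arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.handle = handle;
 *	arg.madv = I915_MADV_WILLNEED;
 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
 *	if (ret == 0 && !arg.retained)
 *		... contents were purged and must be re-uploaded ...
 */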
3997
3998 void i915_gem_object_init(struct drm_i915_gem_object *obj,
3999                           const struct drm_i915_gem_object_ops *ops)
4000 {
4001         INIT_LIST_HEAD(&obj->mm_list);
4002         INIT_LIST_HEAD(&obj->gtt_list);
4003         INIT_LIST_HEAD(&obj->ring_list);
4004         INIT_LIST_HEAD(&obj->exec_list);
4005
4006         obj->ops = ops;
4007
4008         obj->fence_reg = I915_FENCE_REG_NONE;
4009         obj->madv = I915_MADV_WILLNEED;
4010         /* Avoid an unnecessary call to unbind on the first bind. */
4011         obj->map_and_fenceable = true;
4012
4013         i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4014 }
4015
4016 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4017         .get_pages = i915_gem_object_get_pages_gtt,
4018         .put_pages = i915_gem_object_put_pages_gtt,
4019 };
4020
4021 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4022                                                   size_t size)
4023 {
4024         struct drm_i915_gem_object *obj;
4025
4026         obj = malloc(sizeof(*obj), DRM_I915_GEM, M_WAITOK | M_ZERO);
4027         if (obj == NULL)
4028                 return NULL;
4029
4030         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4031                 free(obj, DRM_I915_GEM);
4032                 return NULL;
4033         }
4034
4035 #ifdef FREEBSD_WIP
4036         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4037         if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4038                 /* 965gm cannot relocate objects above 4GiB. */
4039                 mask &= ~__GFP_HIGHMEM;
4040                 mask |= __GFP_DMA32;
4041         }
4042
4043         mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
4044         mapping_set_gfp_mask(mapping, mask);
4045 #endif /* FREEBSD_WIP */
4046
4047         i915_gem_object_init(obj, &i915_gem_object_ops);
4048
4049         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4050         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4051
4052         if (HAS_LLC(dev)) {
4053                 /* On some devices, we can have the GPU use the LLC (the CPU
4054                  * cache) for about a 10% performance improvement
4055                  * compared to uncached.  Graphics requests other than
4056                  * display scanout are coherent with the CPU in
4057                  * accessing this cache.  This means in this mode we
4058                  * don't need to clflush on the CPU side, and on the
4059                  * GPU side we only need to flush internal caches to
4060                  * get data visible to the CPU.
4061                  *
4062                  * However, we maintain the display planes as UC, and so
4063                  * need to rebind when first used as such.
4064                  */
4065                 obj->cache_level = I915_CACHE_LLC;
4066         } else
4067                 obj->cache_level = I915_CACHE_NONE;
4068
4069         return obj;
4070 }
4071
4072 int i915_gem_init_object(struct drm_gem_object *obj)
4073 {
4074         printf("i915_gem_init_object called\n");
4075
4076         return 0;
4077 }
4078
4079 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4080 {
4081         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4082         struct drm_device *dev = obj->base.dev;
4083         drm_i915_private_t *dev_priv = dev->dev_private;
4084
4085         CTR1(KTR_DRM, "object_destroy_tail %p", obj);
4086
4087         if (obj->phys_obj)
4088                 i915_gem_detach_phys_object(dev, obj);
4089
4090         obj->pin_count = 0;
4091         if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
4092                 bool was_interruptible;
4093
4094                 was_interruptible = dev_priv->mm.interruptible;
4095                 dev_priv->mm.interruptible = false;
4096
4097                 WARN_ON(i915_gem_object_unbind(obj));
4098
4099                 dev_priv->mm.interruptible = was_interruptible;
4100         }
4101
4102         obj->pages_pin_count = 0;
4103         i915_gem_object_put_pages(obj);
4104         i915_gem_object_free_mmap_offset(obj);
4105
4106         BUG_ON(obj->pages);
4107
4108 #ifdef FREEBSD_WIP
4109         if (obj->base.import_attach)
4110                 drm_prime_gem_destroy(&obj->base, NULL);
4111 #endif /* FREEBSD_WIP */
4112
4113         drm_gem_object_release(&obj->base);
4114         i915_gem_info_remove_obj(dev_priv, obj->base.size);
4115
4116         free(obj->bit_17, DRM_I915_GEM);
4117         free(obj, DRM_I915_GEM);
4118 }
4119
4120 int
4121 i915_gem_idle(struct drm_device *dev)
4122 {
4123         drm_i915_private_t *dev_priv = dev->dev_private;
4124         int ret;
4125
4126         DRM_LOCK(dev);
4127
4128         if (dev_priv->mm.suspended) {
4129                 DRM_UNLOCK(dev);
4130                 return 0;
4131         }
4132
4133         ret = i915_gpu_idle(dev);
4134         if (ret) {
4135                 DRM_UNLOCK(dev);
4136                 return ret;
4137         }
4138         i915_gem_retire_requests(dev);
4139
4140         /* Under UMS, be paranoid and evict. */
4141         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4142                 i915_gem_evict_everything(dev);
4143
4144         i915_gem_reset_fences(dev);
4145
4146         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
4147          * We need to replace this with a semaphore, or something.
4148          * And not confound mm.suspended!
4149          */
4150         dev_priv->mm.suspended = 1;
4151         callout_stop(&dev_priv->hangcheck_timer);
4152
4153         i915_kernel_lost_context(dev);
4154         i915_gem_cleanup_ringbuffer(dev);
4155
4156         DRM_UNLOCK(dev);
4157
4158         /* Cancel the retire work handler, which should be idle now. */
4159         taskqueue_cancel_timeout(dev_priv->wq, &dev_priv->mm.retire_work, NULL);
4160
4161         return 0;
4162 }
4163
4164 void i915_gem_l3_remap(struct drm_device *dev)
4165 {
4166         drm_i915_private_t *dev_priv = dev->dev_private;
4167         u32 misccpctl;
4168         int i;
4169
4170         if (!HAS_L3_GPU_CACHE(dev))
4171                 return;
4172
4173         if (!dev_priv->l3_parity.remap_info)
4174                 return;
4175
4176         misccpctl = I915_READ(GEN7_MISCCPCTL);
4177         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
4178         POSTING_READ(GEN7_MISCCPCTL);
4179
4180         for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4181                 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
4182                 if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
4183                         DRM_DEBUG("0x%x was already programmed to %x\n",
4184                                   GEN7_L3LOG_BASE + i, remap);
4185                 if (remap && !dev_priv->l3_parity.remap_info[i/4])
4186                         DRM_DEBUG_DRIVER("Clearing remapped register\n");
4187                 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
4188         }
4189
4190         /* Make sure all the writes land before disabling DOP clock gating. */
4191         POSTING_READ(GEN7_L3LOG_BASE);
4192
4193         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
4194 }
4195
4196 void i915_gem_init_swizzling(struct drm_device *dev)
4197 {
4198         drm_i915_private_t *dev_priv = dev->dev_private;
4199
4200         if (INTEL_INFO(dev)->gen < 5 ||
4201             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4202                 return;
4203
4204         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4205                                  DISP_TILE_SURFACE_SWIZZLING);
4206
4207         if (IS_GEN5(dev))
4208                 return;
4209
4210         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4211         if (IS_GEN6(dev))
4212                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4213         else
4214                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4215 }
4216
4217 static bool
4218 intel_enable_blt(struct drm_device *dev)
4219 {
4220         if (!HAS_BLT(dev))
4221                 return false;
4222
4223         /* The blitter was dysfunctional on early prototypes */
4224         if (IS_GEN6(dev) && pci_get_revid(dev->dev) < 8) {
4225                 DRM_INFO("BLT not supported on this pre-production hardware;"
4226                          " graphics performance will be degraded.\n");
4227                 return false;
4228         }
4229
4230         return true;
4231 }
4232
4233 int
4234 i915_gem_init_hw(struct drm_device *dev)
4235 {
4236         drm_i915_private_t *dev_priv = dev->dev_private;
4237         int ret;
4238
4239 #ifdef FREEBSD_WIP
4240         if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4241                 return -EIO;
4242 #endif /* FREEBSD_WIP */
4243
4244         if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
4245                 I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
4246
4247         i915_gem_l3_remap(dev);
4248
4249         i915_gem_init_swizzling(dev);
4250
4251         ret = intel_init_render_ring_buffer(dev);
4252         if (ret)
4253                 return ret;
4254
4255         if (HAS_BSD(dev)) {
4256                 ret = intel_init_bsd_ring_buffer(dev);
4257                 if (ret)
4258                         goto cleanup_render_ring;
4259         }
4260
4261         if (intel_enable_blt(dev)) {
4262                 ret = intel_init_blt_ring_buffer(dev);
4263                 if (ret)
4264                         goto cleanup_bsd_ring;
4265         }
4266
4267         dev_priv->next_seqno = 1;
4268
4269         /*
4270          * XXX: There was some w/a described somewhere suggesting loading
4271          * contexts before PPGTT.
4272          */
4273         i915_gem_context_init(dev);
4274         i915_gem_init_ppgtt(dev);
4275
4276         return 0;
4277
4278 cleanup_bsd_ring:
4279         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4280 cleanup_render_ring:
4281         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4282         return ret;
4283 }
4284
4285 static bool
4286 intel_enable_ppgtt(struct drm_device *dev)
4287 {
4288         if (i915_enable_ppgtt >= 0)
4289                 return i915_enable_ppgtt;
4290
4291 #ifdef CONFIG_INTEL_IOMMU
4292         /* Disable ppgtt on SNB if VT-d is on. */
4293         if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
4294                 return false;
4295 #endif
4296
4297         return true;
4298 }
4299
4300 int i915_gem_init(struct drm_device *dev)
4301 {
4302         struct drm_i915_private *dev_priv = dev->dev_private;
4303         unsigned long gtt_size, mappable_size;
4304         int ret;
4305
4306         gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
4307         mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
4308
4309         DRM_LOCK(dev);
4310         if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
4311                 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
4312                  * aperture accordingly when using aliasing ppgtt. */
4313                 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
4314
4315                 i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
4316
4317                 ret = i915_gem_init_aliasing_ppgtt(dev);
4318                 if (ret) {
4319                         DRM_UNLOCK(dev);
4320                         return ret;
4321                 }
4322         } else {
4323                 /* Let GEM manage all of the aperture.
4324                  *
4325                  * However, leave one page at the end still bound to the scratch
4326                  * page.  There are a number of places where the hardware
4327                  * apparently prefetches past the end of the object, and we've
4328                  * seen multiple hangs with the GPU head pointer stuck in a
4329                  * batchbuffer bound at the last page of the aperture.  One page
4330                  * should be enough to keep any prefetching inside of the
4331                  * aperture.
4332                  */
4333                 i915_gem_init_global_gtt(dev, 0, mappable_size,
4334                                          gtt_size);
4335         }
4336
4337         ret = i915_gem_init_hw(dev);
4338         DRM_UNLOCK(dev);
4339         if (ret) {
4340                 i915_gem_cleanup_aliasing_ppgtt(dev);
4341                 return ret;
4342         }
4343
4344         /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4345         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4346                 dev_priv->dri1.allow_batchbuffer = 1;
4347         return 0;
4348 }
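/*
 * Worked example of the sizing above (illustrative numbers): with
 * gtt_total_entries == 512 * 1024 and 4KiB pages, gtt_size comes out at
 * 512K * 4KiB = 2GiB.  When aliasing PPGTT is enabled, its page-directory
 * entries are stolen from the global GTT, so the managed range is reduced by
 * I915_PPGTT_PD_ENTRIES * PAGE_SIZE (2MiB assuming the usual 512 entries)
 * before i915_gem_init_global_gtt() is called.
 */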
4349
4350 void
4351 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4352 {
4353         drm_i915_private_t *dev_priv = dev->dev_private;
4354         struct intel_ring_buffer *ring;
4355         int i;
4356
4357         for_each_ring(ring, dev_priv, i)
4358                 intel_cleanup_ring_buffer(ring);
4359 }
4360
4361 int
4362 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4363                        struct drm_file *file_priv)
4364 {
4365         drm_i915_private_t *dev_priv = dev->dev_private;
4366         int ret;
4367
4368         if (drm_core_check_feature(dev, DRIVER_MODESET))
4369                 return 0;
4370
4371         if (atomic_read(&dev_priv->mm.wedged)) {
4372                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4373                 atomic_set(&dev_priv->mm.wedged, 0);
4374         }
4375
4376         DRM_LOCK(dev);
4377         dev_priv->mm.suspended = 0;
4378
4379         ret = i915_gem_init_hw(dev);
4380         if (ret != 0) {
4381                 DRM_UNLOCK(dev);
4382                 return ret;
4383         }
4384
4385         BUG_ON(!list_empty(&dev_priv->mm.active_list));
4386         DRM_UNLOCK(dev);
4387
4388         ret = drm_irq_install(dev);
4389         if (ret)
4390                 goto cleanup_ringbuffer;
4391
4392         return 0;
4393
4394 cleanup_ringbuffer:
4395         DRM_LOCK(dev);
4396         i915_gem_cleanup_ringbuffer(dev);
4397         dev_priv->mm.suspended = 1;
4398         DRM_UNLOCK(dev);
4399
4400         return ret;
4401 }
4402
4403 int
4404 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4405                        struct drm_file *file_priv)
4406 {
4407         if (drm_core_check_feature(dev, DRIVER_MODESET))
4408                 return 0;
4409
4410         drm_irq_uninstall(dev);
4411         return i915_gem_idle(dev);
4412 }
4413
4414 void
4415 i915_gem_lastclose(struct drm_device *dev)
4416 {
4417         int ret;
4418
4419         if (drm_core_check_feature(dev, DRIVER_MODESET))
4420                 return;
4421
4422         ret = i915_gem_idle(dev);
4423         if (ret)
4424                 DRM_ERROR("failed to idle hardware: %d\n", ret);
4425 }
4426
4427 static void
4428 init_ring_lists(struct intel_ring_buffer *ring)
4429 {
4430         INIT_LIST_HEAD(&ring->active_list);
4431         INIT_LIST_HEAD(&ring->request_list);
4432 }
4433
4434 void
4435 i915_gem_load(struct drm_device *dev)
4436 {
4437         int i;
4438         drm_i915_private_t *dev_priv = dev->dev_private;
4439
4440         INIT_LIST_HEAD(&dev_priv->mm.active_list);
4441         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4442         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4443         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4444         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4445         for (i = 0; i < I915_NUM_RINGS; i++)
4446                 init_ring_lists(&dev_priv->ring[i]);
4447         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4448                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4449         TIMEOUT_TASK_INIT(dev_priv->wq, &dev_priv->mm.retire_work, 0,
4450             i915_gem_retire_work_handler, dev_priv);
4451         init_completion(&dev_priv->error_completion);
4452
4453         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4454         if (IS_GEN3(dev)) {
4455                 I915_WRITE(MI_ARB_STATE,
4456                            _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4457         }
4458
4459         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4460
4461         /* Old X drivers will take 0-2 for front, back, depth buffers */
4462         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4463                 dev_priv->fence_reg_start = 3;
4464
4465         if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4466                 dev_priv->num_fence_regs = 16;
4467         else
4468                 dev_priv->num_fence_regs = 8;
4469
4470         /* Initialize fence registers to zero */
4471         i915_gem_reset_fences(dev);
4472
4473         i915_gem_detect_bit_6_swizzle(dev);
4474         DRM_INIT_WAITQUEUE(&dev_priv->pending_flip_queue);
4475
4476         dev_priv->mm.interruptible = true;
4477
4478         dev_priv->mm.inactive_shrinker = EVENTHANDLER_REGISTER(vm_lowmem,
4479             i915_gem_inactive_shrink, dev, EVENTHANDLER_PRI_ANY);
4480 }
4481
4482 /*
4483  * Create a physically contiguous memory object for this object,
4484  * e.g. for cursor + overlay regs.
4485  */
4486 static int i915_gem_init_phys_object(struct drm_device *dev,
4487                                      int id, int size, int align)
4488 {
4489         drm_i915_private_t *dev_priv = dev->dev_private;
4490         struct drm_i915_gem_phys_object *phys_obj;
4491         int ret;
4492
4493         if (dev_priv->mm.phys_objs[id - 1] || !size)
4494                 return 0;
4495
4496         phys_obj = malloc(sizeof(struct drm_i915_gem_phys_object),
4497             DRM_I915_GEM, M_WAITOK | M_ZERO);
4498         if (!phys_obj)
4499                 return -ENOMEM;
4500
4501         phys_obj->id = id;
4502
4503         phys_obj->handle = drm_pci_alloc(dev, size, align, BUS_SPACE_MAXADDR);
4504         if (!phys_obj->handle) {
4505                 ret = -ENOMEM;
4506                 goto kfree_obj;
4507         }
4508 #ifdef CONFIG_X86
4509         pmap_change_attr((vm_offset_t)phys_obj->handle->vaddr,
4510             size / PAGE_SIZE, PAT_WRITE_COMBINING);
4511 #endif
4512
4513         dev_priv->mm.phys_objs[id - 1] = phys_obj;
4514
4515         return 0;
4516 kfree_obj:
4517         free(phys_obj, DRM_I915_GEM);
4518         return ret;
4519 }
4520
4521 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4522 {
4523         drm_i915_private_t *dev_priv = dev->dev_private;
4524         struct drm_i915_gem_phys_object *phys_obj;
4525
4526         if (!dev_priv->mm.phys_objs[id - 1])
4527                 return;
4528
4529         phys_obj = dev_priv->mm.phys_objs[id - 1];
4530         if (phys_obj->cur_obj) {
4531                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4532         }
4533
4534 #ifdef FREEBSD_WIP
4535 #ifdef CONFIG_X86
4536         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4537 #endif
4538 #endif /* FREEBSD_WIP */
4539
4540         drm_pci_free(dev, phys_obj->handle);
4541         free(phys_obj, DRM_I915_GEM);
4542         dev_priv->mm.phys_objs[id - 1] = NULL;
4543 }
4544
4545 void i915_gem_free_all_phys_object(struct drm_device *dev)
4546 {
4547         int i;
4548
4549         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4550                 i915_gem_free_phys_object(dev, i);
4551 }
4552
4553 void i915_gem_detach_phys_object(struct drm_device *dev,
4554                                  struct drm_i915_gem_object *obj)
4555 {
4556         struct sf_buf *sf;
4557         char *vaddr;
4558         char *dst;
4559         int i;
4560         int page_count;
4561
4562         if (!obj->phys_obj)
4563                 return;
4564         vaddr = obj->phys_obj->handle->vaddr;
4565
4566         page_count = obj->base.size / PAGE_SIZE;
4567         VM_OBJECT_WLOCK(obj->base.vm_obj);
4568         for (i = 0; i < page_count; i++) {
4569                 vm_page_t page = i915_gem_wire_page(obj->base.vm_obj, i, NULL);
4570                 if (page == NULL)
4571                         continue; /* XXX */
4572
4573                 VM_OBJECT_WUNLOCK(obj->base.vm_obj);
4574                 sf = sf_buf_alloc(page, 0);
4575                 if (sf != NULL) {
4576                         dst = (char *)sf_buf_kva(sf);
4577                         memcpy(dst, vaddr + IDX_TO_OFF(i), PAGE_SIZE);
4578                         sf_buf_free(sf);
4579                 }
4580                 drm_clflush_pages(&page, 1);
4581
4582                 VM_OBJECT_WLOCK(obj->base.vm_obj);
4583                 vm_page_reference(page);
4584                 vm_page_lock(page);
4585                 vm_page_dirty(page);
4586                 vm_page_unwire(page, PQ_INACTIVE);
4587                 vm_page_unlock(page);
4588                 atomic_add_long(&i915_gem_wired_pages_cnt, -1);
4589         }
4590         VM_OBJECT_WUNLOCK(obj->base.vm_obj);
4591         i915_gem_chipset_flush(dev);
4592
4593         obj->phys_obj->cur_obj = NULL;
4594         obj->phys_obj = NULL;
4595 }
4596
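/*
 * Bind a GEM object to the physical object identified by id, creating the
 * physical object on first use and copying the GEM object's current page
 * contents into the contiguous backing store.
 */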
4597 int
4598 i915_gem_attach_phys_object(struct drm_device *dev,
4599                             struct drm_i915_gem_object *obj,
4600                             int id,
4601                             int align)
4602 {
4603         drm_i915_private_t *dev_priv = dev->dev_private;
4604         struct sf_buf *sf;
4605         char *dst, *src;
4606         int ret = 0;
4607         int page_count;
4608         int i;
4609
4610         if (id > I915_MAX_PHYS_OBJECT)
4611                 return -EINVAL;
4612
4613         if (obj->phys_obj) {
4614                 if (obj->phys_obj->id == id)
4615                         return 0;
4616                 i915_gem_detach_phys_object(dev, obj);
4617         }
4618
4619         /* Create the physical object for this id if none exists yet. */
4620         if (!dev_priv->mm.phys_objs[id - 1]) {
4621                 ret = i915_gem_init_phys_object(dev, id,
4622                                                 obj->base.size, align);
4623                 if (ret) {
4624                         DRM_ERROR("failed to init phys object %d size: %zu\n",
4625                                   id, obj->base.size);
4626                         return ret;
4627                 }
4628         }
4629
4630         /* Bind the physical object to this GEM object. */
4631         obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4632         obj->phys_obj->cur_obj = obj;
4633
4634         page_count = obj->base.size / PAGE_SIZE;
4635
4636         VM_OBJECT_WLOCK(obj->base.vm_obj);
4637         for (i = 0; i < page_count; i++) {
4638                 vm_page_t page = i915_gem_wire_page(obj->base.vm_obj, i, NULL);
4639                 if (page == NULL) {
4640                         ret = -EIO;
4641                         break;
4642                 }
4643                 VM_OBJECT_WUNLOCK(obj->base.vm_obj);
4644                 sf = sf_buf_alloc(page, 0);
4645                 src = (char *)sf_buf_kva(sf);
4646                 dst = (char *)obj->phys_obj->handle->vaddr + IDX_TO_OFF(i);
4647                 memcpy(dst, src, PAGE_SIZE);
4648                 sf_buf_free(sf);
4649
4650                 VM_OBJECT_WLOCK(obj->base.vm_obj);
4651
4652                 vm_page_reference(page);
4653                 vm_page_lock(page);
4654                 vm_page_unwire(page, PQ_INACTIVE);
4655                 vm_page_unlock(page);
4656                 atomic_add_long(&i915_gem_wired_pages_cnt, -1);
4657         }
4658         VM_OBJECT_WUNLOCK(obj->base.vm_obj);
4659
4660         return ret;
4661 }
4662
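/*
 * Write path for objects backed by a physical object: copy user data
 * directly into the contiguous backing store, falling back to a sleeping
 * copy with the DRM lock dropped, then flush the chipset.
 */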
4663 static int
4664 i915_gem_phys_pwrite(struct drm_device *dev,
4665                      struct drm_i915_gem_object *obj,
4666                      struct drm_i915_gem_pwrite *args,
4667                      struct drm_file *file_priv)
4668 {
4669         void *vaddr = (char *)obj->phys_obj->handle->vaddr + args->offset;
4670         char __user *user_data = to_user_ptr(args->data_ptr);
4671
4672         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4673                 unsigned long unwritten;
4674
4675                 /* Once assigned, the physical object is fixed for the
4676                  * lifetime of the obj, so we can safely drop the lock and
4677                  * continue to access vaddr.
4678                  */
4679                 DRM_UNLOCK(dev);
4680                 unwritten = copy_from_user(vaddr, user_data, args->size);
4681                 DRM_LOCK(dev);
4682                 if (unwritten)
4683                         return -EFAULT;
4684         }
4685
4686         i915_gem_chipset_flush(dev);
4687         return 0;
4688 }
4689
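/*
 * Per-client teardown: unlink (but do not free) the client's outstanding
 * requests so that request retirement never touches the stale file_priv.
 */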
4690 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4691 {
4692         struct drm_i915_file_private *file_priv = file->driver_priv;
4693
4694         /* Clean up our request list when the client is going away, so that
4695          * later retire_requests won't dereference our soon-to-be-gone
4696          * file_priv.
4697          */
4698         mtx_lock(&file_priv->mm.lock);
4699         while (!list_empty(&file_priv->mm.request_list)) {
4700                 struct drm_i915_gem_request *request;
4701
4702                 request = list_first_entry(&file_priv->mm.request_list,
4703                                            struct drm_i915_gem_request,
4704                                            client_list);
4705                 list_del(&request->client_list);
4706                 request->file_priv = NULL;
4707         }
4708         mtx_unlock(&file_priv->mm.lock);
4709 }
4710
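/*
 * vm_lowmem eventhandler: bail out if the struct lock cannot be taken
 * without sleeping; otherwise run two shrink passes over the GEM object
 * lists and, when the second pass reclaims almost nothing compared with
 * the first, fall back to i915_gem_shrink_all().
 */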
4711 static void
4712 i915_gem_inactive_shrink(void *arg)
4713 {
4714         struct drm_device *dev = arg;
4715         struct drm_i915_private *dev_priv = dev->dev_private;
4716         int pass1, pass2;
4717
4718         if (!sx_try_xlock(&dev->dev_struct_lock)) {
4719                 return;
4720         }
4721
4722         CTR0(KTR_DRM, "gem_lowmem");
4723
4724         pass1 = i915_gem_purge(dev_priv, -1);
4725         pass2 = __i915_gem_shrink(dev_priv, -1, false);
4726
4727         if (pass2 <= pass1 / 100)
4728                 i915_gem_shrink_all(dev_priv);
4729
4730         DRM_UNLOCK(dev);
4731 }
4732
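/*
 * Grab and wire the page at pindex, paging it in (or zero-filling it) if
 * it is not yet valid.  Returns NULL if the pager fails; *fresh, when
 * non-NULL, reports whether the contents were read in from the pager.
 */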
4733 static vm_page_t
4734 i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex, bool *fresh)
4735 {
4736         vm_page_t page;
4737         int rv;
4738
4739         VM_OBJECT_ASSERT_WLOCKED(object);
4740         page = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
4741             VM_ALLOC_WIRED);
4742         if (page->valid != VM_PAGE_BITS_ALL) {
4743                 vm_page_xbusy(page);
4744                 if (vm_pager_has_page(object, pindex, NULL, NULL)) {
4745                         rv = vm_pager_get_pages(object, &page, 1, NULL, NULL);
4746                         if (rv != VM_PAGER_OK) {
4747                                 vm_page_lock(page);
4748                                 vm_page_unwire(page, PQ_NONE);
4749                                 vm_page_free(page);
4750                                 vm_page_unlock(page);
4751                                 return (NULL);
4752                         }
4753                         if (fresh != NULL)
4754                                 *fresh = true;
4755                 } else {
4756                         pmap_zero_page(page);
4757                         page->valid = VM_PAGE_BITS_ALL;
4758                         page->dirty = 0;
4759                         if (fresh != NULL)
4760                                 *fresh = false;
4761                 }
4762                 vm_page_xunbusy(page);
4763         } else if (fresh != NULL)
4764                 *fresh = false;
4765         atomic_add_long(&i915_gem_wired_pages_cnt, 1);
4766         return (page);
4767 }