[FreeBSD/FreeBSD.git] sys/dev/drm2/i915/i915_gem.c
1 /*-
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  * Copyright (c) 2011 The FreeBSD Foundation
27  * All rights reserved.
28  *
29  * This software was developed by Konstantin Belousov under sponsorship from
30  * the FreeBSD Foundation.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  *
41  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
42  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51  * SUCH DAMAGE.
52  */
53
54 #include <sys/cdefs.h>
55 __FBSDID("$FreeBSD$");
56
57 #include <dev/drm2/drmP.h>
58 #include <dev/drm2/drm.h>
59 #include <dev/drm2/i915/i915_drm.h>
60 #include <dev/drm2/i915/i915_drv.h>
61 #include <dev/drm2/i915/intel_drv.h>
62 #include <dev/drm2/i915/intel_ringbuffer.h>
63 #include <sys/resourcevar.h>
64 #include <sys/sched.h>
65 #include <sys/sf_buf.h>
66
67 static void i915_gem_object_flush_cpu_write_domain(
68     struct drm_i915_gem_object *obj);
69 static uint32_t i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size,
70     int tiling_mode);
71 static uint32_t i915_gem_get_gtt_alignment(struct drm_device *dev,
72     uint32_t size, int tiling_mode);
73 static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
74     unsigned alignment, bool map_and_fenceable);
75 static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
76     int flags);
77 static void i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj);
78 static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
79     bool write);
80 static void i915_gem_object_set_to_full_cpu_read_domain(
81     struct drm_i915_gem_object *obj);
82 static int i915_gem_object_set_cpu_read_domain_range(
83     struct drm_i915_gem_object *obj, uint64_t offset, uint64_t size);
84 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj);
85 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
86 static int i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj);
87 static bool i915_gem_object_is_inactive(struct drm_i915_gem_object *obj);
88 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj);
89 static vm_page_t i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex);
90 static void i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
91     uint32_t flush_domains);
92 static void i915_gem_clear_fence_reg(struct drm_device *dev,
93     struct drm_i915_fence_reg *reg);
94 static void i915_gem_reset_fences(struct drm_device *dev);
95 static void i915_gem_retire_task_handler(void *arg, int pending);
96 static int i915_gem_phys_pwrite(struct drm_device *dev,
97     struct drm_i915_gem_object *obj, uint64_t data_ptr, uint64_t offset,
98     uint64_t size, struct drm_file *file_priv);
99 static void i915_gem_lowmem(void *arg);
100
101 MALLOC_DEFINE(DRM_I915_GEM, "i915gem", "Allocations from i915 gem");
102 long i915_gem_wired_pages_cnt;
103
104 static void
105 i915_gem_info_add_obj(struct drm_i915_private *dev_priv, size_t size)
106 {
107
108         dev_priv->mm.object_count++;
109         dev_priv->mm.object_memory += size;
110 }
111
112 static void
113 i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, size_t size)
114 {
115
116         dev_priv->mm.object_count--;
117         dev_priv->mm.object_memory -= size;
118 }
119
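/*
 * If the GPU has been marked wedged by the error handler, sleep until
 * error_completion is signalled, i.e. until the reset has finished,
 * before letting callers take the struct lock.  If the hardware is
 * still wedged after the wait, error_completion is bumped again,
 * apparently so that later waiters do not block forever on an
 * unrecoverable GPU.
 */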
120 static int
121 i915_gem_wait_for_error(struct drm_device *dev)
122 {
123         struct drm_i915_private *dev_priv;
124         int ret;
125
126         dev_priv = dev->dev_private;
127         if (!atomic_load_acq_int(&dev_priv->mm.wedged))
128                 return (0);
129
130         mtx_lock(&dev_priv->error_completion_lock);
131         while (dev_priv->error_completion == 0) {
132                 ret = -msleep(&dev_priv->error_completion,
133                     &dev_priv->error_completion_lock, PCATCH, "915wco", 0);
134                 if (ret != 0) {
135                         mtx_unlock(&dev_priv->error_completion_lock);
136                         return (ret);
137                 }
138         }
139         mtx_unlock(&dev_priv->error_completion_lock);
140
141         if (atomic_read(&dev_priv->mm.wedged)) {
142                 mtx_lock(&dev_priv->error_completion_lock);
143                 dev_priv->error_completion++;
144                 mtx_unlock(&dev_priv->error_completion_lock);
145         }
146         return (0);
147 }
148
149 int
150 i915_mutex_lock_interruptible(struct drm_device *dev)
151 {
152         struct drm_i915_private *dev_priv;
153         int ret;
154
155         dev_priv = dev->dev_private;
156         ret = i915_gem_wait_for_error(dev);
157         if (ret != 0)
158                 return (ret);
159
160         /*
161          * Interruptible it shall be: dev_struct_lock is an sx lock, so
162          * sx_xlock_sig() returns an errno when a signal is caught.
163          */
164         ret = sx_xlock_sig(&dev->dev_struct_lock);
165         if (ret != 0)
166                 return (-ret);
167
168         return (0);
169 }
170
171
172 static void
173 i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
174 {
175         struct drm_device *dev;
176         drm_i915_private_t *dev_priv;
177         int ret;
178
179         dev = obj->base.dev;
180         dev_priv = dev->dev_private;
181
182         ret = i915_gem_object_unbind(obj);
183         if (ret == -ERESTART) {
184                 list_move(&obj->mm_list, &dev_priv->mm.deferred_free_list);
185                 return;
186         }
187
188         CTR1(KTR_DRM, "object_destroy_tail %p", obj);
189         drm_gem_free_mmap_offset(&obj->base);
190         drm_gem_object_release(&obj->base);
191         i915_gem_info_remove_obj(dev_priv, obj->base.size);
192
193         free(obj->page_cpu_valid, DRM_I915_GEM);
194         free(obj->bit_17, DRM_I915_GEM);
195         free(obj, DRM_I915_GEM);
196 }
197
198 void
199 i915_gem_free_object(struct drm_gem_object *gem_obj)
200 {
201         struct drm_i915_gem_object *obj;
202         struct drm_device *dev;
203
204         obj = to_intel_bo(gem_obj);
205         dev = obj->base.dev;
206
207         while (obj->pin_count > 0)
208                 i915_gem_object_unpin(obj);
209
210         if (obj->phys_obj != NULL)
211                 i915_gem_detach_phys_object(dev, obj);
212
213         i915_gem_free_object_tail(obj);
214 }
215
216 static void
217 init_ring_lists(struct intel_ring_buffer *ring)
218 {
219
220         INIT_LIST_HEAD(&ring->active_list);
221         INIT_LIST_HEAD(&ring->request_list);
222         INIT_LIST_HEAD(&ring->gpu_write_list);
223 }
224
225 void
226 i915_gem_load(struct drm_device *dev)
227 {
228         drm_i915_private_t *dev_priv;
229         int i;
230
231         dev_priv = dev->dev_private;
232
233         INIT_LIST_HEAD(&dev_priv->mm.active_list);
234         INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
235         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
236         INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
237         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
238         INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
239         INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
240         for (i = 0; i < I915_NUM_RINGS; i++)
241                 init_ring_lists(&dev_priv->rings[i]);
242         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
243                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
244         TIMEOUT_TASK_INIT(dev_priv->tq, &dev_priv->mm.retire_task, 0,
245             i915_gem_retire_task_handler, dev_priv);
246         dev_priv->error_completion = 0;
247
248         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
249         if (IS_GEN3(dev)) {
250                 u32 tmp = I915_READ(MI_ARB_STATE);
251                 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
252                         /*
253                          * arb state is a masked write, so set bit +
254                          * bit in mask.
255                          */
256                         tmp = MI_ARB_C3_LP_WRITE_ENABLE |
257                             (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
258                         I915_WRITE(MI_ARB_STATE, tmp);
259                 }
260         }
261
262         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
263
264         /* Old X drivers will take 0-2 for front, back, depth buffers */
265         if (!drm_core_check_feature(dev, DRIVER_MODESET))
266                 dev_priv->fence_reg_start = 3;
267
268         if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) ||
269             IS_G33(dev))
270                 dev_priv->num_fence_regs = 16;
271         else
272                 dev_priv->num_fence_regs = 8;
273
274         /* Initialize fence registers to zero */
275         for (i = 0; i < dev_priv->num_fence_regs; i++) {
276                 i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
277         }
278         i915_gem_detect_bit_6_swizzle(dev);
279         dev_priv->mm.interruptible = true;
280
281         dev_priv->mm.i915_lowmem = EVENTHANDLER_REGISTER(vm_lowmem,
282             i915_gem_lowmem, dev, EVENTHANDLER_PRI_ANY);
283 }
284
285 int
286 i915_gem_do_init(struct drm_device *dev, unsigned long start,
287     unsigned long mappable_end, unsigned long end)
288 {
289         drm_i915_private_t *dev_priv;
290         unsigned long mappable;
291         int error;
292
293         dev_priv = dev->dev_private;
294         mappable = min(end, mappable_end) - start;
295
296         drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
297
298         dev_priv->mm.gtt_start = start;
299         dev_priv->mm.gtt_mappable_end = mappable_end;
300         dev_priv->mm.gtt_end = end;
301         dev_priv->mm.gtt_total = end - start;
302         dev_priv->mm.mappable_gtt_total = mappable;
303
304         /* Take over this portion of the GTT */
305         intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
306         device_printf(dev->device,
307             "taking over the fictitious range 0x%lx-0x%lx\n",
308             dev->agp->base + start, dev->agp->base + start + mappable);
309         error = -vm_phys_fictitious_reg_range(dev->agp->base + start,
310             dev->agp->base + start + mappable, VM_MEMATTR_WRITE_COMBINING);
311         return (error);
312 }
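/*
 * A worked example with hypothetical numbers: for a 512 MB GTT whose
 * first 256 MB is CPU-mappable, i915_gem_do_init(dev, 0, 256 << 20,
 * 512 << 20) leaves gtt_total = 512 MB and mappable_gtt_total = 256 MB,
 * and registers the fictitious physical range covering the mappable
 * aperture starting at dev->agp->base.
 */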
313
314 int
315 i915_gem_init_ioctl(struct drm_device *dev, void *data,
316     struct drm_file *file)
317 {
318         struct drm_i915_gem_init *args;
319         drm_i915_private_t *dev_priv;
320
321         dev_priv = dev->dev_private;
322         args = data;
323
324         if (args->gtt_start >= args->gtt_end ||
325             (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
326                 return (-EINVAL);
327
328         if (mtx_initialized(&dev_priv->mm.gtt_space.unused_lock))
329                 return (-EBUSY);
330         /*
331          * XXXKIB. The second-time initialization should be guarded
332          * against.
333          */
334         return (i915_gem_do_init(dev, args->gtt_start, args->gtt_end,
335             args->gtt_end));
336 }
337
338 int
339 i915_gem_idle(struct drm_device *dev)
340 {
341         drm_i915_private_t *dev_priv;
342         int ret;
343
344         dev_priv = dev->dev_private;
345         if (dev_priv->mm.suspended)
346                 return (0);
347
348         ret = i915_gpu_idle(dev, true);
349         if (ret != 0)
350                 return (ret);
351
352         /* Under UMS, be paranoid and evict. */
353         if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
354                 ret = i915_gem_evict_inactive(dev, false);
355                 if (ret != 0)
356                         return ret;
357         }
358
359         i915_gem_reset_fences(dev);
360
361         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
362          * We need to replace this with a semaphore, or something.
363          * And not confound mm.suspended!
364          */
365         dev_priv->mm.suspended = 1;
366         callout_stop(&dev_priv->hangcheck_timer);
367
368         i915_kernel_lost_context(dev);
369         i915_gem_cleanup_ringbuffer(dev);
370
371         /* Cancel the retire work handler, which should be idle now. */
372         taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->mm.retire_task, NULL);
373         return (ret);
374 }
375
376 void
377 i915_gem_init_swizzling(struct drm_device *dev)
378 {
379         drm_i915_private_t *dev_priv;
380
381         dev_priv = dev->dev_private;
382
383         if (INTEL_INFO(dev)->gen < 5 ||
384             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
385                 return;
386
387         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
388                                  DISP_TILE_SURFACE_SWIZZLING);
389
390         if (IS_GEN5(dev))
391                 return;
392
393         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
394         if (IS_GEN6(dev))
395                 I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_SNB));
396         else
397                 I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_IVB));
398 }
399
400 void
401 i915_gem_init_ppgtt(struct drm_device *dev)
402 {
403         drm_i915_private_t *dev_priv;
404         struct i915_hw_ppgtt *ppgtt;
405         uint32_t pd_offset, pd_entry;
406         vm_paddr_t pt_addr;
407         struct intel_ring_buffer *ring;
408         u_int first_pd_entry_in_global_pt, i;
409
410         dev_priv = dev->dev_private;
411         ppgtt = dev_priv->mm.aliasing_ppgtt;
412         if (ppgtt == NULL)
413                 return;
414
415         first_pd_entry_in_global_pt = 512 * 1024 - I915_PPGTT_PD_ENTRIES;
416         for (i = 0; i < ppgtt->num_pd_entries; i++) {
417                 pt_addr = VM_PAGE_TO_PHYS(ppgtt->pt_pages[i]);
418                 pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
419                 pd_entry |= GEN6_PDE_VALID;
420                 intel_gtt_write(first_pd_entry_in_global_pt + i, pd_entry);
421         }
422         intel_gtt_read_pte(first_pd_entry_in_global_pt);
423
424         pd_offset = ppgtt->pd_offset;
425         pd_offset /= 64; /* in cachelines, */
426         pd_offset <<= 16;
427
428         if (INTEL_INFO(dev)->gen == 6) {
429                 uint32_t ecochk = I915_READ(GAM_ECOCHK);
430                 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
431                                        ECOCHK_PPGTT_CACHE64B);
432                 I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
433         } else if (INTEL_INFO(dev)->gen >= 7) {
434                 I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
435                 /* GFX_MODE is per-ring on gen7+ */
436         }
437
438         for (i = 0; i < I915_NUM_RINGS; i++) {
439                 ring = &dev_priv->rings[i];
440
441                 if (INTEL_INFO(dev)->gen >= 7)
442                         I915_WRITE(RING_MODE_GEN7(ring),
443                                    GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
444
445                 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
446                 I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
447         }
448 }
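/*
 * The page-directory pointer programmed into RING_PP_DIR_BASE above is
 * expressed in GTT cachelines: pd_offset / 64 converts the byte offset
 * of the page directory into 64-byte cachelines, and the << 16 moves
 * the value into the upper half of the register, where the gen6/gen7
 * hardware seemingly expects it.
 */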
449
450 int
451 i915_gem_init_hw(struct drm_device *dev)
452 {
453         drm_i915_private_t *dev_priv;
454         int ret;
455
456         dev_priv = dev->dev_private;
457
458         i915_gem_init_swizzling(dev);
459
460         ret = intel_init_render_ring_buffer(dev);
461         if (ret != 0)
462                 return (ret);
463
464         if (HAS_BSD(dev)) {
465                 ret = intel_init_bsd_ring_buffer(dev);
466                 if (ret != 0)
467                         goto cleanup_render_ring;
468         }
469
470         if (HAS_BLT(dev)) {
471                 ret = intel_init_blt_ring_buffer(dev);
472                 if (ret != 0)
473                         goto cleanup_bsd_ring;
474         }
475
476         dev_priv->next_seqno = 1;
477         i915_gem_init_ppgtt(dev);
478         return (0);
479
480 cleanup_bsd_ring:
481         intel_cleanup_ring_buffer(&dev_priv->rings[VCS]);
482 cleanup_render_ring:
483         intel_cleanup_ring_buffer(&dev_priv->rings[RCS]);
484         return (ret);
485 }
486
487 int
488 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
489     struct drm_file *file)
490 {
491         struct drm_i915_private *dev_priv;
492         struct drm_i915_gem_get_aperture *args;
493         struct drm_i915_gem_object *obj;
494         size_t pinned;
495
496         dev_priv = dev->dev_private;
497         args = data;
498
499         if (!(dev->driver->driver_features & DRIVER_GEM))
500                 return (-ENODEV);
501
502         pinned = 0;
503         DRM_LOCK(dev);
504         list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
505                 pinned += obj->gtt_space->size;
506         DRM_UNLOCK(dev);
507
508         args->aper_size = dev_priv->mm.gtt_total;
509         args->aper_available_size = args->aper_size - pinned;
510
511         return (0);
512 }
513
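/*
 * Pin an object into the GTT.  If the object is already bound but at an
 * offset that violates the requested alignment, or outside the mappable
 * aperture while map_and_fenceable is requested, it is unbound first
 * and rebound under the new constraints.  The first pin of an inactive
 * object also moves it to the pinned list, presumably so the eviction
 * code will skip it.
 */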
514 int
515 i915_gem_object_pin(struct drm_i915_gem_object *obj, uint32_t alignment,
516      bool map_and_fenceable)
517 {
518         struct drm_device *dev;
519         struct drm_i915_private *dev_priv;
520         int ret;
521
522         dev = obj->base.dev;
523         dev_priv = dev->dev_private;
524
525         KASSERT(obj->pin_count != DRM_I915_GEM_OBJECT_MAX_PIN_COUNT,
526             ("Max pin count"));
527
528         if (obj->gtt_space != NULL) {
529                 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
530                     (map_and_fenceable && !obj->map_and_fenceable)) {
531                         DRM_DEBUG("bo is already pinned with incorrect alignment:"
532                              " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
533                              " obj->map_and_fenceable=%d\n",
534                              obj->gtt_offset, alignment,
535                              map_and_fenceable,
536                              obj->map_and_fenceable);
537                         ret = i915_gem_object_unbind(obj);
538                         if (ret != 0)
539                                 return (ret);
540                 }
541         }
542
543         if (obj->gtt_space == NULL) {
544                 ret = i915_gem_object_bind_to_gtt(obj, alignment,
545                     map_and_fenceable);
546                 if (ret)
547                         return (ret);
548         }
549
550         if (obj->pin_count++ == 0 && !obj->active)
551                 list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
552         obj->pin_mappable |= map_and_fenceable;
553
554 #if 1
555         KIB_NOTYET();
556 #else
557         WARN_ON(i915_verify_lists(dev));
558 #endif
559         return (0);
560 }
561
562 void
563 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
564 {
565         struct drm_device *dev;
566         drm_i915_private_t *dev_priv;
567
568         dev = obj->base.dev;
569         dev_priv = dev->dev_private;
570
571 #if 1
572         KIB_NOTYET();
573 #else
574         WARN_ON(i915_verify_lists(dev));
575 #endif
576         
577         KASSERT(obj->pin_count != 0, ("zero pin count"));
578         KASSERT(obj->gtt_space != NULL, ("No gtt mapping"));
579
580         if (--obj->pin_count == 0) {
581                 if (!obj->active)
582                         list_move_tail(&obj->mm_list,
583                             &dev_priv->mm.inactive_list);
584                 obj->pin_mappable = false;
585         }
586 #if 1
587         KIB_NOTYET();
588 #else
589         WARN_ON(i915_verify_lists(dev));
590 #endif
591 }
592
593 int
594 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
595     struct drm_file *file)
596 {
597         struct drm_i915_gem_pin *args;
598         struct drm_i915_gem_object *obj;
599         struct drm_gem_object *gobj;
600         int ret;
601
602         args = data;
603
604         ret = i915_mutex_lock_interruptible(dev);
605         if (ret != 0)
606                 return ret;
607
608         gobj = drm_gem_object_lookup(dev, file, args->handle);
609         if (gobj == NULL) {
610                 ret = -ENOENT;
611                 goto unlock;
612         }
613         obj = to_intel_bo(gobj);
614
615         if (obj->madv != I915_MADV_WILLNEED) {
616                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
617                 ret = -EINVAL;
618                 goto out;
619         }
620
621         if (obj->pin_filp != NULL && obj->pin_filp != file) {
622                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
623                     args->handle);
624                 ret = -EINVAL;
625                 goto out;
626         }
627
628         obj->user_pin_count++;
629         obj->pin_filp = file;
630         if (obj->user_pin_count == 1) {
631                 ret = i915_gem_object_pin(obj, args->alignment, true);
632                 if (ret != 0)
633                         goto out;
634         }
635
636         /* XXX - flush the CPU caches for pinned objects
637          * as the X server doesn't manage domains yet
638          */
639         i915_gem_object_flush_cpu_write_domain(obj);
640         args->offset = obj->gtt_offset;
641 out:
642         drm_gem_object_unreference(&obj->base);
643 unlock:
644         DRM_UNLOCK(dev);
645         return (ret);
646 }
647
648 int
649 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
650     struct drm_file *file)
651 {
652         struct drm_i915_gem_pin *args;
653         struct drm_i915_gem_object *obj;
654         int ret;
655
656         args = data;
657         ret = i915_mutex_lock_interruptible(dev);
658         if (ret != 0)
659                 return (ret);
660
661         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
662         if (&obj->base == NULL) {
663                 ret = -ENOENT;
664                 goto unlock;
665         }
666
667         if (obj->pin_filp != file) {
668                 DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
669                     args->handle);
670                 ret = -EINVAL;
671                 goto out;
672         }
673         obj->user_pin_count--;
674         if (obj->user_pin_count == 0) {
675                 obj->pin_filp = NULL;
676                 i915_gem_object_unpin(obj);
677         }
678
679 out:
680         drm_gem_object_unreference(&obj->base);
681 unlock:
682         DRM_UNLOCK(dev);
683         return (ret);
684 }
685
686 int
687 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
688     struct drm_file *file)
689 {
690         struct drm_i915_gem_busy *args;
691         struct drm_i915_gem_object *obj;
692         struct drm_i915_gem_request *request;
693         int ret;
694
695         args = data;
696
697         ret = i915_mutex_lock_interruptible(dev);
698         if (ret != 0)
699                 return ret;
700
701         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
702         if (&obj->base == NULL) {
703                 ret = -ENOENT;
704                 goto unlock;
705         }
706
707         args->busy = obj->active;
708         if (args->busy) {
709                 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
710                         ret = i915_gem_flush_ring(obj->ring,
711                             0, obj->base.write_domain);
712                 } else if (obj->ring->outstanding_lazy_request ==
713                     obj->last_rendering_seqno) {
714                         request = malloc(sizeof(*request), DRM_I915_GEM,
715                             M_WAITOK | M_ZERO);
716                         ret = i915_add_request(obj->ring, NULL, request);
717                         if (ret != 0)
718                                 free(request, DRM_I915_GEM);
719                 }
720
721                 i915_gem_retire_requests_ring(obj->ring);
722                 args->busy = obj->active;
723         }
724
725         drm_gem_object_unreference(&obj->base);
726 unlock:
727         DRM_UNLOCK(dev);
728         return (ret);
729 }
730
731 static int
732 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
733 {
734         struct drm_i915_private *dev_priv;
735         struct drm_i915_file_private *file_priv;
736         unsigned long recent_enough;
737         struct drm_i915_gem_request *request;
738         struct intel_ring_buffer *ring;
739         u32 seqno;
740         int ret;
741
742         dev_priv = dev->dev_private;
743         if (atomic_read(&dev_priv->mm.wedged))
744                 return (-EIO);
745
746         file_priv = file->driver_priv;
747         recent_enough = ticks - (20 * hz / 1000);
748         ring = NULL;
749         seqno = 0;
750
751         mtx_lock(&file_priv->mm.lck);
752         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
753                 if (time_after_eq(request->emitted_jiffies, recent_enough))
754                         break;
755                 ring = request->ring;
756                 seqno = request->seqno;
757         }
758         mtx_unlock(&file_priv->mm.lck);
759         if (seqno == 0)
760                 return (0);
761
762         ret = 0;
763         mtx_lock(&ring->irq_lock);
764         if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
765                 if (ring->irq_get(ring)) {
766                         while (ret == 0 &&
767                             !(i915_seqno_passed(ring->get_seqno(ring), seqno) ||
768                             atomic_read(&dev_priv->mm.wedged)))
769                                 ret = -msleep(ring, &ring->irq_lock, PCATCH,
770                                     "915thr", 0);
771                         ring->irq_put(ring);
772                         if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
773                                 ret = -EIO;
774                 } else if (_intel_wait_for(dev,
775                     i915_seqno_passed(ring->get_seqno(ring), seqno) ||
776                     atomic_read(&dev_priv->mm.wedged), 3000, 0, "915rtr")) {
777                         ret = -EBUSY;
778                 }
779         }
780         mtx_unlock(&ring->irq_lock);
781
782         if (ret == 0)
783                 taskqueue_enqueue_timeout(dev_priv->tq,
784                     &dev_priv->mm.retire_task, 0);
785
786         return (ret);
787 }
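/*
 * The throttle window above is 20 ms worth of ticks; with the default
 * hz = 1000 that is 20 ticks.  A client that still has a request
 * outstanding from before that window is made to wait for the seqno of
 * its most recent such request before it may queue more work.
 */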
788
789 int
790 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
791     struct drm_file *file_priv)
792 {
793
794         return (i915_gem_ring_throttle(dev, file_priv));
795 }
796
797 int
798 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
799     struct drm_file *file_priv)
800 {
801         struct drm_i915_gem_madvise *args;
802         struct drm_i915_gem_object *obj;
803         int ret;
804
805         args = data;
806         switch (args->madv) {
807         case I915_MADV_DONTNEED:
808         case I915_MADV_WILLNEED:
809                 break;
810         default:
811                 return (-EINVAL);
812         }
813
814         ret = i915_mutex_lock_interruptible(dev);
815         if (ret != 0)
816                 return (ret);
817
818         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
819         if (&obj->base == NULL) {
820                 ret = -ENOENT;
821                 goto unlock;
822         }
823
824         if (obj->pin_count != 0) {
825                 ret = -EINVAL;
826                 goto out;
827         }
828
829         if (obj->madv != I915_MADV_PURGED_INTERNAL)
830                 obj->madv = args->madv;
831         if (i915_gem_object_is_purgeable(obj) && obj->gtt_space == NULL)
832                 i915_gem_object_truncate(obj);
833         args->retained = obj->madv != I915_MADV_PURGED_INTERNAL;
834
835 out:
836         drm_gem_object_unreference(&obj->base);
837 unlock:
838         DRM_UNLOCK(dev);
839         return (ret);
840 }
841
842 void
843 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
844 {
845         drm_i915_private_t *dev_priv;
846         int i;
847
848         dev_priv = dev->dev_private;
849         for (i = 0; i < I915_NUM_RINGS; i++)
850                 intel_cleanup_ring_buffer(&dev_priv->rings[i]);
851 }
852
853 int
854 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
855     struct drm_file *file_priv)
856 {
857         drm_i915_private_t *dev_priv;
858         int ret, i;
859
860         if (drm_core_check_feature(dev, DRIVER_MODESET))
861                 return (0);
862         dev_priv = dev->dev_private;
863         if (atomic_load_acq_int(&dev_priv->mm.wedged) != 0) {
864                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
865                 atomic_store_rel_int(&dev_priv->mm.wedged, 0);
866         }
867
868         dev_priv->mm.suspended = 0;
869
870         ret = i915_gem_init_hw(dev);
871         if (ret != 0) {
872                 return (ret);
873         }
874
875         KASSERT(list_empty(&dev_priv->mm.active_list), ("active list"));
876         KASSERT(list_empty(&dev_priv->mm.flushing_list), ("flushing list"));
877         KASSERT(list_empty(&dev_priv->mm.inactive_list), ("inactive list"));
878         for (i = 0; i < I915_NUM_RINGS; i++) {
879                 KASSERT(list_empty(&dev_priv->rings[i].active_list),
880                     ("ring %d active list", i));
881                 KASSERT(list_empty(&dev_priv->rings[i].request_list),
882                     ("ring %d request list", i));
883         }
884
885         DRM_UNLOCK(dev);
886         ret = drm_irq_install(dev);
887         DRM_LOCK(dev);
888         if (ret)
889                 goto cleanup_ringbuffer;
890
891         return (0);
892
893 cleanup_ringbuffer:
894         i915_gem_cleanup_ringbuffer(dev);
895         dev_priv->mm.suspended = 1;
896
897         return (ret);
898 }
899
900 int
901 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
902     struct drm_file *file_priv)
903 {
904
905         if (drm_core_check_feature(dev, DRIVER_MODESET))
906                 return 0;
907
908         drm_irq_uninstall(dev);
909         return (i915_gem_idle(dev));
910 }
911
912 int
913 i915_gem_create(struct drm_file *file, struct drm_device *dev, uint64_t size,
914     uint32_t *handle_p)
915 {
916         struct drm_i915_gem_object *obj;
917         uint32_t handle;
918         int ret;
919
920         size = roundup(size, PAGE_SIZE);
921         if (size == 0)
922                 return (-EINVAL);
923
924         obj = i915_gem_alloc_object(dev, size);
925         if (obj == NULL)
926                 return (-ENOMEM);
927
928         handle = 0;
929         ret = drm_gem_handle_create(file, &obj->base, &handle);
930         if (ret != 0) {
931                 drm_gem_object_release(&obj->base);
932                 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
933                 free(obj, DRM_I915_GEM);
934                 return (-ret);
935         }
936
937         /* drop reference from allocate - handle holds it now */
938         drm_gem_object_unreference(&obj->base);
939         CTR2(KTR_DRM, "object_create %p %x", obj, size);
940         *handle_p = handle;
941         return (0);
942 }
943
944 int
945 i915_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
946     struct drm_mode_create_dumb *args)
947 {
948
949         /* have to work out size/pitch and return them */
950         args->pitch = roundup2(args->width * ((args->bpp + 7) / 8), 64);
951         args->size = args->pitch * args->height;
952         return (i915_gem_create(file, dev, args->size, &args->handle));
953 }
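/*
 * For example, a hypothetical 1024x768 dumb buffer at 32 bpp works out
 * to pitch = roundup2(1024 * 4, 64) = 4096 bytes and size = 4096 * 768
 * = 3145728 bytes, which i915_gem_create() then rounds up to a multiple
 * of PAGE_SIZE.
 */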
954
955 int
956 i915_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
957     uint32_t handle)
958 {
959
960         return (drm_gem_handle_delete(file, handle));
961 }
962
963 int
964 i915_gem_create_ioctl(struct drm_device *dev, void *data,
965     struct drm_file *file)
966 {
967         struct drm_i915_gem_create *args = data;
968
969         return (i915_gem_create(file, dev, args->size, &args->handle));
970 }
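/*
 * A minimal sketch of how a hypothetical userland client would reach
 * this ioctl through libdrm; drmIoctl() and struct drm_i915_gem_create
 * are libdrm/i915_drm.h names, while use_handle() is only a placeholder
 * for the caller's own code:
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		use_handle(create.handle);
 *
 * The handle returned in create.handle is what the pread/pwrite, pin,
 * and mmap ioctls in this file take as their object reference.
 */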
971
972 static int
973 i915_gem_swap_io(struct drm_device *dev, struct drm_i915_gem_object *obj,
974     uint64_t data_ptr, uint64_t size, uint64_t offset, enum uio_rw rw,
975     struct drm_file *file)
976 {
977         vm_object_t vm_obj;
978         vm_page_t m;
979         struct sf_buf *sf;
980         vm_offset_t mkva;
981         vm_pindex_t obj_pi;
982         int cnt, do_bit17_swizzling, length, obj_po, ret, swizzled_po;
983
984         if (obj->gtt_offset != 0 && rw == UIO_READ)
985                 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
986         else
987                 do_bit17_swizzling = 0;
988
989         obj->dirty = 1;
990         vm_obj = obj->base.vm_obj;
991         ret = 0;
992
993         VM_OBJECT_WLOCK(vm_obj);
994         vm_object_pip_add(vm_obj, 1);
995         while (size > 0) {
996                 obj_pi = OFF_TO_IDX(offset);
997                 obj_po = offset & PAGE_MASK;
998
999                 m = i915_gem_wire_page(vm_obj, obj_pi);
1000                 VM_OBJECT_WUNLOCK(vm_obj);
1001
1002                 sched_pin();
1003                 sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
1004                 mkva = sf_buf_kva(sf);
1005                 length = min(size, PAGE_SIZE - obj_po);
1006                 while (length > 0) {
1007                         if (do_bit17_swizzling &&
1008                             (VM_PAGE_TO_PHYS(m) & (1 << 17)) != 0) {
1009                                 cnt = roundup2(obj_po + 1, 64);
1010                                 cnt = min(cnt - obj_po, length);
1011                                 swizzled_po = obj_po ^ 64;
1012                         } else {
1013                                 cnt = length;
1014                                 swizzled_po = obj_po;
1015                         }
1016                         if (rw == UIO_READ)
1017                                 ret = -copyout_nofault(
1018                                     (char *)mkva + swizzled_po,
1019                                     (void *)(uintptr_t)data_ptr, cnt);
1020                         else
1021                                 ret = -copyin_nofault(
1022                                     (void *)(uintptr_t)data_ptr,
1023                                     (char *)mkva + swizzled_po, cnt);
1024                         if (ret != 0)
1025                                 break;
1026                         data_ptr += cnt;
1027                         size -= cnt;
1028                         length -= cnt;
1029                         offset += cnt;
1030                         obj_po += cnt;
1031                 }
1032                 sf_buf_free(sf);
1033                 sched_unpin();
1034                 VM_OBJECT_WLOCK(vm_obj);
1035                 if (rw == UIO_WRITE)
1036                         vm_page_dirty(m);
1037                 vm_page_reference(m);
1038                 vm_page_lock(m);
1039                 vm_page_unwire(m, 1);
1040                 vm_page_unlock(m);
1041                 atomic_add_long(&i915_gem_wired_pages_cnt, -1);
1042
1043                 if (ret != 0)
1044                         break;
1045         }
1046         vm_object_pip_wakeup(vm_obj);
1047         VM_OBJECT_WUNLOCK(vm_obj);
1048
1049         return (ret);
1050 }
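/*
 * Concretely, when bit-17 swizzling applies and the backing page has
 * physical bit 17 set, the copy loop above swaps adjacent 64-byte
 * cachelines: an access at page offset 0x00 is redirected to 0x40 and
 * vice versa (obj_po ^ 64).  Each chunk is clamped to the remainder of
 * the current cacheline, cnt = roundup2(obj_po + 1, 64) - obj_po,
 * further limited by the bytes left to copy.
 */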
1051
1052 static int
1053 i915_gem_gtt_write(struct drm_device *dev, struct drm_i915_gem_object *obj,
1054     uint64_t data_ptr, uint64_t size, uint64_t offset, struct drm_file *file)
1055 {
1056         vm_offset_t mkva;
1057         vm_pindex_t obj_pi;
1058         int obj_po, ret;
1059
1060         obj_pi = OFF_TO_IDX(offset);
1061         obj_po = offset & PAGE_MASK;
1062
1063         mkva = (vm_offset_t)pmap_mapdev_attr(dev->agp->base + obj->gtt_offset +
1064             IDX_TO_OFF(obj_pi), size, PAT_WRITE_COMBINING);
1065         ret = -copyin_nofault((void *)(uintptr_t)data_ptr, (char *)mkva +
1066             obj_po, size);
1067         pmap_unmapdev(mkva, size);
1068         return (ret);
1069 }
1070
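/*
 * Common backend for the pread and pwrite ioctls.  The user buffer is
 * wired up front with vm_fault_quick_hold_pages() so the copy loops can
 * use the *_nofault copy routines without risking a page fault while
 * the DRM lock is held and the CPU is pinned.  Writes are dispatched to
 * one of three paths: the phys-object path, a write-combining GTT
 * mapping when the object is bound and not in the CPU domain, or the
 * page-by-page path in i915_gem_swap_io(); reads always go through
 * i915_gem_swap_io() after pulling the range into the CPU read domain.
 */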
1071 static int
1072 i915_gem_obj_io(struct drm_device *dev, uint32_t handle, uint64_t data_ptr,
1073     uint64_t size, uint64_t offset, enum uio_rw rw, struct drm_file *file)
1074 {
1075         struct drm_i915_gem_object *obj;
1076         vm_page_t *ma;
1077         vm_offset_t start, end;
1078         int npages, ret;
1079
1080         if (size == 0)
1081                 return (0);
1082         start = trunc_page(data_ptr);
1083         end = round_page(data_ptr + size);
1084         npages = howmany(end - start, PAGE_SIZE);
1085         ma = malloc(npages * sizeof(vm_page_t), DRM_I915_GEM, M_WAITOK |
1086             M_ZERO);
1087         npages = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
1088             (vm_offset_t)data_ptr, size,
1089             (rw == UIO_READ ? VM_PROT_WRITE : 0 ) | VM_PROT_READ, ma, npages);
1090         if (npages == -1) {
1091                 ret = -EFAULT;
1092                 goto free_ma;
1093         }
1094
1095         ret = i915_mutex_lock_interruptible(dev);
1096         if (ret != 0)
1097                 goto unlocked;
1098
1099         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1100         if (&obj->base == NULL) {
1101                 ret = -ENOENT;
1102                 goto unlock;
1103         }
1104         if (offset > obj->base.size || size > obj->base.size - offset) {
1105                 ret = -EINVAL;
1106                 goto out;
1107         }
1108
1109         if (rw == UIO_READ) {
1110                 CTR3(KTR_DRM, "object_pread %p %jx %jx", obj, offset, size);
1111                 ret = i915_gem_object_set_cpu_read_domain_range(obj,
1112                     offset, size);
1113                 if (ret != 0)
1114                         goto out;
1115                 ret = i915_gem_swap_io(dev, obj, data_ptr, size, offset,
1116                     UIO_READ, file);
1117         } else {
1118                 if (obj->phys_obj) {
1119                         CTR3(KTR_DRM, "object_phys_write %p %jx %jx", obj,
1120                             offset, size);
1121                         ret = i915_gem_phys_pwrite(dev, obj, data_ptr, offset,
1122                             size, file);
1123                 } else if (obj->gtt_space &&
1124                     obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1125                         CTR3(KTR_DRM, "object_gtt_write %p %jx %jx", obj,
1126                             offset, size);
1127                         ret = i915_gem_object_pin(obj, 0, true);
1128                         if (ret != 0)
1129                                 goto out;
1130                         ret = i915_gem_object_set_to_gtt_domain(obj, true);
1131                         if (ret != 0)
1132                                 goto out_unpin;
1133                         ret = i915_gem_object_put_fence(obj);
1134                         if (ret != 0)
1135                                 goto out_unpin;
1136                         ret = i915_gem_gtt_write(dev, obj, data_ptr, size,
1137                             offset, file);
1138 out_unpin:
1139                         i915_gem_object_unpin(obj);
1140                 } else {
1141                         CTR3(KTR_DRM, "object_pwrite %p %jx %jx", obj,
1142                             offset, size);
1143                         ret = i915_gem_object_set_to_cpu_domain(obj, true);
1144                         if (ret != 0)
1145                                 goto out;
1146                         ret = i915_gem_swap_io(dev, obj, data_ptr, size, offset,
1147                             UIO_WRITE, file);
1148                 }
1149         }
1150 out:
1151         drm_gem_object_unreference(&obj->base);
1152 unlock:
1153         DRM_UNLOCK(dev);
1154 unlocked:
1155         vm_page_unhold_pages(ma, npages);
1156 free_ma:
1157         free(ma, DRM_I915_GEM);
1158         return (ret);
1159 }
1160
1161 int
1162 i915_gem_pread_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
1163 {
1164         struct drm_i915_gem_pread *args;
1165
1166         args = data;
1167         return (i915_gem_obj_io(dev, args->handle, args->data_ptr, args->size,
1168             args->offset, UIO_READ, file));
1169 }
1170
1171 int
1172 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
1173 {
1174         struct drm_i915_gem_pwrite *args;
1175
1176         args = data;
1177         return (i915_gem_obj_io(dev, args->handle, args->data_ptr, args->size,
1178             args->offset, UIO_WRITE, file));
1179 }
1180
1181 int
1182 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1183     struct drm_file *file)
1184 {
1185         struct drm_i915_gem_set_domain *args;
1186         struct drm_i915_gem_object *obj;
1187         uint32_t read_domains;
1188         uint32_t write_domain;
1189         int ret;
1190
1191         if ((dev->driver->driver_features & DRIVER_GEM) == 0)
1192                 return (-ENODEV);
1193
1194         args = data;
1195         read_domains = args->read_domains;
1196         write_domain = args->write_domain;
1197
1198         if ((write_domain & I915_GEM_GPU_DOMAINS) != 0 ||
1199             (read_domains & I915_GEM_GPU_DOMAINS) != 0 ||
1200             (write_domain != 0 && read_domains != write_domain))
1201                 return (-EINVAL);
1202
1203         ret = i915_mutex_lock_interruptible(dev);
1204         if (ret != 0)
1205                 return (ret);
1206
1207         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1208         if (&obj->base == NULL) {
1209                 ret = -ENOENT;
1210                 goto unlock;
1211         }
1212
1213         if ((read_domains & I915_GEM_DOMAIN_GTT) != 0) {
1214                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1215                 if (ret == -EINVAL)
1216                         ret = 0;
1217         } else
1218                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1219
1220         drm_gem_object_unreference(&obj->base);
1221 unlock:
1222         DRM_UNLOCK(dev);
1223         return (ret);
1224 }
1225
1226 int
1227 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1228     struct drm_file *file)
1229 {
1230         struct drm_i915_gem_sw_finish *args;
1231         struct drm_i915_gem_object *obj;
1232         int ret;
1233
1234         args = data;
1235         ret = 0;
1236         if ((dev->driver->driver_features & DRIVER_GEM) == 0)
1237                 return (-ENODEV);
1238         ret = i915_mutex_lock_interruptible(dev);
1239         if (ret != 0)
1240                 return (ret);
1241         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1242         if (&obj->base == NULL) {
1243                 ret = -ENOENT;
1244                 goto unlock;
1245         }
1246         if (obj->pin_count != 0)
1247                 i915_gem_object_flush_cpu_write_domain(obj);
1248         drm_gem_object_unreference(&obj->base);
1249 unlock:
1250         DRM_UNLOCK(dev);
1251         return (ret);
1252 }
1253
1254 int
1255 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1256     struct drm_file *file)
1257 {
1258         struct drm_i915_gem_mmap *args;
1259         struct drm_gem_object *obj;
1260         struct proc *p;
1261         vm_map_t map;
1262         vm_offset_t addr;
1263         vm_size_t size;
1264         int error, rv;
1265
1266         args = data;
1267
1268         if ((dev->driver->driver_features & DRIVER_GEM) == 0)
1269                 return (-ENODEV);
1270
1271         obj = drm_gem_object_lookup(dev, file, args->handle);
1272         if (obj == NULL)
1273                 return (-ENOENT);
1274         error = 0;
1275         if (args->size == 0)
1276                 goto out;
1277         p = curproc;
1278         map = &p->p_vmspace->vm_map;
1279         size = round_page(args->size);
1280         PROC_LOCK(p);
1281         if (map->size + size > lim_cur(p, RLIMIT_VMEM)) {
1282                 PROC_UNLOCK(p);
1283                 error = ENOMEM;
1284                 goto out;
1285         }
1286         PROC_UNLOCK(p);
1287
1288         addr = 0;
1289         vm_object_reference(obj->vm_obj);
1290         DRM_UNLOCK(dev);
1291         rv = vm_map_find(map, obj->vm_obj, args->offset, &addr, args->size,
1292             VMFS_ANY_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1293             VM_PROT_READ | VM_PROT_WRITE, MAP_SHARED);
1294         if (rv != KERN_SUCCESS) {
1295                 vm_object_deallocate(obj->vm_obj);
1296                 error = -vm_mmap_to_errno(rv);
1297         } else {
1298                 args->addr_ptr = (uint64_t)addr;
1299         }
1300         DRM_LOCK(dev);
1301 out:
1302         drm_gem_object_unreference(obj);
1303         return (error);
1304 }
1305
1306 static int
1307 i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
1308     vm_ooffset_t foff, struct ucred *cred, u_short *color)
1309 {
1310
1311         *color = 0; /* XXXKIB */
1312         return (0);
1313 }
1314
1315 int i915_intr_pf;
1316
1317 static int
1318 i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
1319     vm_page_t *mres)
1320 {
1321         struct drm_gem_object *gem_obj;
1322         struct drm_i915_gem_object *obj;
1323         struct drm_device *dev;
1324         drm_i915_private_t *dev_priv;
1325         vm_page_t m, oldm;
1326         int cause, ret;
1327         bool write;
1328
1329         gem_obj = vm_obj->handle;
1330         obj = to_intel_bo(gem_obj);
1331         dev = obj->base.dev;
1332         dev_priv = dev->dev_private;
1333 #if 0
1334         write = (prot & VM_PROT_WRITE) != 0;
1335 #else
1336         write = true;
1337 #endif
1338         vm_object_pip_add(vm_obj, 1);
1339
1340         /*
1341          * Remove the placeholder page inserted by vm_fault() from the
1342          * object before dropping the object lock. If
1343          * i915_gem_release_mmap() is active in parallel on this gem
1344          * object, then it owns the drm device sx and might find the
1345          * placeholder already.  Then, since the page is busy,
1346          * i915_gem_release_mmap() sleeps waiting for the busy state
1347          * of the page to be cleared.  We would then be unable to
1348          * acquire the drm device lock until i915_gem_release_mmap()
1349          * is able to make progress.
1350          */
1351         if (*mres != NULL) {
1352                 oldm = *mres;
1353                 vm_page_lock(oldm);
1354                 vm_page_remove(oldm);
1355                 vm_page_unlock(oldm);
1356                 *mres = NULL;
1357         } else
1358                 oldm = NULL;
1359 retry:
1360         VM_OBJECT_WUNLOCK(vm_obj);
1361 unlocked_vmobj:
1362         cause = ret = 0;
1363         m = NULL;
1364
1365         if (i915_intr_pf) {
1366                 ret = i915_mutex_lock_interruptible(dev);
1367                 if (ret != 0) {
1368                         cause = 10;
1369                         goto out;
1370                 }
1371         } else
1372                 DRM_LOCK(dev);
1373
1374         /*
1375          * Since the object lock was dropped, another thread might have
1376          * faulted on the same GTT address and instantiated the
1377          * mapping for the page.  Recheck.
1378          */
1379         VM_OBJECT_WLOCK(vm_obj);
1380         m = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
1381         if (m != NULL) {
1382                 if ((m->flags & VPO_BUSY) != 0) {
1383                         DRM_UNLOCK(dev);
1384                         vm_page_sleep(m, "915pee");
1385                         goto retry;
1386                 }
1387                 goto have_page;
1388         } else
1389                 VM_OBJECT_WUNLOCK(vm_obj);
1390
1391         /* Now bind it into the GTT if needed */
1392         if (!obj->map_and_fenceable) {
1393                 ret = i915_gem_object_unbind(obj);
1394                 if (ret != 0) {
1395                         cause = 20;
1396                         goto unlock;
1397                 }
1398         }
1399         if (!obj->gtt_space) {
1400                 ret = i915_gem_object_bind_to_gtt(obj, 0, true);
1401                 if (ret != 0) {
1402                         cause = 30;
1403                         goto unlock;
1404                 }
1405
1406                 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1407                 if (ret != 0) {
1408                         cause = 40;
1409                         goto unlock;
1410                 }
1411         }
1412
1413         if (obj->tiling_mode == I915_TILING_NONE)
1414                 ret = i915_gem_object_put_fence(obj);
1415         else
1416                 ret = i915_gem_object_get_fence(obj, NULL);
1417         if (ret != 0) {
1418                 cause = 50;
1419                 goto unlock;
1420         }
1421
1422         if (i915_gem_object_is_inactive(obj))
1423                 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1424
1425         obj->fault_mappable = true;
1426         VM_OBJECT_WLOCK(vm_obj);
1427         m = vm_phys_fictitious_to_vm_page(dev->agp->base + obj->gtt_offset +
1428             offset);
1429         if (m == NULL) {
1430                 cause = 60;
1431                 ret = -EFAULT;
1432                 goto unlock;
1433         }
1434         KASSERT((m->flags & PG_FICTITIOUS) != 0,
1435             ("not fictitious %p", m));
1436         KASSERT(m->wire_count == 1, ("wire_count not 1 %p", m));
1437
1438         if ((m->flags & VPO_BUSY) != 0) {
1439                 DRM_UNLOCK(dev);
1440                 vm_page_sleep(m, "915pbs");
1441                 goto retry;
1442         }
1443         m->valid = VM_PAGE_BITS_ALL;
1444         vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
1445 have_page:
1446         *mres = m;
1447         vm_page_busy(m);
1448
1449         CTR4(KTR_DRM, "fault %p %jx %x phys %x", gem_obj, offset, prot,
1450             m->phys_addr);
1451         DRM_UNLOCK(dev);
1452         if (oldm != NULL) {
1453                 vm_page_lock(oldm);
1454                 vm_page_free(oldm);
1455                 vm_page_unlock(oldm);
1456         }
1457         vm_object_pip_wakeup(vm_obj);
1458         return (VM_PAGER_OK);
1459
1460 unlock:
1461         DRM_UNLOCK(dev);
1462 out:
1463         KASSERT(ret != 0, ("i915_gem_pager_fault: wrong return"));
1464         CTR5(KTR_DRM, "fault_fail %p %jx %x err %d %d", gem_obj, offset, prot,
1465             -ret, cause);
1466         if (ret == -EAGAIN || ret == -EIO || ret == -EINTR) {
1467                 kern_yield(PRI_USER);
1468                 goto unlocked_vmobj;
1469         }
1470         VM_OBJECT_WLOCK(vm_obj);
1471         vm_object_pip_wakeup(vm_obj);
1472         return (VM_PAGER_ERROR);
1473 }
1474
1475 static void
1476 i915_gem_pager_dtor(void *handle)
1477 {
1478         struct drm_gem_object *obj;
1479         struct drm_device *dev;
1480
1481         obj = handle;
1482         dev = obj->dev;
1483
1484         DRM_LOCK(dev);
1485         drm_gem_free_mmap_offset(obj);
1486         i915_gem_release_mmap(to_intel_bo(obj));
1487         drm_gem_object_unreference(obj);
1488         DRM_UNLOCK(dev);
1489 }
1490
1491 struct cdev_pager_ops i915_gem_pager_ops = {
1492         .cdev_pg_fault  = i915_gem_pager_fault,
1493         .cdev_pg_ctor   = i915_gem_pager_ctor,
1494         .cdev_pg_dtor   = i915_gem_pager_dtor
1495 };
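/*
 * These ops back the VM object that the GEM mmap path hands to the cdev
 * pager, so page faults on a GTT mapping end up in i915_gem_pager_fault()
 * above.  A minimal sketch of such a registration, with illustrative
 * arguments:
 *
 *	vm_object_t vo;
 *
 *	vo = cdev_pager_allocate(gem_obj, OBJT_MGTDEVICE,
 *	    &i915_gem_pager_ops, size, VM_PROT_READ | VM_PROT_WRITE,
 *	    0, curthread->td_ucred);
 */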
1496
1497 int
1498 i915_gem_mmap_gtt(struct drm_file *file, struct drm_device *dev,
1499     uint32_t handle, uint64_t *offset)
1500 {
1501         struct drm_i915_private *dev_priv;
1502         struct drm_i915_gem_object *obj;
1503         int ret;
1504
1505         if (!(dev->driver->driver_features & DRIVER_GEM))
1506                 return (-ENODEV);
1507
1508         dev_priv = dev->dev_private;
1509
1510         ret = i915_mutex_lock_interruptible(dev);
1511         if (ret != 0)
1512                 return (ret);
1513
1514         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1515         if (&obj->base == NULL) {
1516                 ret = -ENOENT;
1517                 goto unlock;
1518         }
1519
1520         if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1521                 ret = -E2BIG;
1522                 goto out;
1523         }
1524
1525         if (obj->madv != I915_MADV_WILLNEED) {
1526                 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1527                 ret = -EINVAL;
1528                 goto out;
1529         }
1530
1531         ret = drm_gem_create_mmap_offset(&obj->base);
1532         if (ret != 0)
1533                 goto out;
1534
1535         *offset = DRM_GEM_MAPPING_OFF(obj->base.map_list.key) |
1536             DRM_GEM_MAPPING_KEY;
1537 out:
1538         drm_gem_object_unreference(&obj->base);
1539 unlock:
1540         DRM_UNLOCK(dev);
1541         return (ret);
1542 }
1543
1544 int
1545 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1546     struct drm_file *file)
1547 {
1548         struct drm_i915_private *dev_priv;
1549         struct drm_i915_gem_mmap_gtt *args;
1550
1551         dev_priv = dev->dev_private;
1552         args = data;
1553
1554         return (i915_gem_mmap_gtt(file, dev, args->handle, &args->offset));
1555 }
1556
1557 struct drm_i915_gem_object *
1558 i915_gem_alloc_object(struct drm_device *dev, size_t size)
1559 {
1560         struct drm_i915_private *dev_priv;
1561         struct drm_i915_gem_object *obj;
1562
1563         dev_priv = dev->dev_private;
1564
1565         obj = malloc(sizeof(*obj), DRM_I915_GEM, M_WAITOK | M_ZERO);
1566
1567         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
1568                 free(obj, DRM_I915_GEM);
1569                 return (NULL);
1570         }
1571
1572         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1573         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
1574
1575         if (HAS_LLC(dev))
1576                 obj->cache_level = I915_CACHE_LLC;
1577         else
1578                 obj->cache_level = I915_CACHE_NONE;
1579         obj->base.driver_private = NULL;
1580         obj->fence_reg = I915_FENCE_REG_NONE;
1581         INIT_LIST_HEAD(&obj->mm_list);
1582         INIT_LIST_HEAD(&obj->gtt_list);
1583         INIT_LIST_HEAD(&obj->ring_list);
1584         INIT_LIST_HEAD(&obj->exec_list);
1585         INIT_LIST_HEAD(&obj->gpu_write_list);
1586         obj->madv = I915_MADV_WILLNEED;
1587         /* Avoid an unnecessary call to unbind on the first bind. */
1588         obj->map_and_fenceable = true;
1589
1590         i915_gem_info_add_obj(dev_priv, size);
1591
1592         return (obj);
1593 }
1594
1595 void
1596 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
1597 {
1598
1599         /* If we don't have a page list set up, then we're not pinned
1600          * to GPU, and we can ignore the cache flush because it'll happen
1601          * again at bind time.
1602          */
1603         if (obj->pages == NULL)
1604                 return;
1605
1606         /* If the GPU is snooping the contents of the CPU cache,
1607          * we do not need to manually clear the CPU cache lines.  However,
1608          * the caches are only snooped when the render cache is
1609          * flushed/invalidated.  As we always have to emit invalidations
1610          * and flushes when moving into and out of the RENDER domain, correct
1611          * snooping behaviour occurs naturally as the result of our domain
1612          * tracking.
1613          */
1614         if (obj->cache_level != I915_CACHE_NONE)
1615                 return;
1616
1617         CTR1(KTR_DRM, "object_clflush %p", obj);
1618         drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
1619 }
1620
1621 static void
1622 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
1623 {
1624         uint32_t old_write_domain;
1625
1626         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
1627                 return;
1628
1629         i915_gem_clflush_object(obj);
1630         intel_gtt_chipset_flush();
1631         old_write_domain = obj->base.write_domain;
1632         obj->base.write_domain = 0;
1633
1634         CTR3(KTR_DRM, "object_change_domain flush_cpu_write %p %x %x", obj,
1635             obj->base.read_domains, old_write_domain);
1636 }
1637
1638 static int
1639 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
1640 {
1641
1642         if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
1643                 return (0);
1644         return (i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain));
1645 }
1646
1647 static void
1648 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
1649 {
1650         uint32_t old_write_domain;
1651
1652         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
1653                 return;
1654
1655         wmb();
1656
1657         old_write_domain = obj->base.write_domain;
1658         obj->base.write_domain = 0;
1659
1660         CTR3(KTR_DRM, "object_change_domain flush gtt_write %p %x %x", obj,
1661             obj->base.read_domains, old_write_domain);
1662 }
1663
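/*
 * Move the object into the GTT read (and optionally write) domain: flush any
 * pending GPU writes, wait for rendering when required, flush the CPU write
 * domain, and update the domain bookkeeping.
 */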
1664 int
1665 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
1666 {
1667         uint32_t old_write_domain, old_read_domains;
1668         int ret;
1669
1670         if (obj->gtt_space == NULL)
1671                 return (-EINVAL);
1672
1673         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
1674                 return 0;
1675
1676         ret = i915_gem_object_flush_gpu_write_domain(obj);
1677         if (ret != 0)
1678                 return (ret);
1679
1680         if (obj->pending_gpu_write || write) {
1681                 ret = i915_gem_object_wait_rendering(obj);
1682                 if (ret != 0)
1683                         return (ret);
1684         }
1685
1686         i915_gem_object_flush_cpu_write_domain(obj);
1687
1688         old_write_domain = obj->base.write_domain;
1689         old_read_domains = obj->base.read_domains;
1690
1691         KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) == 0,
1692             ("In GTT write domain"));
1693         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
1694         if (write) {
1695                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
1696                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
1697                 obj->dirty = 1;
1698         }
1699
1700         CTR3(KTR_DRM, "object_change_domain set_to_gtt %p %x %x", obj,
1701             old_read_domains, old_write_domain);
1702         return (0);
1703 }
1704
1705 int
1706 i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
1707     enum i915_cache_level cache_level)
1708 {
1709         struct drm_device *dev;
1710         drm_i915_private_t *dev_priv;
1711         int ret;
1712
1713         if (obj->cache_level == cache_level)
1714                 return 0;
1715
1716         if (obj->pin_count) {
1717                 DRM_DEBUG("can not change the cache level of pinned objects\n");
1718                 return (-EBUSY);
1719         }
1720
1721         dev = obj->base.dev;
1722         dev_priv = dev->dev_private;
1723         if (obj->gtt_space) {
1724                 ret = i915_gem_object_finish_gpu(obj);
1725                 if (ret != 0)
1726                         return (ret);
1727
1728                 i915_gem_object_finish_gtt(obj);
1729
1730                 /* Before SandyBridge, you could not use tiling or fence
1731                  * registers with snooped memory, so relinquish any fences
1732                  * currently pointing to our region in the aperture.
1733                  */
1734                 if (INTEL_INFO(obj->base.dev)->gen < 6) {
1735                         ret = i915_gem_object_put_fence(obj);
1736                         if (ret != 0)
1737                                 return (ret);
1738                 }
1739
1740                 i915_gem_gtt_rebind_object(obj, cache_level);
1741                 if (obj->has_aliasing_ppgtt_mapping)
1742                         i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
1743                             obj, cache_level);
1744         }
1745
1746         if (cache_level == I915_CACHE_NONE) {
1747                 u32 old_read_domains, old_write_domain;
1748
1749                 /* If we're coming from LLC cached, then we haven't
1750                  * actually been tracking whether the data is in the
1751                  * CPU cache or not, since we only allow one bit set
1752                  * in obj->write_domain and have been skipping the clflushes.
1753                  * Just set it to the CPU cache for now.
1754                  */
1755                 KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) == 0,
1756                     ("obj %p in CPU write domain", obj));
1757                 KASSERT((obj->base.read_domains & ~I915_GEM_DOMAIN_CPU) == 0,
1758                     ("obj %p in CPU read domain", obj));
1759
1760                 old_read_domains = obj->base.read_domains;
1761                 old_write_domain = obj->base.write_domain;
1762
1763                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
1764                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1765
1766                 CTR3(KTR_DRM, "object_change_domain set_cache_level %p %x %x",
1767                     obj, old_read_domains, old_write_domain);
1768         }
1769
1770         obj->cache_level = cache_level;
1771         return (0);
1772 }
1773
1774 int
1775 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
1776     u32 alignment, struct intel_ring_buffer *pipelined)
1777 {
1778         u32 old_read_domains, old_write_domain;
1779         int ret;
1780
1781         ret = i915_gem_object_flush_gpu_write_domain(obj);
1782         if (ret != 0)
1783                 return (ret);
1784
1785         if (pipelined != obj->ring) {
1786                 ret = i915_gem_object_wait_rendering(obj);
1787                 if (ret == -ERESTART || ret == -EINTR)
1788                         return (ret);
1789         }
1790
1791         ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
1792         if (ret != 0)
1793                 return (ret);
1794
1795         ret = i915_gem_object_pin(obj, alignment, true);
1796         if (ret != 0)
1797                 return (ret);
1798
1799         i915_gem_object_flush_cpu_write_domain(obj);
1800
1801         old_write_domain = obj->base.write_domain;
1802         old_read_domains = obj->base.read_domains;
1803
1804         KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) == 0,
1805             ("obj %p in GTT write domain", obj));
1806         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
1807
1808         CTR3(KTR_DRM, "object_change_domain pin_to_display_plane %p %x %x",
1809             obj, old_read_domains, obj->base.write_domain);
1810         return (0);
1811 }
1812
1813 int
1814 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
1815 {
1816         int ret;
1817
1818         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
1819                 return (0);
1820
1821         if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
1822                 ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
1823                 if (ret != 0)
1824                         return (ret);
1825         }
1826
1827         ret = i915_gem_object_wait_rendering(obj);
1828         if (ret != 0)
1829                 return (ret);
1830
1831         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1832
1833         return (0);
1834 }
1835
1836 static int
1837 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
1838 {
1839         uint32_t old_write_domain, old_read_domains;
1840         int ret;
1841
1842         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
1843                 return 0;
1844
1845         ret = i915_gem_object_flush_gpu_write_domain(obj);
1846         if (ret != 0)
1847                 return (ret);
1848
1849         ret = i915_gem_object_wait_rendering(obj);
1850         if (ret != 0)
1851                 return (ret);
1852
1853         i915_gem_object_flush_gtt_write_domain(obj);
1854         i915_gem_object_set_to_full_cpu_read_domain(obj);
1855
1856         old_write_domain = obj->base.write_domain;
1857         old_read_domains = obj->base.read_domains;
1858
1859         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
1860                 i915_gem_clflush_object(obj);
1861                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
1862         }
1863
1864         KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) == 0,
1865             ("In cpu write domain"));
1866
1867         if (write) {
1868                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
1869                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1870         }
1871
1872         CTR3(KTR_DRM, "object_change_domain set_to_cpu %p %x %x", obj,
1873             old_read_domains, old_write_domain);
1874         return (0);
1875 }
1876
1877 static void
1878 i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
1879 {
1880         int i;
1881
1882         if (obj->page_cpu_valid == NULL)
1883                 return;
1884
1885         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0) {
1886                 for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
1887                         if (obj->page_cpu_valid[i] != 0)
1888                                 continue;
1889                         drm_clflush_pages(obj->pages + i, 1);
1890                 }
1891         }
1892
1893         free(obj->page_cpu_valid, DRM_I915_GEM);
1894         obj->page_cpu_valid = NULL;
1895 }
1896
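/*
 * Make only the given byte range of the object coherent for CPU reads by
 * clflushing just the pages not yet marked valid in obj->page_cpu_valid,
 * instead of flushing the whole object.
 */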
1897 static int
1898 i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
1899     uint64_t offset, uint64_t size)
1900 {
1901         uint32_t old_read_domains;
1902         int i, ret;
1903
1904         if (offset == 0 && size == obj->base.size)
1905                 return (i915_gem_object_set_to_cpu_domain(obj, 0));
1906
1907         ret = i915_gem_object_flush_gpu_write_domain(obj);
1908         if (ret != 0)
1909                 return (ret);
1910         ret = i915_gem_object_wait_rendering(obj);
1911         if (ret != 0)
1912                 return (ret);
1913
1914         i915_gem_object_flush_gtt_write_domain(obj);
1915
1916         if (obj->page_cpu_valid == NULL &&
1917             (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
1918                 return (0);
1919
1920         if (obj->page_cpu_valid == NULL) {
1921                 obj->page_cpu_valid = malloc(obj->base.size / PAGE_SIZE,
1922                     DRM_I915_GEM, M_WAITOK | M_ZERO);
1923         } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
1924                 memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
1925
1926         for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
1927              i++) {
1928                 if (obj->page_cpu_valid[i])
1929                         continue;
1930                 drm_clflush_pages(obj->pages + i, 1);
1931                 obj->page_cpu_valid[i] = 1;
1932         }
1933
1934         KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) == 0,
1935             ("In gpu write domain"));
1936
1937         old_read_domains = obj->base.read_domains;
1938         obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
1939
1940         CTR3(KTR_DRM, "object_change_domain set_cpu_read %p %x %x", obj,
1941             old_read_domains, obj->base.write_domain);
1942         return (0);
1943 }
1944
1945 static uint32_t
1946 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1947 {
1948         uint32_t gtt_size;
1949
1950         if (INTEL_INFO(dev)->gen >= 4 ||
1951             tiling_mode == I915_TILING_NONE)
1952                 return (size);
1953
1954         /* Previous chips need a power-of-two fence region when tiling */
1955         if (INTEL_INFO(dev)->gen == 3)
1956                 gtt_size = 1024*1024;
1957         else
1958                 gtt_size = 512*1024;
1959
1960         while (gtt_size < size)
1961                 gtt_size <<= 1;
1962
1963         return (gtt_size);
1964 }
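/*
 * Worked example: a 1300KiB X-tiled object on gen3 starts from the 1MiB
 * minimum and doubles until the region covers the object, giving a 2MiB
 * fence region; on gen2 the same object starts from 512KiB and likewise
 * ends up at 2MiB.
 */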
1965
1966 /**
1967  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1968  * @dev: drm device, @size: object size in bytes, @tiling_mode: object tiling mode
1969  *
1970  * Return the required GTT alignment for an object, taking into account
1971  * potential fence register mapping.
1972  */
1973 static uint32_t
1974 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1975      int tiling_mode)
1976 {
1977
1978         /*
1979          * Minimum alignment is 4k (GTT page size), but might be greater
1980          * if a fence register is needed for the object.
1981          */
1982         if (INTEL_INFO(dev)->gen >= 4 ||
1983             tiling_mode == I915_TILING_NONE)
1984                 return (4096);
1985
1986         /*
1987          * Previous chips need to be aligned to the size of the smallest
1988          * fence register that can contain the object.
1989          */
1990         return (i915_gem_get_gtt_size(dev, size, tiling_mode));
1991 }
1992
1993 uint32_t
1994 i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, uint32_t size,
1995     int tiling_mode)
1996 {
1997
1998         if (tiling_mode == I915_TILING_NONE)
1999                 return (4096);
2000
2001         /*
2002          * Minimum alignment is 4k (GTT page size) for sane hw.
2003          */
2004         if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev))
2005                 return (4096);
2006
2007         /*
2008          * Previous hardware however needs to be aligned to a power-of-two
2009          * tile height. The simplest method for determining this is to reuse
2010  * the power-of-two tile object size.
2011          */
2012         return (i915_gem_get_gtt_size(dev, size, tiling_mode));
2013 }
2014
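/*
 * Bind an object into the GTT: find (or evict to create) a suitably sized
 * and aligned range of GTT space, wire the backing pages, program the GTT
 * entries, and record whether the resulting placement is mappable through
 * the aperture and usable with a fence register.
 */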
2015 static int
2016 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2017     unsigned alignment, bool map_and_fenceable)
2018 {
2019         struct drm_device *dev;
2020         struct drm_i915_private *dev_priv;
2021         struct drm_mm_node *free_space;
2022         uint32_t size, fence_size, fence_alignment, unfenced_alignment;
2023         bool mappable, fenceable;
2024         int ret;
2025
2026         dev = obj->base.dev;
2027         dev_priv = dev->dev_private;
2028
2029         if (obj->madv != I915_MADV_WILLNEED) {
2030                 DRM_ERROR("Attempting to bind a purgeable object\n");
2031                 return (-EINVAL);
2032         }
2033
2034         fence_size = i915_gem_get_gtt_size(dev, obj->base.size,
2035             obj->tiling_mode);
2036         fence_alignment = i915_gem_get_gtt_alignment(dev, obj->base.size,
2037             obj->tiling_mode);
2038         unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(dev,
2039             obj->base.size, obj->tiling_mode);
2040         if (alignment == 0)
2041                 alignment = map_and_fenceable ? fence_alignment :
2042                     unfenced_alignment;
2043         if (map_and_fenceable && (alignment & (fence_alignment - 1)) != 0) {
2044                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2045                 return (-EINVAL);
2046         }
2047
2048         size = map_and_fenceable ? fence_size : obj->base.size;
2049
2050         /* If the object is bigger than the entire aperture, reject it early
2051          * before evicting everything in a vain attempt to find space.
2052          */
2053         if (obj->base.size > (map_and_fenceable ?
2054             dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
2055                 DRM_ERROR(
2056 "Attempting to bind an object larger than the aperture\n");
2057                 return (-E2BIG);
2058         }
2059
2060  search_free:
2061         if (map_and_fenceable)
2062                 free_space = drm_mm_search_free_in_range(
2063                     &dev_priv->mm.gtt_space, size, alignment, 0,
2064                     dev_priv->mm.gtt_mappable_end, 0);
2065         else
2066                 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2067                     size, alignment, 0);
2068         if (free_space != NULL) {
2069                 if (map_and_fenceable)
2070                         obj->gtt_space = drm_mm_get_block_range_generic(
2071                             free_space, size, alignment, 0,
2072                             dev_priv->mm.gtt_mappable_end, 1);
2073                 else
2074                         obj->gtt_space = drm_mm_get_block_generic(free_space,
2075                             size, alignment, 1);
2076         }
2077         if (obj->gtt_space == NULL) {
2078                 ret = i915_gem_evict_something(dev, size, alignment,
2079                     map_and_fenceable);
2080                 if (ret != 0)
2081                         return (ret);
2082                 goto search_free;
2083         }
2084         ret = i915_gem_object_get_pages_gtt(obj, 0);
2085         if (ret != 0) {
2086                 drm_mm_put_block(obj->gtt_space);
2087                 obj->gtt_space = NULL;
2088                 /*
2089                  * i915_gem_object_get_pages_gtt() cannot return
2090                  * ENOMEM, since we use vm_page_grab(VM_ALLOC_RETRY)
2091                  * (which does not support operation without a flag
2092                  * anyway).
2093                  */
2094                 return (ret);
2095         }
2096
2097         ret = i915_gem_gtt_bind_object(obj);
2098         if (ret != 0) {
2099                 i915_gem_object_put_pages_gtt(obj);
2100                 drm_mm_put_block(obj->gtt_space);
2101                 obj->gtt_space = NULL;
2102                 if (i915_gem_evict_everything(dev, false))
2103                         return (ret);
2104                 goto search_free;
2105         }
2106
2107         list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
2108         list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2109
2110         KASSERT((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0,
2111             ("Object in gpu read domain"));
2112         KASSERT((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0,
2113             ("Object in gpu write domain"));
2114
2115         obj->gtt_offset = obj->gtt_space->start;
2116
2117         fenceable =
2118                 obj->gtt_space->size == fence_size &&
2119                 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
2120
2121         mappable =
2122                 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
2123         obj->map_and_fenceable = mappable && fenceable;
2124
2125         CTR4(KTR_DRM, "object_bind %p %x %x %d", obj, obj->gtt_offset,
2126             obj->base.size, map_and_fenceable);
2127         return (0);
2128 }
2129
2130 static void
2131 i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2132 {
2133         u32 old_write_domain, old_read_domains;
2134
2135         /* Act as a barrier for all accesses through the GTT */
2136         mb();
2137
2138         /* Force a pagefault for domain tracking on next user access */
2139         i915_gem_release_mmap(obj);
2140
2141         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2142                 return;
2143
2144         old_read_domains = obj->base.read_domains;
2145         old_write_domain = obj->base.write_domain;
2146
2147         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2148         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2149
2150         CTR3(KTR_DRM, "object_change_domain finish gtt %p %x %x",
2151             obj, old_read_domains, old_write_domain);
2152 }
2153
2154 int
2155 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2156 {
2157         drm_i915_private_t *dev_priv;
2158         int ret;
2159
2160         dev_priv = obj->base.dev->dev_private;
2161         ret = 0;
2162         if (obj->gtt_space == NULL)
2163                 return (0);
2164         if (obj->pin_count != 0) {
2165                 DRM_ERROR("Attempting to unbind pinned buffer\n");
2166                 return (-EINVAL);
2167         }
2168
2169         ret = i915_gem_object_finish_gpu(obj);
2170         if (ret == -ERESTART || ret == -EINTR)
2171                 return (ret);
2172
2173         i915_gem_object_finish_gtt(obj);
2174
2175         if (ret == 0)
2176                 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2177         if (ret == -ERESTART || ret == -EINTR)
2178                 return (ret);
2179         if (ret != 0) {
2180                 i915_gem_clflush_object(obj);
2181                 obj->base.read_domains = obj->base.write_domain =
2182                     I915_GEM_DOMAIN_CPU;
2183         }
2184
2185         ret = i915_gem_object_put_fence(obj);
2186         if (ret == -ERESTART)
2187                 return (ret);
2188
2189         i915_gem_gtt_unbind_object(obj);
2190         if (obj->has_aliasing_ppgtt_mapping) {
2191                 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2192                 obj->has_aliasing_ppgtt_mapping = 0;
2193         }
2194         i915_gem_object_put_pages_gtt(obj);
2195
2196         list_del_init(&obj->gtt_list);
2197         list_del_init(&obj->mm_list);
2198         obj->map_and_fenceable = true;
2199
2200         drm_mm_put_block(obj->gtt_space);
2201         obj->gtt_space = NULL;
2202         obj->gtt_offset = 0;
2203
2204         if (i915_gem_object_is_purgeable(obj))
2205                 i915_gem_object_truncate(obj);
2206         CTR1(KTR_DRM, "object_unbind %p", obj);
2207
2208         return (ret);
2209 }
2210
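/*
 * Populate obj->pages by wiring every page of the backing VM object.  On
 * failure, the pages wired so far are unwired again and -EIO is returned.
 */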
2211 static int
2212 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
2213     int flags)
2214 {
2215         struct drm_device *dev;
2216         vm_object_t vm_obj;
2217         vm_page_t m;
2218         int page_count, i, j;
2219
2220         dev = obj->base.dev;
2221         KASSERT(obj->pages == NULL, ("Obj already has pages"));
2222         page_count = obj->base.size / PAGE_SIZE;
2223         obj->pages = malloc(page_count * sizeof(vm_page_t), DRM_I915_GEM,
2224             M_WAITOK);
2225         vm_obj = obj->base.vm_obj;
2226         VM_OBJECT_WLOCK(vm_obj);
2227         for (i = 0; i < page_count; i++) {
2228                 if ((obj->pages[i] = i915_gem_wire_page(vm_obj, i)) == NULL)
2229                         goto failed;
2230         }
2231         VM_OBJECT_WUNLOCK(vm_obj);
2232         if (i915_gem_object_needs_bit17_swizzle(obj))
2233                 i915_gem_object_do_bit_17_swizzle(obj);
2234         return (0);
2235
2236 failed:
2237         for (j = 0; j < i; j++) {
2238                 m = obj->pages[j];
2239                 vm_page_lock(m);
2240                 vm_page_unwire(m, 0);
2241                 vm_page_unlock(m);
2242                 atomic_add_long(&i915_gem_wired_pages_cnt, -1);
2243         }
2244         VM_OBJECT_WUNLOCK(vm_obj);
2245         free(obj->pages, DRM_I915_GEM);
2246         obj->pages = NULL;
2247         return (-EIO);
2248 }
2249
2250 #define GEM_PARANOID_CHECK_GTT 0
2251 #if GEM_PARANOID_CHECK_GTT
2252 static void
2253 i915_gem_assert_pages_not_mapped(struct drm_device *dev, vm_page_t *ma,
2254     int page_count)
2255 {
2256         struct drm_i915_private *dev_priv;
2257         vm_paddr_t pa;
2258         unsigned long start, end;
2259         u_int i;
2260         int j;
2261
2262         dev_priv = dev->dev_private;
2263         start = OFF_TO_IDX(dev_priv->mm.gtt_start);
2264         end = OFF_TO_IDX(dev_priv->mm.gtt_end);
2265         for (i = start; i < end; i++) {
2266                 pa = intel_gtt_read_pte_paddr(i);
2267                 for (j = 0; j < page_count; j++) {
2268                         if (pa == VM_PAGE_TO_PHYS(ma[j])) {
2269                                 panic("Page %p in GTT pte index %d pte %x",
2270                                     ma[j], i, intel_gtt_read_pte(i));
2271                         }
2272                 }
2273         }
2274 }
2275 #endif
2276
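/*
 * Drop the wired backing pages: dirty and/or reference them according to the
 * object's dirtiness and madvise state, unwire them, and free the obj->pages
 * array.
 */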
2277 static void
2278 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
2279 {
2280         vm_page_t m;
2281         int page_count, i;
2282
2283         KASSERT(obj->madv != I915_MADV_PURGED_INTERNAL, ("Purged object"));
2284
2285         if (obj->tiling_mode != I915_TILING_NONE)
2286                 i915_gem_object_save_bit_17_swizzle(obj);
2287         if (obj->madv == I915_MADV_DONTNEED)
2288                 obj->dirty = 0;
2289         page_count = obj->base.size / PAGE_SIZE;
2290         VM_OBJECT_WLOCK(obj->base.vm_obj);
2291 #if GEM_PARANOID_CHECK_GTT
2292         i915_gem_assert_pages_not_mapped(obj->base.dev, obj->pages, page_count);
2293 #endif
2294         for (i = 0; i < page_count; i++) {
2295                 m = obj->pages[i];
2296                 if (obj->dirty)
2297                         vm_page_dirty(m);
2298                 if (obj->madv == I915_MADV_WILLNEED)
2299                         vm_page_reference(m);
2300                 vm_page_lock(m);
2301                 vm_page_unwire(obj->pages[i], 1);
2302                 vm_page_unlock(m);
2303                 atomic_add_long(&i915_gem_wired_pages_cnt, -1);
2304         }
2305         VM_OBJECT_WUNLOCK(obj->base.vm_obj);
2306         obj->dirty = 0;
2307         free(obj->pages, DRM_I915_GEM);
2308         obj->pages = NULL;
2309 }
2310
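/*
 * Revoke any existing userspace GTT mappings of the object by removing the
 * pages from the cdev pager object, so the next access faults again and
 * re-establishes the mapping with current domain tracking.
 */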
2311 void
2312 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
2313 {
2314         vm_object_t devobj;
2315         vm_page_t m;
2316         int i, page_count;
2317
2318         if (!obj->fault_mappable)
2319                 return;
2320
2321         CTR3(KTR_DRM, "release_mmap %p %x %x", obj, obj->gtt_offset,
2322             OFF_TO_IDX(obj->base.size));
2323         devobj = cdev_pager_lookup(obj);
2324         if (devobj != NULL) {
2325                 page_count = OFF_TO_IDX(obj->base.size);
2326
2327                 VM_OBJECT_WLOCK(devobj);
2328 retry:
2329                 for (i = 0; i < page_count; i++) {
2330                         m = vm_page_lookup(devobj, i);
2331                         if (m == NULL)
2332                                 continue;
2333                         if (vm_page_sleep_if_busy(m, true, "915unm"))
2334                                 goto retry;
2335                         cdev_pager_free_page(devobj, m);
2336                 }
2337                 VM_OBJECT_WUNLOCK(devobj);
2338                 vm_object_deallocate(devobj);
2339         }
2340
2341         obj->fault_mappable = false;
2342 }
2343
2344 int
2345 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
2346 {
2347         int ret;
2348
2349         KASSERT((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0,
2350             ("In GPU write domain"));
2351
2352         CTR5(KTR_DRM, "object_wait_rendering %p %s %x %d %d", obj,
2353             obj->ring != NULL ? obj->ring->name : "none", obj->gtt_offset,
2354             obj->active, obj->last_rendering_seqno);
2355         if (obj->active) {
2356                 ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
2357                     true);
2358                 if (ret != 0)
2359                         return (ret);
2360         }
2361         return (0);
2362 }
2363
2364 void
2365 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
2366     struct intel_ring_buffer *ring, uint32_t seqno)
2367 {
2368         struct drm_device *dev = obj->base.dev;
2369         struct drm_i915_private *dev_priv = dev->dev_private;
2370         struct drm_i915_fence_reg *reg;
2371
2372         obj->ring = ring;
2373         KASSERT(ring != NULL, ("NULL ring"));
2374
2375         /* Add a reference if we're newly entering the active list. */
2376         if (!obj->active) {
2377                 drm_gem_object_reference(&obj->base);
2378                 obj->active = 1;
2379         }
2380
2381         /* Move from whatever list we were on to the tail of execution. */
2382         list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
2383         list_move_tail(&obj->ring_list, &ring->active_list);
2384
2385         obj->last_rendering_seqno = seqno;
2386         if (obj->fenced_gpu_access) {
2387                 obj->last_fenced_seqno = seqno;
2388                 obj->last_fenced_ring = ring;
2389
2390                 /* Bump MRU to take account of the delayed flush */
2391                 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2392                         reg = &dev_priv->fence_regs[obj->fence_reg];
2393                         list_move_tail(&reg->lru_list,
2394                                        &dev_priv->mm.fence_list);
2395                 }
2396         }
2397 }
2398
2399 static void
2400 i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
2401 {
2402         list_del_init(&obj->ring_list);
2403         obj->last_rendering_seqno = 0;
2404         obj->last_fenced_seqno = 0;
2405 }
2406
2407 static void
2408 i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
2409 {
2410         struct drm_device *dev = obj->base.dev;
2411         drm_i915_private_t *dev_priv = dev->dev_private;
2412
2413         KASSERT(obj->active, ("Object not active"));
2414         list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
2415
2416         i915_gem_object_move_off_active(obj);
2417 }
2418
2419 static void
2420 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2421 {
2422         struct drm_device *dev = obj->base.dev;
2423         struct drm_i915_private *dev_priv = dev->dev_private;
2424
2425         if (obj->pin_count != 0)
2426                 list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
2427         else
2428                 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2429
2430         KASSERT(list_empty(&obj->gpu_write_list), ("On gpu_write_list"));
2431         KASSERT(obj->active, ("Object not active"));
2432         obj->ring = NULL;
2433         obj->last_fenced_ring = NULL;
2434
2435         i915_gem_object_move_off_active(obj);
2436         obj->fenced_gpu_access = false;
2437
2438         obj->active = 0;
2439         obj->pending_gpu_write = false;
2440         drm_gem_object_unreference(&obj->base);
2441
2442 #if 1
2443         KIB_NOTYET();
2444 #else
2445         WARN_ON(i915_verify_lists(dev));
2446 #endif
2447 }
2448
2449 static void
2450 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2451 {
2452         vm_object_t vm_obj;
2453
2454         vm_obj = obj->base.vm_obj;
2455         VM_OBJECT_WLOCK(vm_obj);
2456         vm_object_page_remove(vm_obj, 0, 0, false);
2457         VM_OBJECT_WUNLOCK(vm_obj);
2458         obj->madv = I915_MADV_PURGED_INTERNAL;
2459 }
2460
2461 static inline int
2462 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
2463 {
2464
2465         return (obj->madv == I915_MADV_DONTNEED);
2466 }
2467
2468 static void
2469 i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
2470     uint32_t flush_domains)
2471 {
2472         struct drm_i915_gem_object *obj, *next;
2473         uint32_t old_write_domain;
2474
2475         list_for_each_entry_safe(obj, next, &ring->gpu_write_list,
2476             gpu_write_list) {
2477                 if (obj->base.write_domain & flush_domains) {
2478                         old_write_domain = obj->base.write_domain;
2479                         obj->base.write_domain = 0;
2480                         list_del_init(&obj->gpu_write_list);
2481                         i915_gem_object_move_to_active(obj, ring,
2482                             i915_gem_next_request_seqno(ring));
2483
2484         CTR3(KTR_DRM, "object_change_domain process_flush %p %x %x",
2485                             obj, obj->base.read_domains, old_write_domain);
2486                 }
2487         }
2488 }
2489
2490 static int
2491 i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
2492 {
2493         drm_i915_private_t *dev_priv;
2494
2495         dev_priv = obj->base.dev->dev_private;
2496         return (dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
2497             obj->tiling_mode != I915_TILING_NONE);
2498 }
2499
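/*
 * Grab and validate a single page of the GEM backing object: page it in
 * through the pager if it has backing content, otherwise zero-fill it, then
 * wire it and account for it in i915_gem_wired_pages_cnt.  Returns NULL if
 * the page could not be paged in.
 */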
2500 static vm_page_t
2501 i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex)
2502 {
2503         vm_page_t m;
2504         int rv;
2505
2506         VM_OBJECT_ASSERT_WLOCKED(object);
2507         m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
2508             VM_ALLOC_RETRY);
2509         if (m->valid != VM_PAGE_BITS_ALL) {
2510                 vm_page_busy(m);
2511                 if (vm_pager_has_page(object, pindex, NULL, NULL)) {
2512                         rv = vm_pager_get_pages(object, &m, 1, 0);
2513                         m = vm_page_lookup(object, pindex);
2514                         if (m == NULL)
2515                                 return (NULL);
2516                         if (rv != VM_PAGER_OK) {
2517                                 vm_page_lock(m);
2518                                 vm_page_free(m);
2519                                 vm_page_unlock(m);
2520                                 return (NULL);
2521                         }
2522                 } else {
2523                         pmap_zero_page(m);
2524                         m->valid = VM_PAGE_BITS_ALL;
2525                         m->dirty = 0;
2526                 }
2527                 vm_page_wakeup(m);
2528         }
2529         vm_page_lock(m);
2530         vm_page_wire(m);
2531         vm_page_unlock(m);
2532         atomic_add_long(&i915_gem_wired_pages_cnt, 1);
2533         return (m);
2534 }
2535
2536 int
2537 i915_gem_flush_ring(struct intel_ring_buffer *ring, uint32_t invalidate_domains,
2538     uint32_t flush_domains)
2539 {
2540         int ret;
2541
2542         if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
2543                 return 0;
2544
2545         CTR3(KTR_DRM, "ring_flush %s %x %x", ring->name, invalidate_domains,
2546             flush_domains);
2547         ret = ring->flush(ring, invalidate_domains, flush_domains);
2548         if (ret)
2549                 return ret;
2550
2551         if (flush_domains & I915_GEM_GPU_DOMAINS)
2552                 i915_gem_process_flushing_list(ring, flush_domains);
2553         return 0;
2554 }
2555
2556 static int
2557 i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
2558 {
2559         int ret;
2560
2561         if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
2562                 return 0;
2563
2564         if (!list_empty(&ring->gpu_write_list)) {
2565                 ret = i915_gem_flush_ring(ring, I915_GEM_GPU_DOMAINS,
2566                     I915_GEM_GPU_DOMAINS);
2567                 if (ret != 0)
2568                         return ret;
2569         }
2570
2571         return (i915_wait_request(ring, i915_gem_next_request_seqno(ring),
2572             do_retire));
2573 }
2574
2575 int
2576 i915_gpu_idle(struct drm_device *dev, bool do_retire)
2577 {
2578         drm_i915_private_t *dev_priv = dev->dev_private;
2579         int ret, i;
2580
2581         /* Flush everything onto the inactive list. */
2582         for (i = 0; i < I915_NUM_RINGS; i++) {
2583                 ret = i915_ring_idle(&dev_priv->rings[i], do_retire);
2584                 if (ret)
2585                         return ret;
2586         }
2587
2588         return 0;
2589 }
2590
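/*
 * Wait for the given seqno to pass on a ring.  If the seqno belongs to the
 * ring's outstanding lazy request, a real request is emitted first.  The
 * wait normally sleeps on the ring interrupt; if the interrupt cannot be
 * obtained it falls back to polling, and a wedged GPU aborts the wait.
 */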
2591 int
2592 i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno, bool do_retire)
2593 {
2594         drm_i915_private_t *dev_priv;
2595         struct drm_i915_gem_request *request;
2596         uint32_t ier;
2597         int flags, ret;
2598         bool recovery_complete;
2599
2600         KASSERT(seqno != 0, ("Zero seqno"));
2601
2602         dev_priv = ring->dev->dev_private;
2603         ret = 0;
2604
2605         if (atomic_load_acq_int(&dev_priv->mm.wedged) != 0) {
2606                 /* Give the error handler a chance to run. */
2607                 mtx_lock(&dev_priv->error_completion_lock);
2608                 recovery_complete = dev_priv->error_completion > 0;
2609                 mtx_unlock(&dev_priv->error_completion_lock);
2610                 return (recovery_complete ? -EIO : -EAGAIN);
2611         }
2612
2613         if (seqno == ring->outstanding_lazy_request) {
2614                 request = malloc(sizeof(*request), DRM_I915_GEM,
2615                     M_WAITOK | M_ZERO);
2616                 if (request == NULL)
2617                         return (-ENOMEM);
2618
2619                 ret = i915_add_request(ring, NULL, request);
2620                 if (ret != 0) {
2621                         free(request, DRM_I915_GEM);
2622                         return (ret);
2623                 }
2624
2625                 seqno = request->seqno;
2626         }
2627
2628         if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
2629                 if (HAS_PCH_SPLIT(ring->dev))
2630                         ier = I915_READ(DEIER) | I915_READ(GTIER);
2631                 else
2632                         ier = I915_READ(IER);
2633                 if (!ier) {
2634                         DRM_ERROR("something (likely vbetool) disabled "
2635                                   "interrupts, re-enabling\n");
2636                         ring->dev->driver->irq_preinstall(ring->dev);
2637                         ring->dev->driver->irq_postinstall(ring->dev);
2638                 }
2639
2640                 CTR2(KTR_DRM, "request_wait_begin %s %d", ring->name, seqno);
2641
2642                 ring->waiting_seqno = seqno;
2643                 mtx_lock(&ring->irq_lock);
2644                 if (ring->irq_get(ring)) {
2645                         flags = dev_priv->mm.interruptible ? PCATCH : 0;
2646                         while (!i915_seqno_passed(ring->get_seqno(ring), seqno)
2647                             && !atomic_load_acq_int(&dev_priv->mm.wedged) &&
2648                             ret == 0) {
2649                                 ret = -msleep(ring, &ring->irq_lock, flags,
2650                                     "915gwr", 0);
2651                         }
2652                         ring->irq_put(ring);
2653                         mtx_unlock(&ring->irq_lock);
2654                 } else {
2655                         mtx_unlock(&ring->irq_lock);
2656                         if (_intel_wait_for(ring->dev,
2657                             i915_seqno_passed(ring->get_seqno(ring), seqno) ||
2658                             atomic_load_acq_int(&dev_priv->mm.wedged), 3000,
2659                             0, "i915wrq") != 0)
2660                                 ret = -EBUSY;
2661                 }
2662                 ring->waiting_seqno = 0;
2663
2664                 CTR3(KTR_DRM, "request_wait_end %s %d %d", ring->name, seqno,
2665                     ret);
2666         }
2667         if (atomic_load_acq_int(&dev_priv->mm.wedged))
2668                 ret = -EAGAIN;
2669
2670         /* Directly dispatch request retiring.  While we have the work queue
2671          * to handle this, the waiter on a request often wants an associated
2672          * buffer to have made it to the inactive list, and we would need
2673          * a separate wait queue to handle that.
2674          */
2675         if (ret == 0 && do_retire)
2676                 i915_gem_retire_requests_ring(ring);
2677
2678         return (ret);
2679 }
2680
2681 static u32
2682 i915_gem_get_seqno(struct drm_device *dev)
2683 {
2684         drm_i915_private_t *dev_priv = dev->dev_private;
2685         u32 seqno = dev_priv->next_seqno;
2686
2687         /* reserve 0 for non-seqno */
2688         if (++dev_priv->next_seqno == 0)
2689                 dev_priv->next_seqno = 1;
2690
2691         return seqno;
2692 }
2693
2694 u32
2695 i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
2696 {
2697         if (ring->outstanding_lazy_request == 0)
2698                 ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
2699
2700         return ring->outstanding_lazy_request;
2701 }
2702
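/*
 * Emit a request on the ring and queue it for retirement: record the seqno,
 * ring tail and timestamp, attach the request to the submitting file (if
 * any), re-arm the hangcheck timer and, if the ring was previously idle,
 * schedule the deferred retire task.
 */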
2703 int
2704 i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file,
2705      struct drm_i915_gem_request *request)
2706 {
2707         drm_i915_private_t *dev_priv;
2708         struct drm_i915_file_private *file_priv;
2709         uint32_t seqno;
2710         u32 request_ring_position;
2711         int was_empty;
2712         int ret;
2713
2714         KASSERT(request != NULL, ("NULL request in add"));
2715         DRM_LOCK_ASSERT(ring->dev);
2716         dev_priv = ring->dev->dev_private;
2717
2718         seqno = i915_gem_next_request_seqno(ring);
2719         request_ring_position = intel_ring_get_tail(ring);
2720
2721         ret = ring->add_request(ring, &seqno);
2722         if (ret != 0)
2723             return ret;
2724
2725         CTR2(KTR_DRM, "request_add %s %d", ring->name, seqno);
2726
2727         request->seqno = seqno;
2728         request->ring = ring;
2729         request->tail = request_ring_position;
2730         request->emitted_jiffies = ticks;
2731         was_empty = list_empty(&ring->request_list);
2732         list_add_tail(&request->list, &ring->request_list);
2733
2734         if (file != NULL) {
2735                 file_priv = file->driver_priv;
2736
2737                 mtx_lock(&file_priv->mm.lck);
2738                 request->file_priv = file_priv;
2739                 list_add_tail(&request->client_list,
2740                     &file_priv->mm.request_list);
2741                 mtx_unlock(&file_priv->mm.lck);
2742         }
2743
2744         ring->outstanding_lazy_request = 0;
2745
2746         if (!dev_priv->mm.suspended) {
2747                 if (i915_enable_hangcheck) {
2748                         callout_schedule(&dev_priv->hangcheck_timer,
2749                             DRM_I915_HANGCHECK_PERIOD);
2750                 }
2751                 if (was_empty)
2752                         taskqueue_enqueue_timeout(dev_priv->tq,
2753                             &dev_priv->mm.retire_task, hz);
2754         }
2755         return (0);
2756 }
2757
2758 static inline void
2759 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2760 {
2761         struct drm_i915_file_private *file_priv = request->file_priv;
2762
2763         if (!file_priv)
2764                 return;
2765
2766         DRM_LOCK_ASSERT(request->ring->dev);
2767
2768         mtx_lock(&file_priv->mm.lck);
2769         if (request->file_priv != NULL) {
2770                 list_del(&request->client_list);
2771                 request->file_priv = NULL;
2772         }
2773         mtx_unlock(&file_priv->mm.lck);
2774 }
2775
2776 void
2777 i915_gem_release(struct drm_device *dev, struct drm_file *file)
2778 {
2779         struct drm_i915_file_private *file_priv;
2780         struct drm_i915_gem_request *request;
2781
2782         file_priv = file->driver_priv;
2783
2784         /* Clean up our request list when the client is going away, so that
2785          * later retire_requests won't dereference our soon-to-be-gone
2786          * file_priv.
2787          */
2788         mtx_lock(&file_priv->mm.lck);
2789         while (!list_empty(&file_priv->mm.request_list)) {
2790                 request = list_first_entry(&file_priv->mm.request_list,
2791                                            struct drm_i915_gem_request,
2792                                            client_list);
2793                 list_del(&request->client_list);
2794                 request->file_priv = NULL;
2795         }
2796         mtx_unlock(&file_priv->mm.lck);
2797 }
2798
2799 static void
2800 i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2801     struct intel_ring_buffer *ring)
2802 {
2803
2804         if (ring->dev != NULL)
2805                 DRM_LOCK_ASSERT(ring->dev);
2806
2807         while (!list_empty(&ring->request_list)) {
2808                 struct drm_i915_gem_request *request;
2809
2810                 request = list_first_entry(&ring->request_list,
2811                     struct drm_i915_gem_request, list);
2812
2813                 list_del(&request->list);
2814                 i915_gem_request_remove_from_client(request);
2815                 free(request, DRM_I915_GEM);
2816         }
2817
2818         while (!list_empty(&ring->active_list)) {
2819                 struct drm_i915_gem_object *obj;
2820
2821                 obj = list_first_entry(&ring->active_list,
2822                     struct drm_i915_gem_object, ring_list);
2823
2824                 obj->base.write_domain = 0;
2825                 list_del_init(&obj->gpu_write_list);
2826                 i915_gem_object_move_to_inactive(obj);
2827         }
2828 }
2829
2830 static void
2831 i915_gem_reset_fences(struct drm_device *dev)
2832 {
2833         struct drm_i915_private *dev_priv = dev->dev_private;
2834         int i;
2835
2836         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2837                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2838                 struct drm_i915_gem_object *obj = reg->obj;
2839
2840                 if (!obj)
2841                         continue;
2842
2843                 if (obj->tiling_mode)
2844                         i915_gem_release_mmap(obj);
2845
2846                 reg->obj->fence_reg = I915_FENCE_REG_NONE;
2847                 reg->obj->fenced_gpu_access = false;
2848                 reg->obj->last_fenced_seqno = 0;
2849                 reg->obj->last_fenced_ring = NULL;
2850                 i915_gem_clear_fence_reg(dev, reg);
2851         }
2852 }
2853
2854 void
2855 i915_gem_reset(struct drm_device *dev)
2856 {
2857         struct drm_i915_private *dev_priv = dev->dev_private;
2858         struct drm_i915_gem_object *obj;
2859         int i;
2860
2861         for (i = 0; i < I915_NUM_RINGS; i++)
2862                 i915_gem_reset_ring_lists(dev_priv, &dev_priv->rings[i]);
2863
2864         /* Remove anything from the flushing lists. The GPU cache is likely
2865          * to be lost on reset along with the data, so simply move the
2866          * lost bo to the inactive list.
2867          */
2868         while (!list_empty(&dev_priv->mm.flushing_list)) {
2869                 obj = list_first_entry(&dev_priv->mm.flushing_list,
2870                                       struct drm_i915_gem_object,
2871                                       mm_list);
2872
2873                 obj->base.write_domain = 0;
2874                 list_del_init(&obj->gpu_write_list);
2875                 i915_gem_object_move_to_inactive(obj);
2876         }
2877
2878         /* Move everything out of the GPU domains to ensure we do any
2879          * necessary invalidation upon reuse.
2880          */
2881         list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
2882                 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
2883         }
2884
2885         /* The fence registers are invalidated so clear them out */
2886         i915_gem_reset_fences(dev);
2887 }
2888
2889 /**
2890  * This function clears the request list as sequence numbers are passed.
2891  */
2892 void
2893 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2894 {
2895         uint32_t seqno;
2896         int i;
2897
2898         if (list_empty(&ring->request_list))
2899                 return;
2900
2901         seqno = ring->get_seqno(ring);
2902         CTR2(KTR_DRM, "retire_request_ring %s %d", ring->name, seqno);
2903
2904         for (i = 0; i < DRM_ARRAY_SIZE(ring->sync_seqno); i++)
2905                 if (seqno >= ring->sync_seqno[i])
2906                         ring->sync_seqno[i] = 0;
2907
2908         while (!list_empty(&ring->request_list)) {
2909                 struct drm_i915_gem_request *request;
2910
2911                 request = list_first_entry(&ring->request_list,
2912                                            struct drm_i915_gem_request,
2913                                            list);
2914
2915                 if (!i915_seqno_passed(seqno, request->seqno))
2916                         break;
2917
2918                 CTR2(KTR_DRM, "retire_request_seqno_passed %s %d",
2919                     ring->name, seqno);
2920                 ring->last_retired_head = request->tail;
2921
2922                 list_del(&request->list);
2923                 i915_gem_request_remove_from_client(request);
2924                 free(request, DRM_I915_GEM);
2925         }
2926
2927         /* Move any buffers on the active list that are no longer referenced
2928          * by the ringbuffer to the flushing/inactive lists as appropriate.
2929          */
2930         while (!list_empty(&ring->active_list)) {
2931                 struct drm_i915_gem_object *obj;
2932
2933                 obj = list_first_entry(&ring->active_list,
2934                                       struct drm_i915_gem_object,
2935                                       ring_list);
2936
2937                 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
2938                         break;
2939
2940                 if (obj->base.write_domain != 0)
2941                         i915_gem_object_move_to_flushing(obj);
2942                 else
2943                         i915_gem_object_move_to_inactive(obj);
2944         }
2945
2946         if (ring->trace_irq_seqno &&
2947             i915_seqno_passed(seqno, ring->trace_irq_seqno)) {
2948                 mtx_lock(&ring->irq_lock);
2949                 ring->irq_put(ring);
2950                 mtx_unlock(&ring->irq_lock);
2951                 ring->trace_irq_seqno = 0;
2952         }
2953 }
2954
2955 void
2956 i915_gem_retire_requests(struct drm_device *dev)
2957 {
2958         drm_i915_private_t *dev_priv = dev->dev_private;
2959         struct drm_i915_gem_object *obj, *next;
2960         int i;
2961
2962         if (!list_empty(&dev_priv->mm.deferred_free_list)) {
2963                 list_for_each_entry_safe(obj, next,
2964                     &dev_priv->mm.deferred_free_list, mm_list)
2965                         i915_gem_free_object_tail(obj);
2966         }
2967
2968         for (i = 0; i < I915_NUM_RINGS; i++)
2969                 i915_gem_retire_requests_ring(&dev_priv->rings[i]);
2970 }
2971
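/*
 * The 64-bit fence registers written below (gen6 and gen4/5) encode the end
 * of the fenced range in the upper 32 bits and, in the low word, the start
 * address, the pitch in 128-byte units minus one, a Y-tiling bit and the
 * valid bit.  Each writer either pokes the register directly or emits
 * MI_LOAD_REGISTER_IMM commands on the pipelined ring.
 */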
2972 static int
2973 sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
2974     struct intel_ring_buffer *pipelined)
2975 {
2976         struct drm_device *dev = obj->base.dev;
2977         drm_i915_private_t *dev_priv = dev->dev_private;
2978         u32 size = obj->gtt_space->size;
2979         int regnum = obj->fence_reg;
2980         uint64_t val;
2981
2982         val = (uint64_t)((obj->gtt_offset + size - 4096) &
2983                          0xfffff000) << 32;
2984         val |= obj->gtt_offset & 0xfffff000;
2985         val |= (uint64_t)((obj->stride / 128) - 1) <<
2986                 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2987
2988         if (obj->tiling_mode == I915_TILING_Y)
2989                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2990         val |= I965_FENCE_REG_VALID;
2991
2992         if (pipelined) {
2993                 int ret = intel_ring_begin(pipelined, 6);
2994                 if (ret)
2995                         return ret;
2996
2997                 intel_ring_emit(pipelined, MI_NOOP);
2998                 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2999                 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
3000                 intel_ring_emit(pipelined, (u32)val);
3001                 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
3002                 intel_ring_emit(pipelined, (u32)(val >> 32));
3003                 intel_ring_advance(pipelined);
3004         } else
3005                 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
3006
3007         return 0;
3008 }
3009
3010 static int
3011 i965_write_fence_reg(struct drm_i915_gem_object *obj,
3012     struct intel_ring_buffer *pipelined)
3013 {
3014         struct drm_device *dev = obj->base.dev;
3015         drm_i915_private_t *dev_priv = dev->dev_private;
3016         u32 size = obj->gtt_space->size;
3017         int regnum = obj->fence_reg;
3018         uint64_t val;
3019
3020         val = (uint64_t)((obj->gtt_offset + size - 4096) &
3021                     0xfffff000) << 32;
3022         val |= obj->gtt_offset & 0xfffff000;
3023         val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
3024         if (obj->tiling_mode == I915_TILING_Y)
3025                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
3026         val |= I965_FENCE_REG_VALID;
3027
3028         if (pipelined) {
3029                 int ret = intel_ring_begin(pipelined, 6);
3030                 if (ret)
3031                         return ret;
3032
3033                 intel_ring_emit(pipelined, MI_NOOP);
3034                 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
3035                 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
3036                 intel_ring_emit(pipelined, (u32)val);
3037                 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
3038                 intel_ring_emit(pipelined, (u32)(val >> 32));
3039                 intel_ring_advance(pipelined);
3040         } else
3041                 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
3042
3043         return 0;
3044 }
3045
3046 static int
3047 i915_write_fence_reg(struct drm_i915_gem_object *obj,
3048     struct intel_ring_buffer *pipelined)
3049 {
3050         struct drm_device *dev = obj->base.dev;
3051         drm_i915_private_t *dev_priv = dev->dev_private;
3052         u32 size = obj->gtt_space->size;
3053         u32 fence_reg, val, pitch_val;
3054         int tile_width;
3055
3056         if ((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
3057             (size & -size) != size || (obj->gtt_offset & (size - 1))) {
3058                 printf(
3059 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
3060                  obj->gtt_offset, obj->map_and_fenceable, size);
3061                 return -EINVAL;
3062         }
3063
3064         if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
3065                 tile_width = 128;
3066         else
3067                 tile_width = 512;
3068
3069         /* Note: pitch better be a power of two tile widths */
3070         pitch_val = obj->stride / tile_width;
3071         pitch_val = ffs(pitch_val) - 1;
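        /*
         * Worked example: a 2048-byte stride with 512-byte tiles gives
         * obj->stride / tile_width = 4, so pitch_val = ffs(4) - 1 = 2.
         */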
3072
3073         val = obj->gtt_offset;
3074         if (obj->tiling_mode == I915_TILING_Y)
3075                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3076         val |= I915_FENCE_SIZE_BITS(size);
3077         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3078         val |= I830_FENCE_REG_VALID;
3079
3080         fence_reg = obj->fence_reg;
3081         if (fence_reg < 8)
3082                 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
3083         else
3084                 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
3085
3086         if (pipelined) {
3087                 int ret = intel_ring_begin(pipelined, 4);
3088                 if (ret)
3089                         return ret;
3090
3091                 intel_ring_emit(pipelined, MI_NOOP);
3092                 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
3093                 intel_ring_emit(pipelined, fence_reg);
3094                 intel_ring_emit(pipelined, val);
3095                 intel_ring_advance(pipelined);
3096         } else
3097                 I915_WRITE(fence_reg, val);
3098
3099         return 0;
3100 }
3101
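     /*
      * Program a gen2 (i830-class) fence register.  The requirements
      * mirror the gen3 case, but the object must be 512KB aligned and the
      * tile width is always 128 bytes; start address, size, log2 pitch,
      * tiling flag and valid bit are packed into one 32-bit register.
      */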
3102 static int
3103 i830_write_fence_reg(struct drm_i915_gem_object *obj,
3104     struct intel_ring_buffer *pipelined)
3105 {
3106         struct drm_device *dev = obj->base.dev;
3107         drm_i915_private_t *dev_priv = dev->dev_private;
3108         u32 size = obj->gtt_space->size;
3109         int regnum = obj->fence_reg;
3110         uint32_t val;
3111         uint32_t pitch_val;
3112
3113         if ((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
3114             (size & -size) != size || (obj->gtt_offset & (size - 1))) {
3115                 printf(
3116 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
3117                     obj->gtt_offset, size);
3118                 return -EINVAL;
3119         }
3120
3121         pitch_val = obj->stride / 128;
3122         pitch_val = ffs(pitch_val) - 1;
3123
3124         val = obj->gtt_offset;
3125         if (obj->tiling_mode == I915_TILING_Y)
3126                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3127         val |= I830_FENCE_SIZE_BITS(size);
3128         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3129         val |= I830_FENCE_REG_VALID;
3130
3131         if (pipelined) {
3132                 int ret = intel_ring_begin(pipelined, 4);
3133                 if (ret)
3134                         return ret;
3135
3136                 intel_ring_emit(pipelined, MI_NOOP);
3137                 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
3138                 intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
3139                 intel_ring_emit(pipelined, val);
3140                 intel_ring_advance(pipelined);
3141         } else
3142                 I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
3143
3144         return 0;
3145 }
3146
3147 static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
3148 {
3149         return i915_seqno_passed(ring->get_seqno(ring), seqno);
3150 }
3151
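     /*
      * Prepare an object for a fence register update: flush any pending
      * GPU writes from the ring that last used the fence and, unless the
      * update is pipelined on that same ring, wait for the last fenced
      * seqno to retire.  The memory barrier below keeps outstanding GTT
      * accesses ordered against the upcoming fence register change.
      */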
3152 static int
3153 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
3154     struct intel_ring_buffer *pipelined)
3155 {
3156         int ret;
3157
3158         if (obj->fenced_gpu_access) {
3159                 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3160                         ret = i915_gem_flush_ring(obj->last_fenced_ring, 0,
3161                             obj->base.write_domain);
3162                         if (ret)
3163                                 return ret;
3164                 }
3165
3166                 obj->fenced_gpu_access = false;
3167         }
3168
3169         if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
3170                 if (!ring_passed_seqno(obj->last_fenced_ring,
3171                                        obj->last_fenced_seqno)) {
3172                         ret = i915_wait_request(obj->last_fenced_ring,
3173                                                 obj->last_fenced_seqno,
3174                                                 true);
3175                         if (ret)
3176                                 return ret;
3177                 }
3178
3179                 obj->last_fenced_seqno = 0;
3180                 obj->last_fenced_ring = NULL;
3181         }
3182
3183         /* Ensure that all CPU reads are completed before installing a fence
3184          * and all writes before removing the fence.
3185          */
3186         if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
3187                 mb();
3188
3189         return 0;
3190 }
3191
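     /*
      * Release the fence register held by an object, if any: drop the GTT
      * mmap for tiled objects, synchronize against outstanding fenced GPU
      * access, then clear the hardware register and mark the object as
      * owning no fence.
      */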
3192 int
3193 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3194 {
3195         int ret;
3196
3197         if (obj->tiling_mode)
3198                 i915_gem_release_mmap(obj);
3199
3200         ret = i915_gem_object_flush_fence(obj, NULL);
3201         if (ret)
3202                 return ret;
3203
3204         if (obj->fence_reg != I915_FENCE_REG_NONE) {
3205                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3206
3207                 if (dev_priv->fence_regs[obj->fence_reg].pin_count != 0)
3208                         printf("%s: pin_count %d\n", __func__,
3209                             dev_priv->fence_regs[obj->fence_reg].pin_count);
3210                 i915_gem_clear_fence_reg(obj->base.dev,
3211                                          &dev_priv->fence_regs[obj->fence_reg]);
3212
3213                 obj->fence_reg = I915_FENCE_REG_NONE;
3214         }
3215
3216         return 0;
3217 }
3218
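     /*
      * Choose a fence register: prefer one that is completely free,
      * otherwise steal the least recently used unpinned register,
      * preferring an owner that was last fenced on the requested ring (or
      * on no ring at all).  Returns NULL when every register is occupied
      * and pinned.
      */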
3219 static struct drm_i915_fence_reg *
3220 i915_find_fence_reg(struct drm_device *dev, struct intel_ring_buffer *pipelined)
3221 {
3222         struct drm_i915_private *dev_priv = dev->dev_private;
3223         struct drm_i915_fence_reg *reg, *first, *avail;
3224         int i;
3225
3226         /* First try to find a free reg */
3227         avail = NULL;
3228         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3229                 reg = &dev_priv->fence_regs[i];
3230                 if (!reg->obj)
3231                         return reg;
3232
3233                 if (!reg->pin_count)
3234                         avail = reg;
3235         }
3236
3237         if (avail == NULL)
3238                 return NULL;
3239
3240         /* None available, try to steal one or wait for a user to finish */
3241         avail = first = NULL;
3242         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3243                 if (reg->pin_count)
3244                         continue;
3245
3246                 if (first == NULL)
3247                         first = reg;
3248
3249                 if (!pipelined ||
3250                     !reg->obj->last_fenced_ring ||
3251                     reg->obj->last_fenced_ring == pipelined) {
3252                         avail = reg;
3253                         break;
3254                 }
3255         }
3256
3257         if (avail == NULL)
3258                 avail = first;
3259
3260         return avail;
3261 }
3262
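     /*
      * Acquire (or revalidate) a fence register for a tiled object.  This
      * port forces "pipelined" to NULL on entry, so fence updates are
      * always synchronous and the pipelined branches below are effectively
      * unreachable.  If the object already owns a register it is moved to
      * the tail of the fence LRU and reprogrammed only when its tiling
      * changed; otherwise a register is taken from i915_find_fence_reg(),
      * any previous owner is flushed and detached, and the register is
      * written through the per-generation helpers.
      */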
3263 int
3264 i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
3265     struct intel_ring_buffer *pipelined)
3266 {
3267         struct drm_device *dev = obj->base.dev;
3268         struct drm_i915_private *dev_priv = dev->dev_private;
3269         struct drm_i915_fence_reg *reg;
3270         int ret;
3271
3272         pipelined = NULL;
3273         ret = 0;
3274
3275         if (obj->fence_reg != I915_FENCE_REG_NONE) {
3276                 reg = &dev_priv->fence_regs[obj->fence_reg];
3277                 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
3278
3279                 if (obj->tiling_changed) {
3280                         ret = i915_gem_object_flush_fence(obj, pipelined);
3281                         if (ret)
3282                                 return ret;
3283
3284                         if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
3285                                 pipelined = NULL;
3286
3287                         if (pipelined) {
3288                                 reg->setup_seqno =
3289                                         i915_gem_next_request_seqno(pipelined);
3290                                 obj->last_fenced_seqno = reg->setup_seqno;
3291                                 obj->last_fenced_ring = pipelined;
3292                         }
3293
3294                         goto update;
3295                 }
3296
3297                 if (!pipelined) {
3298                         if (reg->setup_seqno) {
3299                                 if (!ring_passed_seqno(obj->last_fenced_ring,
3300                                     reg->setup_seqno)) {
3301                                         ret = i915_wait_request(
3302                                             obj->last_fenced_ring,
3303                                             reg->setup_seqno,
3304                                             true);
3305                                         if (ret)
3306                                                 return ret;
3307                                 }
3308
3309                                 reg->setup_seqno = 0;
3310                         }
3311                 } else if (obj->last_fenced_ring &&
3312                            obj->last_fenced_ring != pipelined) {
3313                         ret = i915_gem_object_flush_fence(obj, pipelined);
3314                         if (ret)
3315                                 return ret;
3316                 }
3317
3318                 if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
3319                         pipelined = NULL;
3320                 KASSERT(pipelined || reg->setup_seqno == 0, ("!pipelined"));
3321
3322                 if (obj->tiling_changed) {
3323                         if (pipelined) {
3324                                 reg->setup_seqno =
3325                                         i915_gem_next_request_seqno(pipelined);
3326                                 obj->last_fenced_seqno = reg->setup_seqno;
3327                                 obj->last_fenced_ring = pipelined;
3328                         }
3329                         goto update;
3330                 }
3331
3332                 return 0;
3333         }
3334
3335         reg = i915_find_fence_reg(dev, pipelined);
3336         if (reg == NULL)
3337                 return -EDEADLK;
3338
3339         ret = i915_gem_object_flush_fence(obj, pipelined);
3340         if (ret)
3341                 return ret;
3342
3343         if (reg->obj) {
3344                 struct drm_i915_gem_object *old = reg->obj;
3345
3346                 drm_gem_object_reference(&old->base);
3347
3348                 if (old->tiling_mode)
3349                         i915_gem_release_mmap(old);
3350
3351                 ret = i915_gem_object_flush_fence(old, pipelined);
3352                 if (ret) {
3353                         drm_gem_object_unreference(&old->base);
3354                         return ret;
3355                 }
3356
3357                 if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
3358                         pipelined = NULL;
3359
3360                 old->fence_reg = I915_FENCE_REG_NONE;
3361                 old->last_fenced_ring = pipelined;
3362                 old->last_fenced_seqno =
3363                         pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
3364
3365                 drm_gem_object_unreference(&old->base);
3366         } else if (obj->last_fenced_seqno == 0)
3367                 pipelined = NULL;
3368
3369         reg->obj = obj;
3370         list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
3371         obj->fence_reg = reg - dev_priv->fence_regs;
3372         obj->last_fenced_ring = pipelined;
3373
3374         reg->setup_seqno =
3375                 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
3376         obj->last_fenced_seqno = reg->setup_seqno;
3377
3378 update:
3379         obj->tiling_changed = false;
3380         switch (INTEL_INFO(dev)->gen) {
3381         case 7:
3382         case 6:
3383                 ret = sandybridge_write_fence_reg(obj, pipelined);
3384                 break;
3385         case 5:
3386         case 4:
3387                 ret = i965_write_fence_reg(obj, pipelined);
3388                 break;
3389         case 3:
3390                 ret = i915_write_fence_reg(obj, pipelined);
3391                 break;
3392         case 2:
3393                 ret = i830_write_fence_reg(obj, pipelined);
3394                 break;
3395         }
3396
3397         return ret;
3398 }
3399
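     /*
      * Reset a fence register: write zero to the per-generation hardware
      * register, unlink it from the fence LRU and detach it from its
      * object.
      */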
3400 static void
3401 i915_gem_clear_fence_reg(struct drm_device *dev, struct drm_i915_fence_reg *reg)
3402 {
3403         drm_i915_private_t *dev_priv = dev->dev_private;
3404         uint32_t fence_reg = reg - dev_priv->fence_regs;
3405
3406         switch (INTEL_INFO(dev)->gen) {
3407         case 7:
3408         case 6:
3409                 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
3410                 break;
3411         case 5:
3412         case 4:
3413                 I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
3414                 break;
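             /*
              * Note: the "case 2:" label below is intentionally nested
              * inside the else branch, so gen2, and gen3 registers below
              * 8, share the FENCE_REG_830_0 calculation while gen3
              * registers 8 and up use the FENCE_REG_945_8 bank.
              */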
3415         case 3:
3416                 if (fence_reg >= 8)
3417                         fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
3418                 else
3419         case 2:
3420                         fence_reg = FENCE_REG_830_0 + fence_reg * 4;
3421
3422                 I915_WRITE(fence_reg, 0);
3423                 break;
3424         }
3425
3426         list_del_init(&reg->lru_list);
3427         reg->obj = NULL;
3428         reg->setup_seqno = 0;
3429         reg->pin_count = 0;
3430 }
3431
3432 int
3433 i915_gem_init_object(struct drm_gem_object *obj)
3434 {
3435
3436         printf("i915_gem_init_object called\n");
3437         return (0);
3438 }
3439
3440 static bool
3441 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
3442 {
3443
3444         return (obj->gtt_space && !obj->active && obj->pin_count == 0);
3445 }
3446
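     /*
      * Deferred retirement task, the taskqueue counterpart of the Linux
      * retire work handler.  If the struct lock cannot be taken the task
      * reschedules itself.  Otherwise it retires completed requests,
      * pushes a flush plus an empty request down any ring that still has
      * pending writes (so GEM objects are not held indefinitely), and
      * re-arms itself every hz while the device stays busy and is not
      * suspended.
      */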
3447 static void
3448 i915_gem_retire_task_handler(void *arg, int pending)
3449 {
3450         drm_i915_private_t *dev_priv;
3451         struct drm_device *dev;
3452         bool idle;
3453         int i;
3454
3455         dev_priv = arg;
3456         dev = dev_priv->dev;
3457
3458         /* Come back later if the device is busy... */
3459         if (!sx_try_xlock(&dev->dev_struct_lock)) {
3460                 taskqueue_enqueue_timeout(dev_priv->tq,
3461                     &dev_priv->mm.retire_task, hz);
3462                 return;
3463         }
3464
3465         CTR0(KTR_DRM, "retire_task");
3466
3467         i915_gem_retire_requests(dev);
3468
3469         /* Send a periodic flush down the ring so we don't hold onto GEM
3470          * objects indefinitely.
3471          */
3472         idle = true;
3473         for (i = 0; i < I915_NUM_RINGS; i++) {
3474                 struct intel_ring_buffer *ring = &dev_priv->rings[i];
3475
3476                 if (!list_empty(&ring->gpu_write_list)) {
3477                         struct drm_i915_gem_request *request;
3478                         int ret;
3479
3480                         ret = i915_gem_flush_ring(ring,
3481                                                   0, I915_GEM_GPU_DOMAINS);
3482                         request = malloc(sizeof(*request), DRM_I915_GEM,
3483                             M_WAITOK | M_ZERO);
3484                         if (ret || request == NULL ||
3485                             i915_add_request(ring, NULL, request))
3486                                 free(request, DRM_I915_GEM);
3487                 }
3488
3489                 idle &= list_empty(&ring->request_list);
3490         }
3491
3492         if (!dev_priv->mm.suspended && !idle)
3493                 taskqueue_enqueue_timeout(dev_priv->tq,
3494                     &dev_priv->mm.retire_task, hz);
3495
3496         DRM_UNLOCK(dev);
3497 }
3498
3499 void
3500 i915_gem_lastclose(struct drm_device *dev)
3501 {
3502         int ret;
3503
3504         if (drm_core_check_feature(dev, DRIVER_MODESET))
3505                 return;
3506
3507         ret = i915_gem_idle(dev);
3508         if (ret != 0)
3509                 DRM_ERROR("failed to idle hardware: %d\n", ret);
3510 }
3511
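     /*
      * Allocate the backing store for a phys object slot: a physically
      * contiguous DMA buffer obtained from drm_pci_alloc() and remapped
      * write-combining with pmap_change_attr().  Slots that already exist
      * and zero-sized requests are silently accepted.
      */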
3512 static int
3513 i915_gem_init_phys_object(struct drm_device *dev, int id, int size, int align)
3514 {
3515         drm_i915_private_t *dev_priv;
3516         struct drm_i915_gem_phys_object *phys_obj;
3517         int ret;
3518
3519         dev_priv = dev->dev_private;
3520         if (dev_priv->mm.phys_objs[id - 1] != NULL || size == 0)
3521                 return (0);
3522
3523         phys_obj = malloc(sizeof(struct drm_i915_gem_phys_object), DRM_I915_GEM,
3524             M_WAITOK | M_ZERO);
3525
3526         phys_obj->id = id;
3527
3528         phys_obj->handle = drm_pci_alloc(dev, size, align, ~0);
3529         if (phys_obj->handle == NULL) {
3530                 ret = -ENOMEM;
3531                 goto free_obj;
3532         }
3533         pmap_change_attr((vm_offset_t)phys_obj->handle->vaddr,
3534             size / PAGE_SIZE, PAT_WRITE_COMBINING);
3535
3536         dev_priv->mm.phys_objs[id - 1] = phys_obj;
3537
3538         return (0);
3539
3540 free_obj:
3541         free(phys_obj, DRM_I915_GEM);
3542         return (ret);
3543 }
3544
3545 static void
3546 i915_gem_free_phys_object(struct drm_device *dev, int id)
3547 {
3548         drm_i915_private_t *dev_priv;
3549         struct drm_i915_gem_phys_object *phys_obj;
3550
3551         dev_priv = dev->dev_private;
3552         if (dev_priv->mm.phys_objs[id - 1] == NULL)
3553                 return;
3554
3555         phys_obj = dev_priv->mm.phys_objs[id - 1];
3556         if (phys_obj->cur_obj != NULL)
3557                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
3558
3559         drm_pci_free(dev, phys_obj->handle);
3560         free(phys_obj, DRM_I915_GEM);
3561         dev_priv->mm.phys_objs[id - 1] = NULL;
3562 }
3563
3564 void
3565 i915_gem_free_all_phys_object(struct drm_device *dev)
3566 {
3567         int i;
3568
3569         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
3570                 i915_gem_free_phys_object(dev, i);
3571 }
3572
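     /*
      * Detach an object from its phys object: copy the contents of the
      * contiguous backing store back into the object's VM pages, flushing
      * CPU caches and dirtying the pages, then drop the binding in both
      * directions.
      */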
3573 void
3574 i915_gem_detach_phys_object(struct drm_device *dev,
3575     struct drm_i915_gem_object *obj)
3576 {
3577         vm_page_t m;
3578         struct sf_buf *sf;
3579         char *vaddr, *dst;
3580         int i, page_count;
3581
3582         if (obj->phys_obj == NULL)
3583                 return;
3584         vaddr = obj->phys_obj->handle->vaddr;
3585
3586         page_count = obj->base.size / PAGE_SIZE;
3587         VM_OBJECT_WLOCK(obj->base.vm_obj);
3588         for (i = 0; i < page_count; i++) {
3589                 m = i915_gem_wire_page(obj->base.vm_obj, i);
3590                 if (m == NULL)
3591                         continue; /* XXX */
3592
3593                 VM_OBJECT_WUNLOCK(obj->base.vm_obj);
3594                 sf = sf_buf_alloc(m, 0);
3595                 if (sf != NULL) {
3596                         dst = (char *)sf_buf_kva(sf);
3597                         memcpy(dst, vaddr + IDX_TO_OFF(i), PAGE_SIZE);
3598                         sf_buf_free(sf);
3599                 }
3600                 drm_clflush_pages(&m, 1);
3601
3602                 VM_OBJECT_WLOCK(obj->base.vm_obj);
3603                 vm_page_reference(m);
3604                 vm_page_lock(m);
3605                 vm_page_dirty(m);
3606                 vm_page_unwire(m, 0);
3607                 vm_page_unlock(m);
3608                 atomic_add_long(&i915_gem_wired_pages_cnt, -1);
3609         }
3610         VM_OBJECT_WUNLOCK(obj->base.vm_obj);
3611         intel_gtt_chipset_flush();
3612
3613         obj->phys_obj->cur_obj = NULL;
3614         obj->phys_obj = NULL;
3615 }
3616
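     /*
      * Attach an object to a phys object slot, creating the slot on first
      * use, and copy the object's current page contents into the
      * contiguous buffer.  Note that a page which fails to wire aborts
      * the copy loop but the error is not propagated to the caller.
      */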
3617 int
3618 i915_gem_attach_phys_object(struct drm_device *dev,
3619     struct drm_i915_gem_object *obj, int id, int align)
3620 {
3621         drm_i915_private_t *dev_priv;
3622         vm_page_t m;
3623         struct sf_buf *sf;
3624         char *dst, *src;
3625         int i, page_count, ret;
3626
3627         if (id > I915_MAX_PHYS_OBJECT)
3628                 return (-EINVAL);
3629
3630         if (obj->phys_obj != NULL) {
3631                 if (obj->phys_obj->id == id)
3632                         return (0);
3633                 i915_gem_detach_phys_object(dev, obj);
3634         }
3635
3636         dev_priv = dev->dev_private;
3637         if (dev_priv->mm.phys_objs[id - 1] == NULL) {
3638                 ret = i915_gem_init_phys_object(dev, id, obj->base.size, align);
3639                 if (ret != 0) {
3640                         DRM_ERROR("failed to init phys object %d size: %zu\n",
3641                                   id, obj->base.size);
3642                         return (ret);
3643                 }
3644         }
3645
3646         /* bind to the object */
3647         obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
3648         obj->phys_obj->cur_obj = obj;
3649
3650         page_count = obj->base.size / PAGE_SIZE;
3651
3652         VM_OBJECT_WLOCK(obj->base.vm_obj);
3653         ret = 0;
3654         for (i = 0; i < page_count; i++) {
3655                 m = i915_gem_wire_page(obj->base.vm_obj, i);
3656                 if (m == NULL) {
3657                         ret = -EIO;
3658                         break;
3659                 }
3660                 VM_OBJECT_WUNLOCK(obj->base.vm_obj);
3661                 sf = sf_buf_alloc(m, 0);
3662                 src = (char *)sf_buf_kva(sf);
3663                 dst = (char *)obj->phys_obj->handle->vaddr + IDX_TO_OFF(i);
3664                 memcpy(dst, src, PAGE_SIZE);
3665                 sf_buf_free(sf);
3666
3667                 VM_OBJECT_WLOCK(obj->base.vm_obj);
3668
3669                 vm_page_reference(m);
3670                 vm_page_lock(m);
3671                 vm_page_unwire(m, 0);
3672                 vm_page_unlock(m);
3673                 atomic_add_long(&i915_gem_wired_pages_cnt, -1);
3674         }
3675         VM_OBJECT_WUNLOCK(obj->base.vm_obj);
3676
3677         return (0);
3678 }
3679
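     /*
      * pwrite path for objects backed by a phys object.  The data is
      * first copied with copyin_nofault() while the DRM lock is held; if
      * that faults, the lock is dropped and a normal copyin() is retried,
      * which is safe because the phys backing store is fixed for the
      * lifetime of the object.
      */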
3680 static int
3681 i915_gem_phys_pwrite(struct drm_device *dev, struct drm_i915_gem_object *obj,
3682     uint64_t data_ptr, uint64_t offset, uint64_t size,
3683     struct drm_file *file_priv)
3684 {
3685         char *user_data, *vaddr;
3686         int ret;
3687
3688         vaddr = (char *)obj->phys_obj->handle->vaddr + offset;
3689         user_data = (char *)(uintptr_t)data_ptr;
3690
3691         if (copyin_nofault(user_data, vaddr, size) != 0) {
3692                 /* The physical object once assigned is fixed for the lifetime
3693                  * of the obj, so we can safely drop the lock and continue
3694                  * to access vaddr.
3695                  */
3696                 DRM_UNLOCK(dev);
3697                 ret = -copyin(user_data, vaddr, size);
3698                 DRM_LOCK(dev);
3699                 if (ret != 0)
3700                         return (ret);
3701         }
3702
3703         intel_gtt_chipset_flush();
3704         return (0);
3705 }
3706
3707 static int
3708 i915_gpu_is_active(struct drm_device *dev)
3709 {
3710         drm_i915_private_t *dev_priv;
3711
3712         dev_priv = dev->dev_private;
3713         return (!list_empty(&dev_priv->mm.flushing_list) ||
3714             !list_empty(&dev_priv->mm.active_list));
3715 }
3716
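     /*
      * vm_lowmem event handler: under memory pressure, retire requests
      * and unbind purgeable objects first, then evict whatever is left on
      * the inactive list.  If a noticeable fraction of the evictions fail
      * while the GPU is still active, idle the GPU and rescan.
      */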
3717 static void
3718 i915_gem_lowmem(void *arg)
3719 {
3720         struct drm_device *dev;
3721         struct drm_i915_private *dev_priv;
3722         struct drm_i915_gem_object *obj, *next;
3723         int cnt, cnt_fail, cnt_total;
3724
3725         dev = arg;
3726         dev_priv = dev->dev_private;
3727
3728         if (!sx_try_xlock(&dev->dev_struct_lock))
3729                 return;
3730
3731         CTR0(KTR_DRM, "gem_lowmem");
3732
3733 rescan:
3734         /* first scan for clean buffers */
3735         i915_gem_retire_requests(dev);
3736
3737         cnt_total = cnt_fail = cnt = 0;
3738
3739         list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list,
3740             mm_list) {
3741                 if (i915_gem_object_is_purgeable(obj)) {
3742                         if (i915_gem_object_unbind(obj) != 0)
3743                                 cnt_total++;
3744                 } else
3745                         cnt_total++;
3746         }
3747
3748         /* second pass, evict/count anything still on the inactive list */
3749         list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list,
3750             mm_list) {
3751                 if (i915_gem_object_unbind(obj) == 0)
3752                         cnt++;
3753                 else
3754                         cnt_fail++;
3755         }
3756
3757         if (cnt_fail > cnt_total / 100 && i915_gpu_is_active(dev)) {
3758                 /*
3759                  * We are desperate for pages, so as a last resort, wait
3760                  * for the GPU to finish and discard whatever we can.
3761                  * This dramatically reduces the number of OOM-killer
3762                  * events whilst running the GPU aggressively.
3763                  */
3764                 if (i915_gpu_idle(dev, true) == 0)
3765                         goto rescan;
3766         }
3767         DRM_UNLOCK(dev);
3768 }
3769
3770 void
3771 i915_gem_unload(struct drm_device *dev)
3772 {
3773         struct drm_i915_private *dev_priv;
3774
3775         dev_priv = dev->dev_private;
3776         EVENTHANDLER_DEREGISTER(vm_lowmem, dev_priv->mm.i915_lowmem);
3777 }