2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
26 * Copyright (c) 2011 The FreeBSD Foundation
27 * All rights reserved.
29 * This software was developed by Konstantin Belousov under sponsorship from
30 * the FreeBSD Foundation.
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
41 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
42 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51 * SUCH DAMAGE.
54 #include <sys/cdefs.h>
55 __FBSDID("$FreeBSD$");
57 #include <dev/drm2/drmP.h>
58 #include <dev/drm2/drm.h>
59 #include <dev/drm2/i915/i915_drm.h>
60 #include <dev/drm2/i915/i915_drv.h>
61 #include <dev/drm2/i915/intel_drv.h>
62 #include <dev/drm2/i915/intel_ringbuffer.h>
63 #include <sys/resourcevar.h>
64 #include <sys/sched.h>
65 #include <sys/sf_buf.h>
68 #include <vm/vm_pageout.h>
70 static void i915_gem_object_flush_cpu_write_domain(
71 struct drm_i915_gem_object *obj);
72 static uint32_t i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size,
74 static uint32_t i915_gem_get_gtt_alignment(struct drm_device *dev,
75 uint32_t size, int tiling_mode);
76 static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
77 unsigned alignment, bool map_and_fenceable);
78 static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
80 static void i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj);
81 static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
83 static void i915_gem_object_set_to_full_cpu_read_domain(
84 struct drm_i915_gem_object *obj);
85 static int i915_gem_object_set_cpu_read_domain_range(
86 struct drm_i915_gem_object *obj, uint64_t offset, uint64_t size);
87 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj);
88 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
89 static int i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj);
90 static bool i915_gem_object_is_inactive(struct drm_i915_gem_object *obj);
91 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj);
92 static vm_page_t i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex);
93 static void i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
94 uint32_t flush_domains);
95 static void i915_gem_clear_fence_reg(struct drm_device *dev,
96 struct drm_i915_fence_reg *reg);
97 static void i915_gem_reset_fences(struct drm_device *dev);
98 static void i915_gem_retire_task_handler(void *arg, int pending);
99 static int i915_gem_phys_pwrite(struct drm_device *dev,
100 struct drm_i915_gem_object *obj, uint64_t data_ptr, uint64_t offset,
101 uint64_t size, struct drm_file *file_priv);
102 static void i915_gem_lowmem(void *arg);
104 MALLOC_DEFINE(DRM_I915_GEM, "i915gem", "Allocations from i915 gem");
105 long i915_gem_wired_pages_cnt;
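/*
 * Object accounting helpers: track the number of GEM objects and the
 * total backing-store size charged against this device instance.
 */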
108 i915_gem_info_add_obj(struct drm_i915_private *dev_priv, size_t size)
111 dev_priv->mm.object_count++;
112 dev_priv->mm.object_memory += size;
116 i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, size_t size)
119 dev_priv->mm.object_count--;
120 dev_priv->mm.object_memory -= size;
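/*
 * If the GPU is wedged, sleep on error_completion until the reset
 * handler signals that recovery has finished (or the sleep is
 * interrupted), so callers do not race with an in-progress GPU reset.
 */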
124 i915_gem_wait_for_error(struct drm_device *dev)
126 struct drm_i915_private *dev_priv;
129 dev_priv = dev->dev_private;
130 if (!atomic_load_acq_int(&dev_priv->mm.wedged))
133 mtx_lock(&dev_priv->error_completion_lock);
134 while (dev_priv->error_completion == 0) {
135 ret = -msleep(&dev_priv->error_completion,
136 &dev_priv->error_completion_lock, PCATCH, "915wco", 0);
138 mtx_unlock(&dev_priv->error_completion_lock);
142 mtx_unlock(&dev_priv->error_completion_lock);
144 if (atomic_load_acq_int(&dev_priv->mm.wedged)) {
145 mtx_lock(&dev_priv->error_completion_lock);
146 dev_priv->error_completion++;
147 mtx_unlock(&dev_priv->error_completion_lock);
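/*
 * Wait for any pending GPU reset, then take the GEM struct lock with
 * sx_xlock_sig() so the wait can be interrupted by a signal.
 */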
153 i915_mutex_lock_interruptible(struct drm_device *dev)
155 struct drm_i915_private *dev_priv;
158 dev_priv = dev->dev_private;
159 ret = i915_gem_wait_for_error(dev);
164 * interruptible shall it be. might indeed be if dev_lock is
167 ret = sx_xlock_sig(&dev->dev_struct_lock);
176 i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
178 struct drm_device *dev;
179 drm_i915_private_t *dev_priv;
183 dev_priv = dev->dev_private;
185 ret = i915_gem_object_unbind(obj);
186 if (ret == -ERESTART) {
187 list_move(&obj->mm_list, &dev_priv->mm.deferred_free_list);
191 CTR1(KTR_DRM, "object_destroy_tail %p", obj);
192 drm_gem_free_mmap_offset(&obj->base);
193 drm_gem_object_release(&obj->base);
194 i915_gem_info_remove_obj(dev_priv, obj->base.size);
196 free(obj->page_cpu_valid, DRM_I915_GEM);
197 free(obj->bit_17, DRM_I915_GEM);
198 free(obj, DRM_I915_GEM);
202 i915_gem_free_object(struct drm_gem_object *gem_obj)
204 struct drm_i915_gem_object *obj;
205 struct drm_device *dev;
207 obj = to_intel_bo(gem_obj);
210 while (obj->pin_count > 0)
211 i915_gem_object_unpin(obj);
213 if (obj->phys_obj != NULL)
214 i915_gem_detach_phys_object(dev, obj);
216 i915_gem_free_object_tail(obj);
220 init_ring_lists(struct intel_ring_buffer *ring)
223 INIT_LIST_HEAD(&ring->active_list);
224 INIT_LIST_HEAD(&ring->request_list);
225 INIT_LIST_HEAD(&ring->gpu_write_list);
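/*
 * One-time GEM setup at driver load: initialize the MM lists and the
 * retire task, clear the fence registers, detect bit-6 swizzling and
 * register a vm_lowmem handler for memory pressure.
 */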
229 i915_gem_load(struct drm_device *dev)
231 drm_i915_private_t *dev_priv;
234 dev_priv = dev->dev_private;
236 INIT_LIST_HEAD(&dev_priv->mm.active_list);
237 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
238 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
239 INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
240 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
241 INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
242 INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
243 for (i = 0; i < I915_NUM_RINGS; i++)
244 init_ring_lists(&dev_priv->rings[i]);
245 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
246 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
247 TIMEOUT_TASK_INIT(dev_priv->tq, &dev_priv->mm.retire_task, 0,
248 i915_gem_retire_task_handler, dev_priv);
249 dev_priv->error_completion = 0;
251 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
253 u32 tmp = I915_READ(MI_ARB_STATE);
254 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
256 * arb state is a masked write, so set bit +
257 * bit in mask.
258 */
259 tmp = MI_ARB_C3_LP_WRITE_ENABLE |
260 (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
261 I915_WRITE(MI_ARB_STATE, tmp);
265 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
267 /* Old X drivers will take 0-2 for front, back, depth buffers */
268 if (!drm_core_check_feature(dev, DRIVER_MODESET))
269 dev_priv->fence_reg_start = 3;
271 if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) ||
273 dev_priv->num_fence_regs = 16;
275 dev_priv->num_fence_regs = 8;
277 /* Initialize fence registers to zero */
278 for (i = 0; i < dev_priv->num_fence_regs; i++) {
279 i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
281 i915_gem_detect_bit_6_swizzle(dev);
282 dev_priv->mm.interruptible = true;
284 dev_priv->mm.i915_lowmem = EVENTHANDLER_REGISTER(vm_lowmem,
285 i915_gem_lowmem, dev, EVENTHANDLER_PRI_ANY);
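/*
 * Carve out the GEM-managed portion of the GTT: seed the drm_mm
 * allocator, record the mappable split, clear the range and register
 * the aperture as fictitious pages for the fault handler.
 */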
289 i915_gem_do_init(struct drm_device *dev, unsigned long start,
290 unsigned long mappable_end, unsigned long end)
292 drm_i915_private_t *dev_priv;
293 unsigned long mappable;
296 dev_priv = dev->dev_private;
297 mappable = min(end, mappable_end) - start;
299 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
301 dev_priv->mm.gtt_start = start;
302 dev_priv->mm.gtt_mappable_end = mappable_end;
303 dev_priv->mm.gtt_end = end;
304 dev_priv->mm.gtt_total = end - start;
305 dev_priv->mm.mappable_gtt_total = mappable;
307 /* Take over this portion of the GTT */
308 intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
309 device_printf(dev->device,
310 "taking over the fictitious range 0x%lx-0x%lx\n",
311 dev->agp->base + start, dev->agp->base + start + mappable);
312 error = -vm_phys_fictitious_reg_range(dev->agp->base + start,
313 dev->agp->base + start + mappable, VM_MEMATTR_WRITE_COMBINING);
318 i915_gem_init_ioctl(struct drm_device *dev, void *data,
319 struct drm_file *file)
321 struct drm_i915_gem_init *args;
322 drm_i915_private_t *dev_priv;
324 dev_priv = dev->dev_private;
327 if (args->gtt_start >= args->gtt_end ||
328 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
331 if (mtx_initialized(&dev_priv->mm.gtt_space.unused_lock))
334 * XXXKIB. The second-time initialization should be guarded
337 return (i915_gem_do_init(dev, args->gtt_start, args->gtt_end,
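/*
 * Quiesce the GPU for suspend or VT switch: drain the rings, evict
 * inactive buffers under UMS, reset the fences, mark the device
 * suspended and tear down the ring buffers.
 */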
342 i915_gem_idle(struct drm_device *dev)
344 drm_i915_private_t *dev_priv;
347 dev_priv = dev->dev_private;
348 if (dev_priv->mm.suspended)
351 ret = i915_gpu_idle(dev, true);
355 /* Under UMS, be paranoid and evict. */
356 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
357 ret = i915_gem_evict_inactive(dev, false);
362 i915_gem_reset_fences(dev);
364 /* Hack! Don't let anybody do execbuf while we don't control the chip.
365 * We need to replace this with a semaphore, or something.
366 * And not confound mm.suspended!
368 dev_priv->mm.suspended = 1;
369 callout_stop(&dev_priv->hangcheck_timer);
371 i915_kernel_lost_context(dev);
372 i915_gem_cleanup_ringbuffer(dev);
374 /* Cancel the retire work handler, which should be idle now. */
375 taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->mm.retire_task, NULL);
380 i915_gem_init_swizzling(struct drm_device *dev)
382 drm_i915_private_t *dev_priv;
384 dev_priv = dev->dev_private;
386 if (INTEL_INFO(dev)->gen < 5 ||
387 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
390 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
391 DISP_TILE_SURFACE_SWIZZLING);
396 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
398 I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_SNB));
400 I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_IVB));
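/*
 * Set up the aliasing per-process GTT: write its page-directory
 * entries into the tail of the global GTT and enable PPGTT mode on
 * each ring.
 */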
404 i915_gem_init_ppgtt(struct drm_device *dev)
406 drm_i915_private_t *dev_priv;
407 struct i915_hw_ppgtt *ppgtt;
408 uint32_t pd_offset, pd_entry;
410 struct intel_ring_buffer *ring;
411 u_int first_pd_entry_in_global_pt, i;
413 dev_priv = dev->dev_private;
414 ppgtt = dev_priv->mm.aliasing_ppgtt;
418 first_pd_entry_in_global_pt = 512 * 1024 - I915_PPGTT_PD_ENTRIES;
419 for (i = 0; i < ppgtt->num_pd_entries; i++) {
420 pt_addr = VM_PAGE_TO_PHYS(ppgtt->pt_pages[i]);
421 pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
422 pd_entry |= GEN6_PDE_VALID;
423 intel_gtt_write(first_pd_entry_in_global_pt + i, pd_entry);
425 intel_gtt_read_pte(first_pd_entry_in_global_pt);
427 pd_offset = ppgtt->pd_offset;
428 pd_offset /= 64; /* in cachelines, */
431 if (INTEL_INFO(dev)->gen == 6) {
432 uint32_t ecochk = I915_READ(GAM_ECOCHK);
433 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
434 ECOCHK_PPGTT_CACHE64B);
435 I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
436 } else if (INTEL_INFO(dev)->gen >= 7) {
437 I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
438 /* GFX_MODE is per-ring on gen7+ */
441 for (i = 0; i < I915_NUM_RINGS; i++) {
442 ring = &dev_priv->rings[i];
444 if (INTEL_INFO(dev)->gen >= 7)
445 I915_WRITE(RING_MODE_GEN7(ring),
446 GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
448 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
449 I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
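/*
 * Hardware (re)initialization: program swizzling, bring up the render,
 * BSD and blitter rings as supported by the device, and enable the
 * aliasing PPGTT.
 */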
454 i915_gem_init_hw(struct drm_device *dev)
456 drm_i915_private_t *dev_priv;
459 dev_priv = dev->dev_private;
461 i915_gem_init_swizzling(dev);
463 ret = intel_init_render_ring_buffer(dev);
468 ret = intel_init_bsd_ring_buffer(dev);
470 goto cleanup_render_ring;
474 ret = intel_init_blt_ring_buffer(dev);
476 goto cleanup_bsd_ring;
479 dev_priv->next_seqno = 1;
480 i915_gem_init_ppgtt(dev);
484 intel_cleanup_ring_buffer(&dev_priv->rings[VCS]);
486 intel_cleanup_ring_buffer(&dev_priv->rings[RCS]);
491 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
492 struct drm_file *file)
494 struct drm_i915_private *dev_priv;
495 struct drm_i915_gem_get_aperture *args;
496 struct drm_i915_gem_object *obj;
499 dev_priv = dev->dev_private;
502 if (!(dev->driver->driver_features & DRIVER_GEM))
507 list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
508 pinned += obj->gtt_space->size;
511 args->aper_size = dev_priv->mm.gtt_total;
512 args->aper_available_size = args->aper_size - pinned;
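/*
 * Pin an object into the GTT at the requested alignment, unbinding and
 * rebinding it first if it is already mapped at an incompatible offset.
 * The first pin of an inactive object moves it to the pinned list.
 */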
518 i915_gem_object_pin(struct drm_i915_gem_object *obj, uint32_t alignment,
519 bool map_and_fenceable)
521 struct drm_device *dev;
522 struct drm_i915_private *dev_priv;
526 dev_priv = dev->dev_private;
528 KASSERT(obj->pin_count != DRM_I915_GEM_OBJECT_MAX_PIN_COUNT,
531 if (obj->gtt_space != NULL) {
532 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
533 (map_and_fenceable && !obj->map_and_fenceable)) {
534 DRM_DEBUG("bo is already pinned with incorrect alignment:"
535 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
536 " obj->map_and_fenceable=%d\n",
537 obj->gtt_offset, alignment,
539 obj->map_and_fenceable);
540 ret = i915_gem_object_unbind(obj);
546 if (obj->gtt_space == NULL) {
547 ret = i915_gem_object_bind_to_gtt(obj, alignment,
553 if (obj->pin_count++ == 0 && !obj->active)
554 list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
555 obj->pin_mappable |= map_and_fenceable;
560 WARN_ON(i915_verify_lists(dev));
566 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
568 struct drm_device *dev;
569 drm_i915_private_t *dev_priv;
572 dev_priv = dev->dev_private;
577 WARN_ON(i915_verify_lists(dev));
580 KASSERT(obj->pin_count != 0, ("zero pin count"));
581 KASSERT(obj->gtt_space != NULL, ("No gtt mapping"));
583 if (--obj->pin_count == 0) {
585 list_move_tail(&obj->mm_list,
586 &dev_priv->mm.inactive_list);
587 obj->pin_mappable = false;
592 WARN_ON(i915_verify_lists(dev));
597 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
598 struct drm_file *file)
600 struct drm_i915_gem_pin *args;
601 struct drm_i915_gem_object *obj;
602 struct drm_gem_object *gobj;
607 ret = i915_mutex_lock_interruptible(dev);
611 gobj = drm_gem_object_lookup(dev, file, args->handle);
616 obj = to_intel_bo(gobj);
618 if (obj->madv != I915_MADV_WILLNEED) {
619 DRM_ERROR("Attempting to pin a purgeable buffer\n");
624 if (obj->pin_filp != NULL && obj->pin_filp != file) {
625 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
631 obj->user_pin_count++;
632 obj->pin_filp = file;
633 if (obj->user_pin_count == 1) {
634 ret = i915_gem_object_pin(obj, args->alignment, true);
639 /* XXX - flush the CPU caches for pinned objects
640 * as the X server doesn't manage domains yet
642 i915_gem_object_flush_cpu_write_domain(obj);
643 args->offset = obj->gtt_offset;
645 drm_gem_object_unreference(&obj->base);
652 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
653 struct drm_file *file)
655 struct drm_i915_gem_pin *args;
656 struct drm_i915_gem_object *obj;
660 ret = i915_mutex_lock_interruptible(dev);
664 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
665 if (&obj->base == NULL) {
670 if (obj->pin_filp != file) {
671 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
676 obj->user_pin_count--;
677 if (obj->user_pin_count == 0) {
678 obj->pin_filp = NULL;
679 i915_gem_object_unpin(obj);
683 drm_gem_object_unreference(&obj->base);
690 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
691 struct drm_file *file)
693 struct drm_i915_gem_busy *args;
694 struct drm_i915_gem_object *obj;
695 struct drm_i915_gem_request *request;
700 ret = i915_mutex_lock_interruptible(dev);
704 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
705 if (&obj->base == NULL) {
710 args->busy = obj->active;
712 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
713 ret = i915_gem_flush_ring(obj->ring,
714 0, obj->base.write_domain);
715 } else if (obj->ring->outstanding_lazy_request ==
716 obj->last_rendering_seqno) {
717 request = malloc(sizeof(*request), DRM_I915_GEM,
719 ret = i915_add_request(obj->ring, NULL, request);
721 free(request, DRM_I915_GEM);
724 i915_gem_retire_requests_ring(obj->ring);
725 args->busy = obj->active;
728 drm_gem_object_unreference(&obj->base);
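/*
 * Per-client throttle: wait until the client's requests emitted more
 * than about 20 ms ago have completed, so one client cannot queue an
 * unbounded amount of work ahead of the hardware.
 */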
735 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
737 struct drm_i915_private *dev_priv;
738 struct drm_i915_file_private *file_priv;
739 unsigned long recent_enough;
740 struct drm_i915_gem_request *request;
741 struct intel_ring_buffer *ring;
745 dev_priv = dev->dev_private;
746 if (atomic_load_acq_int(&dev_priv->mm.wedged))
749 file_priv = file->driver_priv;
750 recent_enough = ticks - (20 * hz / 1000);
754 mtx_lock(&file_priv->mm.lck);
755 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
756 if (time_after_eq(request->emitted_jiffies, recent_enough))
758 ring = request->ring;
759 seqno = request->seqno;
761 mtx_unlock(&file_priv->mm.lck);
766 mtx_lock(&ring->irq_lock);
767 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
768 if (ring->irq_get(ring)) {
770 !(i915_seqno_passed(ring->get_seqno(ring), seqno) ||
771 atomic_load_acq_int(&dev_priv->mm.wedged)))
772 ret = -msleep(ring, &ring->irq_lock, PCATCH,
775 if (ret == 0 && atomic_load_acq_int(&dev_priv->mm.wedged))
777 } else if (_intel_wait_for(dev,
778 i915_seqno_passed(ring->get_seqno(ring), seqno) ||
779 atomic_load_acq_int(&dev_priv->mm.wedged), 3000, 0, "915rtr")) {
783 mtx_unlock(&ring->irq_lock);
786 taskqueue_enqueue_timeout(dev_priv->tq,
787 &dev_priv->mm.retire_task, 0);
793 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
794 struct drm_file *file_priv)
797 return (i915_gem_ring_throttle(dev, file_priv));
801 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
802 struct drm_file *file_priv)
804 struct drm_i915_gem_madvise *args;
805 struct drm_i915_gem_object *obj;
809 switch (args->madv) {
810 case I915_MADV_DONTNEED:
811 case I915_MADV_WILLNEED:
817 ret = i915_mutex_lock_interruptible(dev);
821 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
822 if (&obj->base == NULL) {
827 if (obj->pin_count != 0) {
832 if (obj->madv != I915_MADV_PURGED_INTERNAL)
833 obj->madv = args->madv;
834 if (i915_gem_object_is_purgeable(obj) && obj->gtt_space == NULL)
835 i915_gem_object_truncate(obj);
836 args->retained = obj->madv != I915_MADV_PURGED_INTERNAL;
839 drm_gem_object_unreference(&obj->base);
846 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
848 drm_i915_private_t *dev_priv;
851 dev_priv = dev->dev_private;
852 for (i = 0; i < I915_NUM_RINGS; i++)
853 intel_cleanup_ring_buffer(&dev_priv->rings[i]);
857 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
858 struct drm_file *file_priv)
860 drm_i915_private_t *dev_priv;
863 if (drm_core_check_feature(dev, DRIVER_MODESET))
865 dev_priv = dev->dev_private;
866 if (atomic_load_acq_int(&dev_priv->mm.wedged) != 0) {
867 DRM_ERROR("Reenabling wedged hardware, good luck\n");
868 atomic_store_rel_int(&dev_priv->mm.wedged, 0);
871 dev_priv->mm.suspended = 0;
873 ret = i915_gem_init_hw(dev);
878 KASSERT(list_empty(&dev_priv->mm.active_list), ("active list"));
879 KASSERT(list_empty(&dev_priv->mm.flushing_list), ("flushing list"));
880 KASSERT(list_empty(&dev_priv->mm.inactive_list), ("inactive list"));
881 for (i = 0; i < I915_NUM_RINGS; i++) {
882 KASSERT(list_empty(&dev_priv->rings[i].active_list),
883 ("ring %d active list", i));
884 KASSERT(list_empty(&dev_priv->rings[i].request_list),
885 ("ring %d request list", i));
889 ret = drm_irq_install(dev);
892 goto cleanup_ringbuffer;
897 i915_gem_cleanup_ringbuffer(dev);
898 dev_priv->mm.suspended = 1;
904 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
905 struct drm_file *file_priv)
908 if (drm_core_check_feature(dev, DRIVER_MODESET))
911 drm_irq_uninstall(dev);
912 return (i915_gem_idle(dev));
916 i915_gem_create(struct drm_file *file, struct drm_device *dev, uint64_t size,
919 struct drm_i915_gem_object *obj;
923 size = roundup(size, PAGE_SIZE);
927 obj = i915_gem_alloc_object(dev, size);
932 ret = drm_gem_handle_create(file, &obj->base, &handle);
934 drm_gem_object_release(&obj->base);
935 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
936 free(obj, DRM_I915_GEM);
940 /* drop reference from allocate - handle holds it now */
941 drm_gem_object_unreference(&obj->base);
942 CTR2(KTR_DRM, "object_create %p %x", obj, size);
948 i915_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
949 struct drm_mode_create_dumb *args)
952 /* have to work out size/pitch and return them */
953 args->pitch = roundup2(args->width * ((args->bpp + 7) / 8), 64);
954 args->size = args->pitch * args->height;
955 return (i915_gem_create(file, dev, args->size, &args->handle));
959 i915_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
963 return (drm_gem_handle_delete(file, handle));
967 i915_gem_create_ioctl(struct drm_device *dev, void *data,
968 struct drm_file *file)
970 struct drm_i915_gem_create *args = data;
972 return (i915_gem_create(file, dev, args->size, &args->handle));
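/*
 * Copy between user space and the object's backing pages through
 * temporary sf_buf mappings, applying the bit-17 swizzle offset fixup
 * where the object requires it.
 */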
976 i915_gem_swap_io(struct drm_device *dev, struct drm_i915_gem_object *obj,
977 uint64_t data_ptr, uint64_t size, uint64_t offset, enum uio_rw rw,
978 struct drm_file *file)
985 int cnt, do_bit17_swizzling, length, obj_po, ret, swizzled_po;
987 if (obj->gtt_offset != 0 && rw == UIO_READ)
988 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
990 do_bit17_swizzling = 0;
993 vm_obj = obj->base.vm_obj;
996 VM_OBJECT_WLOCK(vm_obj);
997 vm_object_pip_add(vm_obj, 1);
999 obj_pi = OFF_TO_IDX(offset);
1000 obj_po = offset & PAGE_MASK;
1002 m = i915_gem_wire_page(vm_obj, obj_pi);
1003 VM_OBJECT_WUNLOCK(vm_obj);
1006 sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
1007 mkva = sf_buf_kva(sf);
1008 length = min(size, PAGE_SIZE - obj_po);
1009 while (length > 0) {
1010 if (do_bit17_swizzling &&
1011 (VM_PAGE_TO_PHYS(m) & (1 << 17)) != 0) {
1012 cnt = roundup2(obj_po + 1, 64);
1013 cnt = min(cnt - obj_po, length);
1014 swizzled_po = obj_po ^ 64;
1017 swizzled_po = obj_po;
1020 ret = -copyout_nofault(
1021 (char *)mkva + swizzled_po,
1022 (void *)(uintptr_t)data_ptr, cnt);
1024 ret = -copyin_nofault(
1025 (void *)(uintptr_t)data_ptr,
1026 (char *)mkva + swizzled_po, cnt);
1037 VM_OBJECT_WLOCK(vm_obj);
1038 if (rw == UIO_WRITE)
1040 vm_page_reference(m);
1042 vm_page_unwire(m, PQ_ACTIVE);
1044 atomic_add_long(&i915_gem_wired_pages_cnt, -1);
1049 vm_object_pip_wakeup(vm_obj);
1050 VM_OBJECT_WUNLOCK(vm_obj);
1056 i915_gem_gtt_write(struct drm_device *dev, struct drm_i915_gem_object *obj,
1057 uint64_t data_ptr, uint64_t size, uint64_t offset, struct drm_file *file)
1063 obj_pi = OFF_TO_IDX(offset);
1064 obj_po = offset & PAGE_MASK;
1066 mkva = (vm_offset_t)pmap_mapdev_attr(dev->agp->base + obj->gtt_offset +
1067 IDX_TO_OFF(obj_pi), size, PAT_WRITE_COMBINING);
1068 ret = -copyin_nofault((void *)(uintptr_t)data_ptr, (char *)mkva +
1070 pmap_unmapdev(mkva, size);
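/*
 * Common pread/pwrite implementation: wire down the user buffer, look
 * up the object and dispatch to the phys-object, write-combining GTT
 * or CPU-domain copy path depending on where the object lives.
 */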
1075 i915_gem_obj_io(struct drm_device *dev, uint32_t handle, uint64_t data_ptr,
1076 uint64_t size, uint64_t offset, enum uio_rw rw, struct drm_file *file)
1078 struct drm_i915_gem_object *obj;
1080 vm_offset_t start, end;
1085 start = trunc_page(data_ptr);
1086 end = round_page(data_ptr + size);
1087 npages = howmany(end - start, PAGE_SIZE);
1088 ma = malloc(npages * sizeof(vm_page_t), DRM_I915_GEM, M_WAITOK |
1090 npages = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
1091 (vm_offset_t)data_ptr, size,
1092 (rw == UIO_READ ? VM_PROT_WRITE : 0 ) | VM_PROT_READ, ma, npages);
1098 ret = i915_mutex_lock_interruptible(dev);
1102 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1103 if (&obj->base == NULL) {
1107 if (offset > obj->base.size || size > obj->base.size - offset) {
1112 if (rw == UIO_READ) {
1113 CTR3(KTR_DRM, "object_pread %p %jx %jx", obj, offset, size);
1114 ret = i915_gem_object_set_cpu_read_domain_range(obj,
1118 ret = i915_gem_swap_io(dev, obj, data_ptr, size, offset,
1121 if (obj->phys_obj) {
1122 CTR3(KTR_DRM, "object_phys_write %p %jx %jx", obj,
1124 ret = i915_gem_phys_pwrite(dev, obj, data_ptr, offset,
1126 } else if (obj->gtt_space &&
1127 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1128 CTR3(KTR_DRM, "object_gtt_write %p %jx %jx", obj,
1130 ret = i915_gem_object_pin(obj, 0, true);
1133 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1136 ret = i915_gem_object_put_fence(obj);
1139 ret = i915_gem_gtt_write(dev, obj, data_ptr, size,
1142 i915_gem_object_unpin(obj);
1144 CTR3(KTR_DRM, "object_pwrite %p %jx %jx", obj,
1146 ret = i915_gem_object_set_to_cpu_domain(obj, true);
1149 ret = i915_gem_swap_io(dev, obj, data_ptr, size, offset,
1154 drm_gem_object_unreference(&obj->base);
1158 vm_page_unhold_pages(ma, npages);
1160 free(ma, DRM_I915_GEM);
1165 i915_gem_pread_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
1167 struct drm_i915_gem_pread *args;
1170 return (i915_gem_obj_io(dev, args->handle, args->data_ptr, args->size,
1171 args->offset, UIO_READ, file));
1175 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
1177 struct drm_i915_gem_pwrite *args;
1180 return (i915_gem_obj_io(dev, args->handle, args->data_ptr, args->size,
1181 args->offset, UIO_WRITE, file));
1185 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1186 struct drm_file *file)
1188 struct drm_i915_gem_set_domain *args;
1189 struct drm_i915_gem_object *obj;
1190 uint32_t read_domains;
1191 uint32_t write_domain;
1194 if ((dev->driver->driver_features & DRIVER_GEM) == 0)
1198 read_domains = args->read_domains;
1199 write_domain = args->write_domain;
1201 if ((write_domain & I915_GEM_GPU_DOMAINS) != 0 ||
1202 (read_domains & I915_GEM_GPU_DOMAINS) != 0 ||
1203 (write_domain != 0 && read_domains != write_domain))
1206 ret = i915_mutex_lock_interruptible(dev);
1210 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1211 if (&obj->base == NULL) {
1216 if ((read_domains & I915_GEM_DOMAIN_GTT) != 0) {
1217 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1221 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1223 drm_gem_object_unreference(&obj->base);
1230 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1231 struct drm_file *file)
1233 struct drm_i915_gem_sw_finish *args;
1234 struct drm_i915_gem_object *obj;
1239 if ((dev->driver->driver_features & DRIVER_GEM) == 0)
1241 ret = i915_mutex_lock_interruptible(dev);
1244 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1245 if (&obj->base == NULL) {
1249 if (obj->pin_count != 0)
1250 i915_gem_object_flush_cpu_write_domain(obj);
1251 drm_gem_object_unreference(&obj->base);
1258 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1259 struct drm_file *file)
1261 struct drm_i915_gem_mmap *args;
1262 struct drm_gem_object *obj;
1271 if ((dev->driver->driver_features & DRIVER_GEM) == 0)
1274 obj = drm_gem_object_lookup(dev, file, args->handle);
1278 if (args->size == 0)
1281 map = &p->p_vmspace->vm_map;
1282 size = round_page(args->size);
1284 if (map->size + size > lim_cur(p, RLIMIT_VMEM)) {
1292 vm_object_reference(obj->vm_obj);
1294 rv = vm_map_find(map, obj->vm_obj, args->offset, &addr, args->size, 0,
1295 VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1296 VM_PROT_READ | VM_PROT_WRITE, MAP_INHERIT_SHARE);
1297 if (rv != KERN_SUCCESS) {
1298 vm_object_deallocate(obj->vm_obj);
1299 error = -vm_mmap_to_errno(rv);
1301 args->addr_ptr = (uint64_t)addr;
1305 drm_gem_object_unreference(obj);
1310 i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
1311 vm_ooffset_t foff, struct ucred *cred, u_short *color)
1314 *color = 0; /* XXXKIB */
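/*
 * GTT fault handler for the cdev pager: bind the object into the
 * mappable GTT, move it to the GTT domain, pick up a fence register
 * for tiled objects and return the fictitious page that backs the
 * faulting aperture address.
 */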
1321 i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
1324 struct drm_gem_object *gem_obj;
1325 struct drm_i915_gem_object *obj;
1326 struct drm_device *dev;
1327 drm_i915_private_t *dev_priv;
1332 gem_obj = vm_obj->handle;
1333 obj = to_intel_bo(gem_obj);
1334 dev = obj->base.dev;
1335 dev_priv = dev->dev_private;
1337 write = (prot & VM_PROT_WRITE) != 0;
1341 vm_object_pip_add(vm_obj, 1);
1344 * Remove the placeholder page inserted by vm_fault() from the
1345 * object before dropping the object lock. If
1346 * i915_gem_release_mmap() is active in parallel on this gem
1347 * object, then it owns the drm device sx and might find the
1348 * placeholder already. Then, since the page is busy,
1349 * i915_gem_release_mmap() sleeps waiting for the busy state
1350 * of the page cleared. We will not be able to acquire the drm
1351 * device lock until i915_gem_release_mmap() is able to make
1352 * progress.
1353 */
1354 if (*mres != NULL) {
1357 vm_page_remove(oldm);
1358 vm_page_unlock(oldm);
1362 VM_OBJECT_WUNLOCK(vm_obj);
1368 ret = i915_mutex_lock_interruptible(dev);
1377 * Since the object lock was dropped, another thread might have
1378 * faulted on the same GTT address and instantiated the
1379 * mapping for the page. Recheck.
1381 VM_OBJECT_WLOCK(vm_obj);
1382 m = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
1384 if (vm_page_busied(m)) {
1387 VM_OBJECT_WUNLOCK(vm_obj);
1388 vm_page_busy_sleep(m, "915pee");
1393 VM_OBJECT_WUNLOCK(vm_obj);
1395 /* Now bind it into the GTT if needed */
1396 if (!obj->map_and_fenceable) {
1397 ret = i915_gem_object_unbind(obj);
1403 if (!obj->gtt_space) {
1404 ret = i915_gem_object_bind_to_gtt(obj, 0, true);
1410 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1417 if (obj->tiling_mode == I915_TILING_NONE)
1418 ret = i915_gem_object_put_fence(obj);
1420 ret = i915_gem_object_get_fence(obj, NULL);
1426 if (i915_gem_object_is_inactive(obj))
1427 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1429 obj->fault_mappable = true;
1430 VM_OBJECT_WLOCK(vm_obj);
1431 m = PHYS_TO_VM_PAGE(dev->agp->base + obj->gtt_offset + offset);
1432 KASSERT((m->flags & PG_FICTITIOUS) != 0,
1433 ("physical address %#jx not fictitious",
1434 (uintmax_t)(dev->agp->base + obj->gtt_offset + offset)));
1436 VM_OBJECT_WUNLOCK(vm_obj);
1441 KASSERT((m->flags & PG_FICTITIOUS) != 0,
1442 ("not fictitious %p", m));
1443 KASSERT(m->wire_count == 1, ("wire_count not 1 %p", m));
1445 if (vm_page_busied(m)) {
1448 VM_OBJECT_WUNLOCK(vm_obj);
1449 vm_page_busy_sleep(m, "915pbs");
1452 if (vm_page_insert(m, vm_obj, OFF_TO_IDX(offset))) {
1454 VM_OBJECT_WUNLOCK(vm_obj);
1458 m->valid = VM_PAGE_BITS_ALL;
1463 CTR4(KTR_DRM, "fault %p %jx %x phys %x", gem_obj, offset, prot,
1469 vm_page_unlock(oldm);
1471 vm_object_pip_wakeup(vm_obj);
1472 return (VM_PAGER_OK);
1477 KASSERT(ret != 0, ("i915_gem_pager_fault: wrong return"));
1478 CTR5(KTR_DRM, "fault_fail %p %jx %x err %d %d", gem_obj, offset, prot,
1480 if (ret == -EAGAIN || ret == -EIO || ret == -EINTR) {
1481 kern_yield(PRI_USER);
1484 VM_OBJECT_WLOCK(vm_obj);
1485 vm_object_pip_wakeup(vm_obj);
1486 return (VM_PAGER_ERROR);
1490 i915_gem_pager_dtor(void *handle)
1492 struct drm_gem_object *obj;
1493 struct drm_device *dev;
1499 drm_gem_free_mmap_offset(obj);
1500 i915_gem_release_mmap(to_intel_bo(obj));
1501 drm_gem_object_unreference(obj);
1505 struct cdev_pager_ops i915_gem_pager_ops = {
1506 .cdev_pg_fault = i915_gem_pager_fault,
1507 .cdev_pg_ctor = i915_gem_pager_ctor,
1508 .cdev_pg_dtor = i915_gem_pager_dtor
1512 i915_gem_mmap_gtt(struct drm_file *file, struct drm_device *dev,
1513 uint32_t handle, uint64_t *offset)
1515 struct drm_i915_private *dev_priv;
1516 struct drm_i915_gem_object *obj;
1519 if (!(dev->driver->driver_features & DRIVER_GEM))
1522 dev_priv = dev->dev_private;
1524 ret = i915_mutex_lock_interruptible(dev);
1528 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1529 if (&obj->base == NULL) {
1534 if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1539 if (obj->madv != I915_MADV_WILLNEED) {
1540 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1545 ret = drm_gem_create_mmap_offset(&obj->base);
1549 *offset = DRM_GEM_MAPPING_OFF(obj->base.map_list.key) |
1550 DRM_GEM_MAPPING_KEY;
1552 drm_gem_object_unreference(&obj->base);
1559 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1560 struct drm_file *file)
1562 struct drm_i915_private *dev_priv;
1563 struct drm_i915_gem_mmap_gtt *args;
1565 dev_priv = dev->dev_private;
1568 return (i915_gem_mmap_gtt(file, dev, args->handle, &args->offset));
1571 struct drm_i915_gem_object *
1572 i915_gem_alloc_object(struct drm_device *dev, size_t size)
1574 struct drm_i915_private *dev_priv;
1575 struct drm_i915_gem_object *obj;
1577 dev_priv = dev->dev_private;
1579 obj = malloc(sizeof(*obj), DRM_I915_GEM, M_WAITOK | M_ZERO);
1581 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
1582 free(obj, DRM_I915_GEM);
1586 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1587 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
1590 obj->cache_level = I915_CACHE_LLC;
1592 obj->cache_level = I915_CACHE_NONE;
1593 obj->base.driver_private = NULL;
1594 obj->fence_reg = I915_FENCE_REG_NONE;
1595 INIT_LIST_HEAD(&obj->mm_list);
1596 INIT_LIST_HEAD(&obj->gtt_list);
1597 INIT_LIST_HEAD(&obj->ring_list);
1598 INIT_LIST_HEAD(&obj->exec_list);
1599 INIT_LIST_HEAD(&obj->gpu_write_list);
1600 obj->madv = I915_MADV_WILLNEED;
1601 /* Avoid an unnecessary call to unbind on the first bind. */
1602 obj->map_and_fenceable = true;
1604 i915_gem_info_add_obj(dev_priv, size);
1610 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
1613 /* If we don't have a page list set up, then we're not pinned
1614 * to GPU, and we can ignore the cache flush because it'll happen
1615 * again at bind time.
1617 if (obj->pages == NULL)
1620 /* If the GPU is snooping the contents of the CPU cache,
1621 * we do not need to manually clear the CPU cache lines. However,
1622 * the caches are only snooped when the render cache is
1623 * flushed/invalidated. As we always have to emit invalidations
1624 * and flushes when moving into and out of the RENDER domain, correct
1625 * snooping behaviour occurs naturally as the result of our domain
1626 * tracking.
1627 */
1628 if (obj->cache_level != I915_CACHE_NONE)
1631 CTR1(KTR_DRM, "object_clflush %p", obj);
1632 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
1636 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
1638 uint32_t old_write_domain;
1640 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
1643 i915_gem_clflush_object(obj);
1644 intel_gtt_chipset_flush();
1645 old_write_domain = obj->base.write_domain;
1646 obj->base.write_domain = 0;
1648 CTR3(KTR_DRM, "object_change_domain flush_cpu_write %p %x %x", obj,
1649 obj->base.read_domains, old_write_domain);
1653 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
1656 if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
1658 return (i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain));
1662 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
1664 uint32_t old_write_domain;
1666 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
1671 old_write_domain = obj->base.write_domain;
1672 obj->base.write_domain = 0;
1674 CTR3(KTR_DRM, "object_change_domain flush gtt_write %p %x %x", obj,
1675 obj->base.read_domains, old_write_domain);
1679 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
1681 uint32_t old_write_domain, old_read_domains;
1684 if (obj->gtt_space == NULL)
1687 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
1690 ret = i915_gem_object_flush_gpu_write_domain(obj);
1694 if (obj->pending_gpu_write || write) {
1695 ret = i915_gem_object_wait_rendering(obj);
1700 i915_gem_object_flush_cpu_write_domain(obj);
1702 old_write_domain = obj->base.write_domain;
1703 old_read_domains = obj->base.read_domains;
1705 KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) == 0,
1706 ("In GTT write domain"));
1707 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
1709 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
1710 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
1714 CTR3(KTR_DRM, "object_change_domain set_to_gtt %p %x %x", obj,
1715 old_read_domains, old_write_domain);
1720 i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
1721 enum i915_cache_level cache_level)
1723 struct drm_device *dev;
1724 drm_i915_private_t *dev_priv;
1727 if (obj->cache_level == cache_level)
1730 if (obj->pin_count) {
1731 DRM_DEBUG("can not change the cache level of pinned objects\n");
1735 dev = obj->base.dev;
1736 dev_priv = dev->dev_private;
1737 if (obj->gtt_space) {
1738 ret = i915_gem_object_finish_gpu(obj);
1742 i915_gem_object_finish_gtt(obj);
1744 /* Before SandyBridge, you could not use tiling or fence
1745 * registers with snooped memory, so relinquish any fences
1746 * currently pointing to our region in the aperture.
1748 if (INTEL_INFO(obj->base.dev)->gen < 6) {
1749 ret = i915_gem_object_put_fence(obj);
1754 i915_gem_gtt_rebind_object(obj, cache_level);
1755 if (obj->has_aliasing_ppgtt_mapping)
1756 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
1760 if (cache_level == I915_CACHE_NONE) {
1761 u32 old_read_domains, old_write_domain;
1763 /* If we're coming from LLC cached, then we haven't
1764 * actually been tracking whether the data is in the
1765 * CPU cache or not, since we only allow one bit set
1766 * in obj->write_domain and have been skipping the clflushes.
1767 * Just set it to the CPU cache for now.
1769 KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) == 0,
1770 ("obj %p in CPU write domain", obj));
1771 KASSERT((obj->base.read_domains & ~I915_GEM_DOMAIN_CPU) == 0,
1772 ("obj %p in CPU read domain", obj));
1774 old_read_domains = obj->base.read_domains;
1775 old_write_domain = obj->base.write_domain;
1777 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
1778 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1780 CTR3(KTR_DRM, "object_change_domain set_cache_level %p %x %x",
1781 obj, old_read_domains, old_write_domain);
1784 obj->cache_level = cache_level;
1789 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
1790 u32 alignment, struct intel_ring_buffer *pipelined)
1792 u32 old_read_domains, old_write_domain;
1795 ret = i915_gem_object_flush_gpu_write_domain(obj);
1799 if (pipelined != obj->ring) {
1800 ret = i915_gem_object_wait_rendering(obj);
1801 if (ret == -ERESTART || ret == -EINTR)
1805 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
1809 ret = i915_gem_object_pin(obj, alignment, true);
1813 i915_gem_object_flush_cpu_write_domain(obj);
1815 old_write_domain = obj->base.write_domain;
1816 old_read_domains = obj->base.read_domains;
1818 KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) == 0,
1819 ("obj %p in GTT write domain", obj));
1820 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
1822 CTR3(KTR_DRM, "object_change_domain pin_to_display_plan %p %x %x",
1823 obj, old_read_domains, obj->base.write_domain);
1828 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
1832 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
1835 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
1836 ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
1841 ret = i915_gem_object_wait_rendering(obj);
1845 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1851 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
1853 uint32_t old_write_domain, old_read_domains;
1856 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
1859 ret = i915_gem_object_flush_gpu_write_domain(obj);
1863 ret = i915_gem_object_wait_rendering(obj);
1867 i915_gem_object_flush_gtt_write_domain(obj);
1868 i915_gem_object_set_to_full_cpu_read_domain(obj);
1870 old_write_domain = obj->base.write_domain;
1871 old_read_domains = obj->base.read_domains;
1873 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
1874 i915_gem_clflush_object(obj);
1875 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
1878 KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) == 0,
1879 ("In cpu write domain"));
1882 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
1883 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1886 CTR3(KTR_DRM, "object_change_domain set_to_cpu %p %x %x", obj,
1887 old_read_domains, old_write_domain);
1892 i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
1896 if (obj->page_cpu_valid == NULL)
1899 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0) {
1900 for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
1901 if (obj->page_cpu_valid[i] != 0)
1903 drm_clflush_pages(obj->pages + i, 1);
1907 free(obj->page_cpu_valid, DRM_I915_GEM);
1908 obj->page_cpu_valid = NULL;
1912 i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
1913 uint64_t offset, uint64_t size)
1915 uint32_t old_read_domains;
1918 if (offset == 0 && size == obj->base.size)
1919 return (i915_gem_object_set_to_cpu_domain(obj, 0));
1921 ret = i915_gem_object_flush_gpu_write_domain(obj);
1924 ret = i915_gem_object_wait_rendering(obj);
1928 i915_gem_object_flush_gtt_write_domain(obj);
1930 if (obj->page_cpu_valid == NULL &&
1931 (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
1934 if (obj->page_cpu_valid == NULL) {
1935 obj->page_cpu_valid = malloc(obj->base.size / PAGE_SIZE,
1936 DRM_I915_GEM, M_WAITOK | M_ZERO);
1937 } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
1938 memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
1940 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
1942 if (obj->page_cpu_valid[i])
1944 drm_clflush_pages(obj->pages + i, 1);
1945 obj->page_cpu_valid[i] = 1;
1948 KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) == 0,
1949 ("In gpu write domain"));
1951 old_read_domains = obj->base.read_domains;
1952 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
1954 CTR3(KTR_DRM, "object_change_domain set_cpu_read %p %x %x", obj,
1955 old_read_domains, obj->base.write_domain);
1960 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1964 if (INTEL_INFO(dev)->gen >= 4 ||
1965 tiling_mode == I915_TILING_NONE)
1968 /* Previous chips need a power-of-two fence region when tiling */
1969 if (INTEL_INFO(dev)->gen == 3)
1970 gtt_size = 1024*1024;
1972 gtt_size = 512*1024;
1974 while (gtt_size < size)
1981 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1982 * @obj: object to check
1984 * Return the required GTT alignment for an object, taking into account
1985 * potential fence register mapping.
1988 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1993 * Minimum alignment is 4k (GTT page size), but might be greater
1994 * if a fence register is needed for the object.
1996 if (INTEL_INFO(dev)->gen >= 4 ||
1997 tiling_mode == I915_TILING_NONE)
2001 * Previous chips need to be aligned to the size of the smallest
2002 * fence register that can contain the object.
2004 return (i915_gem_get_gtt_size(dev, size, tiling_mode));
2008 i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, uint32_t size,
2012 if (tiling_mode == I915_TILING_NONE)
2016 * Minimum alignment is 4k (GTT page size) for sane hw.
2018 if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev))
2022 * Previous hardware however needs to be aligned to a power-of-two
2023 * tile height. The simplest method for determining this is to reuse
2024 * the power-of-tile object size.
2026 return (i915_gem_get_gtt_size(dev, size, tiling_mode));
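/*
 * Find space in the GTT for the object, evicting other objects if
 * needed, wire its backing pages and install the GTT mapping; also
 * records whether the resulting binding is mappable and fenceable.
 */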
2030 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2031 unsigned alignment, bool map_and_fenceable)
2033 struct drm_device *dev;
2034 struct drm_i915_private *dev_priv;
2035 struct drm_mm_node *free_space;
2036 uint32_t size, fence_size, fence_alignment, unfenced_alignment;
2037 bool mappable, fenceable;
2040 dev = obj->base.dev;
2041 dev_priv = dev->dev_private;
2043 if (obj->madv != I915_MADV_WILLNEED) {
2044 DRM_ERROR("Attempting to bind a purgeable object\n");
2048 fence_size = i915_gem_get_gtt_size(dev, obj->base.size,
2050 fence_alignment = i915_gem_get_gtt_alignment(dev, obj->base.size,
2052 unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(dev,
2053 obj->base.size, obj->tiling_mode);
2055 alignment = map_and_fenceable ? fence_alignment :
2057 if (map_and_fenceable && (alignment & (fence_alignment - 1)) != 0) {
2058 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2062 size = map_and_fenceable ? fence_size : obj->base.size;
2064 /* If the object is bigger than the entire aperture, reject it early
2065 * before evicting everything in a vain attempt to find space.
2067 if (obj->base.size > (map_and_fenceable ?
2068 dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
2070 "Attempting to bind an object larger than the aperture\n");
2075 if (map_and_fenceable)
2076 free_space = drm_mm_search_free_in_range(
2077 &dev_priv->mm.gtt_space, size, alignment, 0,
2078 dev_priv->mm.gtt_mappable_end, 0);
2080 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2081 size, alignment, 0);
2082 if (free_space != NULL) {
2083 if (map_and_fenceable)
2084 obj->gtt_space = drm_mm_get_block_range_generic(
2085 free_space, size, alignment, 0,
2086 dev_priv->mm.gtt_mappable_end, 1);
2088 obj->gtt_space = drm_mm_get_block_generic(free_space,
2089 size, alignment, 1);
2091 if (obj->gtt_space == NULL) {
2092 ret = i915_gem_evict_something(dev, size, alignment,
2098 ret = i915_gem_object_get_pages_gtt(obj, 0);
2100 drm_mm_put_block(obj->gtt_space);
2101 obj->gtt_space = NULL;
2103 * i915_gem_object_get_pages_gtt() cannot return
2104 * ENOMEM, since we use vm_page_grab().
2109 ret = i915_gem_gtt_bind_object(obj);
2111 i915_gem_object_put_pages_gtt(obj);
2112 drm_mm_put_block(obj->gtt_space);
2113 obj->gtt_space = NULL;
2114 if (i915_gem_evict_everything(dev, false))
2119 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
2120 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2122 KASSERT((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0,
2123 ("Object in gpu read domain"));
2124 KASSERT((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0,
2125 ("Object in gpu write domain"));
2127 obj->gtt_offset = obj->gtt_space->start;
2130 obj->gtt_space->size == fence_size &&
2131 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
2134 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
2135 obj->map_and_fenceable = mappable && fenceable;
2137 CTR4(KTR_DRM, "object_bind %p %x %x %d", obj, obj->gtt_offset,
2138 obj->base.size, map_and_fenceable);
2143 i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2145 u32 old_write_domain, old_read_domains;
2147 /* Act as a barrier for all accesses through the GTT */
2150 /* Force a pagefault for domain tracking on next user access */
2151 i915_gem_release_mmap(obj);
2153 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2156 old_read_domains = obj->base.read_domains;
2157 old_write_domain = obj->base.write_domain;
2159 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2160 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2162 CTR3(KTR_DRM, "object_change_domain finish gtt %p %x %x",
2163 obj, old_read_domains, old_write_domain);
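/*
 * Reverse of binding: finish outstanding GPU access, move the object
 * to the CPU domain, drop any fence, tear down the GTT and aliasing
 * PPGTT mappings and release the backing pages and GTT space.
 */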
2167 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2169 drm_i915_private_t *dev_priv;
2172 dev_priv = obj->base.dev->dev_private;
2174 if (obj->gtt_space == NULL)
2176 if (obj->pin_count != 0) {
2177 DRM_ERROR("Attempting to unbind pinned buffer\n");
2181 ret = i915_gem_object_finish_gpu(obj);
2182 if (ret == -ERESTART || ret == -EINTR)
2185 i915_gem_object_finish_gtt(obj);
2188 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2189 if (ret == -ERESTART || ret == -EINTR)
2192 i915_gem_clflush_object(obj);
2193 obj->base.read_domains = obj->base.write_domain =
2194 I915_GEM_DOMAIN_CPU;
2197 ret = i915_gem_object_put_fence(obj);
2198 if (ret == -ERESTART)
2201 i915_gem_gtt_unbind_object(obj);
2202 if (obj->has_aliasing_ppgtt_mapping) {
2203 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2204 obj->has_aliasing_ppgtt_mapping = 0;
2206 i915_gem_object_put_pages_gtt(obj);
2208 list_del_init(&obj->gtt_list);
2209 list_del_init(&obj->mm_list);
2210 obj->map_and_fenceable = true;
2212 drm_mm_put_block(obj->gtt_space);
2213 obj->gtt_space = NULL;
2214 obj->gtt_offset = 0;
2216 if (i915_gem_object_is_purgeable(obj))
2217 i915_gem_object_truncate(obj);
2218 CTR1(KTR_DRM, "object_unbind %p", obj);
2224 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
2227 struct drm_device *dev;
2230 int page_count, i, j;
2232 dev = obj->base.dev;
2233 KASSERT(obj->pages == NULL, ("Obj already has pages"));
2234 page_count = obj->base.size / PAGE_SIZE;
2235 obj->pages = malloc(page_count * sizeof(vm_page_t), DRM_I915_GEM,
2237 vm_obj = obj->base.vm_obj;
2238 VM_OBJECT_WLOCK(vm_obj);
2239 for (i = 0; i < page_count; i++) {
2240 if ((obj->pages[i] = i915_gem_wire_page(vm_obj, i)) == NULL)
2243 VM_OBJECT_WUNLOCK(vm_obj);
2244 if (i915_gem_object_needs_bit17_swizzle(obj))
2245 i915_gem_object_do_bit_17_swizzle(obj);
2249 for (j = 0; j < i; j++) {
2252 vm_page_unwire(m, PQ_INACTIVE);
2254 atomic_add_long(&i915_gem_wired_pages_cnt, -1);
2256 VM_OBJECT_WUNLOCK(vm_obj);
2257 free(obj->pages, DRM_I915_GEM);
2262 #define GEM_PARANOID_CHECK_GTT 0
2263 #if GEM_PARANOID_CHECK_GTT
2265 i915_gem_assert_pages_not_mapped(struct drm_device *dev, vm_page_t *ma,
2268 struct drm_i915_private *dev_priv;
2270 unsigned long start, end;
2274 dev_priv = dev->dev_private;
2275 start = OFF_TO_IDX(dev_priv->mm.gtt_start);
2276 end = OFF_TO_IDX(dev_priv->mm.gtt_end);
2277 for (i = start; i < end; i++) {
2278 pa = intel_gtt_read_pte_paddr(i);
2279 for (j = 0; j < page_count; j++) {
2280 if (pa == VM_PAGE_TO_PHYS(ma[j])) {
2281 panic("Page %p in GTT pte index %d pte %x",
2282 ma[j], i, intel_gtt_read_pte(i));
2290 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
2295 KASSERT(obj->madv != I915_MADV_PURGED_INTERNAL, ("Purged object"));
2297 if (obj->tiling_mode != I915_TILING_NONE)
2298 i915_gem_object_save_bit_17_swizzle(obj);
2299 if (obj->madv == I915_MADV_DONTNEED)
2301 page_count = obj->base.size / PAGE_SIZE;
2302 VM_OBJECT_WLOCK(obj->base.vm_obj);
2303 #if GEM_PARANOID_CHECK_GTT
2304 i915_gem_assert_pages_not_mapped(obj->base.dev, obj->pages, page_count);
2306 for (i = 0; i < page_count; i++) {
2310 if (obj->madv == I915_MADV_WILLNEED)
2311 vm_page_reference(m);
2313 vm_page_unwire(obj->pages[i], PQ_ACTIVE);
2315 atomic_add_long(&i915_gem_wired_pages_cnt, -1);
2317 VM_OBJECT_WUNLOCK(obj->base.vm_obj);
2319 free(obj->pages, DRM_I915_GEM);
2324 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
2330 if (!obj->fault_mappable)
2333 CTR3(KTR_DRM, "release_mmap %p %x %x", obj, obj->gtt_offset,
2334 OFF_TO_IDX(obj->base.size));
2335 devobj = cdev_pager_lookup(obj);
2336 if (devobj != NULL) {
2337 page_count = OFF_TO_IDX(obj->base.size);
2339 VM_OBJECT_WLOCK(devobj);
2341 for (i = 0; i < page_count; i++) {
2342 m = vm_page_lookup(devobj, i);
2345 if (vm_page_sleep_if_busy(m, "915unm"))
2347 cdev_pager_free_page(devobj, m);
2349 VM_OBJECT_WUNLOCK(devobj);
2350 vm_object_deallocate(devobj);
2353 obj->fault_mappable = false;
2357 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
2361 KASSERT((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0,
2362 ("In GPU write domain"));
2364 CTR5(KTR_DRM, "object_wait_rendering %p %s %x %d %d", obj,
2365 obj->ring != NULL ? obj->ring->name : "none", obj->gtt_offset,
2366 obj->active, obj->last_rendering_seqno);
2368 ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
2377 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
2378 struct intel_ring_buffer *ring, uint32_t seqno)
2380 struct drm_device *dev = obj->base.dev;
2381 struct drm_i915_private *dev_priv = dev->dev_private;
2382 struct drm_i915_fence_reg *reg;
2385 KASSERT(ring != NULL, ("NULL ring"));
2387 /* Add a reference if we're newly entering the active list. */
2389 drm_gem_object_reference(&obj->base);
2393 /* Move from whatever list we were on to the tail of execution. */
2394 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
2395 list_move_tail(&obj->ring_list, &ring->active_list);
2397 obj->last_rendering_seqno = seqno;
2398 if (obj->fenced_gpu_access) {
2399 obj->last_fenced_seqno = seqno;
2400 obj->last_fenced_ring = ring;
2402 /* Bump MRU to take account of the delayed flush */
2403 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2404 reg = &dev_priv->fence_regs[obj->fence_reg];
2405 list_move_tail(&reg->lru_list,
2406 &dev_priv->mm.fence_list);
2412 i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
2414 list_del_init(&obj->ring_list);
2415 obj->last_rendering_seqno = 0;
2416 obj->last_fenced_seqno = 0;
2420 i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
2422 struct drm_device *dev = obj->base.dev;
2423 drm_i915_private_t *dev_priv = dev->dev_private;
2425 KASSERT(obj->active, ("Object not active"));
2426 list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
2428 i915_gem_object_move_off_active(obj);
2432 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2434 struct drm_device *dev = obj->base.dev;
2435 struct drm_i915_private *dev_priv = dev->dev_private;
2437 if (obj->pin_count != 0)
2438 list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
2440 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2442 KASSERT(list_empty(&obj->gpu_write_list), ("On gpu_write_list"));
2443 KASSERT(obj->active, ("Object not active"));
2445 obj->last_fenced_ring = NULL;
2447 i915_gem_object_move_off_active(obj);
2448 obj->fenced_gpu_access = false;
2451 obj->pending_gpu_write = false;
2452 drm_gem_object_unreference(&obj->base);
2457 WARN_ON(i915_verify_lists(dev));
2462 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2466 vm_obj = obj->base.vm_obj;
2467 VM_OBJECT_WLOCK(vm_obj);
2468 vm_object_page_remove(vm_obj, 0, 0, false);
2469 VM_OBJECT_WUNLOCK(vm_obj);
2470 obj->madv = I915_MADV_PURGED_INTERNAL;
2474 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
2477 return (obj->madv == I915_MADV_DONTNEED);
2481 i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
2482 uint32_t flush_domains)
2484 struct drm_i915_gem_object *obj, *next;
2485 uint32_t old_write_domain;
2487 list_for_each_entry_safe(obj, next, &ring->gpu_write_list,
2489 if (obj->base.write_domain & flush_domains) {
2490 old_write_domain = obj->base.write_domain;
2491 obj->base.write_domain = 0;
2492 list_del_init(&obj->gpu_write_list);
2493 i915_gem_object_move_to_active(obj, ring,
2494 i915_gem_next_request_seqno(ring));
2496 CTR3(KTR_DRM, "object_change_domain process_flush %p %x %x",
2497 obj, obj->base.read_domains, old_write_domain);
2503 i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
2505 drm_i915_private_t *dev_priv;
2507 dev_priv = obj->base.dev->dev_private;
2508 return (dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
2509 obj->tiling_mode != I915_TILING_NONE);
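/*
 * Grab and wire a single backing page, paging it in from the VM object
 * if it is not yet valid; the page is returned wired and accounted in
 * i915_gem_wired_pages_cnt.
 */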
2513 i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex)
2518 VM_OBJECT_ASSERT_WLOCKED(object);
2519 m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
2520 if (m->valid != VM_PAGE_BITS_ALL) {
2521 if (vm_pager_has_page(object, pindex, NULL, NULL)) {
2522 rv = vm_pager_get_pages(object, &m, 1, 0);
2523 m = vm_page_lookup(object, pindex);
2526 if (rv != VM_PAGER_OK) {
2534 m->valid = VM_PAGE_BITS_ALL;
2542 atomic_add_long(&i915_gem_wired_pages_cnt, 1);
2547 i915_gem_flush_ring(struct intel_ring_buffer *ring, uint32_t invalidate_domains,
2548 uint32_t flush_domains)
2552 if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
2555 CTR3(KTR_DRM, "ring_flush %s %x %x", ring->name, invalidate_domains,
2557 ret = ring->flush(ring, invalidate_domains, flush_domains);
2561 if (flush_domains & I915_GEM_GPU_DOMAINS)
2562 i915_gem_process_flushing_list(ring, flush_domains);
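/*
 * Idle a single ring: flush any objects still sitting on its
 * gpu_write_list, then wait for the ring's next request seqno to pass,
 * optionally retiring completed requests on the way out.
 */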
2567 i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
2571 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
2574 if (!list_empty(&ring->gpu_write_list)) {
2575 ret = i915_gem_flush_ring(ring, I915_GEM_GPU_DOMAINS,
2576 I915_GEM_GPU_DOMAINS);
2581 return (i915_wait_request(ring, i915_gem_next_request_seqno(ring),
2586 i915_gpu_idle(struct drm_device *dev, bool do_retire)
2588 drm_i915_private_t *dev_priv = dev->dev_private;
2591 /* Flush everything onto the inactive list. */
2592 for (i = 0; i < I915_NUM_RINGS; i++) {
2593 ret = i915_ring_idle(&dev_priv->rings[i], do_retire);
2602 i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno, bool do_retire)
2604 drm_i915_private_t *dev_priv;
2605 struct drm_i915_gem_request *request;
2608 bool recovery_complete;
2610 KASSERT(seqno != 0, ("Zero seqno"));
2612 dev_priv = ring->dev->dev_private;
2615 if (atomic_load_acq_int(&dev_priv->mm.wedged) != 0) {
2616 /* Give the error handler a chance to run. */
2617 mtx_lock(&dev_priv->error_completion_lock);
2618 		recovery_complete = dev_priv->error_completion > 0;
2619 mtx_unlock(&dev_priv->error_completion_lock);
2620 return (recovery_complete ? -EIO : -EAGAIN);
2623 if (seqno == ring->outstanding_lazy_request) {
2624 request = malloc(sizeof(*request), DRM_I915_GEM,
2626 if (request == NULL)
2629 ret = i915_add_request(ring, NULL, request);
2631 free(request, DRM_I915_GEM);
2635 seqno = request->seqno;
2638 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
2639 if (HAS_PCH_SPLIT(ring->dev))
2640 ier = I915_READ(DEIER) | I915_READ(GTIER);
2642 ier = I915_READ(IER);
2644 DRM_ERROR("something (likely vbetool) disabled "
2645 "interrupts, re-enabling\n");
2646 ring->dev->driver->irq_preinstall(ring->dev);
2647 ring->dev->driver->irq_postinstall(ring->dev);
2650 CTR2(KTR_DRM, "request_wait_begin %s %d", ring->name, seqno);
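	/*
	 * Prefer an interrupt-driven wait: if the ring can raise a user
	 * interrupt, sleep on the ring until the seqno passes, the GPU is
	 * declared wedged, or a signal arrives (PCATCH when interruptible
	 * waits are enabled).  Otherwise fall back to polling the seqno,
	 * here for up to 3000 ms.
	 */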
2652 ring->waiting_seqno = seqno;
2653 mtx_lock(&ring->irq_lock);
2654 if (ring->irq_get(ring)) {
2655 flags = dev_priv->mm.interruptible ? PCATCH : 0;
2656 while (!i915_seqno_passed(ring->get_seqno(ring), seqno)
2657 && !atomic_load_acq_int(&dev_priv->mm.wedged) &&
2659 ret = -msleep(ring, &ring->irq_lock, flags,
2662 ring->irq_put(ring);
2663 mtx_unlock(&ring->irq_lock);
2665 mtx_unlock(&ring->irq_lock);
2666 if (_intel_wait_for(ring->dev,
2667 i915_seqno_passed(ring->get_seqno(ring), seqno) ||
2668 atomic_load_acq_int(&dev_priv->mm.wedged), 3000,
2672 ring->waiting_seqno = 0;
2674 CTR3(KTR_DRM, "request_wait_end %s %d %d", ring->name, seqno,
2677 if (atomic_load_acq_int(&dev_priv->mm.wedged))
2680 /* Directly dispatch request retiring. While we have the work queue
2681 * to handle this, the waiter on a request often wants an associated
2682 * buffer to have made it to the inactive list, and we would need
2683 * a separate wait queue to handle that.
2685 if (ret == 0 && do_retire)
2686 i915_gem_retire_requests_ring(ring);
2692 i915_gem_get_seqno(struct drm_device *dev)
2694 drm_i915_private_t *dev_priv = dev->dev_private;
2695 u32 seqno = dev_priv->next_seqno;
2697 /* reserve 0 for non-seqno */
2698 if (++dev_priv->next_seqno == 0)
2699 dev_priv->next_seqno = 1;
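/*
 * Return the seqno that the next request on this ring will carry,
 * allocating one lazily if no request is currently outstanding.
 * i915_add_request() clears outstanding_lazy_request once the request
 * has actually been emitted.
 */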
2705 i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
2707 if (ring->outstanding_lazy_request == 0)
2708 ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
2710 return ring->outstanding_lazy_request;
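/*
 * Emit a request onto the ring: have the ring write its seqno, record
 * where the request ends (its tail) so ring space can be reclaimed on
 * retire, queue the request on the ring and, when a file is supplied,
 * on that client's request list, then kick the hangcheck timer and the
 * deferred retire task.
 */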
2714 i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file,
2715 struct drm_i915_gem_request *request)
2717 drm_i915_private_t *dev_priv;
2718 struct drm_i915_file_private *file_priv;
2720 u32 request_ring_position;
2724 KASSERT(request != NULL, ("NULL request in add"));
2725 DRM_LOCK_ASSERT(ring->dev);
2726 dev_priv = ring->dev->dev_private;
2728 seqno = i915_gem_next_request_seqno(ring);
2729 request_ring_position = intel_ring_get_tail(ring);
2731 ret = ring->add_request(ring, &seqno);
2735 CTR2(KTR_DRM, "request_add %s %d", ring->name, seqno);
2737 request->seqno = seqno;
2738 request->ring = ring;
2739 request->tail = request_ring_position;
2740 request->emitted_jiffies = ticks;
2741 was_empty = list_empty(&ring->request_list);
2742 list_add_tail(&request->list, &ring->request_list);
2745 file_priv = file->driver_priv;
2747 mtx_lock(&file_priv->mm.lck);
2748 request->file_priv = file_priv;
2749 list_add_tail(&request->client_list,
2750 &file_priv->mm.request_list);
2751 mtx_unlock(&file_priv->mm.lck);
2754 ring->outstanding_lazy_request = 0;
2756 if (!dev_priv->mm.suspended) {
2757 if (i915_enable_hangcheck) {
2758 callout_schedule(&dev_priv->hangcheck_timer,
2759 DRM_I915_HANGCHECK_PERIOD);
2762 taskqueue_enqueue_timeout(dev_priv->tq,
2763 &dev_priv->mm.retire_task, hz);
2769 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2771 struct drm_i915_file_private *file_priv = request->file_priv;
2776 DRM_LOCK_ASSERT(request->ring->dev);
2778 mtx_lock(&file_priv->mm.lck);
2779 if (request->file_priv != NULL) {
2780 list_del(&request->client_list);
2781 request->file_priv = NULL;
2783 mtx_unlock(&file_priv->mm.lck);
2787 i915_gem_release(struct drm_device *dev, struct drm_file *file)
2789 struct drm_i915_file_private *file_priv;
2790 struct drm_i915_gem_request *request;
2792 file_priv = file->driver_priv;
2794 /* Clean up our request list when the client is going away, so that
2795 * later retire_requests won't dereference our soon-to-be-gone
2798 mtx_lock(&file_priv->mm.lck);
2799 while (!list_empty(&file_priv->mm.request_list)) {
2800 request = list_first_entry(&file_priv->mm.request_list,
2801 struct drm_i915_gem_request,
2803 list_del(&request->client_list);
2804 request->file_priv = NULL;
2806 mtx_unlock(&file_priv->mm.lck);
2810 i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2811 struct intel_ring_buffer *ring)
2814 if (ring->dev != NULL)
2815 DRM_LOCK_ASSERT(ring->dev);
2817 while (!list_empty(&ring->request_list)) {
2818 struct drm_i915_gem_request *request;
2820 request = list_first_entry(&ring->request_list,
2821 struct drm_i915_gem_request, list);
2823 list_del(&request->list);
2824 i915_gem_request_remove_from_client(request);
2825 free(request, DRM_I915_GEM);
2828 while (!list_empty(&ring->active_list)) {
2829 struct drm_i915_gem_object *obj;
2831 obj = list_first_entry(&ring->active_list,
2832 struct drm_i915_gem_object, ring_list);
2834 obj->base.write_domain = 0;
2835 list_del_init(&obj->gpu_write_list);
2836 i915_gem_object_move_to_inactive(obj);
2841 i915_gem_reset_fences(struct drm_device *dev)
2843 struct drm_i915_private *dev_priv = dev->dev_private;
2846 for (i = 0; i < dev_priv->num_fence_regs; i++) {
2847 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2848 struct drm_i915_gem_object *obj = reg->obj;
2853 if (obj->tiling_mode)
2854 i915_gem_release_mmap(obj);
2856 reg->obj->fence_reg = I915_FENCE_REG_NONE;
2857 reg->obj->fenced_gpu_access = false;
2858 reg->obj->last_fenced_seqno = 0;
2859 reg->obj->last_fenced_ring = NULL;
2860 i915_gem_clear_fence_reg(dev, reg);
2865 i915_gem_reset(struct drm_device *dev)
2867 struct drm_i915_private *dev_priv = dev->dev_private;
2868 struct drm_i915_gem_object *obj;
2871 for (i = 0; i < I915_NUM_RINGS; i++)
2872 i915_gem_reset_ring_lists(dev_priv, &dev_priv->rings[i]);
2874 /* Remove anything from the flushing lists. The GPU cache is likely
2875 * to be lost on reset along with the data, so simply move the
2876 * lost bo to the inactive list.
2878 while (!list_empty(&dev_priv->mm.flushing_list)) {
2879 obj = list_first_entry(&dev_priv->mm.flushing_list,
2880 struct drm_i915_gem_object,
2883 obj->base.write_domain = 0;
2884 list_del_init(&obj->gpu_write_list);
2885 i915_gem_object_move_to_inactive(obj);
2888 /* Move everything out of the GPU domains to ensure we do any
2889 * necessary invalidation upon reuse.
2891 list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
2892 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
2895 /* The fence registers are invalidated so clear them out */
2896 i915_gem_reset_fences(dev);
2900 * This function clears the request list as sequence numbers are passed.
2903 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2908 if (list_empty(&ring->request_list))
2911 seqno = ring->get_seqno(ring);
2912 CTR2(KTR_DRM, "retire_request_ring %s %d", ring->name, seqno);
2914 for (i = 0; i < DRM_ARRAY_SIZE(ring->sync_seqno); i++)
2915 if (seqno >= ring->sync_seqno[i])
2916 ring->sync_seqno[i] = 0;
2918 while (!list_empty(&ring->request_list)) {
2919 struct drm_i915_gem_request *request;
2921 request = list_first_entry(&ring->request_list,
2922 struct drm_i915_gem_request,
2925 if (!i915_seqno_passed(seqno, request->seqno))
2928 CTR2(KTR_DRM, "retire_request_seqno_passed %s %d",
2930 ring->last_retired_head = request->tail;
2932 list_del(&request->list);
2933 i915_gem_request_remove_from_client(request);
2934 free(request, DRM_I915_GEM);
2937 /* Move any buffers on the active list that are no longer referenced
2938 * by the ringbuffer to the flushing/inactive lists as appropriate.
2940 while (!list_empty(&ring->active_list)) {
2941 struct drm_i915_gem_object *obj;
2943 obj = list_first_entry(&ring->active_list,
2944 struct drm_i915_gem_object,
2947 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
2950 if (obj->base.write_domain != 0)
2951 i915_gem_object_move_to_flushing(obj);
2953 i915_gem_object_move_to_inactive(obj);
2956 if (ring->trace_irq_seqno &&
2957 i915_seqno_passed(seqno, ring->trace_irq_seqno)) {
2958 mtx_lock(&ring->irq_lock);
2959 ring->irq_put(ring);
2960 mtx_unlock(&ring->irq_lock);
2961 ring->trace_irq_seqno = 0;
2966 i915_gem_retire_requests(struct drm_device *dev)
2968 drm_i915_private_t *dev_priv = dev->dev_private;
2969 struct drm_i915_gem_object *obj, *next;
2972 if (!list_empty(&dev_priv->mm.deferred_free_list)) {
2973 list_for_each_entry_safe(obj, next,
2974 &dev_priv->mm.deferred_free_list, mm_list)
2975 i915_gem_free_object_tail(obj);
2978 for (i = 0; i < I915_NUM_RINGS; i++)
2979 i915_gem_retire_requests_ring(&dev_priv->rings[i]);
2983 sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
2984 struct intel_ring_buffer *pipelined)
2986 struct drm_device *dev = obj->base.dev;
2987 drm_i915_private_t *dev_priv = dev->dev_private;
2988 u32 size = obj->gtt_space->size;
2989 int regnum = obj->fence_reg;
2992 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2994 val |= obj->gtt_offset & 0xfffff000;
2995 val |= (uint64_t)((obj->stride / 128) - 1) <<
2996 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2998 if (obj->tiling_mode == I915_TILING_Y)
2999 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
3000 val |= I965_FENCE_REG_VALID;
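	/*
	 * The 64-bit fence value assembled above encodes the end of the
	 * fenced range in the upper dword, the start offset in the lower
	 * dword, the pitch in 128-byte units minus one, a Y-tiling bit and
	 * the valid bit.  It is written either through the ring with
	 * MI_LOAD_REGISTER_IMM (when pipelined) or with a direct 64-bit
	 * MMIO write below.
	 */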
3003 int ret = intel_ring_begin(pipelined, 6);
3007 intel_ring_emit(pipelined, MI_NOOP);
3008 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
3009 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
3010 intel_ring_emit(pipelined, (u32)val);
3011 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
3012 intel_ring_emit(pipelined, (u32)(val >> 32));
3013 intel_ring_advance(pipelined);
3015 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
3021 i965_write_fence_reg(struct drm_i915_gem_object *obj,
3022 struct intel_ring_buffer *pipelined)
3024 struct drm_device *dev = obj->base.dev;
3025 drm_i915_private_t *dev_priv = dev->dev_private;
3026 u32 size = obj->gtt_space->size;
3027 int regnum = obj->fence_reg;
3030 val = (uint64_t)((obj->gtt_offset + size - 4096) &
3032 val |= obj->gtt_offset & 0xfffff000;
3033 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
3034 if (obj->tiling_mode == I915_TILING_Y)
3035 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
3036 val |= I965_FENCE_REG_VALID;
3039 int ret = intel_ring_begin(pipelined, 6);
3043 intel_ring_emit(pipelined, MI_NOOP);
3044 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
3045 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
3046 intel_ring_emit(pipelined, (u32)val);
3047 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
3048 intel_ring_emit(pipelined, (u32)(val >> 32));
3049 intel_ring_advance(pipelined);
3051 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
3057 i915_write_fence_reg(struct drm_i915_gem_object *obj,
3058 struct intel_ring_buffer *pipelined)
3060 struct drm_device *dev = obj->base.dev;
3061 drm_i915_private_t *dev_priv = dev->dev_private;
3062 u32 size = obj->gtt_space->size;
3063 u32 fence_reg, val, pitch_val;
3066 if ((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
3067 (size & -size) != size || (obj->gtt_offset & (size - 1))) {
3069 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
3070 obj->gtt_offset, obj->map_and_fenceable, size);
3074 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
3079 /* Note: pitch better be a power of two tile widths */
3080 pitch_val = obj->stride / tile_width;
3081 pitch_val = ffs(pitch_val) - 1;
3083 val = obj->gtt_offset;
3084 if (obj->tiling_mode == I915_TILING_Y)
3085 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3086 val |= I915_FENCE_SIZE_BITS(size);
3087 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3088 val |= I830_FENCE_REG_VALID;
3090 fence_reg = obj->fence_reg;
3092 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
3094 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
3097 int ret = intel_ring_begin(pipelined, 4);
3101 intel_ring_emit(pipelined, MI_NOOP);
3102 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
3103 intel_ring_emit(pipelined, fence_reg);
3104 intel_ring_emit(pipelined, val);
3105 intel_ring_advance(pipelined);
3107 I915_WRITE(fence_reg, val);
3113 i830_write_fence_reg(struct drm_i915_gem_object *obj,
3114 struct intel_ring_buffer *pipelined)
3116 struct drm_device *dev = obj->base.dev;
3117 drm_i915_private_t *dev_priv = dev->dev_private;
3118 u32 size = obj->gtt_space->size;
3119 int regnum = obj->fence_reg;
3123 if ((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
3124 (size & -size) != size || (obj->gtt_offset & (size - 1))) {
3126 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
3127 obj->gtt_offset, size);
3131 pitch_val = obj->stride / 128;
3132 pitch_val = ffs(pitch_val) - 1;
3134 val = obj->gtt_offset;
3135 if (obj->tiling_mode == I915_TILING_Y)
3136 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3137 val |= I830_FENCE_SIZE_BITS(size);
3138 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3139 val |= I830_FENCE_REG_VALID;
3142 int ret = intel_ring_begin(pipelined, 4);
3146 intel_ring_emit(pipelined, MI_NOOP);
3147 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
3148 intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
3149 intel_ring_emit(pipelined, val);
3150 intel_ring_advance(pipelined);
3152 I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
3157 static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
3159 return i915_seqno_passed(ring->get_seqno(ring), seqno);
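/*
 * Quiesce an object before its fence register is touched: flush any
 * pending GPU writes through the ring that last used the fence and, if
 * that ring is not the one the fence change is being pipelined on, wait
 * for the last fenced access to complete.
 */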
3163 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
3164 struct intel_ring_buffer *pipelined)
3168 if (obj->fenced_gpu_access) {
3169 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3170 ret = i915_gem_flush_ring(obj->last_fenced_ring, 0,
3171 obj->base.write_domain);
3176 obj->fenced_gpu_access = false;
3179 if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
3180 if (!ring_passed_seqno(obj->last_fenced_ring,
3181 obj->last_fenced_seqno)) {
3182 ret = i915_wait_request(obj->last_fenced_ring,
3183 obj->last_fenced_seqno,
3189 obj->last_fenced_seqno = 0;
3190 obj->last_fenced_ring = NULL;
3193 /* Ensure that all CPU reads are completed before installing a fence
3194 * and all writes before removing the fence.
3196 if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
3203 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3207 if (obj->tiling_mode)
3208 i915_gem_release_mmap(obj);
3210 ret = i915_gem_object_flush_fence(obj, NULL);
3214 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3215 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3217 if (dev_priv->fence_regs[obj->fence_reg].pin_count != 0)
3218 printf("%s: pin_count %d\n", __func__,
3219 dev_priv->fence_regs[obj->fence_reg].pin_count);
3220 i915_gem_clear_fence_reg(obj->base.dev,
3221 &dev_priv->fence_regs[obj->fence_reg]);
3223 obj->fence_reg = I915_FENCE_REG_NONE;
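/*
 * Pick a fence register to (re)use: prefer an unpinned free register;
 * failing that, scan the fence LRU for one whose object can be stolen
 * without stalling (no outstanding fenced ring, or the same ring we are
 * pipelining on), apparently falling back to the least recently used
 * entry otherwise.
 */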
3229 static struct drm_i915_fence_reg *
3230 i915_find_fence_reg(struct drm_device *dev, struct intel_ring_buffer *pipelined)
3232 struct drm_i915_private *dev_priv = dev->dev_private;
3233 struct drm_i915_fence_reg *reg, *first, *avail;
3236 /* First try to find a free reg */
3238 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3239 reg = &dev_priv->fence_regs[i];
3243 if (!reg->pin_count)
3250 /* None available, try to steal one or wait for a user to finish */
3251 avail = first = NULL;
3252 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3260 !reg->obj->last_fenced_ring ||
3261 reg->obj->last_fenced_ring == pipelined) {
3274 i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
3275 struct intel_ring_buffer *pipelined)
3277 struct drm_device *dev = obj->base.dev;
3278 struct drm_i915_private *dev_priv = dev->dev_private;
3279 struct drm_i915_fence_reg *reg;
3285 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3286 reg = &dev_priv->fence_regs[obj->fence_reg];
3287 		list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
3289 if (obj->tiling_changed) {
3290 ret = i915_gem_object_flush_fence(obj, pipelined);
3294 if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
3299 i915_gem_next_request_seqno(pipelined);
3300 obj->last_fenced_seqno = reg->setup_seqno;
3301 obj->last_fenced_ring = pipelined;
3308 if (reg->setup_seqno) {
3309 if (!ring_passed_seqno(obj->last_fenced_ring,
3310 reg->setup_seqno)) {
3311 ret = i915_wait_request(
3312 obj->last_fenced_ring,
3319 reg->setup_seqno = 0;
3321 } else if (obj->last_fenced_ring &&
3322 obj->last_fenced_ring != pipelined) {
3323 ret = i915_gem_object_flush_fence(obj, pipelined);
3328 if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
3330 KASSERT(pipelined || reg->setup_seqno == 0, ("!pipelined"));
3332 if (obj->tiling_changed) {
3335 i915_gem_next_request_seqno(pipelined);
3336 obj->last_fenced_seqno = reg->setup_seqno;
3337 obj->last_fenced_ring = pipelined;
3345 reg = i915_find_fence_reg(dev, pipelined);
3349 ret = i915_gem_object_flush_fence(obj, pipelined);
3354 struct drm_i915_gem_object *old = reg->obj;
3356 drm_gem_object_reference(&old->base);
3358 if (old->tiling_mode)
3359 i915_gem_release_mmap(old);
3361 ret = i915_gem_object_flush_fence(old, pipelined);
3363 drm_gem_object_unreference(&old->base);
3367 if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
3370 old->fence_reg = I915_FENCE_REG_NONE;
3371 old->last_fenced_ring = pipelined;
3372 old->last_fenced_seqno =
3373 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
3375 drm_gem_object_unreference(&old->base);
3376 } else if (obj->last_fenced_seqno == 0)
3380 	list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
3381 obj->fence_reg = reg - dev_priv->fence_regs;
3382 obj->last_fenced_ring = pipelined;
3385 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
3386 obj->last_fenced_seqno = reg->setup_seqno;
3389 obj->tiling_changed = false;
3390 switch (INTEL_INFO(dev)->gen) {
3393 ret = sandybridge_write_fence_reg(obj, pipelined);
3397 ret = i965_write_fence_reg(obj, pipelined);
3400 ret = i915_write_fence_reg(obj, pipelined);
3403 ret = i830_write_fence_reg(obj, pipelined);
3411 i915_gem_clear_fence_reg(struct drm_device *dev, struct drm_i915_fence_reg *reg)
3413 drm_i915_private_t *dev_priv = dev->dev_private;
3414 uint32_t fence_reg = reg - dev_priv->fence_regs;
3416 switch (INTEL_INFO(dev)->gen) {
3419 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
3423 I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
3427 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
3430 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
3432 I915_WRITE(fence_reg, 0);
3436 	list_del_init(&reg->lru_list);
3438 reg->setup_seqno = 0;
3443 i915_gem_init_object(struct drm_gem_object *obj)
3446 printf("i915_gem_init_object called\n");
3451 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
3454 return (obj->gtt_space && !obj->active && obj->pin_count == 0);
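/*
 * Deferred retire task, run from the driver taskqueue roughly once per
 * second: retire completed requests, send a flush down any ring that
 * still has pending GPU writes so objects are not held indefinitely,
 * and requeue itself while the device remains busy.
 */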
3458 i915_gem_retire_task_handler(void *arg, int pending)
3460 drm_i915_private_t *dev_priv;
3461 struct drm_device *dev;
3466 dev = dev_priv->dev;
3468 /* Come back later if the device is busy... */
3469 if (!sx_try_xlock(&dev->dev_struct_lock)) {
3470 taskqueue_enqueue_timeout(dev_priv->tq,
3471 &dev_priv->mm.retire_task, hz);
3475 CTR0(KTR_DRM, "retire_task");
3477 i915_gem_retire_requests(dev);
3479 /* Send a periodic flush down the ring so we don't hold onto GEM
3480 * objects indefinitely.
3483 for (i = 0; i < I915_NUM_RINGS; i++) {
3484 struct intel_ring_buffer *ring = &dev_priv->rings[i];
3486 if (!list_empty(&ring->gpu_write_list)) {
3487 struct drm_i915_gem_request *request;
3490 ret = i915_gem_flush_ring(ring,
3491 0, I915_GEM_GPU_DOMAINS);
3492 request = malloc(sizeof(*request), DRM_I915_GEM,
3494 if (ret || request == NULL ||
3495 i915_add_request(ring, NULL, request))
3496 free(request, DRM_I915_GEM);
3499 idle &= list_empty(&ring->request_list);
3502 if (!dev_priv->mm.suspended && !idle)
3503 taskqueue_enqueue_timeout(dev_priv->tq,
3504 &dev_priv->mm.retire_task, hz);
3510 i915_gem_lastclose(struct drm_device *dev)
3514 if (drm_core_check_feature(dev, DRIVER_MODESET))
3517 ret = i915_gem_idle(dev);
3519 DRM_ERROR("failed to idle hardware: %d\n", ret);
3523 i915_gem_init_phys_object(struct drm_device *dev, int id, int size, int align)
3525 drm_i915_private_t *dev_priv;
3526 struct drm_i915_gem_phys_object *phys_obj;
3529 dev_priv = dev->dev_private;
3530 if (dev_priv->mm.phys_objs[id - 1] != NULL || size == 0)
3533 phys_obj = malloc(sizeof(struct drm_i915_gem_phys_object), DRM_I915_GEM,
3538 phys_obj->handle = drm_pci_alloc(dev, size, align, ~0);
3539 if (phys_obj->handle == NULL) {
3543 pmap_change_attr((vm_offset_t)phys_obj->handle->vaddr,
3544 size / PAGE_SIZE, PAT_WRITE_COMBINING);
3546 dev_priv->mm.phys_objs[id - 1] = phys_obj;
3551 free(phys_obj, DRM_I915_GEM);
3556 i915_gem_free_phys_object(struct drm_device *dev, int id)
3558 drm_i915_private_t *dev_priv;
3559 struct drm_i915_gem_phys_object *phys_obj;
3561 dev_priv = dev->dev_private;
3562 if (dev_priv->mm.phys_objs[id - 1] == NULL)
3565 phys_obj = dev_priv->mm.phys_objs[id - 1];
3566 if (phys_obj->cur_obj != NULL)
3567 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
3569 drm_pci_free(dev, phys_obj->handle);
3570 free(phys_obj, DRM_I915_GEM);
3571 dev_priv->mm.phys_objs[id - 1] = NULL;
3575 i915_gem_free_all_phys_object(struct drm_device *dev)
3579 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
3580 i915_gem_free_phys_object(dev, i);
3584 i915_gem_detach_phys_object(struct drm_device *dev,
3585 struct drm_i915_gem_object *obj)
3592 if (obj->phys_obj == NULL)
3594 vaddr = obj->phys_obj->handle->vaddr;
3596 page_count = obj->base.size / PAGE_SIZE;
3597 VM_OBJECT_WLOCK(obj->base.vm_obj);
3598 for (i = 0; i < page_count; i++) {
3599 m = i915_gem_wire_page(obj->base.vm_obj, i);
3603 VM_OBJECT_WUNLOCK(obj->base.vm_obj);
3604 sf = sf_buf_alloc(m, 0);
3606 dst = (char *)sf_buf_kva(sf);
3607 memcpy(dst, vaddr + IDX_TO_OFF(i), PAGE_SIZE);
3610 drm_clflush_pages(&m, 1);
3612 VM_OBJECT_WLOCK(obj->base.vm_obj);
3613 vm_page_reference(m);
3616 vm_page_unwire(m, PQ_INACTIVE);
3618 atomic_add_long(&i915_gem_wired_pages_cnt, -1);
3620 VM_OBJECT_WUNLOCK(obj->base.vm_obj);
3621 intel_gtt_chipset_flush();
3623 obj->phys_obj->cur_obj = NULL;
3624 obj->phys_obj = NULL;
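/*
 * Bind an object to a physically contiguous backing store (used for
 * hardware that needs physical addresses, e.g. the cursor phys objects
 * above), allocating the phys object on first use and copying the
 * object's current pages into it through temporary sf_buf mappings.
 */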
3628 i915_gem_attach_phys_object(struct drm_device *dev,
3629 struct drm_i915_gem_object *obj, int id, int align)
3631 drm_i915_private_t *dev_priv;
3635 int i, page_count, ret;
3637 if (id > I915_MAX_PHYS_OBJECT)
3640 if (obj->phys_obj != NULL) {
3641 if (obj->phys_obj->id == id)
3643 i915_gem_detach_phys_object(dev, obj);
3646 dev_priv = dev->dev_private;
3647 if (dev_priv->mm.phys_objs[id - 1] == NULL) {
3648 ret = i915_gem_init_phys_object(dev, id, obj->base.size, align);
3650 DRM_ERROR("failed to init phys object %d size: %zu\n",
3651 id, obj->base.size);
3656 /* bind to the object */
3657 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
3658 obj->phys_obj->cur_obj = obj;
3660 page_count = obj->base.size / PAGE_SIZE;
3662 VM_OBJECT_WLOCK(obj->base.vm_obj);
3664 for (i = 0; i < page_count; i++) {
3665 m = i915_gem_wire_page(obj->base.vm_obj, i);
3670 VM_OBJECT_WUNLOCK(obj->base.vm_obj);
3671 sf = sf_buf_alloc(m, 0);
3672 src = (char *)sf_buf_kva(sf);
3673 dst = (char *)obj->phys_obj->handle->vaddr + IDX_TO_OFF(i);
3674 memcpy(dst, src, PAGE_SIZE);
3677 VM_OBJECT_WLOCK(obj->base.vm_obj);
3679 vm_page_reference(m);
3681 vm_page_unwire(m, PQ_INACTIVE);
3683 atomic_add_long(&i915_gem_wired_pages_cnt, -1);
3685 VM_OBJECT_WUNLOCK(obj->base.vm_obj);
3691 i915_gem_phys_pwrite(struct drm_device *dev, struct drm_i915_gem_object *obj,
3692 uint64_t data_ptr, uint64_t offset, uint64_t size,
3693 struct drm_file *file_priv)
3695 char *user_data, *vaddr;
3698 vaddr = (char *)obj->phys_obj->handle->vaddr + offset;
3699 user_data = (char *)(uintptr_t)data_ptr;
3701 if (copyin_nofault(user_data, vaddr, size) != 0) {
3702 /* The physical object once assigned is fixed for the lifetime
3703 * of the obj, so we can safely drop the lock and continue
3707 ret = -copyin(user_data, vaddr, size);
3713 intel_gtt_chipset_flush();
3718 i915_gpu_is_active(struct drm_device *dev)
3720 drm_i915_private_t *dev_priv;
3722 dev_priv = dev->dev_private;
3723 return (!list_empty(&dev_priv->mm.flushing_list) ||
3724 !list_empty(&dev_priv->mm.active_list));
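/*
 * vm_lowmem eventhandler: under memory pressure, retire requests and
 * evict purgeable, then any, inactive buffers; if most evictions fail
 * while the GPU is still busy, idle the GPU as a last resort so more
 * buffers become reclaimable.
 */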
3728 i915_gem_lowmem(void *arg)
3730 struct drm_device *dev;
3731 struct drm_i915_private *dev_priv;
3732 struct drm_i915_gem_object *obj, *next;
3733 int cnt, cnt_fail, cnt_total;
3736 dev_priv = dev->dev_private;
3738 if (!sx_try_xlock(&dev->dev_struct_lock))
3741 CTR0(KTR_DRM, "gem_lowmem");
3744 /* first scan for clean buffers */
3745 i915_gem_retire_requests(dev);
3747 cnt_total = cnt_fail = cnt = 0;
3749 list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list,
3751 if (i915_gem_object_is_purgeable(obj)) {
3752 if (i915_gem_object_unbind(obj) != 0)
3758 /* second pass, evict/count anything still on the inactive list */
3759 list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list,
3761 if (i915_gem_object_unbind(obj) == 0)
3767 if (cnt_fail > cnt_total / 100 && i915_gpu_is_active(dev)) {
3769 * We are desperate for pages, so as a last resort, wait
3770 * for the GPU to finish and discard whatever we can.
3771 * This has a dramatic impact to reduce the number of
3772 * OOM-killer events whilst running the GPU aggressively.
3774 if (i915_gpu_idle(dev, true) == 0)
3781 i915_gem_unload(struct drm_device *dev)
3783 struct drm_i915_private *dev_priv;
3785 dev_priv = dev->dev_private;
3786 EVENTHANDLER_DEREGISTER(vm_lowmem, dev_priv->mm.i915_lowmem);