2 * Copyright © 2008-2010 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
25 * Zou Nan hai <nanhai.zou@intel.com>
26 * Xiang Hai hao <haihao.xiang@intel.com>
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <dev/drm2/drmP.h>
34 #include <dev/drm2/drm.h>
35 #include <dev/drm2/i915/i915_drm.h>
36 #include <dev/drm2/i915/i915_drv.h>
37 #include <dev/drm2/i915/intel_drv.h>
38 #include <dev/drm2/i915/intel_ringbuffer.h>
39 #include <sys/sched.h>
40 #include <sys/sf_buf.h>
43 * 965+ parts support PIPE_CONTROL commands, which provide finer-grained control
44 * over cache flushing.
47 struct drm_i915_gem_object *obj;
48 volatile u32 *cpu_page;
53 i915_trace_irq_get(struct intel_ring_buffer *ring, uint32_t seqno)
56 if (ring->trace_irq_seqno == 0) {
57 mtx_lock(&ring->irq_lock);
58 if (ring->irq_get(ring))
59 ring->trace_irq_seqno = seqno;
60 mtx_unlock(&ring->irq_lock);
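/*
 * Bytes free in the ring: the gap between the hardware HEAD and the software
 * TAIL, less an 8-byte pad so that TAIL never catches HEAD exactly, which
 * would read back as an empty ring.
 */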
64 static inline int ring_space(struct intel_ring_buffer *ring)
66 int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
73 render_ring_flush(struct intel_ring_buffer *ring,
74 uint32_t invalidate_domains,
75 uint32_t flush_domains)
77 struct drm_device *dev = ring->dev;
84 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
85 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
86 * also flushed at 2d versus 3d pipeline switches.
90 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
91 * MI_READ_FLUSH is set, and is always flushed on 965.
93 * I915_GEM_DOMAIN_COMMAND may not exist?
95 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
96 * invalidated when MI_EXE_FLUSH is set.
98 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
99 * invalidated with every MI_FLUSH.
103 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
104 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
105 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
106 * are flushed at any MI_FLUSH.
109 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
110 if ((invalidate_domains|flush_domains) &
111 I915_GEM_DOMAIN_RENDER)
112 cmd &= ~MI_NO_WRITE_FLUSH;
113 if (INTEL_INFO(dev)->gen < 4) {
115 * On the 965, the sampler cache always gets flushed
116 * and this bit is reserved.
118 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
119 cmd |= MI_READ_FLUSH;
121 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
124 if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
125 (IS_G4X(dev) || IS_GEN5(dev)))
126 cmd |= MI_INVALIDATE_ISP;
128 ret = intel_ring_begin(ring, 2);
132 intel_ring_emit(ring, cmd);
133 intel_ring_emit(ring, MI_NOOP);
134 intel_ring_advance(ring);
140 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
141 * implementing two workarounds on gen6. From section 1.4.7.1
142 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
144 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
145 * produced by non-pipelined state commands), software needs to first
146 * send a PIPE_CONTROL with no bits set except Post-Sync Operation != 0.
149 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
150 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
152 * And the workaround for these two requires this workaround first:
154 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
155 * BEFORE the pipe-control with a post-sync op and no write-cache flushes.
158 * And this last workaround is tricky because of the requirements on
159 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
162 * "1 of the following must also be set:
163 * - Render Target Cache Flush Enable ([12] of DW1)
164 * - Depth Cache Flush Enable ([0] of DW1)
165 * - Stall at Pixel Scoreboard ([1] of DW1)
166 * - Depth Stall ([13] of DW1)
167 * - Post-Sync Operation ([13] of DW1)
168 * - Notify Enable ([8] of DW1)"
170 * The cache flushes require the workaround flush that triggered this
171 * one, so we can't use it. Depth stall would trigger the same.
172 * Post-sync nonzero is what triggered this second workaround, so we
173 * can't use that one either. Notify enable is IRQs, which aren't
174 * really our business. That leaves only stall at scoreboard.
177 intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
179 struct pipe_control *pc = ring->private;
180 u32 scratch_addr = pc->gtt_offset + 128;
184 ret = intel_ring_begin(ring, 6);
188 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
189 intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
190 PIPE_CONTROL_STALL_AT_SCOREBOARD);
191 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
192 intel_ring_emit(ring, 0); /* low dword */
193 intel_ring_emit(ring, 0); /* high dword */
194 intel_ring_emit(ring, MI_NOOP);
195 intel_ring_advance(ring);
197 ret = intel_ring_begin(ring, 6);
201 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
202 intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
203 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
204 intel_ring_emit(ring, 0);
205 intel_ring_emit(ring, 0);
206 intel_ring_emit(ring, MI_NOOP);
207 intel_ring_advance(ring);
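/*
 * Gen6 render flush: emit the post-sync workaround above, then a single
 * PIPE_CONTROL that flushes the render-target and depth caches and
 * invalidates the instruction, texture, VF, constant and state caches.
 */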
213 gen6_render_ring_flush(struct intel_ring_buffer *ring,
214 u32 invalidate_domains, u32 flush_domains)
217 struct pipe_control *pc = ring->private;
218 u32 scratch_addr = pc->gtt_offset + 128;
221 /* Force SNB workarounds for PIPE_CONTROL flushes */
222 intel_emit_post_sync_nonzero_flush(ring);
224 /* Just flush everything. Experiments have shown that reducing the
225 * number of bits based on the write domains has little performance impact.
228 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
229 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
230 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
231 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
232 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
233 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
234 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
236 ret = intel_ring_begin(ring, 6);
240 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
241 intel_ring_emit(ring, flags);
242 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
243 intel_ring_emit(ring, 0); /* lower dword */
244 intel_ring_emit(ring, 0); /* upper dword */
245 intel_ring_emit(ring, MI_NOOP);
246 intel_ring_advance(ring);
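/*
 * Default tail update: a plain write of RING_TAIL. The gen6 BSD ring
 * overrides this with gen6_bsd_ring_write_tail() further down, which has to
 * follow the PSMI sleep-state handshake before the write is issued.
 */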
251 static void ring_write_tail(struct intel_ring_buffer *ring,
254 drm_i915_private_t *dev_priv = ring->dev->dev_private;
255 I915_WRITE_TAIL(ring, value);
258 u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
260 drm_i915_private_t *dev_priv = ring->dev->dev_private;
261 uint32_t acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
262 RING_ACTHD(ring->mmio_base) : ACTHD;
264 return I915_READ(acthd_reg);
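/*
 * Shared ring bring-up: stop the engine, program the start address from the
 * pinned GEM object, force HEAD to zero (retrying for the G45 quirk below)
 * and wait until the control register reads back as valid.
 */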
267 static int init_ring_common(struct intel_ring_buffer *ring)
269 drm_i915_private_t *dev_priv = ring->dev->dev_private;
270 struct drm_i915_gem_object *obj = ring->obj;
273 /* Stop the ring if it's running. */
274 I915_WRITE_CTL(ring, 0);
275 I915_WRITE_HEAD(ring, 0);
276 ring->write_tail(ring, 0);
278 /* Initialize the ring. */
279 I915_WRITE_START(ring, obj->gtt_offset);
280 head = I915_READ_HEAD(ring) & HEAD_ADDR;
282 /* G45 ring initialization fails to reset head to zero */
284 DRM_DEBUG("%s head not reset to zero "
285 "ctl %08x head %08x tail %08x start %08x\n",
288 I915_READ_HEAD(ring),
289 I915_READ_TAIL(ring),
290 I915_READ_START(ring));
292 I915_WRITE_HEAD(ring, 0);
294 if (I915_READ_HEAD(ring) & HEAD_ADDR) {
295 DRM_ERROR("failed to set %s head to zero "
296 "ctl %08x head %08x tail %08x start %08x\n",
299 I915_READ_HEAD(ring),
300 I915_READ_TAIL(ring),
301 I915_READ_START(ring));
306 ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
309 /* If the head is still not zero, the ring is dead */
310 if (_intel_wait_for(ring->dev,
311 (I915_READ_CTL(ring) & RING_VALID) != 0 &&
312 I915_READ_START(ring) == obj->gtt_offset &&
313 (I915_READ_HEAD(ring) & HEAD_ADDR) == 0,
315 DRM_ERROR("%s initialization failed "
316 "ctl %08x head %08x tail %08x start %08x\n",
319 I915_READ_HEAD(ring),
320 I915_READ_TAIL(ring),
321 I915_READ_START(ring));
325 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
326 i915_kernel_lost_context(ring->dev);
328 ring->head = I915_READ_HEAD(ring);
329 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
330 ring->space = ring_space(ring);
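/*
 * Allocate and pin the 4K scratch page targeted by PIPE_CONTROL writes, and
 * map it into the kernel so pc_render_get_seqno() can read the seqno back
 * with the CPU.
 */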
337 init_pipe_control(struct intel_ring_buffer *ring)
339 struct pipe_control *pc;
340 struct drm_i915_gem_object *obj;
346 pc = malloc(sizeof(*pc), DRM_I915_GEM, M_WAITOK);
350 obj = i915_gem_alloc_object(ring->dev, 4096);
352 DRM_ERROR("Failed to allocate seqno page\n");
357 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
359 ret = i915_gem_object_pin(obj, 4096, true);
363 pc->gtt_offset = obj->gtt_offset;
364 pc->cpu_page = (uint32_t *)kmem_alloc_nofault(kernel_map, PAGE_SIZE);
365 if (pc->cpu_page == NULL)
367 pmap_qenter((uintptr_t)pc->cpu_page, &obj->pages[0], 1);
368 pmap_invalidate_range(kernel_pmap, (vm_offset_t)pc->cpu_page,
369 (vm_offset_t)pc->cpu_page + PAGE_SIZE);
370 pmap_invalidate_cache_range((vm_offset_t)pc->cpu_page,
371 (vm_offset_t)pc->cpu_page + PAGE_SIZE);
378 i915_gem_object_unpin(obj);
380 drm_gem_object_unreference(&obj->base);
382 free(pc, DRM_I915_GEM);
387 cleanup_pipe_control(struct intel_ring_buffer *ring)
389 struct pipe_control *pc = ring->private;
390 struct drm_i915_gem_object *obj;
396 pmap_qremove((vm_offset_t)pc->cpu_page, 1);
397 pmap_invalidate_range(kernel_pmap, (vm_offset_t)pc->cpu_page,
398 (vm_offset_t)pc->cpu_page + PAGE_SIZE);
399 kmem_free(kernel_map, (uintptr_t)pc->cpu_page, PAGE_SIZE);
400 i915_gem_object_unpin(obj);
401 drm_gem_object_unreference(&obj->base);
403 free(pc, DRM_I915_GEM);
404 ring->private = NULL;
407 static int init_render_ring(struct intel_ring_buffer *ring)
409 struct drm_device *dev = ring->dev;
410 struct drm_i915_private *dev_priv = dev->dev_private;
411 int ret = init_ring_common(ring);
413 if (INTEL_INFO(dev)->gen > 3) {
414 int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
415 I915_WRITE(MI_MODE, mode);
417 I915_WRITE(GFX_MODE_GEN7,
418 GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
419 GFX_MODE_ENABLE(GFX_REPLAY_MODE));
422 if (INTEL_INFO(dev)->gen >= 5) {
423 ret = init_pipe_control(ring);
430 /* From the Sandybridge PRM, volume 1 part 3, page 24:
431 * "If this bit is set, STCunit will have LRA as replacement
432 * policy. [...] This bit must be reset. LRA replacement
433 * policy is not supported."
435 I915_WRITE(CACHE_MODE_0,
436 CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
439 if (INTEL_INFO(dev)->gen >= 6) {
441 INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
447 static void render_ring_cleanup(struct intel_ring_buffer *ring)
452 cleanup_pipe_control(ring);
456 update_mboxes(struct intel_ring_buffer *ring,
460 intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
461 MI_SEMAPHORE_GLOBAL_GTT |
462 MI_SEMAPHORE_REGISTER |
463 MI_SEMAPHORE_UPDATE);
464 intel_ring_emit(ring, seqno);
465 intel_ring_emit(ring, mmio_offset);
469 * gen6_add_request - Update the semaphore mailbox registers
471 * @ring - ring that is adding a request
472 * @seqno - return seqno stuck into the ring
474 * Update the mailbox registers in the *other* rings with the current seqno.
475 * This acts like a signal in the canonical semaphore.
478 gen6_add_request(struct intel_ring_buffer *ring,
485 ret = intel_ring_begin(ring, 10);
489 mbox1_reg = ring->signal_mbox[0];
490 mbox2_reg = ring->signal_mbox[1];
492 *seqno = i915_gem_next_request_seqno(ring);
494 update_mboxes(ring, *seqno, mbox1_reg);
495 update_mboxes(ring, *seqno, mbox2_reg);
496 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
497 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
498 intel_ring_emit(ring, *seqno);
499 intel_ring_emit(ring, MI_USER_INTERRUPT);
500 intel_ring_advance(ring);
506 * intel_ring_sync - sync the waiter to the signaller on seqno
508 * @waiter - ring that is waiting
509 * @signaller - ring which has, or will signal
510 * @seqno - seqno which the waiter will block on
513 intel_ring_sync(struct intel_ring_buffer *waiter,
514 struct intel_ring_buffer *signaller,
519 u32 dw1 = MI_SEMAPHORE_MBOX |
520 MI_SEMAPHORE_COMPARE |
521 MI_SEMAPHORE_REGISTER;
523 ret = intel_ring_begin(waiter, 4);
527 intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
528 intel_ring_emit(waiter, seqno);
529 intel_ring_emit(waiter, 0);
530 intel_ring_emit(waiter, MI_NOOP);
531 intel_ring_advance(waiter);
536 int render_ring_sync_to(struct intel_ring_buffer *waiter,
537 struct intel_ring_buffer *signaller, u32 seqno);
538 int gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
539 struct intel_ring_buffer *signaller, u32 seqno);
540 int gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
541 struct intel_ring_buffer *signaller, u32 seqno);
543 /* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
545 render_ring_sync_to(struct intel_ring_buffer *waiter,
546 struct intel_ring_buffer *signaller,
549 KASSERT(signaller->semaphore_register[RCS] != MI_SEMAPHORE_SYNC_INVALID,
550 ("valid RCS semaphore"));
551 return intel_ring_sync(waiter,
557 /* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
559 gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
560 struct intel_ring_buffer *signaller,
563 KASSERT(signaller->semaphore_register[VCS] != MI_SEMAPHORE_SYNC_INVALID,
564 ("Valid VCS semaphore"));
565 return intel_ring_sync(waiter,
571 /* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
573 gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
574 struct intel_ring_buffer *signaller,
577 KASSERT(signaller->semaphore_register[BCS] != MI_SEMAPHORE_SYNC_INVALID,
578 ("Valid BCS semaphore"));
579 return intel_ring_sync(waiter,
585 #define PIPE_CONTROL_FLUSH(ring__, addr__) \
587 intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
588 PIPE_CONTROL_DEPTH_STALL); \
589 intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
590 intel_ring_emit(ring__, 0); \
591 intel_ring_emit(ring__, 0); \
595 pc_render_add_request(struct intel_ring_buffer *ring,
598 u32 seqno = i915_gem_next_request_seqno(ring);
599 struct pipe_control *pc = ring->private;
600 u32 scratch_addr = pc->gtt_offset + 128;
603 /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
604 * incoherent with writes to memory, i.e. completely fubar,
605 * so we need to use PIPE_NOTIFY instead.
607 * However, we also need to workaround the qword write
608 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
609 * memory before requesting an interrupt.
611 ret = intel_ring_begin(ring, 32);
615 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
616 PIPE_CONTROL_WRITE_FLUSH |
617 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
618 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
619 intel_ring_emit(ring, seqno);
620 intel_ring_emit(ring, 0);
621 PIPE_CONTROL_FLUSH(ring, scratch_addr);
622 scratch_addr += 128; /* write to separate cachelines */
623 PIPE_CONTROL_FLUSH(ring, scratch_addr);
625 PIPE_CONTROL_FLUSH(ring, scratch_addr);
627 PIPE_CONTROL_FLUSH(ring, scratch_addr);
629 PIPE_CONTROL_FLUSH(ring, scratch_addr);
631 PIPE_CONTROL_FLUSH(ring, scratch_addr);
632 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
633 PIPE_CONTROL_WRITE_FLUSH |
634 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
635 PIPE_CONTROL_NOTIFY);
636 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
637 intel_ring_emit(ring, seqno);
638 intel_ring_emit(ring, 0);
639 intel_ring_advance(ring);
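/*
 * Legacy add_request: store the new seqno into the hardware status page and
 * raise MI_USER_INTERRUPT so waiters get woken.
 */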
646 render_ring_add_request(struct intel_ring_buffer *ring,
649 u32 seqno = i915_gem_next_request_seqno(ring);
652 ret = intel_ring_begin(ring, 4);
656 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
657 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
658 intel_ring_emit(ring, seqno);
659 intel_ring_emit(ring, MI_USER_INTERRUPT);
660 intel_ring_advance(ring);
667 gen6_ring_get_seqno(struct intel_ring_buffer *ring)
669 struct drm_device *dev = ring->dev;
671 /* Workaround to force correct ordering between irq and seqno writes on
672 * ivb (and maybe also on snb) by reading from a CS register (like
673 * ACTHD) before reading the status page. */
674 if (/* IS_GEN6(dev) || */IS_GEN7(dev))
675 intel_ring_get_active_head(ring);
676 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
680 ring_get_seqno(struct intel_ring_buffer *ring)
682 if (ring->status_page.page_addr == NULL)
684 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
688 pc_render_get_seqno(struct intel_ring_buffer *ring)
690 struct pipe_control *pc = ring->private;
692 return pc->cpu_page[0];
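/*
 * Interrupt mask plumbing: Ironlake and newer route ring interrupts through
 * GTIMR, older parts through the legacy IMR register.
 */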
698 ironlake_enable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
700 dev_priv->gt_irq_mask &= ~mask;
701 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
706 ironlake_disable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
708 dev_priv->gt_irq_mask |= mask;
709 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
714 i915_enable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
716 dev_priv->irq_mask &= ~mask;
717 I915_WRITE(IMR, dev_priv->irq_mask);
722 i915_disable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
724 dev_priv->irq_mask |= mask;
725 I915_WRITE(IMR, dev_priv->irq_mask);
730 render_ring_get_irq(struct intel_ring_buffer *ring)
732 struct drm_device *dev = ring->dev;
733 drm_i915_private_t *dev_priv = dev->dev_private;
735 if (!dev->irq_enabled)
738 mtx_assert(&ring->irq_lock, MA_OWNED);
739 if (ring->irq_refcount++ == 0) {
740 if (HAS_PCH_SPLIT(dev))
741 ironlake_enable_irq(dev_priv,
742 GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
744 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
751 render_ring_put_irq(struct intel_ring_buffer *ring)
753 struct drm_device *dev = ring->dev;
754 drm_i915_private_t *dev_priv = dev->dev_private;
756 mtx_assert(&ring->irq_lock, MA_OWNED);
757 if (--ring->irq_refcount == 0) {
758 if (HAS_PCH_SPLIT(dev))
759 ironlake_disable_irq(dev_priv,
763 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
767 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
769 struct drm_device *dev = ring->dev;
770 drm_i915_private_t *dev_priv = dev->dev_private;
773 /* The ring status page addresses are no longer next to the rest of
774 * the ring registers as of gen7.
779 mmio = RENDER_HWS_PGA_GEN7;
782 mmio = BLT_HWS_PGA_GEN7;
785 mmio = BSD_HWS_PGA_GEN7;
788 } else if (IS_GEN6(dev)) {
789 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
791 mmio = RING_HWS_PGA(ring->mmio_base);
794 I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
799 bsd_ring_flush(struct intel_ring_buffer *ring,
800 uint32_t invalidate_domains,
801 uint32_t flush_domains)
805 ret = intel_ring_begin(ring, 2);
809 intel_ring_emit(ring, MI_FLUSH);
810 intel_ring_emit(ring, MI_NOOP);
811 intel_ring_advance(ring);
816 ring_add_request(struct intel_ring_buffer *ring,
822 ret = intel_ring_begin(ring, 4);
826 seqno = i915_gem_next_request_seqno(ring);
828 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
829 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
830 intel_ring_emit(ring, seqno);
831 intel_ring_emit(ring, MI_USER_INTERRUPT);
832 intel_ring_advance(ring);
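/*
 * Gen6 IRQ get/put take both the GT-level mask bit (gflag, for GTIMR) and
 * the per-ring mask bit (rflag, for the ring's IMR); force-wake is held
 * around the register writes.
 */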
839 gen6_ring_get_irq(struct intel_ring_buffer *ring, uint32_t gflag, uint32_t rflag)
841 struct drm_device *dev = ring->dev;
842 drm_i915_private_t *dev_priv = dev->dev_private;
844 if (!dev->irq_enabled)
847 gen6_gt_force_wake_get(dev_priv);
849 mtx_assert(&ring->irq_lock, MA_OWNED);
850 if (ring->irq_refcount++ == 0) {
851 ring->irq_mask &= ~rflag;
852 I915_WRITE_IMR(ring, ring->irq_mask);
853 ironlake_enable_irq(dev_priv, gflag);
860 gen6_ring_put_irq(struct intel_ring_buffer *ring, uint32_t gflag, uint32_t rflag)
862 struct drm_device *dev = ring->dev;
863 drm_i915_private_t *dev_priv = dev->dev_private;
865 mtx_assert(&ring->irq_lock, MA_OWNED);
866 if (--ring->irq_refcount == 0) {
867 ring->irq_mask |= rflag;
868 I915_WRITE_IMR(ring, ring->irq_mask);
869 ironlake_disable_irq(dev_priv, gflag);
872 gen6_gt_force_wake_put(dev_priv);
876 bsd_ring_get_irq(struct intel_ring_buffer *ring)
878 struct drm_device *dev = ring->dev;
879 drm_i915_private_t *dev_priv = dev->dev_private;
881 if (!dev->irq_enabled)
884 mtx_assert(&ring->irq_lock, MA_OWNED);
885 if (ring->irq_refcount++ == 0) {
887 i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
889 ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
895 bsd_ring_put_irq(struct intel_ring_buffer *ring)
897 struct drm_device *dev = ring->dev;
898 drm_i915_private_t *dev_priv = dev->dev_private;
900 mtx_assert(&ring->irq_lock, MA_OWNED);
901 if (--ring->irq_refcount == 0) {
903 i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
905 ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
910 ring_dispatch_execbuffer(struct intel_ring_buffer *ring, uint32_t offset,
915 ret = intel_ring_begin(ring, 2);
919 intel_ring_emit(ring,
920 MI_BATCH_BUFFER_START | (2 << 6) |
921 MI_BATCH_NON_SECURE_I965);
922 intel_ring_emit(ring, offset);
923 intel_ring_advance(ring);
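/*
 * Render-ring batch dispatch for pre-gen6 parts: i830/845 use the old
 * MI_BATCH_BUFFER (start, end) form, gen4+ use MI_BATCH_BUFFER_START with a
 * GTT-relative, non-secure address.
 */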
929 render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
930 uint32_t offset, uint32_t len)
932 struct drm_device *dev = ring->dev;
935 if (IS_I830(dev) || IS_845G(dev)) {
936 ret = intel_ring_begin(ring, 4);
940 intel_ring_emit(ring, MI_BATCH_BUFFER);
941 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
942 intel_ring_emit(ring, offset + len - 8);
943 intel_ring_emit(ring, 0);
945 ret = intel_ring_begin(ring, 2);
949 if (INTEL_INFO(dev)->gen >= 4) {
950 intel_ring_emit(ring,
951 MI_BATCH_BUFFER_START | (2 << 6) |
952 MI_BATCH_NON_SECURE_I965);
953 intel_ring_emit(ring, offset);
955 intel_ring_emit(ring,
956 MI_BATCH_BUFFER_START | (2 << 6));
957 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
960 intel_ring_advance(ring);
965 static void cleanup_status_page(struct intel_ring_buffer *ring)
967 drm_i915_private_t *dev_priv = ring->dev->dev_private;
968 struct drm_i915_gem_object *obj;
970 obj = ring->status_page.obj;
974 pmap_qremove((vm_offset_t)ring->status_page.page_addr, 1);
975 pmap_invalidate_range(kernel_pmap,
976 (vm_offset_t)ring->status_page.page_addr,
977 (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE);
978 kmem_free(kernel_map, (vm_offset_t)ring->status_page.page_addr,
980 i915_gem_object_unpin(obj);
981 drm_gem_object_unreference(&obj->base);
982 ring->status_page.obj = NULL;
984 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
987 static int init_status_page(struct intel_ring_buffer *ring)
989 struct drm_device *dev = ring->dev;
990 drm_i915_private_t *dev_priv = dev->dev_private;
991 struct drm_i915_gem_object *obj;
994 obj = i915_gem_alloc_object(dev, 4096);
996 DRM_ERROR("Failed to allocate status page\n");
1001 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1003 ret = i915_gem_object_pin(obj, 4096, true);
1008 ring->status_page.gfx_addr = obj->gtt_offset;
1009 ring->status_page.page_addr = (void *)kmem_alloc_nofault(kernel_map,
1011 if (ring->status_page.page_addr == NULL) {
1012 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
1015 pmap_qenter((vm_offset_t)ring->status_page.page_addr, &obj->pages[0],
1017 pmap_invalidate_range(kernel_pmap,
1018 (vm_offset_t)ring->status_page.page_addr,
1019 (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE);
1020 pmap_invalidate_cache_range((vm_offset_t)ring->status_page.page_addr,
1021 (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE);
1022 ring->status_page.obj = obj;
1023 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1025 intel_ring_setup_status_page(ring);
1026 DRM_DEBUG("i915: init_status_page %s hws offset: 0x%08x\n",
1027 ring->name, ring->status_page.gfx_addr);
1032 i915_gem_object_unpin(obj);
1034 drm_gem_object_unreference(&obj->base);
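/*
 * Common constructor: sets up the lists and irq lock, allocates and pins the
 * ring object, maps it write-combined through the GTT aperture and finally
 * calls the per-ring ->init() hook.
 */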
1040 int intel_init_ring_buffer(struct drm_device *dev,
1041 struct intel_ring_buffer *ring)
1043 struct drm_i915_gem_object *obj;
1047 INIT_LIST_HEAD(&ring->active_list);
1048 INIT_LIST_HEAD(&ring->request_list);
1049 INIT_LIST_HEAD(&ring->gpu_write_list);
1051 mtx_init(&ring->irq_lock, "ringb", NULL, MTX_DEF);
1052 ring->irq_mask = ~0;
1054 if (I915_NEED_GFX_HWS(dev)) {
1055 ret = init_status_page(ring);
1060 obj = i915_gem_alloc_object(dev, ring->size);
1062 DRM_ERROR("Failed to allocate ringbuffer\n");
1069 ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
1073 ring->map.size = ring->size;
1074 ring->map.offset = dev->agp->base + obj->gtt_offset;
1076 ring->map.flags = 0;
1079 drm_core_ioremap_wc(&ring->map, dev);
1080 if (ring->map.virtual == NULL) {
1081 DRM_ERROR("Failed to map ringbuffer.\n");
1086 ring->virtual_start = ring->map.virtual;
1087 ret = ring->init(ring);
1091 /* Workaround an erratum on the i830 which causes a hang if
1092 * the TAIL pointer points to within the last 2 cachelines
1095 ring->effective_size = ring->size;
1096 if (IS_I830(ring->dev) || IS_845G(ring->dev))
1097 ring->effective_size -= 128;
1102 drm_core_ioremapfree(&ring->map, dev);
1104 i915_gem_object_unpin(obj);
1106 drm_gem_object_unreference(&obj->base);
1109 cleanup_status_page(ring);
1113 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1115 struct drm_i915_private *dev_priv;
1118 if (ring->obj == NULL)
1121 /* Disable the ring buffer. The ring must be idle at this point */
1122 dev_priv = ring->dev->dev_private;
1123 ret = intel_wait_ring_idle(ring);
1124 I915_WRITE_CTL(ring, 0);
1126 drm_core_ioremapfree(&ring->map, ring->dev);
1128 i915_gem_object_unpin(ring->obj);
1129 drm_gem_object_unreference(&ring->obj->base);
1133 ring->cleanup(ring);
1135 cleanup_status_page(ring);
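/*
 * Wrap handling: if the next emit would run off the end of the buffer, pad
 * the remainder with MI_NOOP and reset TAIL back to the start.
 */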
1138 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
1141 int rem = ring->size - ring->tail;
1143 if (ring->space < rem) {
1144 int ret = intel_wait_ring_buffer(ring, rem);
1149 virt = (unsigned int *)((char *)ring->virtual_start + ring->tail);
1157 ring->space = ring_space(ring);
1162 static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
1164 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1165 bool was_interruptible;
1168 /* XXX As we have not yet audited all the paths to check that
1169 * they are ready for ERESTARTSYS from intel_ring_begin, do not
1170 * allow us to be interruptible by a signal.
1172 was_interruptible = dev_priv->mm.interruptible;
1173 dev_priv->mm.interruptible = false;
1175 ret = i915_wait_request(ring, seqno, true);
1177 dev_priv->mm.interruptible = was_interruptible;
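/*
 * Try to make room by retiring completed requests: reuse the space behind
 * last_retired_head if possible, otherwise wait for the oldest request whose
 * completion frees at least n bytes.
 */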
1182 static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1184 struct drm_i915_gem_request *request;
1188 i915_gem_retire_requests_ring(ring);
1190 if (ring->last_retired_head != -1) {
1191 ring->head = ring->last_retired_head;
1192 ring->last_retired_head = -1;
1193 ring->space = ring_space(ring);
1194 if (ring->space >= n)
1198 list_for_each_entry(request, &ring->request_list, list) {
1201 if (request->tail == -1)
1204 space = request->tail - (ring->tail + 8);
1206 space += ring->size;
1208 seqno = request->seqno;
1212 /* Consume this request in case we need more space than
1213 * is available and so need to prevent a race between
1214 * updating last_retired_head and direct reads of
1215 * I915_RING_HEAD. It also provides a nice sanity check.
1223 ret = intel_ring_wait_seqno(ring, seqno);
1227 if (ring->last_retired_head == -1)
1230 ring->head = ring->last_retired_head;
1231 ring->last_retired_head = -1;
1232 ring->space = ring_space(ring);
1233 if (ring->space < n)
1239 int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
1241 struct drm_device *dev = ring->dev;
1242 struct drm_i915_private *dev_priv = dev->dev_private;
1246 ret = intel_ring_wait_request(ring, n);
1250 CTR1(KTR_DRM, "ring_wait_begin %s", ring->name);
1251 if (drm_core_check_feature(dev, DRIVER_GEM))
1252 /* With GEM the hangcheck timer should kick us out of the loop;
1253 * leaving it early runs the risk of corrupting GEM state (due
1254 * to running on almost untested codepaths). But on resume
1255 * timers don't work yet, so prevent a complete hang in that
1256 * case by choosing an insanely large timeout. */
1257 end = ticks + hz * 60;
1259 end = ticks + hz * 3;
1261 ring->head = I915_READ_HEAD(ring);
1262 ring->space = ring_space(ring);
1263 if (ring->space >= n) {
1264 CTR1(KTR_DRM, "ring_wait_end %s", ring->name);
1269 if (dev->primary->master) {
1270 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1271 if (master_priv->sarea_priv)
1272 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1275 if (dev_priv->sarea_priv)
1276 dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1280 if (atomic_load_acq_32(&dev_priv->mm.wedged) != 0) {
1281 CTR1(KTR_DRM, "ring_wait_end %s wedged", ring->name);
1284 } while (!time_after(ticks, end));
1285 CTR1(KTR_DRM, "ring_wait_end %s busy", ring->name);
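/*
 * Reserve space for num_dwords before emitting commands. The usual pattern,
 * as used throughout this file, is:
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_FLUSH);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */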
1289 int intel_ring_begin(struct intel_ring_buffer *ring,
1292 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1293 int n = 4*num_dwords;
1296 if (atomic_load_acq_int(&dev_priv->mm.wedged))
1299 if (ring->tail + n > ring->effective_size) {
1300 ret = intel_wrap_ring_buffer(ring);
1305 if (ring->space < n) {
1306 ret = intel_wait_ring_buffer(ring, n);
1315 void intel_ring_advance(struct intel_ring_buffer *ring)
1317 ring->tail &= ring->size - 1;
1318 ring->write_tail(ring, ring->tail);
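/*
 * Template descriptors: these are copied wholesale into dev_priv->rings[]
 * and then patched per generation by the intel_init_*_ring_buffer()
 * functions at the bottom of the file.
 */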
1321 static const struct intel_ring_buffer render_ring = {
1322 .name = "render ring",
1324 .mmio_base = RENDER_RING_BASE,
1325 .size = 32 * PAGE_SIZE,
1326 .init = init_render_ring,
1327 .write_tail = ring_write_tail,
1328 .flush = render_ring_flush,
1329 .add_request = render_ring_add_request,
1330 .get_seqno = ring_get_seqno,
1331 .irq_get = render_ring_get_irq,
1332 .irq_put = render_ring_put_irq,
1333 .dispatch_execbuffer = render_ring_dispatch_execbuffer,
1334 .cleanup = render_ring_cleanup,
1335 .sync_to = render_ring_sync_to,
1336 .semaphore_register = {MI_SEMAPHORE_SYNC_INVALID,
1337 MI_SEMAPHORE_SYNC_RV,
1338 MI_SEMAPHORE_SYNC_RB},
1339 .signal_mbox = {GEN6_VRSYNC, GEN6_BRSYNC},
1342 /* ring buffer for bit-stream decoder */
1344 static const struct intel_ring_buffer bsd_ring = {
1347 .mmio_base = BSD_RING_BASE,
1348 .size = 32 * PAGE_SIZE,
1349 .init = init_ring_common,
1350 .write_tail = ring_write_tail,
1351 .flush = bsd_ring_flush,
1352 .add_request = ring_add_request,
1353 .get_seqno = ring_get_seqno,
1354 .irq_get = bsd_ring_get_irq,
1355 .irq_put = bsd_ring_put_irq,
1356 .dispatch_execbuffer = ring_dispatch_execbuffer,
1360 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1363 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1365 /* Every tail move must follow the sequence below */
1366 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1367 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1368 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
1369 I915_WRITE(GEN6_BSD_RNCID, 0x0);
1371 if (_intel_wait_for(ring->dev,
1372 (I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
1373 GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0, 50,
1374 true, "915g6i") != 0)
1375 DRM_ERROR("timed out waiting for IDLE Indicator\n");
1377 I915_WRITE_TAIL(ring, value);
1378 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1379 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1380 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
1383 static int gen6_ring_flush(struct intel_ring_buffer *ring,
1384 uint32_t invalidate, uint32_t flush)
1389 ret = intel_ring_begin(ring, 4);
1394 if (invalidate & I915_GEM_GPU_DOMAINS)
1395 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
1396 intel_ring_emit(ring, cmd);
1397 intel_ring_emit(ring, 0);
1398 intel_ring_emit(ring, 0);
1399 intel_ring_emit(ring, MI_NOOP);
1400 intel_ring_advance(ring);
1405 gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1406 uint32_t offset, uint32_t len)
1410 ret = intel_ring_begin(ring, 2);
1414 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
1415 /* bit0-7 is the length on GEN6+ */
1416 intel_ring_emit(ring, offset);
1417 intel_ring_advance(ring);
1423 gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
1425 return gen6_ring_get_irq(ring,
1427 GEN6_RENDER_USER_INTERRUPT);
1431 gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
1433 return gen6_ring_put_irq(ring,
1435 GEN6_RENDER_USER_INTERRUPT);
1439 gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
1441 return gen6_ring_get_irq(ring,
1442 GT_GEN6_BSD_USER_INTERRUPT,
1443 GEN6_BSD_USER_INTERRUPT);
1447 gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
1449 return gen6_ring_put_irq(ring,
1450 GT_GEN6_BSD_USER_INTERRUPT,
1451 GEN6_BSD_USER_INTERRUPT);
1454 /* ring buffer for Video Codec for Gen6+ */
1455 static const struct intel_ring_buffer gen6_bsd_ring = {
1456 .name = "gen6 bsd ring",
1458 .mmio_base = GEN6_BSD_RING_BASE,
1459 .size = 32 * PAGE_SIZE,
1460 .init = init_ring_common,
1461 .write_tail = gen6_bsd_ring_write_tail,
1462 .flush = gen6_ring_flush,
1463 .add_request = gen6_add_request,
1464 .get_seqno = gen6_ring_get_seqno,
1465 .irq_get = gen6_bsd_ring_get_irq,
1466 .irq_put = gen6_bsd_ring_put_irq,
1467 .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
1468 .sync_to = gen6_bsd_ring_sync_to,
1469 .semaphore_register = {MI_SEMAPHORE_SYNC_VR,
1470 MI_SEMAPHORE_SYNC_INVALID,
1471 MI_SEMAPHORE_SYNC_VB},
1472 .signal_mbox = {GEN6_RVSYNC, GEN6_BVSYNC},
1475 /* Blitter support (SandyBridge+) */
1478 blt_ring_get_irq(struct intel_ring_buffer *ring)
1480 return gen6_ring_get_irq(ring,
1481 GT_BLT_USER_INTERRUPT,
1482 GEN6_BLITTER_USER_INTERRUPT);
1486 blt_ring_put_irq(struct intel_ring_buffer *ring)
1488 gen6_ring_put_irq(ring,
1489 GT_BLT_USER_INTERRUPT,
1490 GEN6_BLITTER_USER_INTERRUPT);
1493 static int blt_ring_flush(struct intel_ring_buffer *ring,
1494 uint32_t invalidate, uint32_t flush)
1499 ret = intel_ring_begin(ring, 4);
1504 if (invalidate & I915_GEM_DOMAIN_RENDER)
1505 cmd |= MI_INVALIDATE_TLB;
1506 intel_ring_emit(ring, cmd);
1507 intel_ring_emit(ring, 0);
1508 intel_ring_emit(ring, 0);
1509 intel_ring_emit(ring, MI_NOOP);
1510 intel_ring_advance(ring);
1514 static const struct intel_ring_buffer gen6_blt_ring = {
1517 .mmio_base = BLT_RING_BASE,
1518 .size = 32 * PAGE_SIZE,
1519 .init = init_ring_common,
1520 .write_tail = ring_write_tail,
1521 .flush = blt_ring_flush,
1522 .add_request = gen6_add_request,
1523 .get_seqno = gen6_ring_get_seqno,
1524 .irq_get = blt_ring_get_irq,
1525 .irq_put = blt_ring_put_irq,
1526 .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
1527 .sync_to = gen6_blt_ring_sync_to,
1528 .semaphore_register = {MI_SEMAPHORE_SYNC_BR,
1529 MI_SEMAPHORE_SYNC_BV,
1530 MI_SEMAPHORE_SYNC_INVALID},
1531 .signal_mbox = {GEN6_RBSYNC, GEN6_VBSYNC},
1534 int intel_init_render_ring_buffer(struct drm_device *dev)
1536 drm_i915_private_t *dev_priv = dev->dev_private;
1537 struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
1539 *ring = render_ring;
1540 if (INTEL_INFO(dev)->gen >= 6) {
1541 ring->add_request = gen6_add_request;
1542 ring->flush = gen6_render_ring_flush;
1543 ring->irq_get = gen6_render_ring_get_irq;
1544 ring->irq_put = gen6_render_ring_put_irq;
1545 ring->get_seqno = gen6_ring_get_seqno;
1546 } else if (IS_GEN5(dev)) {
1547 ring->add_request = pc_render_add_request;
1548 ring->get_seqno = pc_render_get_seqno;
1551 if (!I915_NEED_GFX_HWS(dev)) {
1552 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1553 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1556 return intel_init_ring_buffer(dev, ring);
1559 int intel_render_ring_init_dri(struct drm_device *dev, uint64_t start,
1562 drm_i915_private_t *dev_priv = dev->dev_private;
1563 struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
1565 *ring = render_ring;
1566 if (INTEL_INFO(dev)->gen >= 6) {
1567 ring->add_request = gen6_add_request;
1568 ring->irq_get = gen6_render_ring_get_irq;
1569 ring->irq_put = gen6_render_ring_put_irq;
1570 } else if (IS_GEN5(dev)) {
1571 ring->add_request = pc_render_add_request;
1572 ring->get_seqno = pc_render_get_seqno;
1576 INIT_LIST_HEAD(&ring->active_list);
1577 INIT_LIST_HEAD(&ring->request_list);
1578 INIT_LIST_HEAD(&ring->gpu_write_list);
1581 ring->effective_size = ring->size;
1582 if (IS_I830(ring->dev))
1583 ring->effective_size -= 128;
1585 ring->map.offset = start;
1586 ring->map.size = size;
1588 ring->map.flags = 0;
1591 drm_core_ioremap_wc(&ring->map, dev);
1592 if (ring->map.virtual == NULL) {
1593 DRM_ERROR("can not ioremap virtual address for"
1598 ring->virtual_start = (void *)ring->map.virtual;
1602 int intel_init_bsd_ring_buffer(struct drm_device *dev)
1604 drm_i915_private_t *dev_priv = dev->dev_private;
1605 struct intel_ring_buffer *ring = &dev_priv->rings[VCS];
1607 if (IS_GEN6(dev) || IS_GEN7(dev))
1608 *ring = gen6_bsd_ring;
1612 return intel_init_ring_buffer(dev, ring);
1615 int intel_init_blt_ring_buffer(struct drm_device *dev)
1617 drm_i915_private_t *dev_priv = dev->dev_private;
1618 struct intel_ring_buffer *ring = &dev_priv->rings[BCS];
1620 *ring = gen6_blt_ring;
1622 return intel_init_ring_buffer(dev, ring);