#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
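/*
 * A sketch (not code from this header) of how the rule above is kept:
 * the space accounting leaves I915_RING_FREE_SPACE bytes of slack
 * between tail and head, so the two pointers never meet on a cacheline.
 * HEAD_ADDR (from i915_reg.h) masks the offset bits out of the head
 * register; the exact helper shape is an assumption:
 *
 *	int space = (ring->head & HEAD_ADDR) -
 *		    (ring->tail + I915_RING_FREE_SPACE);
 *	if (space < 0)
 *		space += ring->size;
 */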
struct intel_hw_status_page {
	u32		*page_addr;
	unsigned int	gfx_addr;
	struct		drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))

struct intel_ring_buffer {
	const char	*name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
	} id;
#define I915_NUM_RINGS 3
	u32		mmio_base;
	void		__iomem *virtual_start;
	struct		drm_device *dev;
	struct		drm_i915_gem_object *obj;

	u32		head;
	u32		tail;
	int		space;
	int		size;
	int		effective_size;
	struct intel_hw_status_page status_page;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32		last_retired_head;
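	/*
	 * A sketch of the consuming side (an assumption based on the
	 * comment above, not code from this header): a waiter that has
	 * run out of ring space reclaims everything up to the last
	 * retired request:
	 *
	 *	if (ring->last_retired_head != -1) {
	 *		ring->head = ring->last_retired_head;
	 *		ring->last_retired_head = -1;
	 *		ring->space = ring_space(ring);
	 *	}
	 */
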
	u32		irq_refcount;	/* protected by dev_priv->irq_lock */
	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
	u32		trace_irq_seqno;
	u32		sync_seqno[I915_NUM_RINGS-1];
	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
	void		(*irq_put)(struct intel_ring_buffer *ring);

	int		(*init)(struct intel_ring_buffer *ring);

	void		(*write_tail)(struct intel_ring_buffer *ring,
				      u32 value);
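	/*
	 * On most generations write_tail is a plain MMIO write (a
	 * sketch modelled on the common implementation; the function
	 * name here is an assumption):
	 *
	 *	static void ring_write_tail(struct intel_ring_buffer *ring,
	 *				    u32 value)
	 *	{
	 *		I915_WRITE_TAIL(ring, value);
	 *	}
	 */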
	int __must_check (*flush)(struct intel_ring_buffer *ring,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int		(*add_request)(struct intel_ring_buffer *ring);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32		(*get_seqno)(struct intel_ring_buffer *ring,
				     bool lazy_coherency);
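	/*
	 * Illustrative kick, modelled on the Gen6 implementation (an
	 * assumption here): a read of a command-streamer register such
	 * as ACTHD forces the pending seqno write to land before the
	 * status page is sampled:
	 *
	 *	if (!lazy_coherency)
	 *		intel_ring_get_active_head(ring);
	 *	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
	 */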
	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
					       u32 offset, u32 length,
					       unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
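	/*
	 * Example caller (a sketch of the execbuffer path, not part of
	 * this header): dispatch flags are derived from the userspace
	 * execbuffer flags before the batch is dispatched:
	 *
	 *	if (args->flags & I915_EXEC_SECURE)
	 *		flags |= I915_DISPATCH_SECURE;
	 *	ret = ring->dispatch_execbuffer(ring, exec_start,
	 *					exec_len, flags);
	 */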
	void		(*cleanup)(struct intel_ring_buffer *ring);
	int		(*sync_to)(struct intel_ring_buffer *ring,
				   struct intel_ring_buffer *to,
				   u32 seqno);

	u32		semaphore_register[3]; /* our mbox written by others */
	u32		signal_mbox[2]; /* mboxes this ring signals to */
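	/*
	 * Illustrative inter-ring synchronization (a sketch mirroring
	 * the object-sync path, not code from this header): ring "to"
	 * is made to wait until "from" has passed seqno, and the result
	 * is cached in sync_seqno so the same wait is not emitted twice:
	 *
	 *	ret = to->sync_to(to, from, seqno);
	 *	if (ret == 0)
	 *		from->sync_seqno[intel_ring_sync_index(from, to)] =
	 *			seqno;
	 */
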
	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	u32 outstanding_lazy_request;
	bool gpu_caches_dirty;

	wait_queue_head_t irq_queue;

	/**
	 * Do an explicit TLB flush before MI_SET_CONTEXT
	 */
	bool itlb_before_ctx_switch;
	struct i915_hw_context *default_context;
	struct drm_i915_gem_object *last_context_obj;
};

static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
{
	return ring->obj != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
		      struct intel_ring_buffer *other)
{
	int idx;

	/*
	 * cs -> 0 = vcs, 1 = bcs
	 * vcs -> 0 = bcs, 1 = cs
	 * bcs -> 0 = cs, 1 = vcs
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}
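
/*
 * Worked example of the index math above (assuming, as in the driver,
 * that the rings live in one contiguous array so the pointer difference
 * equals the id difference): for ring == vcs (id 1) and other == cs
 * (id 0), idx = (0 - 1) - 1 = -2, and -2 + I915_NUM_RINGS = 1, which
 * matches "vcs -> 1 = cs" in the table.
 */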

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return atomic_load_acq_32(ring->status_page.page_addr + reg);
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x20
#define I915_GEM_HWS_SCRATCH_INDEX	0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
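
/*
 * Example usage (a sketch, not part of this header): the breadcrumb
 * seqno that MI_STORE_DWORD_INDEX writes at I915_GEM_HWS_INDEX is read
 * back with:
 *
 *	u32 seqno = intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 */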

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	iowrite32(data, ring->virtual_start + ring->tail);
	ring->tail += 4;
}
void intel_ring_advance(struct intel_ring_buffer *ring);
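
/*
 * Typical emit sequence (illustrative; MI_NOOP comes from i915_reg.h):
 * intel_ring_begin() reserves space for n dwords and intel_ring_advance()
 * publishes the new tail to the hardware:
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */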
int __must_check intel_ring_idle(struct intel_ring_buffer *ring);

int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
	return ring->tail;
}

static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
{
	BUG_ON(ring->outstanding_lazy_request == 0);
	return ring->outstanding_lazy_request;
}

static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */