#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

struct intel_hw_status_page {
	u32	*page_addr;
	unsigned int	gfx_addr;
	struct	drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
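
/*
 * Illustrative sketch, not part of the original header: each accessor above
 * pairs the ring's mmio_base with a generic RING_* register offset from
 * i915_reg.h, so the same macro works for every engine. For example, the
 * common ring-init path verifies that a ring actually came up with:
 *
 *	if ((I915_READ_CTL(ring) & RING_VALID) == 0)
 *		DRM_ERROR("%s initialization failed\n", ring->name);
 *
 * (RING_VALID comes from i915_reg.h and DRM_ERROR from the DRM core.)
 */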

struct intel_ring_buffer {
	const char	*name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
	} id;
#define I915_NUM_RINGS 3
	u32		mmio_base;
	void		*virtual_start;
	struct		drm_device *dev;
	struct		drm_i915_gem_object *obj;

	u32		head;
	u32		tail;
	int		space;
	int		size;
	int		effective_size;
	struct		intel_hw_status_page status_page;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements (see the reclaim sketch after this
	 * struct definition).
	 */
	u32		last_retired_head;

	u32		irq_refcount;
	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
	u32		trace_irq_seqno;
	u32		sync_seqno[I915_NUM_RINGS-1];
	bool		(*irq_get)(struct intel_ring_buffer *ring);
	void		(*irq_put)(struct intel_ring_buffer *ring);

	int		(*init)(struct intel_ring_buffer *ring);

	void		(*write_tail)(struct intel_ring_buffer *ring,
				      u32 value);
	int		(*flush)(struct intel_ring_buffer *ring,
				 uint32_t invalidate_domains,
				 uint32_t flush_domains);
	int		(*add_request)(struct intel_ring_buffer *ring,
				       uint32_t *seqno);
	uint32_t	(*get_seqno)(struct intel_ring_buffer *ring);
	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
					       uint32_t offset, uint32_t length);
	void		(*cleanup)(struct intel_ring_buffer *ring);
	int		(*sync_to)(struct intel_ring_buffer *ring,
				   struct intel_ring_buffer *to,
				   u32 seqno);

	u32		semaphore_register[3]; /* our mbox written by others */
	u32		signal_mbox[2];	/* mboxes this ring signals to */

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives.  last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * List of objects currently pending a GPU write flush.
	 *
	 * All elements on this list will belong to either the
	 * active_list or flushing_list, last_rendering_seqno can
	 * be used to differentiate between the two elements.
	 */
	struct list_head gpu_write_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	u32 outstanding_lazy_request;

	/**
	 * Do an explicit TLB flush before MI_SET_CONTEXT
	 */
	bool itlb_before_ctx_switch;
	struct i915_hw_context *default_context;
	struct drm_i915_gem_object *last_context_obj;
};
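
/*
 * Illustrative sketch, not part of the original header: how the ring-wait
 * path consumes last_retired_head. Request retirement records the ring
 * position of each completed request; folding that value into head reclaims
 * the space it occupied, and writing -1 marks it consumed. The space
 * arithmetic is a simplified version of the driver's internal ring_space()
 * helper; the function name here is hypothetical.
 */
#if 0
static void example_reclaim_retired_space(struct intel_ring_buffer *ring)
{
	if (ring->last_retired_head != -1) {
		ring->head = ring->last_retired_head;
		ring->last_retired_head = -1;

		/* Free bytes between tail and head, keeping an 8-byte pad so
		 * that head == tail can only ever mean "empty". */
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
	}
}
#endif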

static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
{
	return ring->obj != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
	return 1 << ring->id;
}

static inline uint32_t
intel_ring_sync_index(struct intel_ring_buffer *ring,
		      struct intel_ring_buffer *other)
{
	int idx;

	/*
	 * cs -> 0 = vcs, 1 = bcs
	 * vcs -> 0 = bcs, 1 = cs,
	 * bcs -> 0 = cs, 1 = vcs.
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}
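
/*
 * Worked example, not part of the original header: the rings sit in one
 * contiguous array in RCS (cs), VCS (vcs), BCS (bcs) order, so the pointer
 * subtraction above implements the table. Asking bcs for its index against
 * vcs gives (other - ring) - 1 = (1 - 2) - 1 = -2, wrapped to
 * -2 + I915_NUM_RINGS = 1, matching the "bcs -> 1 = vcs" row.
 */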

static inline uint32_t
intel_read_status_page(struct intel_ring_buffer *ring, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return (atomic_load_acq_32(ring->status_page.page_addr + reg));
}
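
/*
 * Usage sketch, not part of the original header: the canonical consumer is
 * a ring's get_seqno() hook, which reads back the sequence number that an
 * MI_STORE_DWORD_INDEX command wrote into the hardware status page, e.g.:
 *
 *	seqno = intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 *
 * (I915_GEM_HWS_INDEX, conventionally dword 0x20, is assumed to be defined
 * elsewhere in the driver.)
 */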

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
{
	/* A ring is idle once all of it but the 8-byte pad is free. */
	return (intel_wait_ring_buffer(ring, ring->size - 8));
}

int intel_ring_begin(struct intel_ring_buffer *ring, int n);

static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   uint32_t data)
{
	*(volatile uint32_t *)((char *)ring->virtual_start +
	    ring->tail) = data;
	ring->tail += 4;
}

void intel_ring_advance(struct intel_ring_buffer *ring);
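
/*
 * Usage sketch, not part of the original header: commands are emitted as a
 * begin/emit/advance sequence. intel_ring_begin() reserves space (and may
 * wait for it), intel_ring_emit() stores one dword at the software tail,
 * and intel_ring_advance() publishes the new tail to the hardware. The
 * function name below is hypothetical; MI_FLUSH and MI_NOOP come from
 * i915_reg.h.
 */
#if 0
static int example_emit_flush(struct intel_ring_buffer *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 2);	/* reserve two dwords */
	if (ret != 0)
		return (ret);

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);		/* write tail to the ring register */

	return (0);
}
#endif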

uint32_t intel_ring_get_seqno(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
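
/*
 * Illustrative sketch, not part of the original header: the GEM init path
 * brings the rings up in this order, skipping engines the hardware lacks
 * (the real path also tears down already-initialized rings on failure).
 * The function name is hypothetical; HAS_BSD()/HAS_BLT() are assumed from
 * i915_drv.h.
 */
#if 0
static int example_init_rings(struct drm_device *dev)
{
	int ret;

	ret = intel_init_render_ring_buffer(dev);
	if (ret != 0)
		return (ret);

	if (HAS_BSD(dev)) {
		ret = intel_init_bsd_ring_buffer(dev);
		if (ret != 0)
			return (ret);
	}

	if (HAS_BLT(dev)) {
		ret = intel_init_blt_ring_buffer(dev);
		if (ret != 0)
			return (ret);
	}

	return (0);
}
#endif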

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
	return ring->tail;
}

void i915_trace_irq_get(struct intel_ring_buffer *ring, uint32_t seqno);

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, uint64_t start,
    uint32_t size);

#endif /* _INTEL_RINGBUFFER_H_ */