/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_drv.h>
#include <dev/drm2/i915/intel_ringbuffer.h>
#define LP_RING(d) (&((struct drm_i915_private *)(d))->rings[RCS])

#define BEGIN_LP_RING(n) \
        intel_ring_begin(LP_RING(dev_priv), (n))

#define OUT_RING(x) \
        intel_ring_emit(LP_RING(dev_priv), x)

#define ADVANCE_LP_RING() \
        intel_ring_advance(LP_RING(dev_priv))
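/*
 * Usage sketch (illustrative, not part of the driver): ring writes in
 * this file follow a begin/emit/advance pattern, and BEGIN_LP_RING()
 * can fail when the ring lacks space, so its result must be checked:
 *
 *        if (BEGIN_LP_RING(2) == 0) {
 *                OUT_RING(MI_FLUSH);
 *                OUT_RING(MI_NOOP);
 *                ADVANCE_LP_RING();
 *        }
 *
 * The macros assume a variable named dev_priv is in scope, as is the
 * convention throughout the legacy DRI1 paths below.
 */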
/*
 * Lock test for when it's just for synchronization of ring access.
 *
 * In that case, we don't need to do it when GEM is initialized as nobody else
 * has access to the ring.
 */
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {              \
        if (LP_RING(dev->dev_private)->obj == NULL)             \
                LOCK_TEST_WITH_RETURN(dev, file);               \
} while (0)
static inline u32
intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
{
        if (I915_NEED_GFX_HWS(dev_priv->dev))
                return ((volatile u32 *)(dev_priv->dri1.gfx_hws_cpu_addr))[reg];
        else
                return intel_read_status_page(LP_RING(dev_priv), reg);
}

#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_BREADCRUMB_INDEX 0x21
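/*
 * Editorial note: the breadcrumb is simply dword 0x21 of the hardware
 * status page.  i915_emit_breadcrumb() below stores dev_priv->counter
 * there via MI_STORE_DWORD_INDEX, so READ_BREADCRUMB() returns the
 * sequence number of the last breadcrumb the GPU has written back.
 */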
void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv;

        if (dev->primary->master) {
                master_priv = dev->primary->master->driver_priv;
                if (master_priv->sarea_priv)
                        master_priv->sarea_priv->last_dispatch =
                            READ_BREADCRUMB(dev_priv);
        }
}
static void i915_write_hws_pga(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 addr;

        addr = dev_priv->status_page_dmah->busaddr;
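        /*
         * Editorial note: on gen4+ the status page register also carries
         * bits 35:32 of the physical address in register bits 7:4, so for
         * a busaddr of 0x2dead0000 the value written below is
         * 0xdead0000 | 0x20.
         */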
        if (INTEL_INFO(dev)->gen >= 4)
                addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
        I915_WRITE(HWS_PGA, addr);
}
/*
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        /*
         * Program Hardware Status Page
         * XXXKIB Keep 4GB limit for allocation for now.  This method
         * of allocation is used on <= 965 hardware, that has several
         * errata regarding the use of physical memory > 4 GB.
         */
        dev_priv->status_page_dmah =
            drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, BUS_SPACE_MAXADDR);
        if (!dev_priv->status_page_dmah) {
                DRM_ERROR("Cannot allocate hardware status page\n");
                return -ENOMEM;
        }
        ring->status_page.page_addr = dev_priv->hw_status_page =
            dev_priv->status_page_dmah->vaddr;
        dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

        memset(dev_priv->hw_status_page, 0, PAGE_SIZE);

        i915_write_hws_pga(dev);
        DRM_DEBUG("Enabled hardware status page, phys %jx\n",
            (uintmax_t)dev_priv->dma_status_page);
        return 0;
}
/*
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        if (dev_priv->status_page_dmah) {
                drm_pci_free(dev, dev_priv->status_page_dmah);
                dev_priv->status_page_dmah = NULL;
        }

        if (dev_priv->status_gfx_addr) {
                dev_priv->status_gfx_addr = 0;
                ring->status_page.gfx_addr = 0;
                pmap_unmapdev((vm_offset_t)dev_priv->dri1.gfx_hws_cpu_addr,
                    PAGE_SIZE);
        }

        /* Need to rewrite hardware status page */
        I915_WRITE(HWS_PGA, 0x1ffff000);
}
void i915_kernel_lost_context(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        /*
         * We should never lose context on the ring with modesetting
         * as we don't expose it to userspace
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;

        ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
        ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
        ring->space = ring->head - (ring->tail + 8);
        if (ring->space < 0)
                ring->space += ring->size;
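        /*
         * Worked example (editorial): with a 4 KiB ring, head 0x100 and
         * tail 0xf00 give space = 0x100 - (0xf00 + 8) = -0xe08, and
         * adding ring->size yields 0x1f8 free bytes.  The +8 keeps tail
         * from catching up to head, which would read back as empty.
         */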
        if (!dev->primary->master)
                return;

        master_priv = dev->primary->master->driver_priv;
        if (ring->head == ring->tail && master_priv->sarea_priv)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
static int i915_dma_cleanup(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;

        /* Make sure interrupts are disabled here because the uninstall ioctl
         * may not have been called from userspace and after dev_private
         * is freed, it's too late.
         */
        if (dev->irq_enabled)
                drm_irq_uninstall(dev);

        for (i = 0; i < I915_NUM_RINGS; i++)
                intel_cleanup_ring_buffer(&dev_priv->rings[i]);

        /* Clear the HWS virtual address at teardown */
        if (I915_NEED_GFX_HWS(dev))
                i915_free_hws(dev);

        return 0;
}
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        int ret;

        master_priv->sarea = drm_getsarea(dev);
        if (master_priv->sarea) {
                master_priv->sarea_priv = (drm_i915_sarea_t *)
                    ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
        } else {
                DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
        }

        if (init->ring_size != 0) {
                if (LP_RING(dev_priv)->obj != NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("Client tried to initialize ringbuffer in "
                                  "GEM mode\n");
                        return -EINVAL;
                }

                ret = intel_render_ring_init_dri(dev,
                    init->ring_start, init->ring_size);
                if (ret) {
                        i915_dma_cleanup(dev);
                        return ret;
                }
        }

        dev_priv->cpp = init->cpp;
        dev_priv->back_offset = init->back_offset;
        dev_priv->front_offset = init->front_offset;
        dev_priv->current_page = 0;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->pf_current_page = 0;

        /* Allow hardware batchbuffers unless told otherwise.
         */
        dev_priv->dri1.allow_batchbuffer = 1;

        return 0;
}
static int i915_dma_resume(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        if (ring->virtual_start == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }

        /* Program Hardware Status Page */
        if (!ring->status_page.page_addr) {
                DRM_ERROR("Can not find hardware status page\n");
                return -EINVAL;
        }
        DRM_DEBUG("hw status page @ %p\n", ring->status_page.page_addr);
        if (ring->status_page.gfx_addr != 0)
                intel_ring_setup_status_page(ring);
        else
                i915_write_hws_pga(dev);

        DRM_DEBUG("Enabled hardware status page\n");

        return 0;
}
static int i915_dma_init(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
        drm_i915_init_t *init = data;
        int retcode = 0;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        switch (init->func) {
        case I915_INIT_DMA:
                retcode = i915_initialize(dev, init);
                break;
        case I915_CLEANUP_DMA:
                retcode = i915_dma_cleanup(dev);
                break;
        case I915_RESUME_DMA:
                retcode = i915_dma_resume(dev);
                break;
        default:
                retcode = -EINVAL;
                break;
        }

        return retcode;
}
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE. These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction. It's important to get the size right as
 * that tells us where the next instruction to check is. Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int validate_cmd(int cmd)
{
        switch (((cmd >> 29) & 0x7)) {
        case 0x0:
                switch ((cmd >> 23) & 0x3f) {
                case 0x0:
                        return 1;       /* MI_NOOP */
                case 0x4:
                        return 1;       /* MI_FLUSH */
                default:
                        return 0;       /* disallow everything else */
                }
        case 0x1:
                return 0;       /* reserved */
        case 0x2:
                return (cmd & 0xff) + 2;        /* 2d commands */
        case 0x3:
                if (((cmd >> 24) & 0x1f) <= 0x18)
                        return 1;
                switch ((cmd >> 24) & 0x1f) {
                case 0x1c:
                        return 1;
                case 0x1d:
                        switch ((cmd >> 16) & 0xff) {
                        case 0x3:
                                return (cmd & 0x1f) + 2;
                        case 0x4:
                                return (cmd & 0xf) + 2;
                        default:
                                return (cmd & 0xffff) + 2;
                        }
                case 0x1e:
                        if (cmd & (1 << 23))
                                return (cmd & 0xffff) + 1;
                        else
                                return 1;
                case 0x1f:
                        if ((cmd & (1 << 23)) == 0)     /* inline vertices */
                                return (cmd & 0x1ffff) + 2;
                        else if (cmd & (1 << 17))       /* indirect random */
                                if ((cmd & 0xffff) == 0)
                                        return 0; /* unknown length, too hard */
                                else
                                        return (((cmd & 0xffff) + 1) / 2) + 1;
                        else
                                return 2;       /* indirect sequential */
                default:
                        return 0;
                }
        default:
                return 0;
        }
}
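/*
 * Worked example (editorial): a 2D blit such as XY_COLOR_BLT arrives as
 * cmd = 0x54000004.  (cmd >> 29) & 0x7 == 2, so validate_cmd() returns
 * (cmd & 0xff) + 2 == 6 dwords, and the scanner in i915_emit_cmds()
 * below skips ahead 6 dwords to the next command header.  A return of 0
 * marks an illegal instruction and aborts the rest of the buffer.
 */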
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;

        if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
                return -EINVAL;
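        /*
         * Editorial note: the "- 8" above keeps a small gap between tail
         * and head, and the (dwords+1)&~1 below rounds the reservation up
         * to an even number of dwords so the ring tail stays qword
         * aligned; an odd command count is padded with a trailing zero
         * dword (MI_NOOP).
         */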
        BEGIN_LP_RING((dwords+1)&~1);

        for (i = 0; i < dwords;) {
                int sz, cmd;

                if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
                        return -EINVAL;

                if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
                        return -EINVAL;

                OUT_RING(cmd);
                while (++i, --sz) {
                        if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
                            sizeof(cmd)))
                                return -EINVAL;
                        OUT_RING(cmd);
                }
        }

        if (dwords & 1)
                OUT_RING(0);
        ADVANCE_LP_RING();
        return 0;
}
int
i915_emit_box(struct drm_device *dev,
    struct drm_clip_rect *box, int DR1, int DR4)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
            box->y2 <= 0 || box->x2 <= 0) {
                DRM_ERROR("Bad box %d,%d..%d,%d\n",
                          box->x1, box->y1, box->x2, box->y2);
                return -EINVAL;
        }

        if (INTEL_INFO(dev)->gen >= 4) {
                ret = BEGIN_LP_RING(4);
                if (ret)
                        return ret;

                OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
                OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
                OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
                OUT_RING(DR4);
        } else {
                ret = BEGIN_LP_RING(6);
                if (ret)
                        return ret;

                OUT_RING(GFX_OP_DRAWRECT_INFO);
                OUT_RING(DR1);
                OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
                OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
                OUT_RING(DR4);
                OUT_RING(0);
        }
        ADVANCE_LP_RING();

        return 0;
}
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */
static void i915_emit_breadcrumb(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

        if (++dev_priv->counter > 0x7FFFFFFFUL)
                dev_priv->counter = 0;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->last_enqueue = dev_priv->counter;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->counter);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }
}
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
    drm_i915_cmdbuffer_t *cmd, struct drm_clip_rect *cliprects, void *cmdbuf)
{
        int nbox = cmd->num_cliprects;
        int i = 0, count, ret;

        if (cmd->sz & 0x3) {
                DRM_ERROR("alignment\n");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;
        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        ret = i915_emit_box(dev, &cliprects[i],
                            cmd->DR1, cmd->DR4);
                        if (ret)
                                return ret;
                }

                ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
                if (ret)
                        return ret;
        }

        i915_emit_breadcrumb(dev);
        return 0;
}
static int i915_dispatch_batchbuffer(struct drm_device * dev,
    drm_i915_batchbuffer_t * batch, struct drm_clip_rect *cliprects)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int nbox = batch->num_cliprects;
        int i, count, ret;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if ((batch->start | batch->used) & 0x7) {
                DRM_ERROR("alignment\n");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;
        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        ret = i915_emit_box(dev, &cliprects[i],
                            batch->DR1, batch->DR4);
                        if (ret)
                                return ret;
                }

                if (!IS_I830(dev) && !IS_845G(dev)) {
                        ret = BEGIN_LP_RING(2);
                        if (ret)
                                return ret;

                        if (INTEL_INFO(dev)->gen >= 4) {
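                                /*
                                 * Editorial note: (2 << 6) is the
                                 * MI_BATCH_GTT flag in the i915 register
                                 * headers; it makes the command streamer
                                 * fetch the batch through the GTT instead
                                 * of treating batch->start as a physical
                                 * address.
                                 */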
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) |
                                    MI_BATCH_NON_SECURE_I965);
                                OUT_RING(batch->start);
                        } else {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
                                OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        }
                } else {
                        ret = BEGIN_LP_RING(4);
                        if (ret)
                                return ret;

                        OUT_RING(MI_BATCH_BUFFER);
                        OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        OUT_RING(batch->start + batch->used - 4);
                        OUT_RING(0);
                }
                ADVANCE_LP_RING();
        }

        i915_emit_breadcrumb(dev);
        return 0;
}
static int i915_dispatch_flip(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv =
            dev->primary->master->driver_priv;
        int ret;

        if (!master_priv->sarea_priv)
                return -EINVAL;

        DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
                  __func__,
                  dev_priv->current_page,
                  master_priv->sarea_priv->pf_current_page);

        i915_kernel_lost_context(dev);

        ret = BEGIN_LP_RING(10);
        if (ret)
                return ret;

        OUT_RING(MI_FLUSH | MI_READ_FLUSH);
        OUT_RING(0);

        OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
        OUT_RING(0);
        if (dev_priv->current_page == 0) {
                OUT_RING(dev_priv->back_offset);
                dev_priv->current_page = 1;
        } else {
                OUT_RING(dev_priv->front_offset);
                dev_priv->current_page = 0;
        }
        OUT_RING(0);

        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
        OUT_RING(0);

        ADVANCE_LP_RING();

        master_priv->sarea_priv->last_enqueue = dev_priv->counter++;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->counter);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }

        master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
        return 0;
}
static int i915_quiescent(struct drm_device *dev)
{
        struct intel_ring_buffer *ring = LP_RING(dev->dev_private);

        i915_kernel_lost_context(dev);
        return (intel_wait_ring_idle(ring));
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
        int ret;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        ret = i915_quiescent(dev);

        return ret;
}
int i915_batchbuffer(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
            master_priv->sarea_priv;
        drm_i915_batchbuffer_t *batch = data;
        struct drm_clip_rect *cliprects;
        size_t cliplen;
        int ret;

        if (!dev_priv->dri1.allow_batchbuffer) {
                DRM_ERROR("Batchbuffer ioctl disabled\n");
                return -EINVAL;
        }

        DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
                  batch->start, batch->used, batch->num_cliprects);

        cliplen = batch->num_cliprects * sizeof(struct drm_clip_rect);
        if (batch->num_cliprects < 0)
                return -EINVAL;
        if (batch->num_cliprects != 0) {
                cliprects = malloc(batch->num_cliprects *
                    sizeof(struct drm_clip_rect), DRM_MEM_DMA,
                    M_WAITOK | M_ZERO);

                ret = -copyin(batch->cliprects, cliprects,
                    batch->num_cliprects * sizeof(struct drm_clip_rect));
                if (ret != 0)
                        goto fail_free;
        } else
                cliprects = NULL;

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
        ret = i915_dispatch_batchbuffer(dev, batch, cliprects);

        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
        free(cliprects, DRM_MEM_DMA);

        return ret;
}
int i915_cmdbuffer(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
            master_priv->sarea_priv;
        drm_i915_cmdbuffer_t *cmdbuf = data;
        struct drm_clip_rect *cliprects = NULL;
        void *batch_data;
        int ret;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
                  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

        if (cmdbuf->num_cliprects < 0)
                return -EINVAL;

        batch_data = malloc(cmdbuf->sz, DRM_MEM_DMA, M_WAITOK);

        ret = -copyin(cmdbuf->buf, batch_data, cmdbuf->sz);
        if (ret != 0)
                goto fail_batch_free;

        if (cmdbuf->num_cliprects) {
                cliprects = malloc(cmdbuf->num_cliprects *
                    sizeof(struct drm_clip_rect), DRM_MEM_DMA,
                    M_WAITOK | M_ZERO);
                ret = -copyin(cmdbuf->cliprects, cliprects,
                    cmdbuf->num_cliprects * sizeof(struct drm_clip_rect));
                if (ret != 0)
                        goto fail_clip_free;
        }

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
        ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
        if (ret) {
                DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
                goto fail_clip_free;
        }

        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
        free(cliprects, DRM_MEM_DMA);
fail_batch_free:
        free(batch_data, DRM_MEM_DMA);

        return ret;
}
static int i915_emit_irq(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

        i915_kernel_lost_context(dev);

        DRM_DEBUG("i915: emit_irq\n");

        dev_priv->counter++;
        if (dev_priv->counter > 0x7FFFFFFFUL)
                dev_priv->counter = 1;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->last_enqueue = dev_priv->counter;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->counter);
                OUT_RING(MI_USER_INTERRUPT);
                ADVANCE_LP_RING();
        }

        return dev_priv->counter;
}
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        int ret = 0;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
                  READ_BREADCRUMB(dev_priv));

        if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
                if (master_priv->sarea_priv)
                        master_priv->sarea_priv->last_dispatch =
                            READ_BREADCRUMB(dev_priv);
                return 0;
        }

        if (master_priv->sarea_priv)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

        mtx_lock(&dev_priv->irq_lock);
        if (ring->irq_get(ring)) {
                while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) {
                        ret = -msleep(ring, &dev_priv->irq_lock, PCATCH,
                            "915wtq", 3 * hz);
                        if (ret == -ERESTART)
                                ret = -ERESTARTSYS;
                }
                ring->irq_put(ring);
                mtx_unlock(&dev_priv->irq_lock);
        } else {
                mtx_unlock(&dev_priv->irq_lock);
                if (_intel_wait_for(dev, READ_BREADCRUMB(dev_priv) >= irq_nr,
                    3000, 1, "915wir"))
                        ret = -EBUSY;
        }

        if (ret == -EBUSY)
                DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
                          READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);

        return ret;
}
/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_irq_emit_t *emit = data;
        int result;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        result = i915_emit_irq(dev);

        if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
                DRM_ERROR("copy_to_user\n");
                return -EFAULT;
        }

        return 0;
}
/* Doesn't need the hardware lock.
 */
static int i915_irq_wait(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_irq_wait_t *irqwait = data;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        return i915_wait_irq(dev, irqwait->irq_seq);
}
static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_vblank_pipe_t *pipe = data;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

        return 0;
}
/*
 * Schedule buffer swap at given vertical blank.
 */
static int i915_vblank_swap(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
        /* The delayed swap mechanism was fundamentally racy, and has been
         * removed.  The model was that the client requested a delayed flip/swap
         * from the kernel, then waited for vblank before continuing to perform
         * rendering.  The problem was that the kernel might wake the client
         * up before it dispatched the vblank swap (since the lock has to be
         * held while touching the ringbuffer), in which case the client would
         * clear and start the next frame before the swap occurred, and
         * flicker would occur in addition to likely missing the vblank.
         *
         * In the absence of this ioctl, userland falls back to a correct path
         * of waiting for a vblank, then dispatching the swap on its own.
         * Context switching to userland and back is plenty fast enough for
         * meeting the requirements of vblank swapping.
         */
        return -EINVAL;
}
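/*
 * Illustrative sketch (editorial, not part of the driver): the userland
 * fallback described above amounts to something like the following
 * libdrm fragment, which waits for the next vblank and then performs
 * the swap itself:
 *
 *        drmVBlank vbl;
 *        memset(&vbl, 0, sizeof(vbl));
 *        vbl.request.type = DRM_VBLANK_RELATIVE;
 *        vbl.request.sequence = 1;
 *        drmWaitVBlank(fd, &vbl);
 *        ... dispatch the swap blit ...
 */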
static int i915_flip_bufs(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
        int ret;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        DRM_DEBUG("%s\n", __func__);

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        ret = i915_dispatch_flip(dev);

        return ret;
}
int i915_getparam(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_getparam_t *param = data;
        int value;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        switch (param->param) {
        case I915_PARAM_IRQ_ACTIVE:
                value = dev->irq_enabled ? 1 : 0;
                break;
        case I915_PARAM_ALLOW_BATCHBUFFER:
                value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
                break;
        case I915_PARAM_LAST_DISPATCH:
                value = READ_BREADCRUMB(dev_priv);
                break;
        case I915_PARAM_CHIPSET_ID:
                value = dev->pci_device;
                break;
        case I915_PARAM_HAS_GEM:
                value = 1;
                break;
        case I915_PARAM_NUM_FENCES_AVAIL:
                value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
                break;
        case I915_PARAM_HAS_OVERLAY:
                value = dev_priv->overlay ? 1 : 0;
                break;
        case I915_PARAM_HAS_PAGEFLIPPING:
                value = 1;
                break;
        case I915_PARAM_HAS_EXECBUF2:
                value = 1;
                break;
        case I915_PARAM_HAS_BSD:
                value = intel_ring_initialized(&dev_priv->rings[VCS]);
                break;
        case I915_PARAM_HAS_BLT:
                value = intel_ring_initialized(&dev_priv->rings[BCS]);
                break;
        case I915_PARAM_HAS_RELAXED_FENCING:
                value = 1;
                break;
        case I915_PARAM_HAS_COHERENT_RINGS:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_CONSTANTS:
                value = INTEL_INFO(dev)->gen >= 4;
                break;
        case I915_PARAM_HAS_RELAXED_DELTA:
                value = 1;
                break;
        case I915_PARAM_HAS_GEN7_SOL_RESET:
                value = 1;
                break;
        case I915_PARAM_HAS_LLC:
                value = HAS_LLC(dev);
                break;
        case I915_PARAM_HAS_ALIASING_PPGTT:
                value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
                break;
        default:
                DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                    param->param);
                return -EINVAL;
        }

        if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
                DRM_ERROR("DRM_COPY_TO_USER failed\n");
                return -EFAULT;
        }

        return 0;
}
static int i915_setparam(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_setparam_t *param = data;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        switch (param->param) {
        case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
                break;
        case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
                break;
        case I915_SETPARAM_ALLOW_BATCHBUFFER:
                dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
                break;
        case I915_SETPARAM_NUM_USED_FENCES:
                if (param->value > dev_priv->num_fence_regs ||
                    param->value < 0)
                        return -EINVAL;
                /* Userspace can use first N regs */
                dev_priv->fence_reg_start = param->value;
                break;
        default:
                DRM_DEBUG("unknown parameter %d\n", param->param);
                return -EINVAL;
        }

        return 0;
}
static int i915_set_status_page(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_hws_addr_t *hws = data;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (!I915_NEED_GFX_HWS(dev))
                return -EINVAL;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                DRM_ERROR("tried to set status page when mode setting active\n");
                return 0;
        }

        ring->status_page.gfx_addr = dev_priv->status_gfx_addr =
            hws->addr & (0x1ffff<<12);
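        /*
         * Editorial note: the mask keeps bits [28:12] of hws->addr, i.e.
         * a page-aligned offset within the first 512 MiB of the GTT
         * aperture; the offset is then mapped write-combining below so
         * the CPU can read the status page.
         */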
        dev_priv->dri1.gfx_hws_cpu_addr = pmap_mapdev_attr(
            dev->agp->base + hws->addr, PAGE_SIZE,
            VM_MEMATTR_WRITE_COMBINING);
        if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
                i915_dma_cleanup(dev);
                ring->status_page.gfx_addr = dev_priv->status_gfx_addr = 0;
                DRM_ERROR("can not ioremap virtual address for"
                          " G33 hw status page\n");
                return -ENOMEM;
        }

        memset(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
        I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
        DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
            dev_priv->status_gfx_addr);
        DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
        return 0;
}
static int
i915_load_modeset_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = intel_parse_bios(dev);
        if (ret)
                DRM_INFO("failed to find VBIOS tables\n");

        intel_register_dsm_handler();

        /* Initialise stolen first so that we may reserve preallocated
         * objects for the BIOS to KMS transition.
         */
        ret = i915_gem_init_stolen(dev);
        if (ret)
                goto cleanup_vga_switcheroo;

        intel_modeset_init(dev);

        ret = i915_gem_init(dev);
        if (ret)
                goto cleanup_gem_stolen;

        intel_modeset_gem_init(dev);

        ret = drm_irq_install(dev);
        if (ret)
                goto cleanup_gem;

        dev->vblank_disable_allowed = 1;

        ret = intel_fbdev_init(dev);
        if (ret)
                goto cleanup_irq;

        drm_kms_helper_poll_init(dev);

        /* We're off and running w/KMS */
        dev_priv->mm.suspended = 0;

        return 0;

cleanup_irq:
        drm_irq_uninstall(dev);
cleanup_gem:
        DRM_LOCK(dev);
        i915_gem_cleanup_ringbuffer(dev);
        DRM_UNLOCK(dev);
        i915_gem_cleanup_aliasing_ppgtt(dev);
cleanup_gem_stolen:
        i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
        return ret;
}
int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
        struct drm_i915_master_private *master_priv;

        master_priv = malloc(sizeof(*master_priv), DRM_MEM_DMA,
            M_NOWAIT | M_ZERO);
        if (!master_priv)
                return -ENOMEM;

        master->driver_priv = master_priv;
        return 0;
}

void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
        struct drm_i915_master_private *master_priv = master->driver_priv;

        if (!master_priv)
                return;

        free(master_priv, DRM_MEM_DMA);

        master->driver_priv = NULL;
}
static int
i915_get_bridge_dev(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv;

        dev_priv = dev->dev_private;

        dev_priv->bridge_dev = intel_gtt_get_bridge_device();
        if (dev_priv->bridge_dev == NULL) {
                DRM_ERROR("bridge device not found\n");
                return -1;
        }
        return 0;
}
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
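/*
 * Editorial note, derived from the code below: on i915G/GM the MCHBAR
 * enable is bit 28 of the DEVEN register (config offset 0x54); on other
 * parts it is bit 0 of the MCHBAR register itself (0x44, or 0x48 on
 * gen4+, where the BAR is 64 bits wide).
 */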
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv;
        device_t vga;
        int reg;
        u32 temp_lo, temp_hi;
        u64 mchbar_addr, temp;

        dev_priv = dev->dev_private;
        reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

        if (INTEL_INFO(dev)->gen >= 4)
                temp_hi = pci_read_config(dev_priv->bridge_dev, reg + 4, 4);
        else
                temp_hi = 0;
        temp_lo = pci_read_config(dev_priv->bridge_dev, reg, 4);
        mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

        /* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef XXX_CONFIG_PNP
        if (mchbar_addr &&
            pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
                return 0;
#endif

        /* Get some space for it */
        vga = device_get_parent(dev->dev);
        dev_priv->mch_res_rid = 0x100;
        dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga),
            dev->dev, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
            MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE);
        if (dev_priv->mch_res == NULL) {
                DRM_ERROR("failed mchbar resource alloc\n");
                return -ENOMEM;
        }

        if (INTEL_INFO(dev)->gen >= 4) {
                temp = rman_get_start(dev_priv->mch_res);
                temp >>= 32;
                pci_write_config(dev_priv->bridge_dev, reg + 4, temp, 4);
        }
        pci_write_config(dev_priv->bridge_dev, reg,
            rman_get_start(dev_priv->mch_res) & UINT32_MAX, 4);
        return 0;
}
static void
intel_setup_mchbar(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv;
        int mchbar_reg;
        u32 temp;
        bool enabled;

        dev_priv = dev->dev_private;
        mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

        dev_priv->mchbar_need_disable = false;

        if (IS_I915G(dev) || IS_I915GM(dev)) {
                temp = pci_read_config(dev_priv->bridge_dev, DEVEN_REG, 4);
                enabled = (temp & DEVEN_MCHBAR_EN) != 0;
        } else {
                temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
                enabled = temp & 1;
        }

        /* If it's already enabled, don't have to do anything */
        if (enabled) {
                DRM_DEBUG("mchbar already enabled\n");
                return;
        }

        if (intel_alloc_mchbar_resource(dev))
                return;

        dev_priv->mchbar_need_disable = true;

        /* Space is allocated or reserved, so enable it. */
        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
                    temp | DEVEN_MCHBAR_EN, 4);
        } else {
                temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
                pci_write_config(dev_priv->bridge_dev, mchbar_reg, temp | 1, 4);
        }
}
static void
intel_teardown_mchbar(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv;
        device_t vga;
        int mchbar_reg;
        u32 temp;

        dev_priv = dev->dev_private;
        mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

        if (dev_priv->mchbar_need_disable) {
                if (IS_I915G(dev) || IS_I915GM(dev)) {
                        temp = pci_read_config(dev_priv->bridge_dev,
                            DEVEN_REG, 4);
                        temp &= ~DEVEN_MCHBAR_EN;
                        pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
                            temp, 4);
                } else {
                        temp = pci_read_config(dev_priv->bridge_dev,
                            mchbar_reg, 4);
                        temp &= ~1;
                        pci_write_config(dev_priv->bridge_dev, mchbar_reg,
                            temp, 4);
                }
        }

        if (dev_priv->mch_res != NULL) {
                vga = device_get_parent(dev->dev);
                BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->dev,
                    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
                BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->dev,
                    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
                dev_priv->mch_res = NULL;
        }
}
/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        const struct intel_device_info *info;
        unsigned long base, size;
        int mmio_bar, ret;

        info = i915_get_device_id(dev->pci_device);

        /* Refuse to load on gen6+ without kms enabled. */
        if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        /* i915 has 4 more counters */
        dev->counters += 4;
        dev->types[6] = _DRM_STAT_IRQ;
        dev->types[7] = _DRM_STAT_PRIMARY;
        dev->types[8] = _DRM_STAT_SECONDARY;
        dev->types[9] = _DRM_STAT_DMA;

        dev_priv = malloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER,
            M_ZERO | M_WAITOK);

        dev->dev_private = (void *)dev_priv;
        dev_priv->dev = dev;
        dev_priv->info = info;

        if (i915_get_bridge_dev(dev)) {
                free(dev_priv, DRM_MEM_DRIVER);
                return -EIO;
        }
        dev_priv->mm.gtt = intel_gtt_get();

        /* Add register map (needed for suspend/resume) */
        mmio_bar = IS_GEN2(dev) ? 1 : 0;
        base = drm_get_resource_start(dev, mmio_bar);
        size = drm_get_resource_len(dev, mmio_bar);

        ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
            _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
        if (ret != 0) {
                DRM_ERROR("Failed to allocate mmio_map: %d\n", ret);
                free(dev_priv, DRM_MEM_DRIVER);
                return ret;
        }

        dev_priv->tq = taskqueue_create("915", M_WAITOK,
            taskqueue_thread_enqueue, &dev_priv->tq);
        taskqueue_start_threads(&dev_priv->tq, 1, PWAIT, "i915 taskq");
        mtx_init(&dev_priv->gt_lock, "915gt", NULL, MTX_DEF);
        mtx_init(&dev_priv->error_lock, "915err", NULL, MTX_DEF);
        mtx_init(&dev_priv->error_completion_lock, "915cmp", NULL, MTX_DEF);
        mtx_init(&dev_priv->rps_lock, "915rps", NULL, MTX_DEF);
        mtx_init(&dev_priv->dpio_lock, "915dpi", NULL, MTX_DEF);

        intel_irq_init(dev);

        intel_setup_mchbar(dev);
        intel_setup_gmbus(dev);
        intel_opregion_setup(dev);

        intel_setup_bios(dev);

        i915_gem_load(dev);

        /* On the 945G/GM, the chipset reports the MSI capability on the
         * integrated graphics even though the support isn't actually there
         * according to the published specs.  It doesn't appear to function
         * correctly in testing on 945G.
         * This may be a side effect of MSI having been made available for PEG
         * and the registers being closely associated.
         *
         * According to chipset errata, on the 965GM, MSI interrupts may
         * be lost or delayed, but we use them anyways to avoid
         * stuck interrupts on some machines.
         */
        if (!IS_I945G(dev) && !IS_I945GM(dev))
                drm_pci_enable_msi(dev);

        /* Init HWS */
        if (!I915_NEED_GFX_HWS(dev)) {
                ret = i915_init_phys_hws(dev);
                if (ret != 0) {
                        drm_rmmap(dev, dev_priv->mmio_map);
                        free(dev_priv, DRM_MEM_DRIVER);
                        return ret;
                }
        }

        mtx_init(&dev_priv->irq_lock, "userirq", NULL, MTX_DEF);

        if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
                dev_priv->num_pipe = 3;
        else if (IS_MOBILE(dev) || !IS_GEN2(dev))
                dev_priv->num_pipe = 2;
        else
                dev_priv->num_pipe = 1;

        ret = drm_vblank_init(dev, dev_priv->num_pipe);
        if (ret)
                goto out_gem_unload;

        /* Start out suspended */
        dev_priv->mm.suspended = 1;

        intel_detect_pch(dev);

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                ret = i915_load_modeset_init(dev);
                if (ret < 0) {
                        DRM_ERROR("failed to init modeset\n");
                        goto out_gem_unload;
                }
        }

        pci_enable_busmaster(dev->dev);

        intel_opregion_init(dev);

        callout_init(&dev_priv->hangcheck_timer, 1);
        callout_reset(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD,
            i915_hangcheck_elapsed, dev);

        intel_gpu_ips_init(dev_priv);

        return 0;

out_gem_unload:
        (void) i915_driver_unload(dev);
        return ret;
}
int i915_driver_unload(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        DRM_LOCK(dev);
        ret = i915_gpu_idle(dev);
        if (ret)
                DRM_ERROR("failed to idle hardware: %d\n", ret);
        i915_gem_retire_requests(dev);
        DRM_UNLOCK(dev);

        intel_teardown_mchbar(dev);

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                intel_fbdev_fini(dev);
                intel_modeset_cleanup(dev);
        }

        /* Free error state after interrupts are fully disabled. */
        callout_stop(&dev_priv->hangcheck_timer);
        callout_drain(&dev_priv->hangcheck_timer);

        i915_destroy_error_state(dev);

        if (dev->msi_enabled)
                drm_pci_disable_msi(dev);

        intel_opregion_fini(dev);

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                DRM_LOCK(dev);
                i915_gem_free_all_phys_object(dev);
                i915_gem_cleanup_ringbuffer(dev);
                i915_gem_context_fini(dev);
                DRM_UNLOCK(dev);
                i915_gem_cleanup_aliasing_ppgtt(dev);

                if (I915_HAS_FBC(dev) && i915_powersave)
                        i915_cleanup_compression(dev);

                drm_mm_takedown(&dev_priv->mm.stolen);

                intel_cleanup_overlay(dev);

                if (!I915_NEED_GFX_HWS(dev))
                        i915_free_hws(dev);
        }

        i915_gem_unload(dev);

        mtx_destroy(&dev_priv->irq_lock);

        if (dev_priv->tq != NULL)
                taskqueue_free(dev_priv->tq);

        bus_generic_detach(dev->dev);
        drm_rmmap(dev, dev_priv->mmio_map);
        intel_teardown_gmbus(dev);

        mtx_destroy(&dev_priv->dpio_lock);
        mtx_destroy(&dev_priv->error_lock);
        mtx_destroy(&dev_priv->error_completion_lock);
        mtx_destroy(&dev_priv->rps_lock);
        free(dev->dev_private, DRM_MEM_DRIVER);

        return 0;
}
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_file_private *i915_file_priv;

        i915_file_priv = malloc(sizeof(*i915_file_priv), DRM_MEM_FILES,
            M_WAITOK | M_ZERO);

        mtx_init(&i915_file_priv->mm.lck, "915fp", NULL, MTX_DEF);
        INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
        file->driver_priv = i915_file_priv;

        drm_gem_names_init(&i915_file_priv->context_idr);

        return 0;
}
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any error introduced by the client.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        /* On gen6+ we refuse to init without kms enabled, but then the drm core
         * goes right around and calls lastclose. Check for this and don't clean
         * up anything. */
        if (!dev_priv)
                return;

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                drm_fb_helper_restore();
                vga_switcheroo_process_delayed_switch();
                return;
        }

        i915_gem_lastclose(dev);

        i915_dma_cleanup(dev);
}
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
        i915_gem_context_close(dev, file_priv);
        i915_gem_release(dev, file_priv);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
{
        struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;

        mtx_destroy(&i915_file_priv->mm.lck);
        free(i915_file_priv, DRM_MEM_FILES);
}
struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_I915_ALLOC, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_FREE, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
};
#ifdef COMPAT_FREEBSD32
extern struct drm_ioctl_desc i915_compat_ioctls[];
extern int i915_compat_ioctls_nr;
#endif
struct drm_driver i915_driver_info = {
        /*
         * FIXME Linux<->FreeBSD: DRIVER_USE_MTRR is commented out on
         * Linux.
         */
        .driver_features =
            DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
            DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,

        .buf_priv_size = sizeof(drm_i915_private_t),
        .load = i915_driver_load,
        .open = i915_driver_open,
        .unload = i915_driver_unload,
        .preclose = i915_driver_preclose,
        .lastclose = i915_driver_lastclose,
        .postclose = i915_driver_postclose,
        .device_is_agp = i915_driver_device_is_agp,
        .master_create = i915_master_create,
        .master_destroy = i915_master_destroy,
        .gem_init_object = i915_gem_init_object,
        .gem_free_object = i915_gem_free_object,
        .gem_pager_ops = &i915_gem_pager_ops,
        .dumb_create = i915_gem_dumb_create,
        .dumb_map_offset = i915_gem_mmap_gtt,
        .dumb_destroy = i915_gem_dumb_destroy,
        .sysctl_init = i915_sysctl_init,
        .sysctl_cleanup = i915_sysctl_cleanup,

        .ioctls = i915_ioctls,
#ifdef COMPAT_FREEBSD32
        .compat_ioctls = i915_compat_ioctls,
        .num_compat_ioctls = &i915_compat_ioctls_nr,
#endif
        .num_ioctls = ARRAY_SIZE(i915_ioctls),

        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
        .major = DRIVER_MAJOR,
        .minor = DRIVER_MINOR,
        .patchlevel = DRIVER_PATCHLEVEL,
};
/*
 * This is really ugly: because old userspace abused the Linux AGP interface
 * to manage the GTT, we need to claim that all Intel devices are AGP;
 * otherwise the drm core refuses to initialize the AGP support code.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
        return 1;
}