/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm_crtc_helper.h>
#include <dev/drm2/drm_fb_helper.h>
#include <dev/drm2/i915/intel_drv.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])

#define BEGIN_LP_RING(n) \
	intel_ring_begin(LP_RING(dev_priv), (n))

#define OUT_RING(x) \
	intel_ring_emit(LP_RING(dev_priv), x)

#define ADVANCE_LP_RING() \
	intel_ring_advance(LP_RING(dev_priv))
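
/*
 * Note: these legacy ring macros wrap the intel_ring_begin() /
 * intel_ring_emit() / intel_ring_advance() protocol and assume a local
 * "dev_priv" is in scope at every use site.  "LP" is the historical
 * "low priority" name for what is now the render (RCS) ring.
 */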
/**
 * Lock test for when it's just for synchronization of ring access.
 *
 * In that case, we don't need to do it when GEM is initialized as nobody else
 * has access to the ring.
 */
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
	if (LP_RING(dev->dev_private)->obj == NULL)			\
		LOCK_TEST_WITH_RETURN(dev, file);			\
} while (0)
static inline u32
intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
{
	if (I915_NEED_GFX_HWS(dev_priv->dev))
		return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
	else
		return intel_read_status_page(LP_RING(dev_priv), reg);
}
#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_BREADCRUMB_INDEX		0x21
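
/*
 * The breadcrumb is a monotonically increasing sequence number that the
 * ring writes into dword 0x21 (byte offset 0x84) of the hardware status
 * page via MI_STORE_DWORD_INDEX; READ_BREADCRUMB() reads it back to see
 * how far the GPU has progressed.
 */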
void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}
}
static void i915_write_hws_pga(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}
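
/*
 * On gen4+ the (busaddr >> 28) & 0xf0 fold above places physical address
 * bits 35:32 into bits 7:4 of HWS_PGA, so a status page located above
 * 4GB can still be programmed.
 */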
/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	if (ring->status_page.gfx_addr) {
		ring->status_page.gfx_addr = 0;
		pmap_unmapdev((vm_offset_t)dev_priv->dri1.gfx_hws_cpu_addr,
		    PAGE_SIZE);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	DRM_LOCK(dev);
	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
	DRM_UNLOCK(dev);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		if (LP_RING(dev_priv)->obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ret = intel_render_ring_init_dri(dev,
						 init->ring_start,
						 init->ring_size);
		if (ret) {
			i915_dma_cleanup(dev);
			return ret;
		}
	}

	dev_priv->dri1.cpp = init->cpp;
	dev_priv->dri1.back_offset = init->back_offset;
	dev_priv->dri1.front_offset = init->front_offset;
	dev_priv->dri1.current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->dri1.allow_batchbuffer = 1;

	return 0;
}
static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("%s\n", __func__);

	if (ring->virtual_start == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
			 ring->status_page.page_addr);
	if (ring->status_page.gfx_addr != 0)
		intel_ring_setup_status_page(ring);
	else
		i915_write_hws_pga(dev);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");

	return 0;
}
static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}
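
/*
 * Worked example: the dword 0x54000004 has bits 31:29 == 0x2 (a 2D
 * command), so validate_cmd() returns (0x04 & 0xff) + 2 == 6, i.e. the
 * next command to check starts 6 dwords later.  An illegal opcode
 * returns 0, which aborts validation of the rest of the buffer.
 */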
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
		return -EINVAL;

	for (i = 0; i < dwords;) {
		int sz = validate_cmd(buffer[i]);
		if (sz == 0 || i + sz > dwords)
			return -EINVAL;
		i += sz;
	}

	ret = BEGIN_LP_RING((dwords+1)&~1);
	if (ret)
		return ret;

	for (i = 0; i < dwords; i++)
		OUT_RING(buffer[i]);
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}
int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ret = BEGIN_LP_RING(4);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		ret = BEGIN_LP_RING(6);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	dev_priv->dri1.counter++;
	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
		dev_priv->dri1.counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;
	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			ret = BEGIN_LP_RING(2);
			if (ret)
				return ret;

			if (INTEL_INFO(dev)->gen >= 4) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
		} else {
			ret = BEGIN_LP_RING(4);
			if (ret)
				return ret;

			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
		ADVANCE_LP_RING();
	}

	if (IS_G4X(dev) || IS_GEN5(dev)) {
		if (BEGIN_LP_RING(2) == 0) {
			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
			OUT_RING(MI_NOOP);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
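
/*
 * Dispatching with MI_BATCH_NON_SECURE (or the gen4+ _I965 variant)
 * makes the hardware treat the buffer as untrusted, so the same
 * command restrictions that validate_cmd() models are enforced while
 * the batch executes.
 */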
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	int ret;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
			 __func__,
			 dev_priv->dri1.current_page,
			 master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;

	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);

	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->dri1.current_page == 0) {
		OUT_RING(dev_priv->dri1.back_offset);
		dev_priv->dri1.current_page = 1;
	} else {
		OUT_RING(dev_priv->dri1.front_offset);
		dev_priv->dri1.current_page = 0;
	}
	OUT_RING(0);

	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);

	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
	return 0;
}
static int i915_quiescent(struct drm_device *dev)
{
	i915_kernel_lost_context(dev);
	return intel_ring_idle(LP_RING(dev->dev_private));
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_LOCK(dev);
	ret = i915_quiescent(dev);
	DRM_UNLOCK(dev);

	return ret;
}
int i915_batchbuffer(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;
	struct drm_clip_rect *cliprects = NULL;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv->dri1.allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
			 batch->start, batch->used, batch->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects < 0)
		return -EINVAL;

	if (batch->num_cliprects) {
		cliprects = malloc(batch->num_cliprects *
				   sizeof(struct drm_clip_rect),
				   DRM_MEM_DMA, M_WAITOK | M_ZERO);
		if (cliprects == NULL)
			return -ENOMEM;

		ret = copy_from_user(cliprects, batch->cliprects,
				     batch->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_free;
		}
	}

	DRM_LOCK(dev);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	DRM_UNLOCK(dev);

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	free(cliprects, DRM_MEM_DMA);

	return ret;
}
int i915_cmdbuffer(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
			 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	batch_data = malloc(cmdbuf->sz, DRM_MEM_DMA, M_WAITOK);
	if (batch_data == NULL)
		return -ENOMEM;

	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
	if (ret != 0) {
		ret = -EFAULT;
		goto fail_batch_free;
	}

	if (cmdbuf->num_cliprects) {
		cliprects = malloc(cmdbuf->num_cliprects *
		    sizeof(struct drm_clip_rect), DRM_MEM_DMA, M_WAITOK | M_ZERO);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto fail_batch_free;
		}

		ret = copy_from_user(cliprects, cmdbuf->cliprects,
				     cmdbuf->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_clip_free;
		}
	}

	DRM_LOCK(dev);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	DRM_UNLOCK(dev);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	free(cliprects, DRM_MEM_DMA);
fail_batch_free:
	free(batch_data, DRM_MEM_DMA);

	return ret;
}
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG_DRIVER("\n");

	dev_priv->dri1.counter++;
	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
		dev_priv->dri1.counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

	return dev_priv->dri1.counter;
}
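
/*
 * Note that the counter deliberately wraps back inside the positive
 * signed-int range: i915_wait_irq() below compares it with ">= irq_nr"
 * as an int.  MI_USER_INTERRUPT raises the IRQ that wakes any sleeper
 * once the breadcrumb store has retired.
 */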
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
			 READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	if (ring->irq_get(ring)) {
		mtx_lock(&dev_priv->irq_lock);
		while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) {
			ret = -msleep(&ring->irq_queue, &dev_priv->irq_lock,
			    PCATCH, "915wtq", 3 * DRM_HZ);
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
		}
		mtx_unlock(&dev_priv->irq_lock);
		ring->irq_put(ring);
	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
		ret = -EBUSY;

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
	}

	return ret;
}
/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_LOCK(dev);
	result = i915_emit_irq(dev);
	DRM_UNLOCK(dev);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}
/* Doesn't need the hardware lock.
 */
static int i915_irq_wait(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}
static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}
/**
 * Schedule buffer swap at given vertical blank.
 */
static int i915_vblank_swap(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed.  The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering.  The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}
static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	DRM_DEBUG_DRIVER("%s\n", __func__);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_LOCK(dev);
	ret = i915_dispatch_flip(dev);
	DRM_UNLOCK(dev);

	return ret;
}
int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->irq_enabled ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_ring_initialized(&dev_priv->ring[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_ring_initialized(&dev_priv->ring[BCS]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
		break;
	case I915_PARAM_HAS_WAIT_TIMEOUT:
		value = 1;
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915_semaphore_is_enabled(dev);
		break;
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
		value = 1;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		/* FIXME Linux<->FreeBSD: Is there a better choice than
		 * curthread? */
		value = DRM_SUSER(curthread);
		break;
	case I915_PARAM_HAS_PINNED_BATCHES:
		value = 1;
		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}
static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	return 0;
}
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
	struct intel_ring_buffer *ring;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		WARN(1, "tried to set status page when mode setting active\n");
		return 0;
	}

	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

	ring = LP_RING(dev_priv);
	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->dri1.gfx_hws_cpu_addr =
	    pmap_mapdev_attr(dev_priv->mm.gtt_base_addr + hws->addr, PAGE_SIZE,
		VM_MEMATTR_WRITE_COMBINING);
	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
		i915_dma_cleanup(dev);
		ring->status_page.gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
				" G33 hw status page\n");
		return -ENOMEM;
	}

	memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);

	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
			 ring->status_page.gfx_addr);
	DRM_DEBUG_DRIVER("load hws at %p\n",
			 ring->status_page.page_addr);
	return 0;
}
static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_find_dbsf(0, 0, 0, 0);
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
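
/*
 * MCHBAR is the memory controller hub's register window.  MCHBAR_I915 /
 * MCHBAR_I965 are the config-space offsets of the BAR itself in the
 * host bridge (device 0:0.0); on i915G/GM the enable bit lives in the
 * DEVEN register rather than in bit 0 of the BAR.
 */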
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	device_t vga;

	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	vga = device_get_parent(dev->dev);
	dev_priv->mch_res_rid = 0x100;
	dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga),
	    dev->dev, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
	    MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE);
	if (dev_priv->mch_res == NULL) {
		DRM_DEBUG_DRIVER("failed bus alloc\n");
		return -ENOMEM;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
		    upper_32_bits(rman_get_start(dev_priv->mch_res)));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
	    lower_32_bits(rman_get_start(dev_priv->mch_res)));
	return 0;
}
/* Setup MCHBAR if possible, return true if we should disable it again */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
		    temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}
static void
intel_teardown_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	device_t vga;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res != NULL) {
		vga = device_get_parent(dev->dev);
		BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->dev,
		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
		BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->dev,
		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
		dev_priv->mch_res = NULL;
	}
}
/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_err("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops);
	if (ret)
		goto cleanup_vga_client;

	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	intel_modeset_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_gem_stolen;

	intel_modeset_gem_init(dev);

	TASK_INIT(&dev_priv->console_resume_work, 0, intel_console_resume,
	    dev->dev_private);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_gem;

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_irq;

	drm_kms_helper_poll_init(dev);

	/* We're off and running w/KMS */
	dev_priv->mm.suspended = 0;

	return 0;

cleanup_irq:
	drm_irq_uninstall(dev);
cleanup_gem:
	DRM_LOCK(dev);
	i915_gem_cleanup_ringbuffer(dev);
	DRM_UNLOCK(dev);
	i915_gem_cleanup_aliasing_ppgtt(dev);
cleanup_gem_stolen:
	i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	intel_free_parsed_bios_data(dev);
	return ret;
}
int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv;

	master_priv = malloc(sizeof(*master_priv), DRM_MEM_DMA, M_WAITOK | M_ZERO);
	if (!master_priv)
		return -ENOMEM;

	master->driver_priv = master_priv;
	return 0;
}

void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv = master->driver_priv;

	if (!master_priv)
		return;

	free(master_priv, DRM_MEM_DMA);

	master->driver_priv = NULL;
}
static void
i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
		unsigned long size)
{
	dev_priv->mm.gtt_mtrr = -1;

#if defined(CONFIG_X86_PAT)
	if (cpu_has_pat)
		return;
#endif

	/* Set up a WC MTRR for non-PAT systems.  This is more common than
	 * one would think, because the kernel disables PAT on first
	 * generation Core chips because WC PAT gets overridden by a UC
	 * MTRR if present.  Even if a UC MTRR isn't present.
	 */
	dev_priv->mm.gtt_mtrr = drm_mtrr_add(base, size, DRM_MTRR_WC);
	if (dev_priv->mm.gtt_mtrr < 0) {
		DRM_INFO("MTRR allocation failed.  Graphics "
			 "performance may suffer.\n");
	}
}
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->dev->pdev;
	bool primary;

	ap = alloc_apertures(1);
	if (!ap)
		return;

	ap->ranges[0].base = dev_priv->mm.gtt->gma_bus_addr;
	ap->ranges[0].size =
		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	free(ap, DRM_MEM_KMS);
}
static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = dev_priv->info;

#define DEV_INFO_FLAG(name) info->name ? #name "," : ""
#define DEV_INFO_SEP ,
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
			 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
			 info->gen,
			 dev_priv->dev->pci_device,
			 DEV_INFO_FLAGS);
#undef DEV_INFO_FLAG
#undef DEV_INFO_SEP
}
/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	const struct intel_device_info *info;
	int ret = 0, mmio_bar, mmio_size;
	uint32_t aperture_size;

	info = i915_get_device_id(dev->pci_device);

	/* Refuse to load on gen6+ without kms enabled. */
	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = malloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER,
	    M_WAITOK | M_ZERO);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;
	dev_priv->info = info;

	i915_dump_device_info(dev_priv);

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto put_bridge;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kick_out_firmware_fb(dev_priv);

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	ret = drm_addmap(dev,
	    drm_get_resource_start(dev, mmio_bar), mmio_size,
	    _DRM_REGISTERS, _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
	if (ret != 0) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_gmch;
	}

	aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
	dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr;

	dev_priv->mm.gtt_mapping =
	    io_mapping_create_wc(dev_priv->mm.gtt_base_addr,
		aperture_size);
	if (dev_priv->mm.gtt_mapping == NULL) {
		ret = -EIO;
		goto out_rmmap;
	}

	i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr,
	    aperture_size);

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = taskqueue_create("915", M_WAITOK,
	    taskqueue_thread_enqueue, &dev_priv->wq);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}
	taskqueue_start_threads(&dev_priv->wq, 1, PWAIT, "i915 taskq");

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_irq_init(dev);
	intel_gt_init(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		drm_pci_enable_msi(dev);

	mtx_init(&dev_priv->irq_lock, "userirq", NULL, MTX_DEF);
	mtx_init(&dev_priv->error_lock, "915err", NULL, MTX_DEF);
	mtx_init(&dev_priv->rps.lock, "915rps", NULL, MTX_DEF);
	sx_init(&dev_priv->dpio_lock, "915dpi");

	sx_init(&dev_priv->rps.hw_lock, "915rpshw");

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
		dev_priv->num_pipe = 3;
	else if (IS_MOBILE(dev) || !IS_GEN2(dev))
		dev_priv->num_pipe = 2;
	else
		dev_priv->num_pipe = 1;

	ret = drm_vblank_init(dev, dev_priv->num_pipe);
	if (ret)
		goto out_gem_unload;

	/* Start out suspended */
	dev_priv->mm.suspended = 1;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_gem_unload;
		}
	}

	pci_enable_busmaster(dev->dev);

	i915_setup_sysfs(dev);

	/* Must be done after probing outputs */
	intel_opregion_init(dev);

	acpi_video_register();

	callout_init(&dev_priv->hangcheck_timer, 1);
	callout_reset(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD,
	    i915_hangcheck_elapsed, dev);

	intel_gpu_ips_init(dev_priv);

	return 0;

out_gem_unload:
	EVENTHANDLER_DEREGISTER(vm_lowmem, dev_priv->mm.inactive_shrinker);

	free_completion(&dev_priv->error_completion);
	mtx_destroy(&dev_priv->irq_lock);
	mtx_destroy(&dev_priv->error_lock);
	mtx_destroy(&dev_priv->rps.lock);
	sx_destroy(&dev_priv->dpio_lock);

	sx_destroy(&dev_priv->rps.hw_lock);

	if (dev->msi_enabled)
		drm_pci_disable_msi(dev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);
	if (dev_priv->wq != NULL) {
		taskqueue_free(dev_priv->wq);
		dev_priv->wq = NULL;
	}
out_mtrrfree:
	if (dev_priv->mm.gtt_mtrr >= 0) {
		drm_mtrr_del(dev_priv->mm.gtt_mtrr,
		    dev_priv->mm.gtt_base_addr,
		    aperture_size,
		    DRM_MTRR_WC);
		dev_priv->mm.gtt_mtrr = -1;
	}
	io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
	if (dev_priv->mmio_map != NULL)
		drm_rmmap(dev, dev_priv->mmio_map);
put_gmch:
	i915_gem_gtt_fini(dev);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	free(dev_priv, DRM_MEM_DRIVER);
	return ret;
}
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	intel_gpu_ips_teardown();

	i915_teardown_sysfs(dev);

	if (dev_priv->mm.inactive_shrinker.shrink)
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);

	intel_free_parsed_bios_data(dev);

	DRM_LOCK(dev);
	ret = i915_gpu_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
	i915_gem_retire_requests(dev);
	DRM_UNLOCK(dev);

	/* Cancel the retire work handler, which should be idle now. */
	while (taskqueue_cancel_timeout(dev_priv->wq,
	    &dev_priv->mm.retire_work, NULL) != 0)
		taskqueue_drain_timeout(dev_priv->wq,
		    &dev_priv->mm.retire_work);

	io_mapping_free(dev_priv->mm.gtt_mapping);

	if (dev_priv->mm.gtt_mtrr >= 0) {
		drm_mtrr_del(dev_priv->mm.gtt_mtrr,
		    dev_priv->mm.gtt_base_addr,
		    dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE,
		    DRM_MTRR_WC);
		dev_priv->mm.gtt_mtrr = -1;
	}

	acpi_video_unregister();

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fbdev_fini(dev);
		intel_modeset_cleanup(dev);
		while (taskqueue_cancel(dev_priv->wq,
		    &dev_priv->console_resume_work, NULL) != 0)
			taskqueue_drain(dev_priv->wq,
			    &dev_priv->console_resume_work);

		/*
		 * free the memory space allocated for the child device
		 * config parsed from VBT
		 */
		if (dev_priv->child_dev && dev_priv->child_dev_num) {
			free(dev_priv->child_dev, DRM_MEM_DRIVER);
			dev_priv->child_dev = NULL;
			dev_priv->child_dev_num = 0;
		}

		vga_switcheroo_unregister_client(dev->pdev);
		vga_client_register(dev->pdev, NULL, NULL, NULL);
	}

	/* Free error state after interrupts are fully disabled. */
	callout_stop(&dev_priv->hangcheck_timer);
	callout_drain(&dev_priv->hangcheck_timer);
	while (taskqueue_cancel(dev_priv->wq, &dev_priv->error_work, NULL) != 0)
		taskqueue_drain(dev_priv->wq, &dev_priv->error_work);
	i915_destroy_error_state(dev);

	if (dev->msi_enabled)
		drm_pci_disable_msi(dev);

	intel_opregion_fini(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Flush any outstanding unpin_work. */
		taskqueue_drain_all(dev_priv->wq);

		DRM_LOCK(dev);
		i915_gem_free_all_phys_object(dev);
		i915_gem_cleanup_ringbuffer(dev);
		i915_gem_context_fini(dev);
		DRM_UNLOCK(dev);
		i915_gem_cleanup_aliasing_ppgtt(dev);
		i915_gem_cleanup_stolen(dev);
		drm_mm_takedown(&dev_priv->mm.stolen);

		intel_cleanup_overlay(dev);

		if (!I915_NEED_GFX_HWS(dev))
			i915_free_hws(dev);
	}

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

	/*
	 * NOTE Linux<->FreeBSD: Free mmio_map after
	 * intel_teardown_gmbus(), because, on FreeBSD,
	 * intel_i2c_reset() is called during iicbus_detach().
	 */
	if (dev_priv->mmio_map != NULL)
		drm_rmmap(dev, dev_priv->mmio_map);

	/*
	 * NOTE Linux<->FreeBSD: Linux forgets to call
	 * i915_gem_gtt_fini(), causing memory leaks.
	 */
	i915_gem_gtt_fini(dev);

	if (dev_priv->wq != NULL)
		taskqueue_free(dev_priv->wq);

	free_completion(&dev_priv->error_completion);
	mtx_destroy(&dev_priv->irq_lock);
	mtx_destroy(&dev_priv->error_lock);
	mtx_destroy(&dev_priv->rps.lock);
	sx_destroy(&dev_priv->dpio_lock);

	sx_destroy(&dev_priv->rps.hw_lock);

	pci_dev_put(dev_priv->bridge_dev);

	free(dev->dev_private, DRM_MEM_DRIVER);

	return 0;
}
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;

	DRM_DEBUG_DRIVER("\n");
	file_priv = malloc(sizeof(*file_priv), DRM_MEM_FILES, M_WAITOK | M_ZERO);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;

	mtx_init(&file_priv->mm.lock, "915fp", NULL, MTX_DEF);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	drm_gem_names_init(&file_priv->context_idr);

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* On gen6+ we refuse to init without kms enabled, but then the drm core
	 * goes right around and calls lastclose. Check for this and don't clean
	 * up anything. */
	if (!dev_priv)
		return;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fb_restore_mode(dev);
		vga_switcheroo_process_delayed_switch();
		return;
	}

	i915_gem_lastclose(dev);

	i915_dma_cleanup(dev);
}
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	i915_gem_context_close(dev, file_priv);
	i915_gem_release(dev, file_priv);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	mtx_destroy(&file_priv->mm.lock);
	free(file_priv, DRM_MEM_FILES);
}
struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
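
/*
 * Each entry's position corresponds to its DRM_I915_* command number
 * (the DRM_IOCTL_DEF_DRV() macro derives the table slot from the ioctl
 * definition), and i915_max_ioctl tells the DRM core how many driver
 * ioctls this table provides.
 */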
/*
 * This is really ugly: Because old userspace abused the linux agp interface to
 * manage the gtt, we need to claim that all intel devices are agp, because
 * otherwise the drm core refuses to initialize the agp support code.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}