1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <dev/drm2/drmP.h>
33 #include <dev/drm2/drm.h>
34 #include <dev/drm2/i915/i915_drm.h>
35 #include <dev/drm2/i915/i915_drv.h>
36 #include <dev/drm2/i915/intel_drv.h>
37 #include <sys/sched.h>
38 #include <sys/sf_buf.h>
39 #include <sys/sleepqueue.h>
41 static void i915_capture_error_state(struct drm_device *dev);
42 static u32 ring_last_seqno(struct intel_ring_buffer *ring);
44 /* For display hotplug interrupt */
46 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
48 if ((dev_priv->irq_mask & mask) != 0) {
49 dev_priv->irq_mask &= ~mask;
50 I915_WRITE(DEIMR, dev_priv->irq_mask);
56 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
58 if ((dev_priv->irq_mask & mask) != mask) {
59 dev_priv->irq_mask |= mask;
60 I915_WRITE(DEIMR, dev_priv->irq_mask);
66 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
68 if ((dev_priv->pipestat[pipe] & mask) != mask) {
69 u32 reg = PIPESTAT(pipe);
71 dev_priv->pipestat[pipe] |= mask;
72 /* Enable the interrupt, clear any pending status */
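/* PIPESTAT keeps the enable bits in its upper 16 bits and the matching
 * status bits in the lower 16; status bits are write-one-to-clear, so
 * OR-ing in (mask >> 16) acks any stale status for the events being
 * enabled. */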
73 I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
79 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
81 if ((dev_priv->pipestat[pipe] & mask) != 0) {
82 u32 reg = PIPESTAT(pipe);
84 dev_priv->pipestat[pipe] &= ~mask;
85 I915_WRITE(reg, dev_priv->pipestat[pipe]);
91 * intel_enable_asle - enable ASLE interrupt for OpRegion
93 void intel_enable_asle(struct drm_device *dev)
95 drm_i915_private_t *dev_priv = dev->dev_private;
97 /* FIXME: opregion/asle for VLV */
98 if (IS_VALLEYVIEW(dev))
101 mtx_lock(&dev_priv->irq_lock);
103 if (HAS_PCH_SPLIT(dev))
104 ironlake_enable_display_irq(dev_priv, DE_GSE);
106 i915_enable_pipestat(dev_priv, 1,
107 PIPE_LEGACY_BLC_EVENT_ENABLE);
108 if (INTEL_INFO(dev)->gen >= 4)
109 i915_enable_pipestat(dev_priv, 0,
110 PIPE_LEGACY_BLC_EVENT_ENABLE);
113 mtx_unlock(&dev_priv->irq_lock);
117 * i915_pipe_enabled - check if a pipe is enabled
119 * @pipe: pipe to check
121 * Reading certain registers when the pipe is disabled can hang the chip.
122 * Use this routine to make sure the PLL is running and the pipe is active
123 * before reading such registers if unsure.
126 i915_pipe_enabled(struct drm_device *dev, int pipe)
128 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
129 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
132 /* Called from drm generic code, passed a 'crtc', which
133 * we use as a pipe index
136 i915_get_vblank_counter(struct drm_device *dev, int pipe)
138 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
139 unsigned long high_frame;
140 unsigned long low_frame;
141 u32 high1, high2, low;
143 if (!i915_pipe_enabled(dev, pipe)) {
144 DRM_DEBUG("trying to get vblank count for disabled "
145 "pipe %c\n", pipe_name(pipe));
149 high_frame = PIPEFRAME(pipe);
150 low_frame = PIPEFRAMEPIXEL(pipe);
153 * High & low register fields aren't synchronized, so make sure
154 * we get a low value that's stable across two reads of the high register.
158 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
159 low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
160 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
161 } while (high1 != high2);
163 high1 >>= PIPE_FRAME_HIGH_SHIFT;
164 low >>= PIPE_FRAME_LOW_SHIFT;
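/* The frame count is split across two registers: PIPEFRAME carries the
 * high bits and PIPEFRAMEPIXEL the low 8 bits (next to the pixel
 * counter), so the full value is (high1 << 8) | low. */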
165 return (high1 << 8) | low;
169 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
171 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
172 int reg = PIPE_FRMCOUNT_GM45(pipe);
174 if (!i915_pipe_enabled(dev, pipe)) {
175 DRM_DEBUG("i915: trying to get vblank count for disabled "
176 "pipe %c\n", pipe_name(pipe));
180 return I915_READ(reg);
184 i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
185 int *vpos, int *hpos)
187 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
188 u32 vbl = 0, position = 0;
189 int vbl_start, vbl_end, htotal, vtotal;
193 if (!i915_pipe_enabled(dev, pipe)) {
194 DRM_DEBUG("i915: trying to get scanoutpos for disabled "
195 "pipe %c\n", pipe_name(pipe));
200 vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
202 if (INTEL_INFO(dev)->gen >= 4) {
203 /* No obvious pixelcount register. Only query vertical
204 * scanout position from Display scan line register.
206 position = I915_READ(PIPEDSL(pipe));
208 /* Decode into vertical scanout position. Don't have
209 * horizontal scanout position.
211 *vpos = position & 0x1fff;
214 /* Have access to pixelcount since start of frame.
215 * We can split this into vertical and horizontal components.
218 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
220 htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
221 *vpos = position / htotal;
222 *hpos = position - (*vpos * htotal);
225 /* Query vblank area. */
226 vbl = I915_READ(VBLANK(pipe));
228 /* Test position against vblank region. */
229 vbl_start = vbl & 0x1fff;
230 vbl_end = (vbl >> 16) & 0x1fff;
232 if ((*vpos < vbl_start) || (*vpos > vbl_end))
235 /* Inside "upper part" of vblank area? Apply corrective offset: */
236 if (in_vbl && (*vpos >= vbl_start))
237 *vpos = *vpos - vtotal;
239 /* Readouts valid? */
241 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
245 ret |= DRM_SCANOUTPOS_INVBL;
251 i915_get_vblank_timestamp(struct drm_device *dev, int pipe, int *max_error,
252 struct timeval *vblank_time, unsigned flags)
254 struct drm_i915_private *dev_priv = dev->dev_private;
255 struct drm_crtc *crtc;
257 if (pipe < 0 || pipe >= dev_priv->num_pipe) {
258 DRM_ERROR("Invalid crtc %d\n", pipe);
262 /* Get drm_crtc to timestamp: */
263 crtc = intel_get_crtc_for_pipe(dev, pipe);
265 DRM_ERROR("Invalid crtc %d\n", pipe);
269 if (!crtc->enabled) {
271 DRM_DEBUG("crtc %d is disabled\n", pipe);
276 /* Helper routine in DRM core does all the work: */
277 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
283 * Handle hotplug events outside the interrupt handler proper.
286 i915_hotplug_work_func(void *context, int pending)
288 drm_i915_private_t *dev_priv = context;
289 struct drm_device *dev = dev_priv->dev;
290 struct drm_mode_config *mode_config;
291 struct intel_encoder *encoder;
293 DRM_DEBUG("running encoder hotplug functions\n");
297 mode_config = &dev->mode_config;
299 sx_xlock(&mode_config->mutex);
300 DRM_DEBUG_KMS("running encoder hotplug functions\n");
302 list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
303 if (encoder->hot_plug)
304 encoder->hot_plug(encoder);
306 sx_xunlock(&mode_config->mutex);
308 /* Just fire off a uevent and let userspace tell us what to do */
310 drm_helper_hpd_irq_event(dev);
314 static void i915_handle_rps_change(struct drm_device *dev)
316 drm_i915_private_t *dev_priv = dev->dev_private;
317 u32 busy_up, busy_down, max_avg, min_avg;
318 u8 new_delay = dev_priv->cur_delay;
320 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
321 busy_up = I915_READ(RCPREVBSYTUPAVG);
322 busy_down = I915_READ(RCPREVBSYTDNAVG);
323 max_avg = I915_READ(RCBMAXAVG);
324 min_avg = I915_READ(RCBMINAVG);
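/* Note the inverted sense of the Ironlake delay values: a smaller delay
 * means a higher frequency, so max_delay is numerically the lowest
 * permitted value and the clamps below compare accordingly. */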
326 /* Handle RCS change request from hw */
327 if (busy_up > max_avg) {
328 if (dev_priv->cur_delay != dev_priv->max_delay)
329 new_delay = dev_priv->cur_delay - 1;
330 if (new_delay < dev_priv->max_delay)
331 new_delay = dev_priv->max_delay;
332 } else if (busy_down < min_avg) {
333 if (dev_priv->cur_delay != dev_priv->min_delay)
334 new_delay = dev_priv->cur_delay + 1;
335 if (new_delay > dev_priv->min_delay)
336 new_delay = dev_priv->min_delay;
339 if (ironlake_set_drps(dev, new_delay))
340 dev_priv->cur_delay = new_delay;
345 static void notify_ring(struct drm_device *dev,
346 struct intel_ring_buffer *ring)
348 struct drm_i915_private *dev_priv = dev->dev_private;
350 if (ring->obj == NULL)
353 CTR2(KTR_DRM, "request_complete %s %d", ring->name,
354 ring->get_seqno(ring));
356 mtx_lock(&dev_priv->irq_lock);
358 mtx_unlock(&dev_priv->irq_lock);
360 if (i915_enable_hangcheck) {
361 dev_priv->hangcheck_count = 0;
362 callout_schedule(&dev_priv->hangcheck_timer,
363 DRM_I915_HANGCHECK_PERIOD);
368 gen6_pm_rps_work_func(void *arg, int pending)
370 struct drm_device *dev;
371 drm_i915_private_t *dev_priv;
375 dev_priv = (drm_i915_private_t *)arg;
377 new_delay = dev_priv->cur_delay;
379 mtx_lock(&dev_priv->rps_lock);
380 pm_iir = dev_priv->pm_iir;
381 dev_priv->pm_iir = 0;
382 pm_imr = I915_READ(GEN6_PMIMR);
383 I915_WRITE(GEN6_PMIMR, 0);
384 mtx_unlock(&dev_priv->rps_lock);
390 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
391 if (dev_priv->cur_delay != dev_priv->max_delay)
392 new_delay = dev_priv->cur_delay + 1;
393 if (new_delay > dev_priv->max_delay)
394 new_delay = dev_priv->max_delay;
395 } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
396 gen6_gt_force_wake_get(dev_priv);
397 if (dev_priv->cur_delay != dev_priv->min_delay)
398 new_delay = dev_priv->cur_delay - 1;
399 if (new_delay < dev_priv->min_delay) {
400 new_delay = dev_priv->min_delay;
401 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
402 I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
403 ((new_delay << 16) & 0x3f0000));
405 /* Make sure we continue to get down interrupts
406 * until we hit the minimum frequency */
407 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
408 I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
410 gen6_gt_force_wake_put(dev_priv);
413 gen6_set_rps(dev, new_delay);
414 dev_priv->cur_delay = new_delay;
417 * rps_lock not held here because clearing is non-destructive. There is
418 * an *extremely* unlikely race with gen6_rps_enable() that is prevented
419 * by holding struct_mutex for the duration of the write.
424 static void snb_gt_irq_handler(struct drm_device *dev,
425 struct drm_i915_private *dev_priv,
429 if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
430 GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
431 notify_ring(dev, &dev_priv->rings[RCS]);
432 if (gt_iir & GEN6_BSD_USER_INTERRUPT)
433 notify_ring(dev, &dev_priv->rings[VCS]);
434 if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
435 notify_ring(dev, &dev_priv->rings[BCS]);
437 if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
438 GT_GEN6_BSD_CS_ERROR_INTERRUPT |
439 GT_RENDER_CS_ERROR_INTERRUPT)) {
440 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
441 i915_handle_error(dev, false);
445 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
450 * IIR bits should never already be set because IMR should
451 * prevent an interrupt from being shown in IIR. The warning
452 * flags the case where we've unsafely cleared
453 * dev_priv->pm_iir. Although missing an interrupt of the same
454 * type is not a problem, it points to a bug in the logic.
456 * The mask bit in IMR is cleared by rps_work.
459 mtx_lock(&dev_priv->rps_lock);
460 if (dev_priv->pm_iir & pm_iir)
461 printf("Missed a PM interrupt\n");
462 dev_priv->pm_iir |= pm_iir;
463 I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
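/* The posting read flushes the posted MMIO write so the new IMR value
 * reaches the hardware before rps_lock is dropped. */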
464 POSTING_READ(GEN6_PMIMR);
465 mtx_unlock(&dev_priv->rps_lock);
467 taskqueue_enqueue(dev_priv->tq, &dev_priv->rps_task);
470 static void valleyview_irq_handler(void *arg)
472 struct drm_device *dev = (struct drm_device *) arg;
473 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
474 u32 iir, gt_iir, pm_iir;
476 u32 pipe_stats[I915_MAX_PIPES];
481 atomic_inc(&dev_priv->irq_received);
483 vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS |
484 PIPE_VBLANK_INTERRUPT_STATUS;
487 iir = I915_READ(VLV_IIR);
488 gt_iir = I915_READ(GTIIR);
489 pm_iir = I915_READ(GEN6_PMIIR);
491 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
494 snb_gt_irq_handler(dev, dev_priv, gt_iir);
496 mtx_lock(&dev_priv->irq_lock);
497 for_each_pipe(pipe) {
498 int reg = PIPESTAT(pipe);
499 pipe_stats[pipe] = I915_READ(reg);
502 * Clear the PIPE*STAT regs before the IIR
504 if (pipe_stats[pipe] & 0x8000ffff) {
505 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
506 DRM_DEBUG_DRIVER("pipe %c underrun\n",
508 I915_WRITE(reg, pipe_stats[pipe]);
511 mtx_unlock(&dev_priv->irq_lock);
513 /* Consume port. Then clear IIR or we'll miss events */
514 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
515 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
517 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
519 if (hotplug_status & dev_priv->hotplug_supported_mask)
520 taskqueue_enqueue(dev_priv->tq,
521 &dev_priv->hotplug_task);
523 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
524 I915_READ(PORT_HOTPLUG_STAT);
528 if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) {
529 drm_handle_vblank(dev, 0);
531 intel_finish_page_flip(dev, 0);
534 if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) {
535 drm_handle_vblank(dev, 1);
537 intel_finish_page_flip(dev, 1);
540 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
543 if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
544 gen6_queue_rps_work(dev_priv, pm_iir);
546 I915_WRITE(GTIIR, gt_iir);
547 I915_WRITE(GEN6_PMIIR, pm_iir);
548 I915_WRITE(VLV_IIR, iir);
554 static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
556 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
559 if (pch_iir & SDE_AUDIO_POWER_MASK)
560 DRM_DEBUG("i915: PCH audio power change on port %d\n",
561 (pch_iir & SDE_AUDIO_POWER_MASK) >>
562 SDE_AUDIO_POWER_SHIFT);
564 if (pch_iir & SDE_GMBUS)
565 DRM_DEBUG("i915: PCH GMBUS interrupt\n");
567 if (pch_iir & SDE_AUDIO_HDCP_MASK)
568 DRM_DEBUG("i915: PCH HDCP audio interrupt\n");
570 if (pch_iir & SDE_AUDIO_TRANS_MASK)
571 DRM_DEBUG("i915: PCH transcoder audio interrupt\n");
573 if (pch_iir & SDE_POISON)
574 DRM_ERROR("i915: PCH poison interrupt\n");
576 if (pch_iir & SDE_FDI_MASK)
578 DRM_DEBUG(" pipe %c FDI IIR: 0x%08x\n",
580 I915_READ(FDI_RX_IIR(pipe)));
582 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
583 DRM_DEBUG("i915: PCH transcoder CRC done interrupt\n");
585 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
586 DRM_DEBUG("i915: PCH transcoder CRC error interrupt\n");
588 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
589 DRM_DEBUG("i915: PCH transcoder B underrun interrupt\n");
590 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
591 DRM_DEBUG("PCH transcoder A underrun interrupt\n");
595 ivybridge_irq_handler(void *arg)
597 struct drm_device *dev = (struct drm_device *) arg;
598 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
599 u32 de_iir, gt_iir, de_ier, pm_iir;
602 atomic_inc(&dev_priv->irq_received);
604 /* disable master interrupt before clearing iir */
605 de_ier = I915_READ(DEIER);
606 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
609 gt_iir = I915_READ(GTIIR);
611 snb_gt_irq_handler(dev, dev_priv, gt_iir);
612 I915_WRITE(GTIIR, gt_iir);
615 de_iir = I915_READ(DEIIR);
617 if (de_iir & DE_GSE_IVB)
618 intel_opregion_gse_intr(dev);
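/* On Ivybridge the per-pipe display bits repeat every five bit
 * positions, so shifting the pipe A flip-done/vblank bits by 5 * i
 * selects the same events for pipe i. */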
620 for (i = 0; i < 3; i++) {
621 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
622 intel_prepare_page_flip(dev, i);
623 intel_finish_page_flip_plane(dev, i);
625 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
626 drm_handle_vblank(dev, i);
629 /* check event from PCH */
630 if (de_iir & DE_PCH_EVENT_IVB) {
631 u32 pch_iir = I915_READ(SDEIIR);
633 if (pch_iir & SDE_HOTPLUG_MASK_CPT)
634 taskqueue_enqueue(dev_priv->tq,
635 &dev_priv->hotplug_task);
636 pch_irq_handler(dev, pch_iir);
638 /* clear the PCH hotplug event before clearing the CPU irq */
639 I915_WRITE(SDEIIR, pch_iir);
642 I915_WRITE(DEIIR, de_iir);
645 pm_iir = I915_READ(GEN6_PMIIR);
647 if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
648 gen6_queue_rps_work(dev_priv, pm_iir);
649 I915_WRITE(GEN6_PMIIR, pm_iir);
652 I915_WRITE(DEIER, de_ier);
655 CTR3(KTR_DRM, "ivybridge_irq de %x gt %x pm %x", de_iir,
659 static void ilk_gt_irq_handler(struct drm_device *dev,
660 struct drm_i915_private *dev_priv,
663 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
664 notify_ring(dev, &dev_priv->rings[RCS]);
665 if (gt_iir & GT_BSD_USER_INTERRUPT)
666 notify_ring(dev, &dev_priv->rings[VCS]);
670 ironlake_irq_handler(void *arg)
672 struct drm_device *dev = arg;
673 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
674 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
677 atomic_inc(&dev_priv->irq_received);
679 /* disable master interrupt before clearing iir */
680 de_ier = I915_READ(DEIER);
681 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
684 de_iir = I915_READ(DEIIR);
685 gt_iir = I915_READ(GTIIR);
686 pch_iir = I915_READ(SDEIIR);
687 pm_iir = I915_READ(GEN6_PMIIR);
689 CTR4(KTR_DRM, "ironlake_irq de %x gt %x pch %x pm %x", de_iir,
690 gt_iir, pch_iir, pm_iir);
692 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
693 (!IS_GEN6(dev) || pm_iir == 0))
696 if (HAS_PCH_CPT(dev))
697 hotplug_mask = SDE_HOTPLUG_MASK_CPT;
699 hotplug_mask = SDE_HOTPLUG_MASK;
702 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
704 snb_gt_irq_handler(dev, dev_priv, gt_iir);
706 if (de_iir & DE_GSE) {
707 intel_opregion_gse_intr(dev);
710 if (de_iir & DE_PLANEA_FLIP_DONE) {
711 intel_prepare_page_flip(dev, 0);
712 intel_finish_page_flip_plane(dev, 0);
715 if (de_iir & DE_PLANEB_FLIP_DONE) {
716 intel_prepare_page_flip(dev, 1);
717 intel_finish_page_flip_plane(dev, 1);
720 if (de_iir & DE_PIPEA_VBLANK)
721 drm_handle_vblank(dev, 0);
723 if (de_iir & DE_PIPEB_VBLANK)
724 drm_handle_vblank(dev, 1);
726 /* check event from PCH */
727 if (de_iir & DE_PCH_EVENT) {
728 if (pch_iir & hotplug_mask)
729 taskqueue_enqueue(dev_priv->tq,
730 &dev_priv->hotplug_task);
731 pch_irq_handler(dev, pch_iir);
734 if (de_iir & DE_PCU_EVENT) {
735 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
736 i915_handle_rps_change(dev);
739 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
740 gen6_queue_rps_work(dev_priv, pm_iir);
742 /* should clear the PCH hotplug event before clearing the CPU irq */
743 I915_WRITE(SDEIIR, pch_iir);
744 I915_WRITE(GTIIR, gt_iir);
745 I915_WRITE(DEIIR, de_iir);
746 I915_WRITE(GEN6_PMIIR, pm_iir);
749 I915_WRITE(DEIER, de_ier);
754 * i915_error_work_func - do process context error handling work
757 * Fire an error uevent so userspace can see that a hang or error occurred.
761 i915_error_work_func(void *context, int pending)
763 drm_i915_private_t *dev_priv = context;
764 struct drm_device *dev = dev_priv->dev;
766 /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); */
768 if (atomic_load_acq_int(&dev_priv->mm.wedged)) {
769 DRM_DEBUG("i915: resetting chip\n");
770 /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); */
771 if (!i915_reset(dev)) {
772 atomic_store_rel_int(&dev_priv->mm.wedged, 0);
773 /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); */
775 mtx_lock(&dev_priv->error_completion_lock);
776 dev_priv->error_completion++;
777 wakeup(&dev_priv->error_completion);
778 mtx_unlock(&dev_priv->error_completion_lock);
782 #define pr_err(...) printf(__VA_ARGS__)
784 static void i915_report_and_clear_eir(struct drm_device *dev)
786 struct drm_i915_private *dev_priv = dev->dev_private;
787 u32 eir = I915_READ(EIR);
793 printf("i915: render error detected, EIR: 0x%08x\n", eir);
796 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
797 u32 ipeir = I915_READ(IPEIR_I965);
799 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
800 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
801 pr_err(" INSTDONE: 0x%08x\n",
802 I915_READ(INSTDONE_I965));
803 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
804 pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
805 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
806 I915_WRITE(IPEIR_I965, ipeir);
807 POSTING_READ(IPEIR_I965);
809 if (eir & GM45_ERROR_PAGE_TABLE) {
810 u32 pgtbl_err = I915_READ(PGTBL_ER);
811 pr_err("page table error\n");
812 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
813 I915_WRITE(PGTBL_ER, pgtbl_err);
814 POSTING_READ(PGTBL_ER);
819 if (eir & I915_ERROR_PAGE_TABLE) {
820 u32 pgtbl_err = I915_READ(PGTBL_ER);
821 pr_err("page table error\n");
822 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
823 I915_WRITE(PGTBL_ER, pgtbl_err);
824 POSTING_READ(PGTBL_ER);
828 if (eir & I915_ERROR_MEMORY_REFRESH) {
829 pr_err("memory refresh error:\n");
831 pr_err("pipe %c stat: 0x%08x\n",
832 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
833 /* pipestat has already been acked */
835 if (eir & I915_ERROR_INSTRUCTION) {
836 pr_err("instruction error\n");
837 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
838 if (INTEL_INFO(dev)->gen < 4) {
839 u32 ipeir = I915_READ(IPEIR);
841 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
842 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
843 pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
844 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
845 I915_WRITE(IPEIR, ipeir);
848 u32 ipeir = I915_READ(IPEIR_I965);
850 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
851 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
852 pr_err(" INSTDONE: 0x%08x\n",
853 I915_READ(INSTDONE_I965));
854 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
855 pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
856 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
857 I915_WRITE(IPEIR_I965, ipeir);
858 POSTING_READ(IPEIR_I965);
862 I915_WRITE(EIR, eir);
864 eir = I915_READ(EIR);
867 * some errors might have become stuck, so mask them.
870 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
871 I915_WRITE(EMR, I915_READ(EMR) | eir);
872 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
877 * i915_handle_error - handle an error interrupt
880 * Do some basic checking of register state at error interrupt time and
881 * dump it to the syslog. Also call i915_capture_error_state() to make
882 * sure we get a record and make it available in debugfs. Fire a uevent
883 * so userspace knows something bad happened (should trigger collection
884 * of a ring dump etc.).
886 void i915_handle_error(struct drm_device *dev, bool wedged)
888 struct drm_i915_private *dev_priv = dev->dev_private;
889 struct intel_ring_buffer *ring;
892 i915_capture_error_state(dev);
893 i915_report_and_clear_eir(dev);
896 mtx_lock(&dev_priv->error_completion_lock);
897 dev_priv->error_completion = 0;
898 dev_priv->mm.wedged = 1;
899 /* unlock acts as rel barrier for store to wedged */
900 mtx_unlock(&dev_priv->error_completion_lock);
903 * Wakeup waiting processes so they don't hang
905 for_each_ring(ring, dev_priv, i) {
906 mtx_lock(&dev_priv->irq_lock);
908 mtx_unlock(&dev_priv->irq_lock);
912 taskqueue_enqueue(dev_priv->tq, &dev_priv->error_task);
915 static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
917 drm_i915_private_t *dev_priv = dev->dev_private;
918 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
919 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
920 struct drm_i915_gem_object *obj;
921 struct intel_unpin_work *work;
924 /* Ignore early vblank irqs */
925 if (intel_crtc == NULL)
928 mtx_lock(&dev->event_lock);
929 work = intel_crtc->unpin_work;
931 if (work == NULL || work->pending || !work->enable_stall_check) {
932 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
933 mtx_unlock(&dev->event_lock);
937 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
938 obj = work->pending_flip_obj;
939 if (INTEL_INFO(dev)->gen >= 4) {
940 int dspsurf = DSPSURF(intel_crtc->plane);
941 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
944 int dspaddr = DSPADDR(intel_crtc->plane);
945 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
946 crtc->y * crtc->fb->pitches[0] +
947 crtc->x * crtc->fb->bits_per_pixel/8);
950 mtx_unlock(&dev->event_lock);
952 if (stall_detected) {
953 DRM_DEBUG("Pageflip stall detected\n");
954 intel_prepare_page_flip(dev, intel_crtc->plane);
958 /* Called from drm generic code, passed 'crtc' which
959 * we use as a pipe index
962 i915_enable_vblank(struct drm_device *dev, int pipe)
964 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
966 if (!i915_pipe_enabled(dev, pipe))
969 mtx_lock(&dev_priv->irq_lock);
970 if (INTEL_INFO(dev)->gen >= 4)
971 i915_enable_pipestat(dev_priv, pipe,
972 PIPE_START_VBLANK_INTERRUPT_ENABLE);
974 i915_enable_pipestat(dev_priv, pipe,
975 PIPE_VBLANK_INTERRUPT_ENABLE);
977 /* maintain vblank delivery even in deep C-states */
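/* INSTPM is a masked register: the upper 16 bits select which low bits
 * a write may change, so _MASKED_BIT_DISABLE() touches only the
 * AGPBUSY_DIS bit. */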
978 if (dev_priv->info->gen == 3)
979 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
980 mtx_unlock(&dev_priv->irq_lock);
981 CTR1(KTR_DRM, "i915_enable_vblank %d", pipe);
987 ironlake_enable_vblank(struct drm_device *dev, int pipe)
989 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
991 if (!i915_pipe_enabled(dev, pipe))
994 mtx_lock(&dev_priv->irq_lock);
995 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
996 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
997 mtx_unlock(&dev_priv->irq_lock);
998 CTR1(KTR_DRM, "ironlake_enable_vblank %d", pipe);
1004 ivybridge_enable_vblank(struct drm_device *dev, int pipe)
1006 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1008 if (!i915_pipe_enabled(dev, pipe))
1011 mtx_lock(&dev_priv->irq_lock);
1012 ironlake_enable_display_irq(dev_priv,
1013 DE_PIPEA_VBLANK_IVB << (5 * pipe));
1014 mtx_unlock(&dev_priv->irq_lock);
1015 CTR1(KTR_DRM, "ivybridge_enable_vblank %d", pipe);
1020 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1022 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1025 if (!i915_pipe_enabled(dev, pipe))
1028 mtx_lock(&dev_priv->irq_lock);
1029 dpfl = I915_READ(VLV_DPFLIPSTAT);
1030 imr = I915_READ(VLV_IMR);
1032 dpfl |= PIPEA_VBLANK_INT_EN;
1033 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1035 dpfl |= PIPEB_VBLANK_INT_EN;
1036 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1038 I915_WRITE(VLV_DPFLIPSTAT, dpfl);
1039 I915_WRITE(VLV_IMR, imr);
1040 mtx_unlock(&dev_priv->irq_lock);
1045 /* Called from drm generic code, passed 'crtc' which
1046 * we use as a pipe index
1049 i915_disable_vblank(struct drm_device *dev, int pipe)
1051 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1053 mtx_lock(&dev_priv->irq_lock);
1054 if (dev_priv->info->gen == 3)
1055 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
1057 i915_disable_pipestat(dev_priv, pipe,
1058 PIPE_VBLANK_INTERRUPT_ENABLE |
1059 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1060 mtx_unlock(&dev_priv->irq_lock);
1061 CTR1(KTR_DRM, "i915_disable_vblank %d", pipe);
1065 ironlake_disable_vblank(struct drm_device *dev, int pipe)
1067 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1069 mtx_lock(&dev_priv->irq_lock);
1070 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1071 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1072 mtx_unlock(&dev_priv->irq_lock);
1073 CTR1(KTR_DRM, "ironlake_disable_vblank %d", pipe);
1077 ivybridge_disable_vblank(struct drm_device *dev, int pipe)
1079 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1081 mtx_lock(&dev_priv->irq_lock);
1082 ironlake_disable_display_irq(dev_priv,
1083 DE_PIPEA_VBLANK_IVB << (pipe * 5));
1084 mtx_unlock(&dev_priv->irq_lock);
1085 CTR1(KTR_DRM, "ivybridge_disable_vblank %d", pipe);
1088 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1090 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1093 mtx_lock(&dev_priv->irq_lock);
1094 dpfl = I915_READ(VLV_DPFLIPSTAT);
1095 imr = I915_READ(VLV_IMR);
1097 dpfl &= ~PIPEA_VBLANK_INT_EN;
1098 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1100 dpfl &= ~PIPEB_VBLANK_INT_EN;
1101 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1103 I915_WRITE(VLV_IMR, imr);
1104 I915_WRITE(VLV_DPFLIPSTAT, dpfl);
1105 mtx_unlock(&dev_priv->irq_lock);
1109 ring_last_seqno(struct intel_ring_buffer *ring)
1112 if (list_empty(&ring->request_list))
1115 return (list_entry(ring->request_list.prev,
1116 struct drm_i915_gem_request, list)->seqno);
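/* A ring counts as idle when it has no outstanding requests or the
 * hardware has already passed the last submitted seqno; if a sleeper is
 * still queued on an idle ring, the wait is stuck and the hang is
 * reported. */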
1119 static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1121 if (list_empty(&ring->request_list) ||
1122 i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
1123 /* Issue a wake-up to catch stuck h/w. */
1125 if (sleepq_sleepcnt(ring, 0) != 0) {
1126 sleepq_release(ring);
1127 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
1132 sleepq_release(ring);
1138 static bool kick_ring(struct intel_ring_buffer *ring)
1140 struct drm_device *dev = ring->dev;
1141 struct drm_i915_private *dev_priv = dev->dev_private;
1142 u32 tmp = I915_READ_CTL(ring);
1143 if (tmp & RING_WAIT) {
1144 DRM_ERROR("Kicking stuck wait on %s\n",
1146 I915_WRITE_CTL(ring, tmp);
1152 static bool i915_hangcheck_hung(struct drm_device *dev)
1154 drm_i915_private_t *dev_priv = dev->dev_private;
1156 if (dev_priv->hangcheck_count++ > 1) {
1159 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
1160 i915_handle_error(dev, true);
1162 if (!IS_GEN2(dev)) {
1163 struct intel_ring_buffer *ring;
1166 /* Is the chip hanging on a WAIT_FOR_EVENT?
1167 * If so we can simply poke the RB_WAIT bit
1168 * and break the hang. This should work on
1169 * all but the second generation chipsets.
1171 for_each_ring(ring, dev_priv, i)
1172 hung &= !kick_ring(ring);
1182 * This is called when the chip hasn't reported back with completed
1183 * batchbuffers in a long time. The first time this is called we simply record
1184 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
1185 * again, we assume the chip is wedged and try to fix it.
1188 i915_hangcheck_elapsed(void *context)
1190 struct drm_device *dev = (struct drm_device *)context;
1191 drm_i915_private_t *dev_priv = dev->dev_private;
1192 uint32_t acthd[I915_NUM_RINGS], instdone, instdone1;
1193 struct intel_ring_buffer *ring;
1194 bool err = false, idle;
1197 if (!i915_enable_hangcheck)
1200 memset(acthd, 0, sizeof(acthd));
1202 for_each_ring(ring, dev_priv, i) {
1203 idle &= i915_hangcheck_ring_idle(ring, &err);
1204 acthd[i] = intel_ring_get_active_head(ring);
1207 /* If all work is done then ACTHD clearly hasn't advanced. */
1210 if (i915_hangcheck_hung(dev))
1216 dev_priv->hangcheck_count = 0;
1220 if (INTEL_INFO(dev)->gen < 4) {
1221 instdone = I915_READ(INSTDONE);
1224 instdone = I915_READ(INSTDONE_I965);
1225 instdone1 = I915_READ(INSTDONE1);
1227 if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
1228 dev_priv->last_instdone == instdone &&
1229 dev_priv->last_instdone1 == instdone1) {
1230 if (i915_hangcheck_hung(dev))
1233 dev_priv->hangcheck_count = 0;
1235 memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
1236 dev_priv->last_instdone = instdone;
1237 dev_priv->last_instdone1 = instdone1;
1241 /* Reset timer in case the chip hangs without another request being added */
1242 callout_schedule(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD);
1248 ironlake_irq_preinstall(struct drm_device *dev)
1250 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1252 atomic_set(&dev_priv->irq_received, 0);
1254 I915_WRITE(HWSTAM, 0xeffe);
1256 /* XXX hotplug from PCH */
1258 I915_WRITE(DEIMR, 0xffffffff);
1259 I915_WRITE(DEIER, 0x0);
1260 POSTING_READ(DEIER);
1263 I915_WRITE(GTIMR, 0xffffffff);
1264 I915_WRITE(GTIER, 0x0);
1265 POSTING_READ(GTIER);
1267 /* south display irq */
1268 I915_WRITE(SDEIMR, 0xffffffff);
1269 I915_WRITE(SDEIER, 0x0);
1270 POSTING_READ(SDEIER);
1273 static void valleyview_irq_preinstall(struct drm_device *dev)
1275 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1278 atomic_set(&dev_priv->irq_received, 0);
1281 I915_WRITE(VLV_IMR, 0);
1282 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
1283 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
1284 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
1287 I915_WRITE(GTIIR, I915_READ(GTIIR));
1288 I915_WRITE(GTIIR, I915_READ(GTIIR));
1289 I915_WRITE(GTIMR, 0xffffffff);
1290 I915_WRITE(GTIER, 0x0);
1291 POSTING_READ(GTIER);
1293 I915_WRITE(DPINVGTT, 0xff);
1295 I915_WRITE(PORT_HOTPLUG_EN, 0);
1296 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1298 I915_WRITE(PIPESTAT(pipe), 0xffff);
1299 I915_WRITE(VLV_IIR, 0xffffffff);
1300 I915_WRITE(VLV_IMR, 0xffffffff);
1301 I915_WRITE(VLV_IER, 0x0);
1302 POSTING_READ(VLV_IER);
1306 * Enable digital hotplug on the PCH, and configure the DP short pulse
1307 * duration to 2ms (which is the minimum in the Display Port spec)
1309 * This register is the same on all known PCH chips.
1312 static void ironlake_enable_pch_hotplug(struct drm_device *dev)
1314 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1317 hotplug = I915_READ(PCH_PORT_HOTPLUG);
1318 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
1319 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
1320 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
1321 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
1322 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
1325 static int ironlake_irq_postinstall(struct drm_device *dev)
1327 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1328 /* interrupts we always want enabled */
1329 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1330 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
1334 dev_priv->irq_mask = ~display_mask;
1336 /* should always be able to generate an irq */
1337 I915_WRITE(DEIIR, I915_READ(DEIIR));
1338 I915_WRITE(DEIMR, dev_priv->irq_mask);
1339 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
1340 POSTING_READ(DEIER);
1342 dev_priv->gt_irq_mask = ~0;
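/* All GT sources start masked in GTIMR; the ring code unmasks the
 * per-ring user interrupt bits on demand when a waiter needs them, while
 * GTIER below keeps the sources themselves enabled. */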
1344 I915_WRITE(GTIIR, I915_READ(GTIIR));
1345 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1350 GEN6_BSD_USER_INTERRUPT |
1351 GEN6_BLITTER_USER_INTERRUPT;
1356 GT_BSD_USER_INTERRUPT;
1357 I915_WRITE(GTIER, render_irqs);
1358 POSTING_READ(GTIER);
1360 if (HAS_PCH_CPT(dev)) {
1361 hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1362 SDE_PORTB_HOTPLUG_CPT |
1363 SDE_PORTC_HOTPLUG_CPT |
1364 SDE_PORTD_HOTPLUG_CPT);
1366 hotplug_mask = (SDE_CRT_HOTPLUG |
1373 dev_priv->pch_irq_mask = ~hotplug_mask;
1375 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1376 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1377 I915_WRITE(SDEIER, hotplug_mask);
1378 POSTING_READ(SDEIER);
1380 ironlake_enable_pch_hotplug(dev);
1382 if (IS_IRONLAKE_M(dev)) {
1383 /* Clear & enable PCU event interrupts */
1384 I915_WRITE(DEIIR, DE_PCU_EVENT);
1385 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
1386 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
1393 ivybridge_irq_postinstall(struct drm_device *dev)
1395 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1396 /* interrupts we always want enabled */
1398 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
1399 DE_PLANEC_FLIP_DONE_IVB |
1400 DE_PLANEB_FLIP_DONE_IVB |
1401 DE_PLANEA_FLIP_DONE_IVB;
1405 dev_priv->irq_mask = ~display_mask;
1407 /* should always be able to generate an irq */
1408 I915_WRITE(DEIIR, I915_READ(DEIIR));
1409 I915_WRITE(DEIMR, dev_priv->irq_mask);
1412 DE_PIPEC_VBLANK_IVB |
1413 DE_PIPEB_VBLANK_IVB |
1414 DE_PIPEA_VBLANK_IVB);
1415 POSTING_READ(DEIER);
1417 dev_priv->gt_irq_mask = ~0;
1419 I915_WRITE(GTIIR, I915_READ(GTIIR));
1420 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1422 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
1423 GEN6_BLITTER_USER_INTERRUPT;
1424 I915_WRITE(GTIER, render_irqs);
1425 POSTING_READ(GTIER);
1427 hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1428 SDE_PORTB_HOTPLUG_CPT |
1429 SDE_PORTC_HOTPLUG_CPT |
1430 SDE_PORTD_HOTPLUG_CPT);
1431 dev_priv->pch_irq_mask = ~hotplug_mask;
1433 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1434 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1435 I915_WRITE(SDEIER, hotplug_mask);
1436 POSTING_READ(SDEIER);
1438 ironlake_enable_pch_hotplug(dev);
1443 static int valleyview_irq_postinstall(struct drm_device *dev)
1445 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1448 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1451 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
1452 enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
1453 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1455 dev_priv->irq_mask = ~enable_mask;
1457 dev_priv->pipestat[0] = 0;
1458 dev_priv->pipestat[1] = 0;
1460 /* Hack for broken MSIs on VLV */
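/* 0xfee00000 is the architectural x86 MSI address window; the config
 * writes below appear to repoint the device's MSI address/data pair
 * (config offsets 0x94/0x98) at it so delivery works despite the broken
 * default programming. */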
1461 pci_write_config(dev->dev, 0x94, 0xfee00000, 4);
1462 msid = pci_read_config(dev->dev, 0x98, 2);
1463 msid &= 0xff; /* mask out delivery bits */
1465 pci_write_config(dev->dev, 0x98, msid, 2);
1467 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
1468 I915_WRITE(VLV_IER, enable_mask);
1469 I915_WRITE(VLV_IIR, 0xffffffff);
1470 I915_WRITE(PIPESTAT(0), 0xffff);
1471 I915_WRITE(PIPESTAT(1), 0xffff);
1472 POSTING_READ(VLV_IER);
1474 I915_WRITE(VLV_IIR, 0xffffffff);
1475 I915_WRITE(VLV_IIR, 0xffffffff);
1477 render_irqs = GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
1478 GT_GEN6_BLT_CS_ERROR_INTERRUPT |
1479 GT_GEN6_BLT_USER_INTERRUPT |
1480 GT_GEN6_BSD_USER_INTERRUPT |
1481 GT_GEN6_BSD_CS_ERROR_INTERRUPT |
1482 GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
1484 GT_RENDER_CS_ERROR_INTERRUPT |
1488 dev_priv->gt_irq_mask = ~render_irqs;
1490 I915_WRITE(GTIIR, I915_READ(GTIIR));
1491 I915_WRITE(GTIIR, I915_READ(GTIIR));
1492 I915_WRITE(GTIMR, 0);
1493 I915_WRITE(GTIER, render_irqs);
1494 POSTING_READ(GTIER);
1496 /* ack & enable invalid PTE error interrupts */
1497 #if 0 /* FIXME: add support to irq handler for checking these bits */
1498 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
1499 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
1502 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1503 #if 0 /* FIXME: check register definitions; some have moved */
1504 /* Note HDMI and DP share bits */
1505 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
1506 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
1507 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
1508 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
1509 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
1510 hotplug_en |= HDMID_HOTPLUG_INT_EN;
1511 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
1512 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
1513 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
1514 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
1515 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
1516 hotplug_en |= CRT_HOTPLUG_INT_EN;
1517 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
1521 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
1526 static void valleyview_irq_uninstall(struct drm_device *dev)
1528 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1535 I915_WRITE(PIPESTAT(pipe), 0xffff);
1537 I915_WRITE(HWSTAM, 0xffffffff);
1538 I915_WRITE(PORT_HOTPLUG_EN, 0);
1539 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1541 I915_WRITE(PIPESTAT(pipe), 0xffff);
1542 I915_WRITE(VLV_IIR, 0xffffffff);
1543 I915_WRITE(VLV_IMR, 0xffffffff);
1544 I915_WRITE(VLV_IER, 0x0);
1545 POSTING_READ(VLV_IER);
1549 ironlake_irq_uninstall(struct drm_device *dev)
1551 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1553 if (dev_priv == NULL)
1556 I915_WRITE(HWSTAM, 0xffffffff);
1558 I915_WRITE(DEIMR, 0xffffffff);
1559 I915_WRITE(DEIER, 0x0);
1560 I915_WRITE(DEIIR, I915_READ(DEIIR));
1562 I915_WRITE(GTIMR, 0xffffffff);
1563 I915_WRITE(GTIER, 0x0);
1564 I915_WRITE(GTIIR, I915_READ(GTIIR));
1566 I915_WRITE(SDEIMR, 0xffffffff);
1567 I915_WRITE(SDEIER, 0x0);
1568 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1571 static void i8xx_irq_preinstall(struct drm_device * dev)
1573 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1576 atomic_set(&dev_priv->irq_received, 0);
1579 I915_WRITE(PIPESTAT(pipe), 0);
1580 I915_WRITE16(IMR, 0xffff);
1581 I915_WRITE16(IER, 0x0);
1582 POSTING_READ16(IER);
1585 static int i8xx_irq_postinstall(struct drm_device *dev)
1587 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1589 dev_priv->pipestat[0] = 0;
1590 dev_priv->pipestat[1] = 0;
1593 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
1595 /* Unmask the interrupts that we always want on. */
1596 dev_priv->irq_mask =
1597 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1598 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1599 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
1600 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
1601 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1602 I915_WRITE16(IMR, dev_priv->irq_mask);
1605 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1606 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1607 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
1608 I915_USER_INTERRUPT);
1609 POSTING_READ16(IER);
1614 static void i8xx_irq_handler(void *arg)
1616 struct drm_device *dev = (struct drm_device *) arg;
1617 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1623 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
1624 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
1626 atomic_inc(&dev_priv->irq_received);
1628 iir = I915_READ16(IIR);
1632 while (iir & ~flip_mask) {
1633 /* Can't rely on pipestat interrupt bit in iir as it might
1634 * have been cleared after the pipestat interrupt was received.
1635 * It doesn't set the bit in iir again, but it still produces
1636 * interrupts (for non-MSI).
1638 mtx_lock(&dev_priv->irq_lock);
1639 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
1640 i915_handle_error(dev, false);
1642 for_each_pipe(pipe) {
1643 int reg = PIPESTAT(pipe);
1644 pipe_stats[pipe] = I915_READ(reg);
1647 * Clear the PIPE*STAT regs before the IIR
1649 if (pipe_stats[pipe] & 0x8000ffff) {
1650 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1651 DRM_DEBUG_DRIVER("pipe %c underrun\n",
1653 I915_WRITE(reg, pipe_stats[pipe]);
1657 mtx_unlock(&dev_priv->irq_lock);
1659 I915_WRITE16(IIR, iir & ~flip_mask);
1660 new_iir = I915_READ16(IIR); /* Flush posted writes */
1662 i915_update_dri1_breadcrumb(dev);
1664 if (iir & I915_USER_INTERRUPT)
1665 notify_ring(dev, &dev_priv->rings[RCS]);
1667 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
1668 drm_handle_vblank(dev, 0)) {
1669 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
1670 intel_prepare_page_flip(dev, 0);
1671 intel_finish_page_flip(dev, 0);
1672 flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
1676 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
1677 drm_handle_vblank(dev, 1)) {
1678 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
1679 intel_prepare_page_flip(dev, 1);
1680 intel_finish_page_flip(dev, 1);
1681 flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
1689 static void i8xx_irq_uninstall(struct drm_device * dev)
1691 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1694 for_each_pipe(pipe) {
1695 /* Clear enable bits; then clear status bits */
1696 I915_WRITE(PIPESTAT(pipe), 0);
1697 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
1699 I915_WRITE16(IMR, 0xffff);
1700 I915_WRITE16(IER, 0x0);
1701 I915_WRITE16(IIR, I915_READ16(IIR));
1704 static void i915_irq_preinstall(struct drm_device * dev)
1706 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1709 atomic_set(&dev_priv->irq_received, 0);
1711 if (I915_HAS_HOTPLUG(dev)) {
1712 I915_WRITE(PORT_HOTPLUG_EN, 0);
1713 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1716 I915_WRITE16(HWSTAM, 0xeffe);
1718 I915_WRITE(PIPESTAT(pipe), 0);
1719 I915_WRITE(IMR, 0xffffffff);
1720 I915_WRITE(IER, 0x0);
1724 static int i915_irq_postinstall(struct drm_device *dev)
1726 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1729 dev_priv->pipestat[0] = 0;
1730 dev_priv->pipestat[1] = 0;
1732 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
1734 /* Unmask the interrupts that we always want on. */
1735 dev_priv->irq_mask =
1736 ~(I915_ASLE_INTERRUPT |
1737 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1738 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1739 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
1740 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
1741 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1744 I915_ASLE_INTERRUPT |
1745 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1746 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1747 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
1748 I915_USER_INTERRUPT;
1750 if (I915_HAS_HOTPLUG(dev)) {
1751 /* Enable in IER... */
1752 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
1753 /* and unmask in IMR */
1754 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
1757 I915_WRITE(IMR, dev_priv->irq_mask);
1758 I915_WRITE(IER, enable_mask);
1761 if (I915_HAS_HOTPLUG(dev)) {
1762 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1764 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
1765 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
1766 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
1767 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
1768 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
1769 hotplug_en |= HDMID_HOTPLUG_INT_EN;
1770 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
1771 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
1772 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
1773 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
1774 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
1775 hotplug_en |= CRT_HOTPLUG_INT_EN;
1776 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
1779 /* Ignore TV since it's buggy */
1781 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
1784 intel_opregion_enable_asle(dev);
1789 static void i915_irq_handler(void *arg)
1791 struct drm_device *dev = (struct drm_device *) arg;
1792 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1793 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
1795 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
1796 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
1798 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
1799 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
1803 atomic_inc(&dev_priv->irq_received);
1805 iir = I915_READ(IIR);
1807 bool irq_received = (iir & ~flip_mask) != 0;
1808 bool blc_event = false;
1810 /* Can't rely on pipestat interrupt bit in iir as it might
1811 * have been cleared after the pipestat interrupt was received.
1812 * It doesn't set the bit in iir again, but it still produces
1813 * interrupts (for non-MSI).
1815 mtx_lock(&dev_priv->irq_lock);
1816 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
1817 i915_handle_error(dev, false);
1819 for_each_pipe(pipe) {
1820 int reg = PIPESTAT(pipe);
1821 pipe_stats[pipe] = I915_READ(reg);
1823 /* Clear the PIPE*STAT regs before the IIR */
1824 if (pipe_stats[pipe] & 0x8000ffff) {
1825 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1826 DRM_DEBUG_DRIVER("pipe %c underrun\n",
1828 I915_WRITE(reg, pipe_stats[pipe]);
1829 irq_received = true;
1832 mtx_unlock(&dev_priv->irq_lock);
1837 /* Consume port. Then clear IIR or we'll miss events */
1838 if ((I915_HAS_HOTPLUG(dev)) &&
1839 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
1840 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1842 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
1844 if (hotplug_status & dev_priv->hotplug_supported_mask)
1845 taskqueue_enqueue(dev_priv->tq,
1846 &dev_priv->hotplug_task);
1848 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1849 POSTING_READ(PORT_HOTPLUG_STAT);
1852 I915_WRITE(IIR, iir & ~flip_mask);
1853 new_iir = I915_READ(IIR); /* Flush posted writes */
1855 if (iir & I915_USER_INTERRUPT)
1856 notify_ring(dev, &dev_priv->rings[RCS]);
1858 for_each_pipe(pipe) {
1862 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
1863 drm_handle_vblank(dev, pipe)) {
1864 if (iir & flip[plane]) {
1865 intel_prepare_page_flip(dev, plane);
1866 intel_finish_page_flip(dev, pipe);
1867 flip_mask &= ~flip[plane];
1871 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1875 if (blc_event || (iir & I915_ASLE_INTERRUPT))
1876 intel_opregion_asle_intr(dev);
1879 /* With MSI, interrupts are only generated when iir
1880 * transitions from zero to nonzero. If another bit got
1881 * set while we were handling the existing iir bits, then
1882 * we would never get another interrupt.
1884 * This is fine on non-MSI as well, as if we hit this path
1885 * we avoid exiting the interrupt handler only to generate
1888 * Note that for MSI this could cause a stray interrupt report
1889 * if an interrupt landed in the time between writing IIR and
1890 * the posting read. This should be rare enough to never
1891 * trigger the 99% of 100,000 interrupts test for disabling
1895 } while (iir & ~flip_mask);
1897 i915_update_dri1_breadcrumb(dev);
1900 static void i915_irq_uninstall(struct drm_device * dev)
1902 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1908 if (I915_HAS_HOTPLUG(dev)) {
1909 I915_WRITE(PORT_HOTPLUG_EN, 0);
1910 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1913 I915_WRITE16(HWSTAM, 0xffff);
1914 for_each_pipe(pipe) {
1915 /* Clear enable bits; then clear status bits */
1916 I915_WRITE(PIPESTAT(pipe), 0);
1917 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
1919 I915_WRITE(IMR, 0xffffffff);
1920 I915_WRITE(IER, 0x0);
1922 I915_WRITE(IIR, I915_READ(IIR));
1925 static void i965_irq_preinstall(struct drm_device * dev)
1927 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1930 atomic_set(&dev_priv->irq_received, 0);
1932 if (I915_HAS_HOTPLUG(dev)) {
1933 I915_WRITE(PORT_HOTPLUG_EN, 0);
1934 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1937 I915_WRITE(HWSTAM, 0xeffe);
1939 I915_WRITE(PIPESTAT(pipe), 0);
1940 I915_WRITE(IMR, 0xffffffff);
1941 I915_WRITE(IER, 0x0);
1945 static int i965_irq_postinstall(struct drm_device *dev)
1947 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1951 /* Unmask the interrupts that we always want on. */
1952 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
1953 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1954 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1955 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
1956 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
1957 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1959 enable_mask = ~dev_priv->irq_mask;
1960 enable_mask |= I915_USER_INTERRUPT;
1963 enable_mask |= I915_BSD_USER_INTERRUPT;
1965 dev_priv->pipestat[0] = 0;
1966 dev_priv->pipestat[1] = 0;
1968 if (I915_HAS_HOTPLUG(dev)) {
1969 /* Enable in IER... */
1970 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
1971 /* and unmask in IMR */
1972 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
1976 * Enable some error detection, note the instruction error mask
1977 * bit is reserved, so we leave it masked.
1980 error_mask = ~(GM45_ERROR_PAGE_TABLE |
1981 GM45_ERROR_MEM_PRIV |
1982 GM45_ERROR_CP_PRIV |
1983 I915_ERROR_MEMORY_REFRESH);
1985 error_mask = ~(I915_ERROR_PAGE_TABLE |
1986 I915_ERROR_MEMORY_REFRESH);
1988 I915_WRITE(EMR, error_mask);
1990 I915_WRITE(IMR, dev_priv->irq_mask);
1991 I915_WRITE(IER, enable_mask);
1994 if (I915_HAS_HOTPLUG(dev)) {
1995 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1997 /* Note HDMI and DP share bits */
1998 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
1999 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2000 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2001 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2002 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2003 hotplug_en |= HDMID_HOTPLUG_INT_EN;
2004 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
2005 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2006 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
2007 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2008 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2009 hotplug_en |= CRT_HOTPLUG_INT_EN;
2011 /* Programming the CRT detection parameters tends
2012 to generate a spurious hotplug event about three
2013 seconds later. So just do it once.
2016 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
2017 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2020 /* Ignore TV since it's buggy */
2022 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2025 intel_opregion_enable_asle(dev);
2030 static void i965_irq_handler(void *arg)
2032 struct drm_device *dev = (struct drm_device *) arg;
2033 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2035 u32 pipe_stats[I915_MAX_PIPES];
2039 atomic_inc(&dev_priv->irq_received);
2041 iir = I915_READ(IIR);
2044 bool blc_event = false;
2046 irq_received = iir != 0;
2048 /* Can't rely on pipestat interrupt bit in iir as it might
2049 * have been cleared after the pipestat interrupt was received.
2050 * It doesn't set the bit in iir again, but it still produces
2051 * interrupts (for non-MSI).
2053 mtx_lock(&dev_priv->irq_lock);
2054 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2055 i915_handle_error(dev, false);
2057 for_each_pipe(pipe) {
2058 int reg = PIPESTAT(pipe);
2059 pipe_stats[pipe] = I915_READ(reg);
2062 * Clear the PIPE*STAT regs before the IIR
2064 if (pipe_stats[pipe] & 0x8000ffff) {
2065 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2066 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2068 I915_WRITE(reg, pipe_stats[pipe]);
2072 mtx_unlock(&dev_priv->irq_lock);
2077 /* Consume port. Then clear IIR or we'll miss events */
2078 if ((I915_HAS_HOTPLUG(dev)) &&
2079 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2080 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2082 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2084 if (hotplug_status & dev_priv->hotplug_supported_mask)
2085 taskqueue_enqueue(dev_priv->tq,
2086 &dev_priv->hotplug_task);
2088 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2089 I915_READ(PORT_HOTPLUG_STAT);
2092 I915_WRITE(IIR, iir);
2093 new_iir = I915_READ(IIR); /* Flush posted writes */
2095 if (iir & I915_USER_INTERRUPT)
2096 notify_ring(dev, &dev_priv->rings[RCS]);
2097 if (iir & I915_BSD_USER_INTERRUPT)
2098 notify_ring(dev, &dev_priv->rings[VCS]);
2100 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
2101 intel_prepare_page_flip(dev, 0);
2103 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
2104 intel_prepare_page_flip(dev, 1);
2106 for_each_pipe(pipe) {
2107 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
2108 drm_handle_vblank(dev, pipe)) {
2109 i915_pageflip_stall_check(dev, pipe);
2110 intel_finish_page_flip(dev, pipe);
2113 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2118 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2119 intel_opregion_asle_intr(dev);
2121 /* With MSI, interrupts are only generated when iir
2122 * transitions from zero to nonzero. If another bit got
2123 * set while we were handling the existing iir bits, then
2124 * we would never get another interrupt.
2126 * This is fine on non-MSI as well, as if we hit this path
2127 * we avoid exiting the interrupt handler only to generate
2130 * Note that for MSI this could cause a stray interrupt report
2131 * if an interrupt landed in the time between writing IIR and
2132 * the posting read. This should be rare enough to never
2133 * trigger the 99% of 100,000 interrupts test for disabling
2139 i915_update_dri1_breadcrumb(dev);
2142 static void i965_irq_uninstall(struct drm_device * dev)
2144 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2147 if (I915_HAS_HOTPLUG(dev)) {
2148 I915_WRITE(PORT_HOTPLUG_EN, 0);
2149 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2152 I915_WRITE(HWSTAM, 0xffffffff);
2154 I915_WRITE(PIPESTAT(pipe), 0);
2155 I915_WRITE(IMR, 0xffffffff);
2156 I915_WRITE(IER, 0x0);
2159 I915_WRITE(PIPESTAT(pipe),
2160 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
2161 I915_WRITE(IIR, I915_READ(IIR));
2164 void intel_irq_init(struct drm_device *dev)
2166 struct drm_i915_private *dev_priv = dev->dev_private;
2168 TASK_INIT(&dev_priv->hotplug_task, 0, i915_hotplug_work_func,
2170 TASK_INIT(&dev_priv->error_task, 0, i915_error_work_func,
2172 TASK_INIT(&dev_priv->rps_task, 0, gen6_pm_rps_work_func,
2175 dev->driver->get_vblank_counter = i915_get_vblank_counter;
2176 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
2177 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
2178 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
2187 if (IS_VALLEYVIEW(dev)) {
2188 dev->driver->irq_handler = valleyview_irq_handler;
2189 dev->driver->irq_preinstall = valleyview_irq_preinstall;
2190 dev->driver->irq_postinstall = valleyview_irq_postinstall;
2191 dev->driver->irq_uninstall = valleyview_irq_uninstall;
2192 dev->driver->enable_vblank = valleyview_enable_vblank;
2193 dev->driver->disable_vblank = valleyview_disable_vblank;
2194 } else if (IS_IVYBRIDGE(dev)) {
2195 /* Share pre & uninstall handlers with ILK/SNB */
2196 dev->driver->irq_handler = ivybridge_irq_handler;
2197 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2198 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
2199 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2200 dev->driver->enable_vblank = ivybridge_enable_vblank;
2201 dev->driver->disable_vblank = ivybridge_disable_vblank;
2202 } else if (IS_HASWELL(dev)) {
2203 /* Share interrupts handling with IVB */
2204 dev->driver->irq_handler = ivybridge_irq_handler;
2205 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2206 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
2207 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2208 dev->driver->enable_vblank = ivybridge_enable_vblank;
2209 dev->driver->disable_vblank = ivybridge_disable_vblank;
2210 } else if (HAS_PCH_SPLIT(dev)) {
2211 dev->driver->irq_handler = ironlake_irq_handler;
2212 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2213 dev->driver->irq_postinstall = ironlake_irq_postinstall;
2214 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2215 dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
2219 dev->driver->irq_preinstall = i8xx_irq_preinstall;
2220 dev->driver->irq_postinstall = i8xx_irq_postinstall;
2221 dev->driver->irq_handler = i8xx_irq_handler;
2222 dev->driver->irq_uninstall = i8xx_irq_uninstall;
2223 } else if (INTEL_INFO(dev)->gen == 3) {
2224 /* IIR "flip pending" means done if this bit is set */
2225 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
2227 dev->driver->irq_preinstall = i915_irq_preinstall;
2228 dev->driver->irq_postinstall = i915_irq_postinstall;
2229 dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
2233 dev->driver->irq_postinstall = i965_irq_postinstall;
2234 dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
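/*
 * i915_error_object_create - snapshot a GEM object for the error state
 *
 * Copies the object one page at a time into plain kernel memory, reading
 * through the GTT aperture when the object has a global GTT mapping and
 * falling back to an sf_buf mapping of the backing pages otherwise, so the
 * dump reflects what the GPU saw as closely as possible.
 */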
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
    struct drm_i915_gem_object *src)
{
	struct drm_i915_error_object *dst;
	struct sf_buf *sf;
	void *d, *s;
	int page, page_count;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return (NULL);

	page_count = src->base.size / PAGE_SIZE;
	dst = malloc(sizeof(*dst) + page_count * sizeof(u32 *), DRM_I915_GEM,
	    M_NOWAIT);
	if (dst == NULL)
		return (NULL);

	reloc_offset = src->gtt_offset;
	for (page = 0; page < page_count; page++) {
		d = malloc(PAGE_SIZE, DRM_I915_GEM, M_NOWAIT);
		if (d == NULL)
			goto unwind;

		if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
		    src->has_global_gtt_mapping) {
			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */
			s = pmap_mapdev_attr(src->base.dev->agp->base +
			    reloc_offset, PAGE_SIZE, PAT_WRITE_COMBINING);
			memcpy(d, s, PAGE_SIZE);
			pmap_unmapdev((vm_offset_t)s, PAGE_SIZE);
		} else {
			drm_clflush_pages(&src->pages[page], 1);
			sched_pin();
			sf = sf_buf_alloc(src->pages[page], SFB_CPUPRIVATE |
			    SFB_NOWAIT);
			if (sf != NULL) {
				s = (void *)(uintptr_t)sf_buf_kva(sf);
				memcpy(d, s, PAGE_SIZE);
				sf_buf_free(sf);
			} else {
				bzero(d, PAGE_SIZE);
				strcpy(d, "XXXKIB");
			}
			sched_unpin();
			drm_clflush_pages(&src->pages[page], 1);
		}

		dst->pages[page] = d;
		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = page_count;
	dst->gtt_offset = src->gtt_offset;

	return (dst);

unwind:
	while (page--)
		free(dst->pages[page], DRM_I915_GEM);
	free(dst, DRM_I915_GEM);
	return (NULL);
}
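/* Release the per-page copies made by i915_error_object_create(). */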
static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		free(obj->pages[page], DRM_I915_GEM);
	free(obj, DRM_I915_GEM);
}

static void
i915_error_state_free(struct drm_i915_error_state *error)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		free(error->ring[i].requests, DRM_I915_GEM);
	}

	free(error->active_bo, DRM_I915_GEM);
	free(error->overlay, DRM_I915_GEM);
	free(error, DRM_I915_GEM);
}
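/*
 * capture_bo - record the metadata of a single buffer object
 *
 * Fills one drm_i915_error_buffer entry with the object's size, name,
 * seqno, GTT offset, domains, fence, pin state, tiling and cache level.
 */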
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->seqno = obj->last_rendering_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}
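/*
 * capture_active_bo/capture_pinned_bo - record up to 'count' objects from
 * the given list; the pinned variant skips objects that are not pinned.
 */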
static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return (i);
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, gtt_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return (i);
}
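/*
 * i915_gem_record_fences - save the fence registers
 *
 * The register layout differs per generation: 64-bit registers on gen4+
 * (FENCE_REG_965_0 / FENCE_REG_SANDYBRIDGE_0) and 32-bit registers on
 * gen2/gen3, with 945/G33 providing eight extra slots.
 */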
static void
i915_gem_record_fences(struct drm_device *dev,
    struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 +
			    (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 +
				    (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;
	}
}
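/*
 * i915_error_first_batchbuffer - find the batch that likely hung the ring
 *
 * Walks the active list for a command buffer on this ring whose seqno has
 * not yet been retired and copies it, since the original pages may be
 * reused by userspace before the error state is read.
 */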
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
    struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return (NULL);

	seqno = ring->get_seqno(ring);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return (i915_error_object_create(dev_priv, obj));
	}

	return (NULL);
}
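/*
 * i915_record_ring_state - capture per-ring register state
 *
 * Saves fault and semaphore registers on gen6+, the instruction and DMA
 * pointers appropriate to the generation, and the software and hardware
 * head/tail pointers for the ring.
 */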
static void
i915_record_ring_state(struct drm_device *dev,
    struct drm_i915_error_state *error,
    struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS) {
			error->instdone1 = I915_READ(INSTDONE1);
			error->bbaddr = I915_READ64(BB_ADDR);
		}
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	sleepq_lock(ring);
	error->waiting[ring->id] = sleepq_sleepcnt(ring, 0) != 0;
	sleepq_release(ring);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}
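/*
 * i915_gem_record_rings - capture state for every ring
 *
 * For each ring this records the register state, copies the suspect batch
 * buffer and the ring buffer itself, and saves the list of outstanding
 * requests (seqno, submission time and tail).
 */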
static void
i915_gem_record_rings(struct drm_device *dev,
    struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests = malloc(count *
		    sizeof(struct drm_i915_error_request), DRM_I915_GEM,
		    M_NOWAIT);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}
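/*
 * i915_capture_error_state - snapshot the chip state after a GPU hang
 *
 * Only the first error is kept: if dev_priv->first_error is already set
 * the capture is skipped.  Allocations use M_NOWAIT because this can be
 * reached from the interrupt/error handling path.
 */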
static void
i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	int i, pipe;

	mtx_lock(&dev_priv->error_lock);
	error = dev_priv->first_error;
	mtx_unlock(&dev_priv->error_lock);
	if (error)
		return;
	/* Account for pipe specific data like PIPE*STAT */
	error = malloc(sizeof(*error), DRM_I915_GEM, M_NOWAIT | M_ZERO);
	if (error == NULL) {
		DRM_DEBUG("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in "
	    "sysctl hw.dri.%d.info.i915_error_state\n", dev->sysctl_node_idx);
2573 refcount_init(&error->ref, 1);
2574 error->eir = I915_READ(EIR);
2575 error->pgtbl_er = I915_READ(PGTBL_ER);
2577 if (HAS_PCH_SPLIT(dev))
2578 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
2579 else if (IS_VALLEYVIEW(dev))
2580 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	for_each_pipe(pipe)
		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}
2594 i915_gem_record_fences(dev, error);
2595 i915_gem_record_rings(dev, error);
2597 /* Record buffers on the active and pinned lists. */
2598 error->active_bo = NULL;
2599 error->pinned_bo = NULL;
	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, mm_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
	if (i) {
		error->active_bo = malloc(sizeof(*error->active_bo) * i,
		    DRM_I915_GEM, M_NOWAIT);
		if (error->active_bo)
			error->pinned_bo = error->active_bo +
			    error->active_bo_count;
	}
2620 if (error->active_bo)
2621 error->active_bo_count =
2622 capture_active_bo(error->active_bo,
2623 error->active_bo_count,
2624 &dev_priv->mm.active_list);
2626 if (error->pinned_bo)
2627 error->pinned_bo_count =
2628 capture_pinned_bo(error->pinned_bo,
2629 error->pinned_bo_count,
2630 &dev_priv->mm.gtt_list);
2632 microtime(&error->time);
2634 error->overlay = intel_overlay_capture_error_state(dev);
2635 error->display = intel_display_capture_error_state(dev);
	mtx_lock(&dev_priv->error_lock);
	if (dev_priv->first_error == NULL) {
		dev_priv->first_error = error;
		error = NULL;
	}
	mtx_unlock(&dev_priv->error_lock);

	if (error)
		i915_error_state_free(error);
}
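/*
 * i915_destroy_error_state - drop the queued error state, if any
 *
 * Detaches dev_priv->first_error under the error lock and frees it once
 * the last reference is released.
 */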
void
i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;

	mtx_lock(&dev_priv->error_lock);
	error = dev_priv->first_error;
	dev_priv->first_error = NULL;
	mtx_unlock(&dev_priv->error_lock);

	if (error != NULL && refcount_release(&error->ref))
		i915_error_state_free(error);
}