1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 #include <dev/drm2/drmP.h>
35 #include <dev/drm2/i915/i915_drm.h>
36 #include <dev/drm2/i915/i915_drv.h>
37 #include <dev/drm2/i915/intel_drv.h>
39 #include <sys/sched.h>
40 #include <sys/sf_buf.h>
41 #include <sys/sleepqueue.h>
43 /* For display hotplug interrupt */
45 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
47 if ((dev_priv->irq_mask & mask) != 0) {
48 dev_priv->irq_mask &= ~mask;
49 I915_WRITE(DEIMR, dev_priv->irq_mask);
55 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
57 if ((dev_priv->irq_mask & mask) != mask) {
58 dev_priv->irq_mask |= mask;
59 I915_WRITE(DEIMR, dev_priv->irq_mask);
65 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
67 if ((dev_priv->pipestat[pipe] & mask) != mask) {
68 u32 reg = PIPESTAT(pipe);
70 dev_priv->pipestat[pipe] |= mask;
71 /* Enable the interrupt, clear any pending status */
72 I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
78 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
80 if ((dev_priv->pipestat[pipe] & mask) != 0) {
81 u32 reg = PIPESTAT(pipe);
83 dev_priv->pipestat[pipe] &= ~mask;
84 I915_WRITE(reg, dev_priv->pipestat[pipe]);
90 * intel_enable_asle - enable ASLE interrupt for OpRegion
92 void intel_enable_asle(struct drm_device *dev)
94 drm_i915_private_t *dev_priv = dev->dev_private;
96 /* FIXME: opregion/asle for VLV */
97 if (IS_VALLEYVIEW(dev))
100 mtx_lock(&dev_priv->irq_lock);
102 if (HAS_PCH_SPLIT(dev))
103 ironlake_enable_display_irq(dev_priv, DE_GSE);
105 i915_enable_pipestat(dev_priv, 1,
106 PIPE_LEGACY_BLC_EVENT_ENABLE);
107 if (INTEL_INFO(dev)->gen >= 4)
108 i915_enable_pipestat(dev_priv, 0,
109 PIPE_LEGACY_BLC_EVENT_ENABLE);
112 mtx_unlock(&dev_priv->irq_lock);
116 * i915_pipe_enabled - check if a pipe is enabled
118 * @pipe: pipe to check
120 * Reading certain registers when the pipe is disabled can hang the chip.
121 * Use this routine to make sure the PLL is running and the pipe is active
122 * before reading such registers if unsure.
125 i915_pipe_enabled(struct drm_device *dev, int pipe)
127 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
128 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
131 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
134 /* Called from drm generic code, passed a 'crtc', which
135 * we use as a pipe index
137 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
139 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
140 unsigned long high_frame;
141 unsigned long low_frame;
142 u32 high1, high2, low;
144 if (!i915_pipe_enabled(dev, pipe)) {
145 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
146 "pipe %c\n", pipe_name(pipe));
150 high_frame = PIPEFRAME(pipe);
151 low_frame = PIPEFRAMEPIXEL(pipe);
154 * High & low register fields aren't synchronized, so make sure
155 * we get a low value that's stable across two reads of the high register.
159 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
160 low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
161 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
162 } while (high1 != high2);
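/* high1 holds the upper bits of the frame counter and low the bottom
 * 8 bits; combine them into a single counter value. */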
164 high1 >>= PIPE_FRAME_HIGH_SHIFT;
165 low >>= PIPE_FRAME_LOW_SHIFT;
166 return (high1 << 8) | low;
169 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
171 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
172 int reg = PIPE_FRMCOUNT_GM45(pipe);
174 if (!i915_pipe_enabled(dev, pipe)) {
175 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
176 "pipe %c\n", pipe_name(pipe));
180 return I915_READ(reg);
183 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
184 int *vpos, int *hpos)
186 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
187 u32 vbl = 0, position = 0;
188 int vbl_start, vbl_end, htotal, vtotal;
191 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
194 if (!i915_pipe_enabled(dev, pipe)) {
195 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
196 "pipe %c\n", pipe_name(pipe));
201 vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
203 if (INTEL_INFO(dev)->gen >= 4) {
204 /* No obvious pixelcount register. Only query vertical
205 * scanout position from Display scan line register.
207 position = I915_READ(PIPEDSL(pipe));
209 /* Decode into vertical scanout position. Don't have
210 * horizontal scanout position.
212 *vpos = position & 0x1fff;
215 /* Have access to pixelcount since start of frame.
216 * We can split this into vertical and horizontal scanout position.
219 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
221 htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
222 *vpos = position / htotal;
223 *hpos = position - (*vpos * htotal);
226 /* Query vblank area. */
227 vbl = I915_READ(VBLANK(cpu_transcoder));
229 /* Test position against vblank region. */
230 vbl_start = vbl & 0x1fff;
231 vbl_end = (vbl >> 16) & 0x1fff;
233 if ((*vpos < vbl_start) || (*vpos > vbl_end))
236 /* Inside "upper part" of vblank area? Apply corrective offset: */
237 if (in_vbl && (*vpos >= vbl_start))
238 *vpos = *vpos - vtotal;
240 /* Readouts valid? */
242 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
246 ret |= DRM_SCANOUTPOS_INVBL;
251 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
253 struct timeval *vblank_time,
256 struct drm_i915_private *dev_priv = dev->dev_private;
257 struct drm_crtc *crtc;
259 if (pipe < 0 || pipe >= dev_priv->num_pipe) {
260 DRM_ERROR("Invalid crtc %d\n", pipe);
264 /* Get drm_crtc to timestamp: */
265 crtc = intel_get_crtc_for_pipe(dev, pipe);
267 DRM_ERROR("Invalid crtc %d\n", pipe);
271 if (!crtc->enabled) {
272 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
276 /* Helper routine in DRM core does all the work: */
277 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
283 * Handle hotplug events outside the interrupt handler proper.
285 static void i915_hotplug_work_func(void *context, int pending)
287 drm_i915_private_t *dev_priv = context;
288 struct drm_device *dev = dev_priv->dev;
289 struct drm_mode_config *mode_config = &dev->mode_config;
290 struct intel_encoder *encoder;
292 sx_xlock(&mode_config->mutex);
293 DRM_DEBUG_KMS("running encoder hotplug functions\n");
295 list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
296 if (encoder->hot_plug)
297 encoder->hot_plug(encoder);
299 sx_xunlock(&mode_config->mutex);
301 /* Just fire off a uevent and let userspace tell us what to do */
302 drm_helper_hpd_irq_event(dev);
305 /* defined in intel_pm.c */
306 extern struct mtx mchdev_lock;
308 static void ironlake_handle_rps_change(struct drm_device *dev)
310 drm_i915_private_t *dev_priv = dev->dev_private;
311 u32 busy_up, busy_down, max_avg, min_avg;
314 mtx_lock(&mchdev_lock);
316 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
318 new_delay = dev_priv->ips.cur_delay;
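/* Clear the pending evaluation-interval event, then sample the busy/average
 * counters the hardware computed for it. */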
320 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
321 busy_up = I915_READ(RCPREVBSYTUPAVG);
322 busy_down = I915_READ(RCPREVBSYTDNAVG);
323 max_avg = I915_READ(RCBMAXAVG);
324 min_avg = I915_READ(RCBMINAVG);
326 /* Handle RCS change request from hw */
327 if (busy_up > max_avg) {
328 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
329 new_delay = dev_priv->ips.cur_delay - 1;
330 if (new_delay < dev_priv->ips.max_delay)
331 new_delay = dev_priv->ips.max_delay;
332 } else if (busy_down < min_avg) {
333 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
334 new_delay = dev_priv->ips.cur_delay + 1;
335 if (new_delay > dev_priv->ips.min_delay)
336 new_delay = dev_priv->ips.min_delay;
339 if (ironlake_set_drps(dev, new_delay))
340 dev_priv->ips.cur_delay = new_delay;
342 mtx_unlock(&mchdev_lock);
347 static void notify_ring(struct drm_device *dev,
348 struct intel_ring_buffer *ring)
350 struct drm_i915_private *dev_priv = dev->dev_private;
352 if (ring->obj == NULL)
355 CTR2(KTR_DRM, "request_complete %s %d", ring->name, ring->get_seqno(ring, false));
357 wake_up_all(&ring->irq_queue);
358 if (i915_enable_hangcheck) {
359 dev_priv->hangcheck_count = 0;
360 callout_schedule(&dev_priv->hangcheck_timer,
361 DRM_I915_HANGCHECK_PERIOD);
365 static void gen6_pm_rps_work(void *context, int pending)
367 drm_i915_private_t *dev_priv = context;
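/* Snapshot and clear the deferred PM interrupt bits under the rps lock;
 * the rest of the work runs unlocked. */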
371 mtx_lock(&dev_priv->rps.lock);
372 pm_iir = dev_priv->rps.pm_iir;
373 dev_priv->rps.pm_iir = 0;
374 pm_imr = I915_READ(GEN6_PMIMR);
375 I915_WRITE(GEN6_PMIMR, 0);
376 mtx_unlock(&dev_priv->rps.lock);
378 if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
381 sx_xlock(&dev_priv->rps.hw_lock);
383 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
384 new_delay = dev_priv->rps.cur_delay + 1;
386 new_delay = dev_priv->rps.cur_delay - 1;
388 /* sysfs frequency interfaces may have snuck in while servicing the interrupt. */
391 if (!(new_delay > dev_priv->rps.max_delay ||
392 new_delay < dev_priv->rps.min_delay)) {
393 gen6_set_rps(dev_priv->dev, new_delay);
396 sx_xunlock(&dev_priv->rps.hw_lock);
401 * ivybridge_parity_work - Workqueue called when a parity error interrupt occurred.
403 * @work: workqueue struct
405 * Doesn't actually do anything except notify userspace. As a consequence of
406 * this event, userspace should try to remap the bad rows since statistically
407 * it is likely that the same row will go bad again.
409 static void ivybridge_parity_work(void *context, int pending)
411 drm_i915_private_t *dev_priv = context;
412 u32 error_status, row, bank, subbank;
414 char *parity_event[5];
418 /* We must turn off DOP level clock gating to access the L3 registers.
419 * In order to prevent a get/put style interface, acquire struct mutex
420 * any time we access those registers.
422 DRM_LOCK(dev_priv->dev);
424 misccpctl = I915_READ(GEN7_MISCCPCTL);
425 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
426 POSTING_READ(GEN7_MISCCPCTL);
428 error_status = I915_READ(GEN7_L3CDERRST1);
429 row = GEN7_PARITY_ERROR_ROW(error_status);
430 bank = GEN7_PARITY_ERROR_BANK(error_status);
431 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
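/* Write the valid bit back to clear the logged error while keeping
 * L3 error reporting enabled. */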
433 I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
434 GEN7_L3CDERRST1_ENABLE);
435 POSTING_READ(GEN7_L3CDERRST1);
437 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
439 mtx_lock(&dev_priv->irq_lock);
440 dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
441 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
442 mtx_unlock(&dev_priv->irq_lock);
444 DRM_UNLOCK(dev_priv->dev);
447 parity_event[0] = "L3_PARITY_ERROR=1";
448 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
449 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
450 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
451 parity_event[4] = NULL;
453 kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
454 KOBJ_CHANGE, parity_event);
457 DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
461 kfree(parity_event[3]);
462 kfree(parity_event[2]);
463 kfree(parity_event[1]);
467 static void ivybridge_handle_parity_error(struct drm_device *dev)
469 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
471 if (!HAS_L3_GPU_CACHE(dev))
474 mtx_lock(&dev_priv->irq_lock);
475 dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
476 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
477 mtx_unlock(&dev_priv->irq_lock);
479 taskqueue_enqueue(dev_priv->wq, &dev_priv->l3_parity.error_work);
482 static void snb_gt_irq_handler(struct drm_device *dev,
483 struct drm_i915_private *dev_priv,
487 if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
488 GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
489 notify_ring(dev, &dev_priv->ring[RCS]);
490 if (gt_iir & GEN6_BSD_USER_INTERRUPT)
491 notify_ring(dev, &dev_priv->ring[VCS]);
492 if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
493 notify_ring(dev, &dev_priv->ring[BCS]);
495 if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
496 GT_GEN6_BSD_CS_ERROR_INTERRUPT |
497 GT_RENDER_CS_ERROR_INTERRUPT)) {
498 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
499 i915_handle_error(dev, false);
502 if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
503 ivybridge_handle_parity_error(dev);
506 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
511 * IIR bits should never already be set because IMR should
512 * prevent an interrupt from being shown in IIR. The warning
513 * displays a case where we've unsafely cleared
514 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
515 * type is not a problem, it indicates a problem in the logic.
517 * The mask bit in IMR is cleared by dev_priv->rps.work.
520 mtx_lock(&dev_priv->rps.lock);
521 dev_priv->rps.pm_iir |= pm_iir;
522 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
523 POSTING_READ(GEN6_PMIMR);
524 mtx_unlock(&dev_priv->rps.lock);
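/* The queued rps work (gen6_pm_rps_work) applies the frequency change
 * from process context. */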
526 taskqueue_enqueue(dev_priv->wq, &dev_priv->rps.work);
529 static void valleyview_irq_handler(DRM_IRQ_ARGS)
531 struct drm_device *dev = (struct drm_device *) arg;
532 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
533 u32 iir, gt_iir, pm_iir;
535 u32 pipe_stats[I915_MAX_PIPES];
538 atomic_inc(&dev_priv->irq_received);
541 iir = I915_READ(VLV_IIR);
542 gt_iir = I915_READ(GTIIR);
543 pm_iir = I915_READ(GEN6_PMIIR);
545 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
548 snb_gt_irq_handler(dev, dev_priv, gt_iir);
550 mtx_lock(&dev_priv->irq_lock);
551 for_each_pipe(pipe) {
552 int reg = PIPESTAT(pipe);
553 pipe_stats[pipe] = I915_READ(reg);
556 * Clear the PIPE*STAT regs before the IIR
558 if (pipe_stats[pipe] & 0x8000ffff) {
559 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
560 DRM_DEBUG_DRIVER("pipe %c underrun\n",
562 I915_WRITE(reg, pipe_stats[pipe]);
565 mtx_unlock(&dev_priv->irq_lock);
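/* With the pipe status bits latched and acked, handle vblank and
 * page-flip events outside the lock. */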
567 for_each_pipe(pipe) {
568 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
569 drm_handle_vblank(dev, pipe);
571 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
572 intel_prepare_page_flip(dev, pipe);
573 intel_finish_page_flip(dev, pipe);
577 /* Consume port. Then clear IIR or we'll miss events */
578 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
579 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
581 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
583 if (hotplug_status & dev_priv->hotplug_supported_mask)
584 taskqueue_enqueue(dev_priv->wq,
585 &dev_priv->hotplug_work);
587 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
588 I915_READ(PORT_HOTPLUG_STAT);
591 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
594 if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
595 gen6_queue_rps_work(dev_priv, pm_iir);
597 I915_WRITE(GTIIR, gt_iir);
598 I915_WRITE(GEN6_PMIIR, pm_iir);
599 I915_WRITE(VLV_IIR, iir);
606 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
608 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
611 if (pch_iir & SDE_HOTPLUG_MASK)
612 taskqueue_enqueue(dev_priv->wq, &dev_priv->hotplug_work);
614 if (pch_iir & SDE_AUDIO_POWER_MASK)
615 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
616 (pch_iir & SDE_AUDIO_POWER_MASK) >>
617 SDE_AUDIO_POWER_SHIFT);
619 if (pch_iir & SDE_GMBUS)
620 DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
622 if (pch_iir & SDE_AUDIO_HDCP_MASK)
623 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
625 if (pch_iir & SDE_AUDIO_TRANS_MASK)
626 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
628 if (pch_iir & SDE_POISON)
629 DRM_ERROR("PCH poison interrupt\n");
631 if (pch_iir & SDE_FDI_MASK)
633 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
635 I915_READ(FDI_RX_IIR(pipe)));
637 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
638 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
640 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
641 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
643 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
644 DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
645 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
646 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
649 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
651 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
654 if (pch_iir & SDE_HOTPLUG_MASK_CPT)
655 taskqueue_enqueue(dev_priv->wq, &dev_priv->hotplug_work);
657 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
658 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
659 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
660 SDE_AUDIO_POWER_SHIFT_CPT);
662 if (pch_iir & SDE_AUX_MASK_CPT)
663 DRM_DEBUG_DRIVER("AUX channel interrupt\n");
665 if (pch_iir & SDE_GMBUS_CPT)
666 DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
668 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
669 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
671 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
672 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
674 if (pch_iir & SDE_FDI_MASK_CPT)
676 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
678 I915_READ(FDI_RX_IIR(pipe)));
681 static void ivybridge_irq_handler(DRM_IRQ_ARGS)
683 struct drm_device *dev = (struct drm_device *) arg;
684 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
685 u32 de_iir, gt_iir, de_ier, pm_iir;
688 atomic_inc(&dev_priv->irq_received);
690 /* disable master interrupt before clearing iir */
691 de_ier = I915_READ(DEIER);
692 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
694 gt_iir = I915_READ(GTIIR);
696 snb_gt_irq_handler(dev, dev_priv, gt_iir);
697 I915_WRITE(GTIIR, gt_iir);
700 de_iir = I915_READ(DEIIR);
702 if (de_iir & DE_GSE_IVB)
703 intel_opregion_gse_intr(dev);
705 for (i = 0; i < 3; i++) {
706 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
707 drm_handle_vblank(dev, i);
708 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
709 intel_prepare_page_flip(dev, i);
710 intel_finish_page_flip_plane(dev, i);
714 /* check event from PCH */
715 if (de_iir & DE_PCH_EVENT_IVB) {
716 u32 pch_iir = I915_READ(SDEIIR);
718 cpt_irq_handler(dev, pch_iir);
720 /* clear PCH hotplug event before clearing the CPU irq */
721 I915_WRITE(SDEIIR, pch_iir);
724 I915_WRITE(DEIIR, de_iir);
727 pm_iir = I915_READ(GEN6_PMIIR);
729 if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
730 gen6_queue_rps_work(dev_priv, pm_iir);
731 I915_WRITE(GEN6_PMIIR, pm_iir);
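/* All interrupt sources have been acked; re-enable the master interrupt. */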
734 I915_WRITE(DEIER, de_ier);
737 CTR3(KTR_DRM, "ivybridge_irq de %x gt %x pm %x", de_iir,
741 static void ilk_gt_irq_handler(struct drm_device *dev,
742 struct drm_i915_private *dev_priv,
745 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
746 notify_ring(dev, &dev_priv->ring[RCS]);
747 if (gt_iir & GT_BSD_USER_INTERRUPT)
748 notify_ring(dev, &dev_priv->ring[VCS]);
751 static void ironlake_irq_handler(DRM_IRQ_ARGS)
753 struct drm_device *dev = (struct drm_device *) arg;
754 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
755 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
757 atomic_inc(&dev_priv->irq_received);
759 /* disable master interrupt before clearing iir */
760 de_ier = I915_READ(DEIER);
761 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
764 de_iir = I915_READ(DEIIR);
765 gt_iir = I915_READ(GTIIR);
766 pch_iir = I915_READ(SDEIIR);
767 pm_iir = I915_READ(GEN6_PMIIR);
769 CTR4(KTR_DRM, "ironlake_irq de %x gt %x pch %x pm %x", de_iir,
770 gt_iir, pch_iir, pm_iir);
772 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
773 (!IS_GEN6(dev) || pm_iir == 0))
777 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
779 snb_gt_irq_handler(dev, dev_priv, gt_iir);
782 intel_opregion_gse_intr(dev);
784 if (de_iir & DE_PIPEA_VBLANK)
785 drm_handle_vblank(dev, 0);
787 if (de_iir & DE_PIPEB_VBLANK)
788 drm_handle_vblank(dev, 1);
790 if (de_iir & DE_PLANEA_FLIP_DONE) {
791 intel_prepare_page_flip(dev, 0);
792 intel_finish_page_flip_plane(dev, 0);
795 if (de_iir & DE_PLANEB_FLIP_DONE) {
796 intel_prepare_page_flip(dev, 1);
797 intel_finish_page_flip_plane(dev, 1);
800 /* check event from PCH */
801 if (de_iir & DE_PCH_EVENT) {
802 if (HAS_PCH_CPT(dev))
803 cpt_irq_handler(dev, pch_iir);
805 ibx_irq_handler(dev, pch_iir);
808 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
809 ironlake_handle_rps_change(dev);
811 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
812 gen6_queue_rps_work(dev_priv, pm_iir);
814 /* should clear PCH hotplug event before clearing the CPU irq */
815 I915_WRITE(SDEIIR, pch_iir);
816 I915_WRITE(GTIIR, gt_iir);
817 I915_WRITE(DEIIR, de_iir);
818 I915_WRITE(GEN6_PMIIR, pm_iir);
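/* Everything is acked, so restore the saved master interrupt enable. */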
821 I915_WRITE(DEIER, de_ier);
826 * i915_error_work_func - do process context error handling work
829 * Fire an error uevent so userspace can see that a hang or error occurred.
832 static void i915_error_work_func(void *context, int pending)
834 drm_i915_private_t *dev_priv = context;
835 struct drm_device *dev = dev_priv->dev;
837 char *error_event[] = { "ERROR=1", NULL };
838 char *reset_event[] = { "RESET=1", NULL };
839 char *reset_done_event[] = { "ERROR=0", NULL };
841 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
844 if (atomic_read(&dev_priv->mm.wedged)) {
845 DRM_DEBUG_DRIVER("resetting chip\n");
847 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
849 if (!i915_reset(dev)) {
850 atomic_set(&dev_priv->mm.wedged, 0);
852 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
855 complete_all(&dev_priv->error_completion);
859 /* NB: please notice the memset */
860 static void i915_get_extra_instdone(struct drm_device *dev,
863 struct drm_i915_private *dev_priv = dev->dev_private;
864 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
866 switch(INTEL_INFO(dev)->gen) {
869 instdone[0] = I915_READ(INSTDONE);
874 instdone[0] = I915_READ(INSTDONE_I965);
875 instdone[1] = I915_READ(INSTDONE1);
878 WARN_ONCE(1, "Unsupported platform\n");
880 instdone[0] = I915_READ(GEN7_INSTDONE_1);
881 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
882 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
883 instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
888 //#ifdef CONFIG_DEBUG_FS
889 static struct drm_i915_error_object *
890 i915_error_object_create(struct drm_i915_private *dev_priv,
891 struct drm_i915_gem_object *src)
893 struct drm_i915_error_object *dst;
897 if (src == NULL || src->pages == NULL)
900 count = src->base.size / PAGE_SIZE;
902 dst = malloc(sizeof(*dst) + count * sizeof(u32 *), DRM_I915_GEM, M_NOWAIT);
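/* The single allocation holds the error object header followed by an
 * array of 'count' page pointers. */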
906 reloc_offset = src->gtt_offset;
907 for (i = 0; i < count; i++) {
910 d = malloc(PAGE_SIZE, DRM_I915_GEM, M_NOWAIT);
914 if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
915 src->has_global_gtt_mapping) {
918 /* Simply ignore tiling or any overlapping fence.
919 * It's part of the error state, and this hopefully
920 * captures what the GPU read.
923 s = pmap_mapdev_attr(dev_priv->mm.gtt_base_addr +
925 PAGE_SIZE, PAT_WRITE_COMBINING);
926 memcpy_fromio(d, s, PAGE_SIZE);
927 pmap_unmapdev((vm_offset_t)s, PAGE_SIZE);
932 drm_clflush_pages(&src->pages[i], 1);
935 sf = sf_buf_alloc(src->pages[i], SFB_CPUPRIVATE |
938 s = (void *)(uintptr_t)sf_buf_kva(sf);
939 memcpy(d, s, PAGE_SIZE);
947 drm_clflush_pages(&src->pages[i], 1);
952 reloc_offset += PAGE_SIZE;
954 dst->page_count = count;
955 dst->gtt_offset = src->gtt_offset;
961 free(dst->pages[i], DRM_I915_GEM);
962 free(dst, DRM_I915_GEM);
967 i915_error_object_free(struct drm_i915_error_object *obj)
974 for (page = 0; page < obj->page_count; page++)
975 free(obj->pages[page], DRM_I915_GEM);
977 free(obj, DRM_I915_GEM);
981 i915_error_state_free(struct drm_i915_error_state *error)
985 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
986 i915_error_object_free(error->ring[i].batchbuffer);
987 i915_error_object_free(error->ring[i].ringbuffer);
988 free(error->ring[i].requests, DRM_I915_GEM);
991 free(error->active_bo, DRM_I915_GEM);
992 free(error->overlay, DRM_I915_GEM);
993 free(error, DRM_I915_GEM);
995 static void capture_bo(struct drm_i915_error_buffer *err,
996 struct drm_i915_gem_object *obj)
998 err->size = obj->base.size;
999 err->name = obj->base.name;
1000 err->rseqno = obj->last_read_seqno;
1001 err->wseqno = obj->last_write_seqno;
1002 err->gtt_offset = obj->gtt_offset;
1003 err->read_domains = obj->base.read_domains;
1004 err->write_domain = obj->base.write_domain;
1005 err->fence_reg = obj->fence_reg;
1007 if (obj->pin_count > 0)
1009 if (obj->user_pin_count > 0)
1011 err->tiling = obj->tiling_mode;
1012 err->dirty = obj->dirty;
1013 err->purgeable = obj->madv != I915_MADV_WILLNEED;
1014 err->ring = obj->ring ? obj->ring->id : -1;
1015 err->cache_level = obj->cache_level;
1018 static u32 capture_active_bo(struct drm_i915_error_buffer *err,
1019 int count, struct list_head *head)
1021 struct drm_i915_gem_object *obj;
1024 list_for_each_entry(obj, head, mm_list) {
1025 capture_bo(err++, obj);
1033 static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
1034 int count, struct list_head *head)
1036 struct drm_i915_gem_object *obj;
1039 list_for_each_entry(obj, head, gtt_list) {
1040 if (obj->pin_count == 0)
1043 capture_bo(err++, obj);
1051 static void i915_gem_record_fences(struct drm_device *dev,
1052 struct drm_i915_error_state *error)
1054 struct drm_i915_private *dev_priv = dev->dev_private;
1058 switch (INTEL_INFO(dev)->gen) {
1061 for (i = 0; i < 16; i++)
1062 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
1066 for (i = 0; i < 16; i++)
1067 error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
1070 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
1071 for (i = 0; i < 8; i++)
1072 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
1074 for (i = 0; i < 8; i++)
1075 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
1081 static struct drm_i915_error_object *
1082 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1083 struct intel_ring_buffer *ring)
1085 struct drm_i915_gem_object *obj;
1088 if (!ring->get_seqno)
1091 if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
1092 u32 acthd = I915_READ(ACTHD);
1094 if (WARN_ON(ring->id != RCS))
1097 obj = ring->private;
1098 if (acthd >= obj->gtt_offset &&
1099 acthd < obj->gtt_offset + obj->base.size)
1100 return i915_error_object_create(dev_priv, obj);
1103 seqno = ring->get_seqno(ring, false);
1104 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1105 if (obj->ring != ring)
1108 if (i915_seqno_passed(seqno, obj->last_read_seqno))
1111 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
1114 /* We need to copy these to an anonymous buffer as the simplest
1115 * method to avoid being overwritten by userspace.
1117 return i915_error_object_create(dev_priv, obj);
1123 static void i915_record_ring_state(struct drm_device *dev,
1124 struct drm_i915_error_state *error,
1125 struct intel_ring_buffer *ring)
1127 struct drm_i915_private *dev_priv = dev->dev_private;
1129 if (INTEL_INFO(dev)->gen >= 6) {
1130 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
1131 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
1132 error->semaphore_mboxes[ring->id][0]
1133 = I915_READ(RING_SYNC_0(ring->mmio_base));
1134 error->semaphore_mboxes[ring->id][1]
1135 = I915_READ(RING_SYNC_1(ring->mmio_base));
1136 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
1137 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
1140 if (INTEL_INFO(dev)->gen >= 4) {
1141 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
1142 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
1143 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
1144 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
1145 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
1146 if (ring->id == RCS)
1147 error->bbaddr = I915_READ64(BB_ADDR);
1149 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
1150 error->ipeir[ring->id] = I915_READ(IPEIR);
1151 error->ipehr[ring->id] = I915_READ(IPEHR);
1152 error->instdone[ring->id] = I915_READ(INSTDONE);
1155 sleepq_lock(&ring->irq_queue);
1156 error->waiting[ring->id] = sleepq_sleepcnt(&ring->irq_queue, 0) != 0;
1157 sleepq_release(&ring->irq_queue);
1158 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
1159 error->seqno[ring->id] = ring->get_seqno(ring, false);
1160 error->acthd[ring->id] = intel_ring_get_active_head(ring);
1161 error->head[ring->id] = I915_READ_HEAD(ring);
1162 error->tail[ring->id] = I915_READ_TAIL(ring);
1163 error->ctl[ring->id] = I915_READ_CTL(ring);
1165 error->cpu_ring_head[ring->id] = ring->head;
1166 error->cpu_ring_tail[ring->id] = ring->tail;
1169 static void i915_gem_record_rings(struct drm_device *dev,
1170 struct drm_i915_error_state *error)
1172 struct drm_i915_private *dev_priv = dev->dev_private;
1173 struct intel_ring_buffer *ring;
1174 struct drm_i915_gem_request *request;
1177 for_each_ring(ring, dev_priv, i) {
1178 i915_record_ring_state(dev, error, ring);
1180 error->ring[i].batchbuffer =
1181 i915_error_first_batchbuffer(dev_priv, ring);
1183 error->ring[i].ringbuffer =
1184 i915_error_object_create(dev_priv, ring->obj);
1187 list_for_each_entry(request, &ring->request_list, list)
1190 error->ring[i].num_requests = count;
1191 error->ring[i].requests =
1192 malloc(count*sizeof(struct drm_i915_error_request),
1193 DRM_I915_GEM, M_WAITOK);
1194 if (error->ring[i].requests == NULL) {
1195 error->ring[i].num_requests = 0;
1200 list_for_each_entry(request, &ring->request_list, list) {
1201 struct drm_i915_error_request *erq;
1203 erq = &error->ring[i].requests[count++];
1204 erq->seqno = request->seqno;
1205 erq->jiffies = request->emitted_jiffies;
1206 erq->tail = request->tail;
1212 * i915_capture_error_state - capture an error record for later analysis
1215 * Should be called when an error is detected (either a hang or an error
1216 * interrupt) to capture error state from the time of the error. Fills
1217 * out a structure which becomes available in debugfs for user level tools to pick up.
1220 static void i915_capture_error_state(struct drm_device *dev)
1222 struct drm_i915_private *dev_priv = dev->dev_private;
1223 struct drm_i915_gem_object *obj;
1224 struct drm_i915_error_state *error;
1227 mtx_lock(&dev_priv->error_lock);
1228 error = dev_priv->first_error;
1229 mtx_unlock(&dev_priv->error_lock);
1233 /* Account for pipe specific data like PIPE*STAT */
1234 error = malloc(sizeof(*error), DRM_I915_GEM, M_NOWAIT | M_ZERO);
1236 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1240 DRM_INFO("capturing error event; look for more information in sysctl hw.dri.%d.info.i915_error_state\n",
1241 dev->sysctl_node_idx);
1243 refcount_init(&error->ref, 1);
1244 error->eir = I915_READ(EIR);
1245 error->pgtbl_er = I915_READ(PGTBL_ER);
1246 error->ccid = I915_READ(CCID);
1248 if (HAS_PCH_SPLIT(dev))
1249 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
1250 else if (IS_VALLEYVIEW(dev))
1251 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1252 else if (IS_GEN2(dev))
1253 error->ier = I915_READ16(IER);
1255 error->ier = I915_READ(IER);
1257 if (INTEL_INFO(dev)->gen >= 6)
1258 error->derrmr = I915_READ(DERRMR);
1260 if (IS_VALLEYVIEW(dev))
1261 error->forcewake = I915_READ(FORCEWAKE_VLV);
1262 else if (INTEL_INFO(dev)->gen >= 7)
1263 error->forcewake = I915_READ(FORCEWAKE_MT);
1264 else if (INTEL_INFO(dev)->gen == 6)
1265 error->forcewake = I915_READ(FORCEWAKE);
1268 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
1270 if (INTEL_INFO(dev)->gen >= 6) {
1271 error->error = I915_READ(ERROR_GEN6);
1272 error->done_reg = I915_READ(DONE_REG);
1275 if (INTEL_INFO(dev)->gen == 7)
1276 error->err_int = I915_READ(GEN7_ERR_INT);
1278 i915_get_extra_instdone(dev, error->extra_instdone);
1280 i915_gem_record_fences(dev, error);
1281 i915_gem_record_rings(dev, error);
1283 /* Record buffers on the active and pinned lists. */
1284 error->active_bo = NULL;
1285 error->pinned_bo = NULL;
1288 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1290 error->active_bo_count = i;
1291 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
1294 error->pinned_bo_count = i - error->active_bo_count;
1296 error->active_bo = NULL;
1297 error->pinned_bo = NULL;
1299 error->active_bo = malloc(sizeof(*error->active_bo)*i,
1300 DRM_I915_GEM, M_NOWAIT);
1301 if (error->active_bo)
1303 error->active_bo + error->active_bo_count;
1306 if (error->active_bo)
1307 error->active_bo_count =
1308 capture_active_bo(error->active_bo,
1309 error->active_bo_count,
1310 &dev_priv->mm.active_list);
1312 if (error->pinned_bo)
1313 error->pinned_bo_count =
1314 capture_pinned_bo(error->pinned_bo,
1315 error->pinned_bo_count,
1316 &dev_priv->mm.bound_list);
1318 microtime(&error->time);
1320 error->overlay = intel_overlay_capture_error_state(dev);
1321 error->display = intel_display_capture_error_state(dev);
1323 mtx_lock(&dev_priv->error_lock);
1324 if (dev_priv->first_error == NULL) {
1325 dev_priv->first_error = error;
1328 mtx_unlock(&dev_priv->error_lock);
1331 i915_error_state_free(error);
1334 void i915_destroy_error_state(struct drm_device *dev)
1336 struct drm_i915_private *dev_priv = dev->dev_private;
1337 struct drm_i915_error_state *error;
1339 mtx_lock(&dev_priv->error_lock);
1340 error = dev_priv->first_error;
1341 dev_priv->first_error = NULL;
1342 mtx_unlock(&dev_priv->error_lock);
1344 if (error && refcount_release(&error->ref))
1345 i915_error_state_free(error);
1348 //#define i915_capture_error_state(x)
1351 static void i915_report_and_clear_eir(struct drm_device *dev)
1353 struct drm_i915_private *dev_priv = dev->dev_private;
1354 uint32_t instdone[I915_NUM_INSTDONE_REG];
1355 u32 eir = I915_READ(EIR);
1361 pr_err("render error detected, EIR: 0x%08x\n", eir);
1363 i915_get_extra_instdone(dev, instdone);
1366 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
1367 u32 ipeir = I915_READ(IPEIR_I965);
1369 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1370 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1371 for (i = 0; i < ARRAY_SIZE(instdone); i++)
1372 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1373 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
1374 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1375 I915_WRITE(IPEIR_I965, ipeir);
1376 POSTING_READ(IPEIR_I965);
1378 if (eir & GM45_ERROR_PAGE_TABLE) {
1379 u32 pgtbl_err = I915_READ(PGTBL_ER);
1380 pr_err("page table error\n");
1381 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
1382 I915_WRITE(PGTBL_ER, pgtbl_err);
1383 POSTING_READ(PGTBL_ER);
1387 if (!IS_GEN2(dev)) {
1388 if (eir & I915_ERROR_PAGE_TABLE) {
1389 u32 pgtbl_err = I915_READ(PGTBL_ER);
1390 pr_err("page table error\n");
1391 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
1392 I915_WRITE(PGTBL_ER, pgtbl_err);
1393 POSTING_READ(PGTBL_ER);
1397 if (eir & I915_ERROR_MEMORY_REFRESH) {
1398 pr_err("memory refresh error:\n");
1400 pr_err("pipe %c stat: 0x%08x\n",
1401 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
1402 /* pipestat has already been acked */
1404 if (eir & I915_ERROR_INSTRUCTION) {
1405 pr_err("instruction error\n");
1406 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
1407 for (i = 0; i < ARRAY_SIZE(instdone); i++)
1408 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1409 if (INTEL_INFO(dev)->gen < 4) {
1410 u32 ipeir = I915_READ(IPEIR);
1412 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
1413 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
1414 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
1415 I915_WRITE(IPEIR, ipeir);
1416 POSTING_READ(IPEIR);
1418 u32 ipeir = I915_READ(IPEIR_I965);
1420 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1421 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1422 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
1423 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1424 I915_WRITE(IPEIR_I965, ipeir);
1425 POSTING_READ(IPEIR_I965);
1429 I915_WRITE(EIR, eir);
1431 eir = I915_READ(EIR);
1434 * some errors might have become stuck, so mask them.
1437 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
1438 I915_WRITE(EMR, I915_READ(EMR) | eir);
1439 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1444 * i915_handle_error - handle an error interrupt
1447 * Do some basic checking of register state at error interrupt time and
1448 * dump it to the syslog. Also call i915_capture_error_state() to make
1449 * sure we get a record and make it available in debugfs. Fire a uevent
1450 * so userspace knows something bad happened (should trigger collection
1451 * of a ring dump etc.).
1453 void i915_handle_error(struct drm_device *dev, bool wedged)
1455 struct drm_i915_private *dev_priv = dev->dev_private;
1456 struct intel_ring_buffer *ring;
1459 i915_capture_error_state(dev);
1460 i915_report_and_clear_eir(dev);
1463 INIT_COMPLETION(dev_priv->error_completion);
1464 atomic_set(&dev_priv->mm.wedged, 1);
1467 * Wakeup waiting processes so they don't hang
1469 for_each_ring(ring, dev_priv, i)
1470 wake_up_all(&ring->irq_queue);
1473 taskqueue_enqueue(dev_priv->wq, &dev_priv->error_work);
1476 static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1478 drm_i915_private_t *dev_priv = dev->dev_private;
1479 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1480 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1481 struct drm_i915_gem_object *obj;
1482 struct intel_unpin_work *work;
1483 bool stall_detected;
1485 /* Ignore early vblank irqs */
1486 if (intel_crtc == NULL)
1489 mtx_lock(&dev->event_lock);
1490 work = intel_crtc->unpin_work;
1493 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
1494 !work->enable_stall_check) {
1495 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
1496 mtx_unlock(&dev->event_lock);
1500 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
1501 obj = work->pending_flip_obj;
1502 if (INTEL_INFO(dev)->gen >= 4) {
1503 int dspsurf = DSPSURF(intel_crtc->plane);
1504 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
1507 int dspaddr = DSPADDR(intel_crtc->plane);
1508 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
1509 crtc->y * crtc->fb->pitches[0] +
1510 crtc->x * crtc->fb->bits_per_pixel/8);
1513 mtx_unlock(&dev->event_lock);
1515 if (stall_detected) {
1516 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
1517 intel_prepare_page_flip(dev, intel_crtc->plane);
1521 /* Called from drm generic code, passed 'crtc' which
1522 * we use as a pipe index
1524 static int i915_enable_vblank(struct drm_device *dev, int pipe)
1526 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1528 if (!i915_pipe_enabled(dev, pipe))
1531 mtx_lock(&dev_priv->irq_lock);
1532 if (INTEL_INFO(dev)->gen >= 4)
1533 i915_enable_pipestat(dev_priv, pipe,
1534 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1536 i915_enable_pipestat(dev_priv, pipe,
1537 PIPE_VBLANK_INTERRUPT_ENABLE);
1539 /* maintain vblank delivery even in deep C-states */
1540 if (dev_priv->info->gen == 3)
1541 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
1542 mtx_unlock(&dev_priv->irq_lock);
1543 CTR1(KTR_DRM, "i915_enable_vblank %d", pipe);
1548 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1550 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1552 if (!i915_pipe_enabled(dev, pipe))
1555 mtx_lock(&dev_priv->irq_lock);
1556 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1557 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1558 mtx_unlock(&dev_priv->irq_lock);
1559 CTR1(KTR_DRM, "ironlake_enable_vblank %d", pipe);
1564 static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
1566 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1568 if (!i915_pipe_enabled(dev, pipe))
1571 mtx_lock(&dev_priv->irq_lock);
1572 ironlake_enable_display_irq(dev_priv,
1573 DE_PIPEA_VBLANK_IVB << (5 * pipe));
1574 mtx_unlock(&dev_priv->irq_lock);
1575 CTR1(KTR_DRM, "ivybridge_enable_vblank %d", pipe);
1580 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1582 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1585 if (!i915_pipe_enabled(dev, pipe))
1588 mtx_lock(&dev_priv->irq_lock);
1589 imr = I915_READ(VLV_IMR);
1591 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1593 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1594 I915_WRITE(VLV_IMR, imr);
1595 i915_enable_pipestat(dev_priv, pipe,
1596 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1597 mtx_unlock(&dev_priv->irq_lock);
1602 /* Called from drm generic code, passed 'crtc' which
1603 * we use as a pipe index
1605 static void i915_disable_vblank(struct drm_device *dev, int pipe)
1607 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1609 mtx_lock(&dev_priv->irq_lock);
1610 if (dev_priv->info->gen == 3)
1611 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
1613 i915_disable_pipestat(dev_priv, pipe,
1614 PIPE_VBLANK_INTERRUPT_ENABLE |
1615 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1616 mtx_unlock(&dev_priv->irq_lock);
1617 CTR1(KTR_DRM, "i915_disable_vblank %d", pipe);
1620 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
1622 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1624 mtx_lock(&dev_priv->irq_lock);
1625 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1626 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1627 mtx_unlock(&dev_priv->irq_lock);
1628 CTR1(KTR_DRM, "ironlake_disable_vblank %d", pipe);
1631 static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
1633 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1635 mtx_lock(&dev_priv->irq_lock);
1636 ironlake_disable_display_irq(dev_priv,
1637 DE_PIPEA_VBLANK_IVB << (pipe * 5));
1638 mtx_unlock(&dev_priv->irq_lock);
1639 CTR1(KTR_DRM, "ivybridge_disable_vblank %d", pipe);
1642 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1644 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1647 mtx_lock(&dev_priv->irq_lock);
1648 i915_disable_pipestat(dev_priv, pipe,
1649 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1650 imr = I915_READ(VLV_IMR);
1652 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1654 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1655 I915_WRITE(VLV_IMR, imr);
1656 mtx_unlock(&dev_priv->irq_lock);
1657 CTR2(KTR_DRM, "%s %d", __func__, pipe);
1661 ring_last_seqno(struct intel_ring_buffer *ring)
1663 return list_entry(ring->request_list.prev,
1664 struct drm_i915_gem_request, list)->seqno;
1667 static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1669 if (list_empty(&ring->request_list) ||
1670 i915_seqno_passed(ring->get_seqno(ring, false),
1671 ring_last_seqno(ring))) {
1672 /* Issue a wake-up to catch stuck h/w. */
1673 sleepq_lock(&ring->irq_queue);
1674 if (sleepq_sleepcnt(&ring->irq_queue, 0) != 0) {
1675 sleepq_release(&ring->irq_queue);
1676 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
1678 wake_up_all(&ring->irq_queue);
1681 sleepq_release(&ring->irq_queue);
1687 static bool kick_ring(struct intel_ring_buffer *ring)
1689 struct drm_device *dev = ring->dev;
1690 struct drm_i915_private *dev_priv = dev->dev_private;
1691 u32 tmp = I915_READ_CTL(ring);
1692 if (tmp & RING_WAIT) {
1693 DRM_ERROR("Kicking stuck wait on %s\n",
1695 I915_WRITE_CTL(ring, tmp);
1701 static bool i915_hangcheck_hung(struct drm_device *dev)
1703 drm_i915_private_t *dev_priv = dev->dev_private;
1705 if (dev_priv->hangcheck_count++ > 1) {
1708 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
1709 i915_handle_error(dev, true);
1711 if (!IS_GEN2(dev)) {
1712 struct intel_ring_buffer *ring;
1715 /* Is the chip hanging on a WAIT_FOR_EVENT?
1716 * If so we can simply poke the RB_WAIT bit
1717 * and break the hang. This should work on
1718 * all but the second generation chipsets.
1720 for_each_ring(ring, dev_priv, i)
1721 hung &= !kick_ring(ring);
1731 * This is called when the chip hasn't reported back with completed
1732 * batchbuffers in a long time. The first time this is called we simply record
1733 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
1734 * again, we assume the chip is wedged and try to fix it.
1736 void i915_hangcheck_elapsed(void *data)
1738 struct drm_device *dev = (struct drm_device *)data;
1739 drm_i915_private_t *dev_priv = dev->dev_private;
1740 uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
1741 struct intel_ring_buffer *ring;
1742 bool err = false, idle;
1745 if (!i915_enable_hangcheck)
1748 memset(acthd, 0, sizeof(acthd));
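/* Sample ACTHD on every ring and accumulate whether each ring appears idle. */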
1750 for_each_ring(ring, dev_priv, i) {
1751 idle &= i915_hangcheck_ring_idle(ring, &err);
1752 acthd[i] = intel_ring_get_active_head(ring);
1755 /* If all work is done then ACTHD clearly hasn't advanced. */
1758 if (i915_hangcheck_hung(dev))
1764 dev_priv->hangcheck_count = 0;
1768 i915_get_extra_instdone(dev, instdone);
1769 if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
1770 memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) {
1771 if (i915_hangcheck_hung(dev))
1774 dev_priv->hangcheck_count = 0;
1776 memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
1777 memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone));
1781 /* Reset timer in case the chip hangs without another request being added */
1782 callout_schedule(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD);
1787 static void ironlake_irq_preinstall(struct drm_device *dev)
1789 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1791 atomic_set(&dev_priv->irq_received, 0);
1793 I915_WRITE(HWSTAM, 0xeffe);
1795 /* XXX hotplug from PCH */
1797 I915_WRITE(DEIMR, 0xffffffff);
1798 I915_WRITE(DEIER, 0x0);
1799 POSTING_READ(DEIER);
1802 I915_WRITE(GTIMR, 0xffffffff);
1803 I915_WRITE(GTIER, 0x0);
1804 POSTING_READ(GTIER);
1806 /* south display irq */
1807 I915_WRITE(SDEIMR, 0xffffffff);
1808 I915_WRITE(SDEIER, 0x0);
1809 POSTING_READ(SDEIER);
1812 static void valleyview_irq_preinstall(struct drm_device *dev)
1814 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1817 atomic_set(&dev_priv->irq_received, 0);
1820 I915_WRITE(VLV_IMR, 0);
1821 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
1822 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
1823 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
1826 I915_WRITE(GTIIR, I915_READ(GTIIR));
1827 I915_WRITE(GTIIR, I915_READ(GTIIR));
1828 I915_WRITE(GTIMR, 0xffffffff);
1829 I915_WRITE(GTIER, 0x0);
1830 POSTING_READ(GTIER);
1832 I915_WRITE(DPINVGTT, 0xff);
1834 I915_WRITE(PORT_HOTPLUG_EN, 0);
1835 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1837 I915_WRITE(PIPESTAT(pipe), 0xffff);
1838 I915_WRITE(VLV_IIR, 0xffffffff);
1839 I915_WRITE(VLV_IMR, 0xffffffff);
1840 I915_WRITE(VLV_IER, 0x0);
1841 POSTING_READ(VLV_IER);
1845 * Enable digital hotplug on the PCH, and configure the DP short pulse
1846 * duration to 2ms (which is the minimum in the Display Port spec)
1848 * This register is the same on all known PCH chips.
1851 static void ironlake_enable_pch_hotplug(struct drm_device *dev)
1853 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1856 hotplug = I915_READ(PCH_PORT_HOTPLUG);
1857 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
1858 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
1859 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
1860 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
1861 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
1864 static int ironlake_irq_postinstall(struct drm_device *dev)
1866 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1867 /* the display interrupts we always want enabled */
1868 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1869 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
1873 dev_priv->irq_mask = ~display_mask;
1875 /* should always be able to generate an irq */
1876 I915_WRITE(DEIIR, I915_READ(DEIIR));
1877 I915_WRITE(DEIMR, dev_priv->irq_mask);
1878 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
1879 POSTING_READ(DEIER);
1881 dev_priv->gt_irq_mask = ~0;
1883 I915_WRITE(GTIIR, I915_READ(GTIIR));
1884 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1889 GEN6_BSD_USER_INTERRUPT |
1890 GEN6_BLITTER_USER_INTERRUPT;
1895 GT_BSD_USER_INTERRUPT;
1896 I915_WRITE(GTIER, render_irqs);
1897 POSTING_READ(GTIER);
1899 if (HAS_PCH_CPT(dev)) {
1900 hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1901 SDE_PORTB_HOTPLUG_CPT |
1902 SDE_PORTC_HOTPLUG_CPT |
1903 SDE_PORTD_HOTPLUG_CPT);
1905 hotplug_mask = (SDE_CRT_HOTPLUG |
1912 dev_priv->pch_irq_mask = ~hotplug_mask;
1914 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1915 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1916 I915_WRITE(SDEIER, hotplug_mask);
1917 POSTING_READ(SDEIER);
1919 ironlake_enable_pch_hotplug(dev);
1921 if (IS_IRONLAKE_M(dev)) {
1922 /* Clear & enable PCU event interrupts */
1923 I915_WRITE(DEIIR, DE_PCU_EVENT);
1924 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
1925 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
1931 static int ivybridge_irq_postinstall(struct drm_device *dev)
1933 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1934 /* the display interrupts we always want enabled */
1936 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
1937 DE_PLANEC_FLIP_DONE_IVB |
1938 DE_PLANEB_FLIP_DONE_IVB |
1939 DE_PLANEA_FLIP_DONE_IVB;
1943 dev_priv->irq_mask = ~display_mask;
1945 /* should always be able to generate an irq */
1946 I915_WRITE(DEIIR, I915_READ(DEIIR));
1947 I915_WRITE(DEIMR, dev_priv->irq_mask);
1950 DE_PIPEC_VBLANK_IVB |
1951 DE_PIPEB_VBLANK_IVB |
1952 DE_PIPEA_VBLANK_IVB);
1953 POSTING_READ(DEIER);
1955 dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
1957 I915_WRITE(GTIIR, I915_READ(GTIIR));
1958 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1960 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
1961 GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
1962 I915_WRITE(GTIER, render_irqs);
1963 POSTING_READ(GTIER);
1965 hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1966 SDE_PORTB_HOTPLUG_CPT |
1967 SDE_PORTC_HOTPLUG_CPT |
1968 SDE_PORTD_HOTPLUG_CPT);
1969 dev_priv->pch_irq_mask = ~hotplug_mask;
1971 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1972 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1973 I915_WRITE(SDEIER, hotplug_mask);
1974 POSTING_READ(SDEIER);
1976 ironlake_enable_pch_hotplug(dev);
1981 static int valleyview_irq_postinstall(struct drm_device *dev)
1983 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1985 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1986 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
1990 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
1991 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1992 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
1993 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1994 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1997 * Leave vblank interrupts masked initially; enable/disable will
1998 * toggle them based on usage.
2000 dev_priv->irq_mask = (~enable_mask) |
2001 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2002 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2004 dev_priv->pipestat[0] = 0;
2005 dev_priv->pipestat[1] = 0;
2007 /* Hack for broken MSIs on VLV */
2008 pci_write_config_dword(dev->dev, 0x94, 0xfee00000);
2009 pci_read_config_word(dev->dev, 0x98, &msid);
2010 msid &= 0xff; /* mask out delivery bits */
2012 pci_write_config_word(dev->dev, 0x98, msid);
2014 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2015 I915_WRITE(VLV_IER, enable_mask);
2016 I915_WRITE(VLV_IIR, 0xffffffff);
2017 I915_WRITE(PIPESTAT(0), 0xffff);
2018 I915_WRITE(PIPESTAT(1), 0xffff);
2019 POSTING_READ(VLV_IER);
2021 i915_enable_pipestat(dev_priv, 0, pipestat_enable);
2022 i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2024 I915_WRITE(VLV_IIR, 0xffffffff);
2025 I915_WRITE(VLV_IIR, 0xffffffff);
2027 I915_WRITE(GTIIR, I915_READ(GTIIR));
2028 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2030 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
2031 GEN6_BLITTER_USER_INTERRUPT;
2032 I915_WRITE(GTIER, render_irqs);
2033 POSTING_READ(GTIER);
2035 /* ack & enable invalid PTE error interrupts */
2036 #if 0 /* FIXME: add support to irq handler for checking these bits */
2037 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2038 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2041 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2042 /* Note HDMI and DP share bits */
2043 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2044 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2045 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2046 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2047 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2048 hotplug_en |= HDMID_HOTPLUG_INT_EN;
2049 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
2050 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2051 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
2052 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2053 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2054 hotplug_en |= CRT_HOTPLUG_INT_EN;
2055 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2058 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2063 static void valleyview_irq_uninstall(struct drm_device *dev)
2065 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2072 I915_WRITE(PIPESTAT(pipe), 0xffff);
2074 I915_WRITE(HWSTAM, 0xffffffff);
2075 I915_WRITE(PORT_HOTPLUG_EN, 0);
2076 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2078 I915_WRITE(PIPESTAT(pipe), 0xffff);
2079 I915_WRITE(VLV_IIR, 0xffffffff);
2080 I915_WRITE(VLV_IMR, 0xffffffff);
2081 I915_WRITE(VLV_IER, 0x0);
2082 POSTING_READ(VLV_IER);
2085 static void ironlake_irq_uninstall(struct drm_device *dev)
2087 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2092 I915_WRITE(HWSTAM, 0xffffffff);
2094 I915_WRITE(DEIMR, 0xffffffff);
2095 I915_WRITE(DEIER, 0x0);
2096 I915_WRITE(DEIIR, I915_READ(DEIIR));
2098 I915_WRITE(GTIMR, 0xffffffff);
2099 I915_WRITE(GTIER, 0x0);
2100 I915_WRITE(GTIIR, I915_READ(GTIIR));
2102 I915_WRITE(SDEIMR, 0xffffffff);
2103 I915_WRITE(SDEIER, 0x0);
2104 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2107 static void i8xx_irq_preinstall(struct drm_device * dev)
2109 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2112 atomic_set(&dev_priv->irq_received, 0);
2115 I915_WRITE(PIPESTAT(pipe), 0);
2116 I915_WRITE16(IMR, 0xffff);
2117 I915_WRITE16(IER, 0x0);
2118 POSTING_READ16(IER);
2121 static int i8xx_irq_postinstall(struct drm_device *dev)
2123 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2125 dev_priv->pipestat[0] = 0;
2126 dev_priv->pipestat[1] = 0;
2129 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2131 /* Unmask the interrupts that we always want on. */
2132 dev_priv->irq_mask =
2133 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2134 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2135 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2136 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2137 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2138 I915_WRITE16(IMR, dev_priv->irq_mask);
2141 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2142 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2143 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2144 I915_USER_INTERRUPT);
2145 POSTING_READ16(IER);
2150 static void i8xx_irq_handler(DRM_IRQ_ARGS)
2152 struct drm_device *dev = (struct drm_device *) arg;
2153 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2159 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2160 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2162 atomic_inc(&dev_priv->irq_received);
2164 iir = I915_READ16(IIR);
2168 while (iir & ~flip_mask) {
2169 /* Can't rely on pipestat interrupt bit in iir as it might
2170 * have been cleared after the pipestat interrupt was received.
2171 * It doesn't set the bit in iir again, but it still produces
2172 * interrupts (for non-MSI).
2174 mtx_lock(&dev_priv->irq_lock);
2175 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2176 i915_handle_error(dev, false);
2178 for_each_pipe(pipe) {
2179 int reg = PIPESTAT(pipe);
2180 pipe_stats[pipe] = I915_READ(reg);
2182 /*
2183 * Clear the PIPE*STAT regs before the IIR
2184 */
2185 if (pipe_stats[pipe] & 0x8000ffff) {
2186 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2187 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2188 pipe_name(pipe));
2189 I915_WRITE(reg, pipe_stats[pipe]);
2193 mtx_unlock(&dev_priv->irq_lock);
2195 I915_WRITE16(IIR, iir & ~flip_mask);
2196 new_iir = I915_READ16(IIR); /* Flush posted writes */
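/*
 * The IIR read-back above does double duty: it flushes the posted write
 * that acknowledged the handled bits, and it picks up any events that
 * latched while this pass ran, so the loop can keep servicing them
 * without waiting for the hardware to raise another interrupt.
 */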
2198 i915_update_dri1_breadcrumb(dev);
2200 if (iir & I915_USER_INTERRUPT)
2201 notify_ring(dev, &dev_priv->ring[RCS]);
2203 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
2204 drm_handle_vblank(dev, 0)) {
2205 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
2206 intel_prepare_page_flip(dev, 0);
2207 intel_finish_page_flip(dev, 0);
2208 flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
2212 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
2213 drm_handle_vblank(dev, 1)) {
2214 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
2215 intel_prepare_page_flip(dev, 1);
2216 intel_finish_page_flip(dev, 1);
2217 flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2225 static void i8xx_irq_uninstall(struct drm_device * dev)
2227 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2230 for_each_pipe(pipe) {
2231 /* Clear enable bits; then clear status bits */
2232 I915_WRITE(PIPESTAT(pipe), 0);
2233 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2235 I915_WRITE16(IMR, 0xffff);
2236 I915_WRITE16(IER, 0x0);
2237 I915_WRITE16(IIR, I915_READ16(IIR));
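/*
 * Note the ordering above: the PIPESTAT enable bits are cleared first and
 * the latched status bits second (status bits, like IIR, clear when the
 * read value is written back), with IIR acknowledged last.
 */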
2240 static void i915_irq_preinstall(struct drm_device * dev)
2242 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2245 atomic_set(&dev_priv->irq_received, 0);
2247 if (I915_HAS_HOTPLUG(dev)) {
2248 I915_WRITE(PORT_HOTPLUG_EN, 0);
2249 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2250 }
2252 I915_WRITE16(HWSTAM, 0xeffe);
2254 I915_WRITE(PIPESTAT(pipe), 0);
2255 I915_WRITE(IMR, 0xffffffff);
2256 I915_WRITE(IER, 0x0);
2260 static int i915_irq_postinstall(struct drm_device *dev)
2262 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2265 dev_priv->pipestat[0] = 0;
2266 dev_priv->pipestat[1] = 0;
2268 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2270 /* Unmask the interrupts that we always want on. */
2271 dev_priv->irq_mask =
2272 ~(I915_ASLE_INTERRUPT |
2273 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2274 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2275 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2276 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2277 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2279 enable_mask =
2280 I915_ASLE_INTERRUPT |
2281 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2282 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2283 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2284 I915_USER_INTERRUPT;
2286 if (I915_HAS_HOTPLUG(dev)) {
2287 /* Enable in IER... */
2288 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2289 /* and unmask in IMR */
2290 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2291 }
2293 I915_WRITE(IMR, dev_priv->irq_mask);
2294 I915_WRITE(IER, enable_mask);
2297 if (I915_HAS_HOTPLUG(dev)) {
2298 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2300 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2301 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2302 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2303 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2304 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2305 hotplug_en |= HDMID_HOTPLUG_INT_EN;
2306 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
2307 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2308 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
2309 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2310 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2311 hotplug_en |= CRT_HOTPLUG_INT_EN;
2312 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2313 }
2315 /* Ignore TV since it's buggy */
2317 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2318 }
2320 intel_opregion_enable_asle(dev);
2325 static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
2327 struct drm_device *dev = (struct drm_device *) arg;
2328 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2329 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
2330 u32 flip_mask =
2331 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2332 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2333 u32 flip[2] = {
2334 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
2335 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
2336 };
2339 atomic_inc(&dev_priv->irq_received);
2341 iir = I915_READ(IIR);
2343 bool irq_received = (iir & ~flip_mask) != 0;
2344 bool blc_event = false;
2346 /* Can't rely on pipestat interrupt bit in iir as it might
2347 * have been cleared after the pipestat interrupt was received.
2348 * It doesn't set the bit in iir again, but it still produces
2349 * interrupts (for non-MSI).
2350 */
2351 mtx_lock(&dev_priv->irq_lock);
2352 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2353 i915_handle_error(dev, false);
2355 for_each_pipe(pipe) {
2356 int reg = PIPESTAT(pipe);
2357 pipe_stats[pipe] = I915_READ(reg);
2359 /* Clear the PIPE*STAT regs before the IIR */
2360 if (pipe_stats[pipe] & 0x8000ffff) {
2361 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2362 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2363 pipe_name(pipe));
2364 I915_WRITE(reg, pipe_stats[pipe]);
2365 irq_received = true;
2368 mtx_unlock(&dev_priv->irq_lock);
2373 /* Consume port. Then clear IIR or we'll miss events */
2374 if ((I915_HAS_HOTPLUG(dev)) &&
2375 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2376 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2378 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2379 hotplug_status);
2380 if (hotplug_status & dev_priv->hotplug_supported_mask)
2381 taskqueue_enqueue(dev_priv->wq,
2382 &dev_priv->hotplug_work);
2384 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2385 POSTING_READ(PORT_HOTPLUG_STAT);
2386 }
2388 I915_WRITE(IIR, iir & ~flip_mask);
2389 new_iir = I915_READ(IIR); /* Flush posted writes */
2391 if (iir & I915_USER_INTERRUPT)
2392 notify_ring(dev, &dev_priv->ring[RCS]);
2394 for_each_pipe(pipe) {
2395 int plane = pipe;
2396 if (IS_MOBILE(dev))
2397 plane = !plane;
2398 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2399 drm_handle_vblank(dev, pipe)) {
2400 if (iir & flip[plane]) {
2401 intel_prepare_page_flip(dev, plane);
2402 intel_finish_page_flip(dev, pipe);
2403 flip_mask &= ~flip[plane];
2407 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2408 blc_event = true;
2411 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2412 intel_opregion_asle_intr(dev);
2414 /* With MSI, interrupts are only generated when iir
2415 * transitions from zero to nonzero. If another bit got
2416 * set while we were handling the existing iir bits, then
2417 * we would never get another interrupt.
2418 *
2419 * This is fine on non-MSI as well, as if we hit this path
2420 * we avoid exiting the interrupt handler only to generate
2421 * another one.
2422 *
2423 * Note that for MSI this could cause a stray interrupt report
2424 * if an interrupt landed in the time between writing IIR and
2425 * the posting read. This should be rare enough to never
2426 * trigger the 99% of 100,000 interrupts test for disabling
2427 * MSI.
2428 */
2429 iir = new_iir;
2430 } while (iir & ~flip_mask);
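/*
 * The plane-flip pending bits get special treatment in this loop: they
 * are excluded from the IIR acknowledge and from the loop condition (via
 * flip_mask) until the matching vblank has been seen and the flip
 * completed; only then is the bit dropped from flip_mask so the next IIR
 * write clears it.  Handling them like ordinary sources would risk either
 * losing the flip or spinning here while a flip is still outstanding.
 */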
2432 i915_update_dri1_breadcrumb(dev);
2435 static void i915_irq_uninstall(struct drm_device * dev)
2437 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2440 if (I915_HAS_HOTPLUG(dev)) {
2441 I915_WRITE(PORT_HOTPLUG_EN, 0);
2442 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2443 }
2445 I915_WRITE16(HWSTAM, 0xffff);
2446 for_each_pipe(pipe) {
2447 /* Clear enable bits; then clear status bits */
2448 I915_WRITE(PIPESTAT(pipe), 0);
2449 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2451 I915_WRITE(IMR, 0xffffffff);
2452 I915_WRITE(IER, 0x0);
2454 I915_WRITE(IIR, I915_READ(IIR));
2457 static void i965_irq_preinstall(struct drm_device * dev)
2459 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2462 atomic_set(&dev_priv->irq_received, 0);
2464 I915_WRITE(PORT_HOTPLUG_EN, 0);
2465 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2467 I915_WRITE(HWSTAM, 0xeffe);
2469 I915_WRITE(PIPESTAT(pipe), 0);
2470 I915_WRITE(IMR, 0xffffffff);
2471 I915_WRITE(IER, 0x0);
2475 static int i965_irq_postinstall(struct drm_device *dev)
2477 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2482 /* Unmask the interrupts that we always want on. */
2483 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
2484 I915_DISPLAY_PORT_INTERRUPT |
2485 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2486 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2487 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2488 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2489 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2491 enable_mask = ~dev_priv->irq_mask;
2492 enable_mask |= I915_USER_INTERRUPT;
2494 if (IS_G4X(dev))
2495 enable_mask |= I915_BSD_USER_INTERRUPT;
2497 dev_priv->pipestat[0] = 0;
2498 dev_priv->pipestat[1] = 0;
2500 /*
2501 * Enable some error detection, note the instruction error mask
2502 * bit is reserved, so we leave it masked.
2503 */
2504 if (IS_G4X(dev)) {
2505 error_mask = ~(GM45_ERROR_PAGE_TABLE |
2506 GM45_ERROR_MEM_PRIV |
2507 GM45_ERROR_CP_PRIV |
2508 I915_ERROR_MEMORY_REFRESH);
2509 } else {
2510 error_mask = ~(I915_ERROR_PAGE_TABLE |
2511 I915_ERROR_MEMORY_REFRESH);
2512 }
2513 I915_WRITE(EMR, error_mask);
2515 I915_WRITE(IMR, dev_priv->irq_mask);
2516 I915_WRITE(IER, enable_mask);
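/*
 * The ring interrupts (I915_USER_INTERRUPT and, on G4X, the BSD ring's
 * I915_BSD_USER_INTERRUPT) are enabled in IER but left masked by irq_mask
 * above; the expectation is that the ring code unmasks them in IMR only
 * while something is actually waiting on a seqno, keeping them quiet the
 * rest of the time.
 */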
2519 /* Note HDMI and DP share hotplug bits */
2521 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2522 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2523 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2524 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2525 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2526 hotplug_en |= HDMID_HOTPLUG_INT_EN;
2527 if (IS_G4X(dev)) {
2528 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
2529 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2530 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
2531 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2532 } else {
2533 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
2534 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2535 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
2536 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2537 }
2538 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2539 hotplug_en |= CRT_HOTPLUG_INT_EN;
2541 /* Programming the CRT detection parameters tends
2542 to generate a spurious hotplug event about three
2543 seconds later. So just do it once.
2544 */
2546 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
2547 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2548 }
2550 /* Ignore TV since it's buggy */
2552 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2554 intel_opregion_enable_asle(dev);
2559 static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
2561 struct drm_device *dev = (struct drm_device *) arg;
2562 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2564 u32 pipe_stats[I915_MAX_PIPES];
2568 atomic_inc(&dev_priv->irq_received);
2570 iir = I915_READ(IIR);
2573 bool blc_event = false;
2575 irq_received = iir != 0;
2577 /* Can't rely on pipestat interrupt bit in iir as it might
2578 * have been cleared after the pipestat interrupt was received.
2579 * It doesn't set the bit in iir again, but it still produces
2580 * interrupts (for non-MSI).
2581 */
2582 mtx_lock(&dev_priv->irq_lock);
2583 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2584 i915_handle_error(dev, false);
2586 for_each_pipe(pipe) {
2587 int reg = PIPESTAT(pipe);
2588 pipe_stats[pipe] = I915_READ(reg);
2590 /*
2591 * Clear the PIPE*STAT regs before the IIR
2592 */
2593 if (pipe_stats[pipe] & 0x8000ffff) {
2594 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2595 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2596 pipe_name(pipe));
2597 I915_WRITE(reg, pipe_stats[pipe]);
2601 mtx_unlock(&dev_priv->irq_lock);
2606 /* Consume port. Then clear IIR or we'll miss events */
2607 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
2608 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2610 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2611 hotplug_status);
2612 if (hotplug_status & dev_priv->hotplug_supported_mask)
2613 taskqueue_enqueue(dev_priv->wq,
2614 &dev_priv->hotplug_work);
2616 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2617 I915_READ(PORT_HOTPLUG_STAT);
2618 }
2620 I915_WRITE(IIR, iir);
2621 new_iir = I915_READ(IIR); /* Flush posted writes */
2623 if (iir & I915_USER_INTERRUPT)
2624 notify_ring(dev, &dev_priv->ring[RCS]);
2625 if (iir & I915_BSD_USER_INTERRUPT)
2626 notify_ring(dev, &dev_priv->ring[VCS]);
2628 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
2629 intel_prepare_page_flip(dev, 0);
2631 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
2632 intel_prepare_page_flip(dev, 1);
2634 for_each_pipe(pipe) {
2635 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
2636 drm_handle_vblank(dev, pipe)) {
2637 i915_pageflip_stall_check(dev, pipe);
2638 intel_finish_page_flip(dev, pipe);
2641 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2642 blc_event = true;
2646 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2647 intel_opregion_asle_intr(dev);
2649 /* With MSI, interrupts are only generated when iir
2650 * transitions from zero to nonzero. If another bit got
2651 * set while we were handling the existing iir bits, then
2652 * we would never get another interrupt.
2653 *
2654 * This is fine on non-MSI as well, as if we hit this path
2655 * we avoid exiting the interrupt handler only to generate
2656 * another one.
2657 *
2658 * Note that for MSI this could cause a stray interrupt report
2659 * if an interrupt landed in the time between writing IIR and
2660 * the posting read. This should be rare enough to never
2661 * trigger the 99% of 100,000 interrupts test for disabling
2662 * MSI.
2663 */
2667 i915_update_dri1_breadcrumb(dev);
2670 static void i965_irq_uninstall(struct drm_device * dev)
2672 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2678 I915_WRITE(PORT_HOTPLUG_EN, 0);
2679 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2681 I915_WRITE(HWSTAM, 0xffffffff);
2683 I915_WRITE(PIPESTAT(pipe), 0);
2684 I915_WRITE(IMR, 0xffffffff);
2685 I915_WRITE(IER, 0x0);
2688 I915_WRITE(PIPESTAT(pipe),
2689 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
2690 I915_WRITE(IIR, I915_READ(IIR));
2693 void intel_irq_init(struct drm_device *dev)
2695 struct drm_i915_private *dev_priv = dev->dev_private;
2697 TASK_INIT(&dev_priv->hotplug_work, 0, i915_hotplug_work_func, dev->dev_private);
2698 TASK_INIT(&dev_priv->error_work, 0, i915_error_work_func, dev->dev_private);
2699 TASK_INIT(&dev_priv->rps.work, 0, gen6_pm_rps_work, dev->dev_private);
2700 TASK_INIT(&dev_priv->l3_parity.error_work, 0, ivybridge_parity_work, dev->dev_private);
2702 dev->driver->get_vblank_counter = i915_get_vblank_counter;
2703 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
2704 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
2705 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
2706 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
2709 if (drm_core_check_feature(dev, DRIVER_MODESET))
2710 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
2711 else
2712 dev->driver->get_vblank_timestamp = NULL;
2713 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
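/*
 * The precise vblank timestamp hook works by converting the current
 * scanout position against the active mode, which is presumably why it is
 * only wired up under modeset (KMS); without it the DRM core falls back
 * to timestamping vblanks when the interrupt is serviced.
 */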
2715 if (IS_VALLEYVIEW(dev)) {
2716 dev->driver->irq_handler = valleyview_irq_handler;
2717 dev->driver->irq_preinstall = valleyview_irq_preinstall;
2718 dev->driver->irq_postinstall = valleyview_irq_postinstall;
2719 dev->driver->irq_uninstall = valleyview_irq_uninstall;
2720 dev->driver->enable_vblank = valleyview_enable_vblank;
2721 dev->driver->disable_vblank = valleyview_disable_vblank;
2722 } else if (IS_IVYBRIDGE(dev)) {
2723 /* Share pre & uninstall handlers with ILK/SNB */
2724 dev->driver->irq_handler = ivybridge_irq_handler;
2725 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2726 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
2727 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2728 dev->driver->enable_vblank = ivybridge_enable_vblank;
2729 dev->driver->disable_vblank = ivybridge_disable_vblank;
2730 } else if (IS_HASWELL(dev)) {
2731 /* Share interrupts handling with IVB */
2732 dev->driver->irq_handler = ivybridge_irq_handler;
2733 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2734 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
2735 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2736 dev->driver->enable_vblank = ivybridge_enable_vblank;
2737 dev->driver->disable_vblank = ivybridge_disable_vblank;
2738 } else if (HAS_PCH_SPLIT(dev)) {
2739 dev->driver->irq_handler = ironlake_irq_handler;
2740 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2741 dev->driver->irq_postinstall = ironlake_irq_postinstall;
2742 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2743 dev->driver->enable_vblank = ironlake_enable_vblank;
2744 dev->driver->disable_vblank = ironlake_disable_vblank;
2745 } else {
2746 if (INTEL_INFO(dev)->gen == 2) {
2747 dev->driver->irq_preinstall = i8xx_irq_preinstall;
2748 dev->driver->irq_postinstall = i8xx_irq_postinstall;
2749 dev->driver->irq_handler = i8xx_irq_handler;
2750 dev->driver->irq_uninstall = i8xx_irq_uninstall;
2751 } else if (INTEL_INFO(dev)->gen == 3) {
2752 dev->driver->irq_preinstall = i915_irq_preinstall;
2753 dev->driver->irq_postinstall = i915_irq_postinstall;
2754 dev->driver->irq_uninstall = i915_irq_uninstall;
2755 dev->driver->irq_handler = i915_irq_handler;
2756 } else {
2757 dev->driver->irq_preinstall = i965_irq_preinstall;
2758 dev->driver->irq_postinstall = i965_irq_postinstall;
2759 dev->driver->irq_uninstall = i965_irq_uninstall;
2760 dev->driver->irq_handler = i965_irq_handler;
2761 }
2762 dev->driver->enable_vblank = i915_enable_vblank;
2763 dev->driver->disable_vblank = i915_disable_vblank;
2764 }
2765 }
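/*
 * These hooks are consumed by the DRM core: drm_irq_install() runs
 * irq_preinstall, sets up irq_handler, then runs irq_postinstall, and
 * drm_irq_uninstall() runs irq_uninstall before releasing the interrupt.
 * A rough driver-load sketch (illustrative only, not code from this file):
 *
 *     intel_irq_init(dev);           // choose the per-generation hooks
 *     ret = drm_irq_install(dev);    // preinstall -> hook up -> postinstall
 *     ...
 *     drm_irq_uninstall(dev);        // quiesce via irq_uninstall, then release
 */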