/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*-
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_drv.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sleepqueue.h>

static void i915_capture_error_state(struct drm_device *dev);
static u32 ring_last_seqno(struct intel_ring_buffer *ring);

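/*
 * A note on the register triads used throughout this file: each interrupt
 * domain exposes an IMR (mask: a set bit blocks that source), an IER
 * (enable) and an IIR (identity: latched status, cleared by writing the
 * set bits back).  POSTING_READ() forces a posted MMIO write out to the
 * device before the code proceeds.
 */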
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        if ((dev_priv->irq_mask & mask) != 0) {
                dev_priv->irq_mask &= ~mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        if ((dev_priv->irq_mask & mask) != mask) {
                dev_priv->irq_mask |= mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
        if ((dev_priv->pipestat[pipe] & mask) != mask) {
                u32 reg = PIPESTAT(pipe);

                dev_priv->pipestat[pipe] |= mask;
                /* Enable the interrupt, clear any pending status */
                I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
                POSTING_READ(reg);
        }
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
        if ((dev_priv->pipestat[pipe] & mask) != 0) {
                u32 reg = PIPESTAT(pipe);

                dev_priv->pipestat[pipe] &= ~mask;
                I915_WRITE(reg, dev_priv->pipestat[pipe]);
                POSTING_READ(reg);
        }
}

/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        /* FIXME: opregion/asle for VLV */
        if (IS_VALLEYVIEW(dev))
                return;

        mtx_lock(&dev_priv->irq_lock);

        if (HAS_PCH_SPLIT(dev))
                ironlake_enable_display_irq(dev_priv, DE_GSE);
        else {
                i915_enable_pipestat(dev_priv, 1,
                                     PIPE_LEGACY_BLC_EVENT_ENABLE);
                if (INTEL_INFO(dev)->gen >= 4)
                        i915_enable_pipestat(dev_priv, 0,
                                             PIPE_LEGACY_BLC_EVENT_ENABLE);
        }

        mtx_unlock(&dev_priv->irq_lock);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32
i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long high_frame;
        unsigned long low_frame;
        u32 high1, high2, low;

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG("trying to get vblank count for disabled "
                                "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        high_frame = PIPEFRAME(pipe);
        low_frame = PIPEFRAMEPIXEL(pipe);

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
                low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
                high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);
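
        /*
         * The hardware splits the frame counter across two registers: the
         * frame-count register supplies the high bits, and the top byte of
         * the frame/pixel-count register supplies the low 8 bits, so the
         * shifts below reassemble them into a single counter value.
         */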
        high1 >>= PIPE_FRAME_HIGH_SHIFT;
        low >>= PIPE_FRAME_LOW_SHIFT;
        return (high1 << 8) | low;
}

static u32
gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int reg = PIPE_FRMCOUNT_GM45(pipe);

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG("i915: trying to get vblank count for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        return I915_READ(reg);
}

static int
i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
    int *vpos, int *hpos)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 vbl = 0, position = 0;
        int vbl_start, vbl_end, htotal, vtotal;
        bool in_vbl = true;
        int ret = 0;

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG("i915: trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        /* Get vtotal. */
        vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);

        if (INTEL_INFO(dev)->gen >= 4) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
                position = I915_READ(PIPEDSL(pipe));

                /* Decode into vertical scanout position. Don't have
                 * horizontal scanout position.
                 */
                *vpos = position & 0x1fff;
                *hpos = 0;
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
                 * scanout position.
                 */
                position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

                htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
                *vpos = position / htotal;
                *hpos = position - (*vpos * htotal);
        }

        /* Query vblank area. */
        vbl = I915_READ(VBLANK(pipe));

        /* Test position against vblank region. */
        vbl_start = vbl & 0x1fff;
        vbl_end = (vbl >> 16) & 0x1fff;

        if ((*vpos < vbl_start) || (*vpos > vbl_end))
                in_vbl = false;

        /* Inside "upper part" of vblank area? Apply corrective offset: */
        if (in_vbl && (*vpos >= vbl_start))
                *vpos = *vpos - vtotal;

        /* Readouts valid? */
        if (vbl > 0)
                ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

        /* In vblank? */
        if (in_vbl)
                ret |= DRM_SCANOUTPOS_INVBL;

        return ret;
}

static int
i915_get_vblank_timestamp(struct drm_device *dev, int pipe, int *max_error,
    struct timeval *vblank_time, unsigned flags)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;

        if (pipe < 0 || pipe >= dev_priv->num_pipe) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        /* Get drm_crtc to timestamp: */
        crtc = intel_get_crtc_for_pipe(dev, pipe);
        if (crtc == NULL) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        if (!crtc->enabled) {
#if 0
                DRM_DEBUG("crtc %d is disabled\n", pipe);
#endif
                return -EBUSY;
        }

        /* Helper routine in DRM core does all the work: */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
                                                     vblank_time, flags,
                                                     crtc);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
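/*
 * This function runs from the driver taskqueue (the FreeBSD counterpart
 * of the Linux workqueue in the original driver), so it may sleep; that
 * is why it can take the sleepable sx(9) lock on the mode config below,
 * which the interrupt handler itself must not do.
 */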
static void
i915_hotplug_work_func(void *context, int pending)
{
        drm_i915_private_t *dev_priv = context;
        struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_encoder *encoder;

        sx_xlock(&mode_config->mutex);
        DRM_DEBUG_KMS("running encoder hotplug functions\n");

        list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
                if (encoder->hot_plug)
                        encoder->hot_plug(encoder);

        sx_xunlock(&mode_config->mutex);

        /* Just fire off a uevent and let userspace tell us what to do */
#if 0
        drm_helper_hpd_irq_event(dev);
#endif
}

static void i915_handle_rps_change(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 busy_up, busy_down, max_avg, min_avg;
        u8 new_delay = dev_priv->cur_delay;

        I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
        busy_up = I915_READ(RCPREVBSYTUPAVG);
        busy_down = I915_READ(RCPREVBSYTDNAVG);
        max_avg = I915_READ(RCBMAXAVG);
        min_avg = I915_READ(RCBMINAVG);

        /* Handle RCS change request from hw */
        if (busy_up > max_avg) {
                if (dev_priv->cur_delay != dev_priv->max_delay)
                        new_delay = dev_priv->cur_delay - 1;
                if (new_delay < dev_priv->max_delay)
                        new_delay = dev_priv->max_delay;
        } else if (busy_down < min_avg) {
                if (dev_priv->cur_delay != dev_priv->min_delay)
                        new_delay = dev_priv->cur_delay + 1;
                if (new_delay > dev_priv->min_delay)
                        new_delay = dev_priv->min_delay;
        }

        if (ironlake_set_drps(dev, new_delay))
                dev_priv->cur_delay = new_delay;

        return;
}

static void notify_ring(struct drm_device *dev,
                        struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (ring->obj == NULL)
                return;

        CTR2(KTR_DRM, "request_complete %s %d", ring->name,
            ring->get_seqno(ring));

        mtx_lock(&dev_priv->irq_lock);
        wakeup(ring);
        mtx_unlock(&dev_priv->irq_lock);

        if (i915_enable_hangcheck) {
                dev_priv->hangcheck_count = 0;
                callout_schedule(&dev_priv->hangcheck_timer,
                    DRM_I915_HANGCHECK_PERIOD);
        }
}

static void
gen6_pm_rps_work_func(void *arg, int pending)
{
        struct drm_device *dev;
        drm_i915_private_t *dev_priv;
        u8 new_delay;
        u32 pm_iir, pm_imr;

        dev_priv = (drm_i915_private_t *)arg;
        dev = dev_priv->dev;
        new_delay = dev_priv->cur_delay;

        mtx_lock(&dev_priv->rps_lock);
        pm_iir = dev_priv->pm_iir;
        dev_priv->pm_iir = 0;
        pm_imr = I915_READ(GEN6_PMIMR);
        I915_WRITE(GEN6_PMIMR, 0);
        mtx_unlock(&dev_priv->rps_lock);

        if (!pm_iir)
                return;

        DRM_LOCK(dev);
        if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                if (dev_priv->cur_delay != dev_priv->max_delay)
                        new_delay = dev_priv->cur_delay + 1;
                if (new_delay > dev_priv->max_delay)
                        new_delay = dev_priv->max_delay;
        } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
                gen6_gt_force_wake_get(dev_priv);
                if (dev_priv->cur_delay != dev_priv->min_delay)
                        new_delay = dev_priv->cur_delay - 1;
                if (new_delay < dev_priv->min_delay) {
                        new_delay = dev_priv->min_delay;
                        I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
                                   I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
                                   ((new_delay << 16) & 0x3f0000));
                } else {
                        /* Make sure we continue to get down interrupts
                         * until we hit the minimum frequency */
                        I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
                                   I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
                }
                gen6_gt_force_wake_put(dev_priv);
        }

        gen6_set_rps(dev, new_delay);
        dev_priv->cur_delay = new_delay;

        /*
         * rps_lock not held here because clearing is non-destructive. There is
         * an *extremely* unlikely race with gen6_rps_enable() that is prevented
         * by holding struct_mutex for the duration of the write.
         */
        DRM_UNLOCK(dev);
}

static void snb_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{

        if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
                      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
                notify_ring(dev, &dev_priv->rings[RCS]);
        if (gt_iir & GEN6_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->rings[VCS]);
        if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->rings[BCS]);

        if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
                      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
                      GT_RENDER_CS_ERROR_INTERRUPT)) {
                DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
                i915_handle_error(dev, false);
        }
}

static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
                                u32 pm_iir)
{

        /*
         * IIR bits should never already be set because IMR should
         * prevent an interrupt from being shown in IIR. The warning
         * catches a case where we've unsafely cleared
         * dev_priv->pm_iir. Although missing an interrupt of the same
         * type is not a problem, it indicates a bug in the logic.
         *
         * The mask bit in IMR is cleared by rps_work.
         */

        mtx_lock(&dev_priv->rps_lock);
        if (dev_priv->pm_iir & pm_iir)
                printf("Missed a PM interrupt\n");
        dev_priv->pm_iir |= pm_iir;
        I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
        POSTING_READ(GEN6_PMIMR);
        mtx_unlock(&dev_priv->rps_lock);

        taskqueue_enqueue(dev_priv->tq, &dev_priv->rps_task);
}

static void valleyview_irq_handler(void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 iir, gt_iir, pm_iir;
        int pipe;
        u32 pipe_stats[I915_MAX_PIPES];
        u32 vblank_status;
        int vblank = 0;
        bool blc_event = false;

        atomic_inc(&dev_priv->irq_received);

        vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS |
                PIPE_VBLANK_INTERRUPT_STATUS;

        while (true) {
                iir = I915_READ(VLV_IIR);
                gt_iir = I915_READ(GTIIR);
                pm_iir = I915_READ(GEN6_PMIIR);

                if (gt_iir == 0 && pm_iir == 0 && iir == 0)
                        goto out;

                snb_gt_irq_handler(dev, dev_priv, gt_iir);

                mtx_lock(&dev_priv->irq_lock);
                for_each_pipe(pipe) {
                        int reg = PIPESTAT(pipe);
                        pipe_stats[pipe] = I915_READ(reg);

                        /*
                         * Clear the PIPE*STAT regs before the IIR
                         */
                        if (pipe_stats[pipe] & 0x8000ffff) {
                                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                                        DRM_DEBUG_DRIVER("pipe %c underrun\n",
                                                         pipe_name(pipe));
                                I915_WRITE(reg, pipe_stats[pipe]);
                        }
                }
                mtx_unlock(&dev_priv->irq_lock);

                /* Consume port.  Then clear IIR or we'll miss events */
                if (iir & I915_DISPLAY_PORT_INTERRUPT) {
                        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

                        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                                         hotplug_status);
                        if (hotplug_status & dev_priv->hotplug_supported_mask)
                                taskqueue_enqueue(dev_priv->tq,
                                    &dev_priv->hotplug_task);

                        I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
                        I915_READ(PORT_HOTPLUG_STAT);
                }

                if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) {
                        drm_handle_vblank(dev, 0);
                        vblank++;
                        intel_finish_page_flip(dev, 0);
                }

                if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) {
                        drm_handle_vblank(dev, 1);
                        vblank++;
                        intel_finish_page_flip(dev, 1);
                }

                for_each_pipe(pipe)
                        if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
                                blc_event = true;

                if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
                        gen6_queue_rps_work(dev_priv, pm_iir);

                I915_WRITE(GTIIR, gt_iir);
                I915_WRITE(GEN6_PMIIR, pm_iir);
                I915_WRITE(VLV_IIR, iir);
        }

out:;
}

static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;

        if (pch_iir & SDE_AUDIO_POWER_MASK)
                DRM_DEBUG("i915: PCH audio power change on port %d\n",
                                 (pch_iir & SDE_AUDIO_POWER_MASK) >>
                                 SDE_AUDIO_POWER_SHIFT);

        if (pch_iir & SDE_GMBUS)
                DRM_DEBUG("i915: PCH GMBUS interrupt\n");

        if (pch_iir & SDE_AUDIO_HDCP_MASK)
                DRM_DEBUG("i915: PCH HDCP audio interrupt\n");

        if (pch_iir & SDE_AUDIO_TRANS_MASK)
                DRM_DEBUG("i915: PCH transcoder audio interrupt\n");

        if (pch_iir & SDE_POISON)
                DRM_ERROR("i915: PCH poison interrupt\n");

        if (pch_iir & SDE_FDI_MASK)
                for_each_pipe(pipe)
                        DRM_DEBUG("  pipe %c FDI IIR: 0x%08x\n",
                                         pipe_name(pipe),
                                         I915_READ(FDI_RX_IIR(pipe)));

        if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
                DRM_DEBUG("i915: PCH transcoder CRC done interrupt\n");

        if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
                DRM_DEBUG("i915: PCH transcoder CRC error interrupt\n");

        if (pch_iir & SDE_TRANSB_FIFO_UNDER)
                DRM_DEBUG("i915: PCH transcoder B underrun interrupt\n");
        if (pch_iir & SDE_TRANSA_FIFO_UNDER)
                DRM_DEBUG("i915: PCH transcoder A underrun interrupt\n");
}

static void
ivybridge_irq_handler(void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 de_iir, gt_iir, de_ier, pm_iir;
        int i;

        atomic_inc(&dev_priv->irq_received);

        /* disable master interrupt before clearing iir */
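        /*
         * (Note: with the master enable cleared, IIR bits that latch while
         * we are in the handler do not immediately re-raise the line; when
         * DEIER is restored at the bottom, any still-set IIR bit triggers
         * a fresh interrupt instead of being lost.)
         */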
        de_ier = I915_READ(DEIER);
        I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
        POSTING_READ(DEIER);

        gt_iir = I915_READ(GTIIR);
        if (gt_iir) {
                snb_gt_irq_handler(dev, dev_priv, gt_iir);
                I915_WRITE(GTIIR, gt_iir);
        }

        de_iir = I915_READ(DEIIR);
        if (de_iir) {
                if (de_iir & DE_GSE_IVB)
                        intel_opregion_gse_intr(dev);

                for (i = 0; i < 3; i++) {
                        if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
                                intel_prepare_page_flip(dev, i);
                                intel_finish_page_flip_plane(dev, i);
                        }
                        if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
                                drm_handle_vblank(dev, i);
                }

                /* check event from PCH */
                if (de_iir & DE_PCH_EVENT_IVB) {
                        u32 pch_iir = I915_READ(SDEIIR);

                        if (pch_iir & SDE_HOTPLUG_MASK_CPT)
                                taskqueue_enqueue(dev_priv->tq,
                                    &dev_priv->hotplug_task);
                        pch_irq_handler(dev, pch_iir);

                        /* clear PCH hotplug event before clearing CPU irq */
                        I915_WRITE(SDEIIR, pch_iir);
                }

                I915_WRITE(DEIIR, de_iir);
        }

        pm_iir = I915_READ(GEN6_PMIIR);
        if (pm_iir) {
                if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
                        gen6_queue_rps_work(dev_priv, pm_iir);
                I915_WRITE(GEN6_PMIIR, pm_iir);
        }

        I915_WRITE(DEIER, de_ier);
        POSTING_READ(DEIER);

        CTR3(KTR_DRM, "ivybridge_irq de %x gt %x pm %x", de_iir,
            gt_iir, pm_iir);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
                notify_ring(dev, &dev_priv->rings[RCS]);
        if (gt_iir & GT_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->rings[VCS]);
}

static void
ironlake_irq_handler(void *arg)
{
        struct drm_device *dev = arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
        u32 hotplug_mask;

        atomic_inc(&dev_priv->irq_received);

        /* disable master interrupt before clearing iir */
        de_ier = I915_READ(DEIER);
        I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
        POSTING_READ(DEIER);

        de_iir = I915_READ(DEIIR);
        gt_iir = I915_READ(GTIIR);
        pch_iir = I915_READ(SDEIIR);
        pm_iir = I915_READ(GEN6_PMIIR);

        CTR4(KTR_DRM, "ironlake_irq de %x gt %x pch %x pm %x", de_iir,
            gt_iir, pch_iir, pm_iir);

        if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
            (!IS_GEN6(dev) || pm_iir == 0))
                goto done;

        if (HAS_PCH_CPT(dev))
                hotplug_mask = SDE_HOTPLUG_MASK_CPT;
        else
                hotplug_mask = SDE_HOTPLUG_MASK;

        if (IS_GEN5(dev))
                ilk_gt_irq_handler(dev, dev_priv, gt_iir);
        else
                snb_gt_irq_handler(dev, dev_priv, gt_iir);

        if (de_iir & DE_GSE) {
                intel_opregion_gse_intr(dev);
        }

        if (de_iir & DE_PLANEA_FLIP_DONE) {
                intel_prepare_page_flip(dev, 0);
                intel_finish_page_flip_plane(dev, 0);
        }

        if (de_iir & DE_PLANEB_FLIP_DONE) {
                intel_prepare_page_flip(dev, 1);
                intel_finish_page_flip_plane(dev, 1);
        }

        if (de_iir & DE_PIPEA_VBLANK)
                drm_handle_vblank(dev, 0);

        if (de_iir & DE_PIPEB_VBLANK)
                drm_handle_vblank(dev, 1);

        /* check event from PCH */
        if (de_iir & DE_PCH_EVENT) {
                if (pch_iir & hotplug_mask)
                        taskqueue_enqueue(dev_priv->tq,
                            &dev_priv->hotplug_task);
                pch_irq_handler(dev, pch_iir);
        }

        if (de_iir & DE_PCU_EVENT) {
                I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
                i915_handle_rps_change(dev);
        }

        if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
                gen6_queue_rps_work(dev_priv, pm_iir);

        /* should clear PCH hotplug event before clearing CPU irq */
        I915_WRITE(SDEIIR, pch_iir);
        I915_WRITE(GTIIR, gt_iir);
        I915_WRITE(DEIIR, de_iir);
        I915_WRITE(GEN6_PMIIR, pm_iir);

done:
        I915_WRITE(DEIER, de_ier);
        POSTING_READ(DEIER);
}

/**
 * i915_error_work_func - do process context error handling work
 * @context: driver private data
 * @pending: taskqueue pending count (unused)
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void
i915_error_work_func(void *context, int pending)
{
        drm_i915_private_t *dev_priv = context;
        struct drm_device *dev = dev_priv->dev;

        /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); */

        if (atomic_load_acq_int(&dev_priv->mm.wedged)) {
                DRM_DEBUG("i915: resetting chip\n");
                /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); */
                if (!i915_reset(dev)) {
                        atomic_store_rel_int(&dev_priv->mm.wedged, 0);
                        /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); */
                }
                mtx_lock(&dev_priv->error_completion_lock);
                dev_priv->error_completion++;
                wakeup(&dev_priv->error_completion);
                mtx_unlock(&dev_priv->error_completion_lock);
        }
}
#define pr_err(...) printf(__VA_ARGS__)

static void i915_report_and_clear_eir(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 eir = I915_READ(EIR);
        int pipe;

        if (!eir)
                return;

        printf("i915: render error detected, EIR: 0x%08x\n", eir);

        if (IS_G4X(dev)) {
                if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
                        u32 ipeir = I915_READ(IPEIR_I965);

                        pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
                        pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
                        pr_err("  INSTDONE: 0x%08x\n",
                               I915_READ(INSTDONE_I965));
                        pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
                        pr_err("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
                        pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
                        I915_WRITE(IPEIR_I965, ipeir);
                        POSTING_READ(IPEIR_I965);
                }
                if (eir & GM45_ERROR_PAGE_TABLE) {
                        u32 pgtbl_err = I915_READ(PGTBL_ER);
                        pr_err("page table error\n");
                        pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
                        I915_WRITE(PGTBL_ER, pgtbl_err);
                        POSTING_READ(PGTBL_ER);
                }
        }

        if (!IS_GEN2(dev)) {
                if (eir & I915_ERROR_PAGE_TABLE) {
                        u32 pgtbl_err = I915_READ(PGTBL_ER);
                        pr_err("page table error\n");
                        pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
                        I915_WRITE(PGTBL_ER, pgtbl_err);
                        POSTING_READ(PGTBL_ER);
                }
        }

        if (eir & I915_ERROR_MEMORY_REFRESH) {
                pr_err("memory refresh error:\n");
                for_each_pipe(pipe)
                        pr_err("pipe %c stat: 0x%08x\n",
                               pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
                /* pipestat has already been acked */
        }
        if (eir & I915_ERROR_INSTRUCTION) {
                pr_err("instruction error\n");
                pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
                if (INTEL_INFO(dev)->gen < 4) {
                        u32 ipeir = I915_READ(IPEIR);

                        pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
                        pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
                        pr_err("  INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
                        pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
                        I915_WRITE(IPEIR, ipeir);
                        POSTING_READ(IPEIR);
                } else {
                        u32 ipeir = I915_READ(IPEIR_I965);

                        pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
                        pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
                        pr_err("  INSTDONE: 0x%08x\n",
                               I915_READ(INSTDONE_I965));
                        pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
                        pr_err("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
                        pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
                        I915_WRITE(IPEIR_I965, ipeir);
                        POSTING_READ(IPEIR_I965);
                }
        }

        I915_WRITE(EIR, eir);
        POSTING_READ(EIR);
        eir = I915_READ(EIR);
        if (eir) {
                /*
                 * some errors might have become stuck,
                 * mask them.
                 */
                DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
                I915_WRITE(EMR, I915_READ(EMR) | eir);
                I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
        }
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
        int i;

        i915_capture_error_state(dev);
        i915_report_and_clear_eir(dev);

        if (wedged) {
                mtx_lock(&dev_priv->error_completion_lock);
                dev_priv->error_completion = 0;
                dev_priv->mm.wedged = 1;
                /* unlock acts as rel barrier for store to wedged */
                mtx_unlock(&dev_priv->error_completion_lock);

                /*
                 * Wakeup waiting processes so they don't hang
                 */
                for_each_ring(ring, dev_priv, i) {
                        mtx_lock(&dev_priv->irq_lock);
                        wakeup(ring);
                        mtx_unlock(&dev_priv->irq_lock);
                }
        }

        taskqueue_enqueue(dev_priv->tq, &dev_priv->error_task);
}

static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_i915_gem_object *obj;
        struct intel_unpin_work *work;
        bool stall_detected;

        /* Ignore early vblank irqs */
        if (intel_crtc == NULL)
                return;

        mtx_lock(&dev->event_lock);
        work = intel_crtc->unpin_work;

        if (work == NULL || work->pending || !work->enable_stall_check) {
                /* Either the pending flip IRQ arrived, or we're too early. Don't check */
                mtx_unlock(&dev->event_lock);
                return;
        }

        /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
        obj = work->pending_flip_obj;
        if (INTEL_INFO(dev)->gen >= 4) {
                int dspsurf = DSPSURF(intel_crtc->plane);
                stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
                                        obj->gtt_offset;
        } else {
                int dspaddr = DSPADDR(intel_crtc->plane);
                stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
                                                        crtc->y * crtc->fb->pitches[0] +
                                                        crtc->x * crtc->fb->bits_per_pixel/8);
        }

        mtx_unlock(&dev->event_lock);

        if (stall_detected) {
                DRM_DEBUG("Pageflip stall detected\n");
                intel_prepare_page_flip(dev, intel_crtc->plane);
        }
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int
i915_enable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        if (!i915_pipe_enabled(dev, pipe))
                return -EINVAL;

        mtx_lock(&dev_priv->irq_lock);
        if (INTEL_INFO(dev)->gen >= 4)
                i915_enable_pipestat(dev_priv, pipe,
                                     PIPE_START_VBLANK_INTERRUPT_ENABLE);
        else
                i915_enable_pipestat(dev_priv, pipe,
                                     PIPE_VBLANK_INTERRUPT_ENABLE);

        /* maintain vblank delivery even in deep C-states */
        if (dev_priv->info->gen == 3)
                I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
        mtx_unlock(&dev_priv->irq_lock);
        CTR1(KTR_DRM, "i915_enable_vblank %d", pipe);

        return 0;
}

static int
ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        if (!i915_pipe_enabled(dev, pipe))
                return -EINVAL;

        mtx_lock(&dev_priv->irq_lock);
        ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
            DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
        mtx_unlock(&dev_priv->irq_lock);
        CTR1(KTR_DRM, "ironlake_enable_vblank %d", pipe);

        return 0;
}

static int
ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        if (!i915_pipe_enabled(dev, pipe))
                return -EINVAL;

        mtx_lock(&dev_priv->irq_lock);
        ironlake_enable_display_irq(dev_priv,
                                    DE_PIPEA_VBLANK_IVB << (5 * pipe));
        mtx_unlock(&dev_priv->irq_lock);
        CTR1(KTR_DRM, "ivybridge_enable_vblank %d", pipe);

        return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 dpfl, imr;

        if (!i915_pipe_enabled(dev, pipe))
                return -EINVAL;

        mtx_lock(&dev_priv->irq_lock);
        dpfl = I915_READ(VLV_DPFLIPSTAT);
        imr = I915_READ(VLV_IMR);
        if (pipe == 0) {
                dpfl |= PIPEA_VBLANK_INT_EN;
                imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
        } else {
                dpfl |= PIPEB_VBLANK_INT_EN;
                imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
        }
        I915_WRITE(VLV_DPFLIPSTAT, dpfl);
        I915_WRITE(VLV_IMR, imr);
        mtx_unlock(&dev_priv->irq_lock);

        return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void
i915_disable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        mtx_lock(&dev_priv->irq_lock);
        if (dev_priv->info->gen == 3)
                I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

        i915_disable_pipestat(dev_priv, pipe,
            PIPE_VBLANK_INTERRUPT_ENABLE |
            PIPE_START_VBLANK_INTERRUPT_ENABLE);
        mtx_unlock(&dev_priv->irq_lock);
        CTR1(KTR_DRM, "i915_disable_vblank %d", pipe);
}

static void
ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        mtx_lock(&dev_priv->irq_lock);
        ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
            DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
        mtx_unlock(&dev_priv->irq_lock);
        CTR1(KTR_DRM, "ironlake_disable_vblank %d", pipe);
}

static void
ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        mtx_lock(&dev_priv->irq_lock);
        ironlake_disable_display_irq(dev_priv,
                                     DE_PIPEA_VBLANK_IVB << (pipe * 5));
        mtx_unlock(&dev_priv->irq_lock);
        CTR1(KTR_DRM, "ivybridge_disable_vblank %d", pipe);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 dpfl, imr;

        mtx_lock(&dev_priv->irq_lock);
        dpfl = I915_READ(VLV_DPFLIPSTAT);
        imr = I915_READ(VLV_IMR);
        if (pipe == 0) {
                dpfl &= ~PIPEA_VBLANK_INT_EN;
                imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
        } else {
                dpfl &= ~PIPEB_VBLANK_INT_EN;
                imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
        }
        I915_WRITE(VLV_IMR, imr);
        I915_WRITE(VLV_DPFLIPSTAT, dpfl);
        mtx_unlock(&dev_priv->irq_lock);
}

static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{

        if (list_empty(&ring->request_list))
                return (0);
        else
                return (list_entry(ring->request_list.prev,
                    struct drm_i915_gem_request, list)->seqno);
}

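/*
 * A ring counts as idle when its request list is empty or the last
 * queued request's seqno has been reported as complete; note that
 * i915_seqno_passed() compares sequence numbers using a signed
 * difference, so the check stays correct across seqno wraparound.
 */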
static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
        if (list_empty(&ring->request_list) ||
            i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
                /* Issue a wake-up to catch stuck h/w. */
                sleepq_lock(ring);
                if (sleepq_sleepcnt(ring, 0) != 0) {
                        sleepq_release(ring);
                        DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
                                  ring->name);
                        wakeup(ring);
                        *err = true;
                } else
                        sleepq_release(ring);
                return true;
        }
        return false;
}

static bool kick_ring(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 tmp = I915_READ_CTL(ring);
        if (tmp & RING_WAIT) {
                DRM_ERROR("Kicking stuck wait on %s\n",
                          ring->name);
                I915_WRITE_CTL(ring, tmp);
                return true;
        }
        return false;
}

static bool i915_hangcheck_hung(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (dev_priv->hangcheck_count++ > 1) {
                bool hung = true;

                DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
                i915_handle_error(dev, true);

                if (!IS_GEN2(dev)) {
                        struct intel_ring_buffer *ring;
                        int i;

                        /* Is the chip hanging on a WAIT_FOR_EVENT?
                         * If so we can simply poke the RB_WAIT bit
                         * and break the hang. This should work on
                         * all but the second generation chipsets.
                         */
                        for_each_ring(ring, dev_priv, i)
                                hung &= !kick_ring(ring);
                }

                return hung;
        }

        return false;
}

/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void
i915_hangcheck_elapsed(void *context)
{
        struct drm_device *dev = (struct drm_device *)context;
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t acthd[I915_NUM_RINGS], instdone, instdone1;
        struct intel_ring_buffer *ring;
        bool err = false, idle;
        int i;

        if (!i915_enable_hangcheck)
                return;

        memset(acthd, 0, sizeof(acthd));
        idle = true;
        for_each_ring(ring, dev_priv, i) {
                idle &= i915_hangcheck_ring_idle(ring, &err);
                acthd[i] = intel_ring_get_active_head(ring);
        }

        /* If all work is done then ACTHD clearly hasn't advanced. */
        if (idle) {
                if (err) {
                        if (i915_hangcheck_hung(dev))
                                return;

                        goto repeat;
                }

                dev_priv->hangcheck_count = 0;
                return;
        }

        if (INTEL_INFO(dev)->gen < 4) {
                instdone = I915_READ(INSTDONE);
                instdone1 = 0;
        } else {
                instdone = I915_READ(INSTDONE_I965);
                instdone1 = I915_READ(INSTDONE1);
        }
        if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
            dev_priv->last_instdone == instdone &&
            dev_priv->last_instdone1 == instdone1) {
                if (i915_hangcheck_hung(dev))
                        return;
        } else {
                dev_priv->hangcheck_count = 0;

                memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
                dev_priv->last_instdone = instdone;
                dev_priv->last_instdone1 = instdone1;
        }

repeat:
        /* Reset timer in case chip hangs without another request being added */
        callout_schedule(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD);
}

/* drm_dma.h hooks */
static void
ironlake_irq_preinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        atomic_set(&dev_priv->irq_received, 0);

        I915_WRITE(HWSTAM, 0xeffe);

        /* XXX hotplug from PCH */

        I915_WRITE(DEIMR, 0xffffffff);
        I915_WRITE(DEIER, 0x0);
        POSTING_READ(DEIER);

        /* and GT */
        I915_WRITE(GTIMR, 0xffffffff);
        I915_WRITE(GTIER, 0x0);
        POSTING_READ(GTIER);

        /* south display irq */
        I915_WRITE(SDEIMR, 0xffffffff);
        I915_WRITE(SDEIER, 0x0);
        POSTING_READ(SDEIER);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;

        atomic_set(&dev_priv->irq_received, 0);

        /* VLV magic */
        I915_WRITE(VLV_IMR, 0);
        I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
        I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
        I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

        /* and GT */
        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIMR, 0xffffffff);
        I915_WRITE(GTIER, 0x0);
        POSTING_READ(GTIER);

        I915_WRITE(DPINVGTT, 0xff);

        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe), 0xffff);
        I915_WRITE(VLV_IIR, 0xffffffff);
        I915_WRITE(VLV_IMR, 0xffffffff);
        I915_WRITE(VLV_IER, 0x0);
        POSTING_READ(VLV_IER);
}

/*
 * Enable digital hotplug on the PCH, and configure the DP short pulse
 * duration to 2ms (which is the minimum in the Display Port spec)
 *
 * This register is the same on all known PCH chips.
 */

static void ironlake_enable_pch_hotplug(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32     hotplug;

        hotplug = I915_READ(PCH_PORT_HOTPLUG);
        hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
        hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
        hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
        hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
        I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        /* enable the kinds of interrupts that are always enabled */
        u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
                           DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
        u32 render_irqs;
        u32 hotplug_mask;

        dev_priv->irq_mask = ~display_mask;

        /* should always be able to generate irqs */
        I915_WRITE(DEIIR, I915_READ(DEIIR));
        I915_WRITE(DEIMR, dev_priv->irq_mask);
        I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
        POSTING_READ(DEIER);

        dev_priv->gt_irq_mask = ~0;

        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

        if (IS_GEN6(dev))
                render_irqs =
                        GT_USER_INTERRUPT |
                        GEN6_BSD_USER_INTERRUPT |
                        GEN6_BLITTER_USER_INTERRUPT;
        else
                render_irqs =
                        GT_USER_INTERRUPT |
                        GT_PIPE_NOTIFY |
                        GT_BSD_USER_INTERRUPT;
        I915_WRITE(GTIER, render_irqs);
        POSTING_READ(GTIER);

        if (HAS_PCH_CPT(dev)) {
                hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
                                SDE_PORTB_HOTPLUG_CPT |
                                SDE_PORTC_HOTPLUG_CPT |
                                SDE_PORTD_HOTPLUG_CPT);
        } else {
                hotplug_mask = (SDE_CRT_HOTPLUG |
                                SDE_PORTB_HOTPLUG |
                                SDE_PORTC_HOTPLUG |
                                SDE_PORTD_HOTPLUG |
                                SDE_AUX_MASK);
        }

        dev_priv->pch_irq_mask = ~hotplug_mask;

        I915_WRITE(SDEIIR, I915_READ(SDEIIR));
        I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
        I915_WRITE(SDEIER, hotplug_mask);
        POSTING_READ(SDEIER);

        ironlake_enable_pch_hotplug(dev);

        if (IS_IRONLAKE_M(dev)) {
                /* Clear & enable PCU event interrupts */
                I915_WRITE(DEIIR, DE_PCU_EVENT);
                I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
                ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
        }

        return 0;
}

static int
ivybridge_irq_postinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        /* enable the kinds of interrupts that are always enabled */
        u32 display_mask =
                DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
                DE_PLANEC_FLIP_DONE_IVB |
                DE_PLANEB_FLIP_DONE_IVB |
                DE_PLANEA_FLIP_DONE_IVB;
        u32 render_irqs;
        u32 hotplug_mask;

        dev_priv->irq_mask = ~display_mask;

        /* should always be able to generate irqs */
        I915_WRITE(DEIIR, I915_READ(DEIIR));
        I915_WRITE(DEIMR, dev_priv->irq_mask);
        I915_WRITE(DEIER,
                   display_mask |
                   DE_PIPEC_VBLANK_IVB |
                   DE_PIPEB_VBLANK_IVB |
                   DE_PIPEA_VBLANK_IVB);
        POSTING_READ(DEIER);

        dev_priv->gt_irq_mask = ~0;

        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

        render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
                GEN6_BLITTER_USER_INTERRUPT;
        I915_WRITE(GTIER, render_irqs);
        POSTING_READ(GTIER);

        hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
                        SDE_PORTB_HOTPLUG_CPT |
                        SDE_PORTC_HOTPLUG_CPT |
                        SDE_PORTD_HOTPLUG_CPT);
        dev_priv->pch_irq_mask = ~hotplug_mask;

        I915_WRITE(SDEIIR, I915_READ(SDEIIR));
        I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
        I915_WRITE(SDEIER, hotplug_mask);
        POSTING_READ(SDEIER);

        ironlake_enable_pch_hotplug(dev);

        return 0;
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 render_irqs;
        u32 enable_mask;
        u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
        u16 msid;

        enable_mask = I915_DISPLAY_PORT_INTERRUPT;
        enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
                I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

        dev_priv->irq_mask = ~enable_mask;

        dev_priv->pipestat[0] = 0;
        dev_priv->pipestat[1] = 0;

        /* Hack for broken MSIs on VLV */
        pci_write_config(dev->dev, 0x94, 0xfee00000, 4);
        msid = pci_read_config(dev->dev, 0x98, 2);
        msid &= 0xff; /* mask out delivery bits */
        msid |= (1<<14);
        pci_write_config(dev->dev, 0x98, msid, 2);
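        /*
         * Annotation (not in the original): 0xfee00000 is the standard
         * x86 local-APIC MSI address window, so the write above appears
         * to program the device's MSI address register directly; the
         * exact meaning of config offsets 0x94/0x98 and of bit 14 is a
         * VLV-specific detail inherited from the ported Linux code.
         */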

        I915_WRITE(VLV_IMR, dev_priv->irq_mask);
        I915_WRITE(VLV_IER, enable_mask);
        I915_WRITE(VLV_IIR, 0xffffffff);
        I915_WRITE(PIPESTAT(0), 0xffff);
        I915_WRITE(PIPESTAT(1), 0xffff);
        POSTING_READ(VLV_IER);

        I915_WRITE(VLV_IIR, 0xffffffff);
        I915_WRITE(VLV_IIR, 0xffffffff);

        render_irqs = GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
                GT_GEN6_BLT_CS_ERROR_INTERRUPT |
                GT_GEN6_BLT_USER_INTERRUPT |
                GT_GEN6_BSD_USER_INTERRUPT |
                GT_GEN6_BSD_CS_ERROR_INTERRUPT |
                GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
                GT_PIPE_NOTIFY |
                GT_RENDER_CS_ERROR_INTERRUPT |
                GT_SYNC_STATUS |
                GT_USER_INTERRUPT;

        dev_priv->gt_irq_mask = ~render_irqs;

        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIMR, 0);
        I915_WRITE(GTIER, render_irqs);
        POSTING_READ(GTIER);

        /* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
        I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
        I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

        I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
#if 0 /* FIXME: check register definitions; some have moved */
        /* Note HDMI and DP share bits */
        if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
                hotplug_en |= HDMIB_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
                hotplug_en |= HDMIC_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
                hotplug_en |= HDMID_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
                hotplug_en |= SDVOC_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
                hotplug_en |= SDVOB_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
                hotplug_en |= CRT_HOTPLUG_INT_EN;
                hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
        }
#endif

        I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

        return 0;
}
1525
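/*
 * Tear down ValleyView interrupts: mask everything and ack any pending
 * status so that a later re-install starts from a clean slate.
 */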
1526 static void valleyview_irq_uninstall(struct drm_device *dev)
1527 {
1528         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1529         int pipe;
1530
1531         if (!dev_priv)
1532                 return;
1533
1534         for_each_pipe(pipe)
1535                 I915_WRITE(PIPESTAT(pipe), 0xffff);
1536
1537         I915_WRITE(HWSTAM, 0xffffffff);
1538         I915_WRITE(PORT_HOTPLUG_EN, 0);
1539         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1540         for_each_pipe(pipe)
1541                 I915_WRITE(PIPESTAT(pipe), 0xffff);
1542         I915_WRITE(VLV_IIR, 0xffffffff);
1543         I915_WRITE(VLV_IMR, 0xffffffff);
1544         I915_WRITE(VLV_IER, 0x0);
1545         POSTING_READ(VLV_IER);
1546 }
1547
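/*
 * Tear down Ironlake-style interrupts (also used by SNB/IVB/HSW): mask and
 * disable the display (DE), GT and south display (SDE) blocks, acking any
 * leftover status.
 */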
1548 static void
1549 ironlake_irq_uninstall(struct drm_device *dev)
1550 {
1551         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1552
1553         if (dev_priv == NULL)
1554                 return;
1555
1556         I915_WRITE(HWSTAM, 0xffffffff);
1557
1558         I915_WRITE(DEIMR, 0xffffffff);
1559         I915_WRITE(DEIER, 0x0);
1560         I915_WRITE(DEIIR, I915_READ(DEIIR));
1561
1562         I915_WRITE(GTIMR, 0xffffffff);
1563         I915_WRITE(GTIER, 0x0);
1564         I915_WRITE(GTIIR, I915_READ(GTIIR));
1565
1566         I915_WRITE(SDEIMR, 0xffffffff);
1567         I915_WRITE(SDEIER, 0x0);
1568         I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1569 }
1570
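/*
 * Gen2 (i8xx) interrupt support.  These chips have 16-bit IMR/IER/IIR
 * registers, hence the I915_WRITE16/I915_READ16 accessors below.
 */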
1571 static void i8xx_irq_preinstall(struct drm_device * dev)
1572 {
1573         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1574         int pipe;
1575
1576         atomic_set(&dev_priv->irq_received, 0);
1577
1578         for_each_pipe(pipe)
1579                 I915_WRITE(PIPESTAT(pipe), 0);
1580         I915_WRITE16(IMR, 0xffff);
1581         I915_WRITE16(IER, 0x0);
1582         POSTING_READ16(IER);
1583 }
1584
1585 static int i8xx_irq_postinstall(struct drm_device *dev)
1586 {
1587         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1588
1589         dev_priv->pipestat[0] = 0;
1590         dev_priv->pipestat[1] = 0;
1591
1592         I915_WRITE16(EMR,
1593                      ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
1594
1595         /* Unmask the interrupts that we always want on. */
1596         dev_priv->irq_mask =
1597                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1598                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1599                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
1600                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
1601                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1602         I915_WRITE16(IMR, dev_priv->irq_mask);
1603
1604         I915_WRITE16(IER,
1605                      I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1606                      I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1607                      I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
1608                      I915_USER_INTERRUPT);
1609         POSTING_READ16(IER);
1610
1611         return 0;
1612 }
1613
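/*
 * Gen2 interrupt handler.  The flip-pending bits are excluded from the loop
 * condition via flip_mask until the matching vblank retires the flip, which
 * is what the flip_mask updates below implement.
 */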
1614 static void i8xx_irq_handler(void *arg)
1615 {
1616         struct drm_device *dev = (struct drm_device *) arg;
1617         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1618         u16 iir, new_iir;
1619         u32 pipe_stats[2];
1620         int irq_received;
1621         int pipe;
1622         u16 flip_mask =
1623                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
1624                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
1625
1626         atomic_inc(&dev_priv->irq_received);
1627
1628         iir = I915_READ16(IIR);
1629         if (iir == 0)
1630                 return;
1631
1632         while (iir & ~flip_mask) {
1633                 /* Can't rely on pipestat interrupt bit in iir as it might
1634                  * have been cleared after the pipestat interrupt was received.
1635                  * It doesn't set the bit in iir again, but it still produces
1636                  * interrupts (for non-MSI).
1637                  */
1638                 mtx_lock(&dev_priv->irq_lock);
1639                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
1640                         i915_handle_error(dev, false);
1641
1642                 for_each_pipe(pipe) {
1643                         int reg = PIPESTAT(pipe);
1644                         pipe_stats[pipe] = I915_READ(reg);
1645
1646                         /*
1647                          * Clear the PIPE*STAT regs before the IIR
1648                          */
1649                         if (pipe_stats[pipe] & 0x8000ffff) {
1650                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1651                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
1652                                                          pipe_name(pipe));
1653                                 I915_WRITE(reg, pipe_stats[pipe]);
1654                                 irq_received = 1;
1655                         }
1656                 }
1657                 mtx_unlock(&dev_priv->irq_lock);
1658
1659                 I915_WRITE16(IIR, iir & ~flip_mask);
1660                 new_iir = I915_READ16(IIR); /* Flush posted writes */
1661
1662                 i915_update_dri1_breadcrumb(dev);
1663
1664                 if (iir & I915_USER_INTERRUPT)
1665                         notify_ring(dev, &dev_priv->rings[RCS]);
1666
1667                 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
1668                     drm_handle_vblank(dev, 0)) {
1669                         if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
1670                                 intel_prepare_page_flip(dev, 0);
1671                                 intel_finish_page_flip(dev, 0);
1672                                 flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
1673                         }
1674                 }
1675
1676                 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
1677                     drm_handle_vblank(dev, 1)) {
1678                         if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
1679                                 intel_prepare_page_flip(dev, 1);
1680                                 intel_finish_page_flip(dev, 1);
1681                                 flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
1682                         }
1683                 }
1684
1685                 iir = new_iir;
1686         }
1687 }
1688
1689 static void i8xx_irq_uninstall(struct drm_device * dev)
1690 {
1691         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1692         int pipe;
1693
1694         for_each_pipe(pipe) {
1695                 /* Clear enable bits; then clear status bits */
1696                 I915_WRITE(PIPESTAT(pipe), 0);
1697                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
1698         }
1699         I915_WRITE16(IMR, 0xffff);
1700         I915_WRITE16(IER, 0x0);
1701         I915_WRITE16(IIR, I915_READ16(IIR));
1702 }
1703
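/*
 * Gen3 (i915-class) interrupt support: like gen2, but with 32-bit
 * registers, ASLE events and optional hotplug support.
 */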
1704 static void i915_irq_preinstall(struct drm_device * dev)
1705 {
1706         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1707         int pipe;
1708
1709         atomic_set(&dev_priv->irq_received, 0);
1710
1711         if (I915_HAS_HOTPLUG(dev)) {
1712                 I915_WRITE(PORT_HOTPLUG_EN, 0);
1713                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1714         }
1715
1716         I915_WRITE16(HWSTAM, 0xeffe);
1717         for_each_pipe(pipe)
1718                 I915_WRITE(PIPESTAT(pipe), 0);
1719         I915_WRITE(IMR, 0xffffffff);
1720         I915_WRITE(IER, 0x0);
1721         POSTING_READ(IER);
1722 }
1723
1724 static int i915_irq_postinstall(struct drm_device *dev)
1725 {
1726         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1727         u32 enable_mask;
1728
1729         dev_priv->pipestat[0] = 0;
1730         dev_priv->pipestat[1] = 0;
1731
1732         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
1733
1734         /* Unmask the interrupts that we always want on. */
1735         dev_priv->irq_mask =
1736                 ~(I915_ASLE_INTERRUPT |
1737                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1738                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1739                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
1740                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
1741                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1742
1743         enable_mask =
1744                 I915_ASLE_INTERRUPT |
1745                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1746                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1747                 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
1748                 I915_USER_INTERRUPT;
1749
1750         if (I915_HAS_HOTPLUG(dev)) {
1751                 /* Enable in IER... */
1752                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
1753                 /* and unmask in IMR */
1754                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
1755         }
1756
1757         I915_WRITE(IMR, dev_priv->irq_mask);
1758         I915_WRITE(IER, enable_mask);
1759         POSTING_READ(IER);
1760
1761         if (I915_HAS_HOTPLUG(dev)) {
1762                 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1763
1764                 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
1765                         hotplug_en |= HDMIB_HOTPLUG_INT_EN;
1766                 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
1767                         hotplug_en |= HDMIC_HOTPLUG_INT_EN;
1768                 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
1769                         hotplug_en |= HDMID_HOTPLUG_INT_EN;
1770                 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
1771                         hotplug_en |= SDVOC_HOTPLUG_INT_EN;
1772                 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
1773                         hotplug_en |= SDVOB_HOTPLUG_INT_EN;
1774                 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
1775                         hotplug_en |= CRT_HOTPLUG_INT_EN;
1776                         hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
1777                 }
1778
1779                 /* Ignore TV since it's buggy */
1780
1781                 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
1782         }
1783
1784         intel_opregion_enable_asle(dev);
1785
1786         return 0;
1787 }
1788
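/*
 * Gen3 interrupt handler.  Structured like the gen2 handler, with the
 * addition of hotplug handling; note that the hotplug status must be
 * consumed before IIR is cleared, or the event is lost.
 */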
1789 static void i915_irq_handler(void *arg)
1790 {
1791         struct drm_device *dev = (struct drm_device *) arg;
1792         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1793         u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
1794         u32 flip_mask =
1795                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
1796                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
1797         u32 flip[2] = {
1798                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
1799                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
1800         };
1801         int pipe;
1802
1803         atomic_inc(&dev_priv->irq_received);
1804
1805         iir = I915_READ(IIR);
1806         do {
1807                 bool irq_received = (iir & ~flip_mask) != 0;
1808                 bool blc_event = false;
1809
1810                 /* Can't rely on pipestat interrupt bit in iir as it might
1811                  * have been cleared after the pipestat interrupt was received.
1812                  * It doesn't set the bit in iir again, but it still produces
1813                  * interrupts (for non-MSI).
1814                  */
1815                 mtx_lock(&dev_priv->irq_lock);
1816                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
1817                         i915_handle_error(dev, false);
1818
1819                 for_each_pipe(pipe) {
1820                         int reg = PIPESTAT(pipe);
1821                         pipe_stats[pipe] = I915_READ(reg);
1822
1823                         /* Clear the PIPE*STAT regs before the IIR */
1824                         if (pipe_stats[pipe] & 0x8000ffff) {
1825                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1826                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
1827                                                          pipe_name(pipe));
1828                                 I915_WRITE(reg, pipe_stats[pipe]);
1829                                 irq_received = true;
1830                         }
1831                 }
1832                 mtx_unlock(&dev_priv->irq_lock);
1833
1834                 if (!irq_received)
1835                         break;
1836
1837                 /* Consume port.  Then clear IIR or we'll miss events */
1838                 if ((I915_HAS_HOTPLUG(dev)) &&
1839                     (iir & I915_DISPLAY_PORT_INTERRUPT)) {
1840                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1841
1842                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
1843                                   hotplug_status);
1844                         if (hotplug_status & dev_priv->hotplug_supported_mask)
1845                                 taskqueue_enqueue(dev_priv->tq,
1846                                     &dev_priv->hotplug_task);
1847
1848                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1849                         POSTING_READ(PORT_HOTPLUG_STAT);
1850                 }
1851
1852                 I915_WRITE(IIR, iir & ~flip_mask);
1853                 new_iir = I915_READ(IIR); /* Flush posted writes */
1854
1855                 if (iir & I915_USER_INTERRUPT)
1856                         notify_ring(dev, &dev_priv->rings[RCS]);
1857
1858                 for_each_pipe(pipe) {
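                        /* On mobile gen3 parts the plane/pipe assignment is
                         * swapped (plane A scans out on pipe B and vice
                         * versa), hence the adjustment below. */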
1859                         int plane = pipe;
1860                         if (IS_MOBILE(dev))
1861                                 plane = !plane;
1862                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
1863                             drm_handle_vblank(dev, pipe)) {
1864                                 if (iir & flip[plane]) {
1865                                         intel_prepare_page_flip(dev, plane);
1866                                         intel_finish_page_flip(dev, pipe);
1867                                         flip_mask &= ~flip[plane];
1868                                 }
1869                         }
1870
1871                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1872                                 blc_event = true;
1873                 }
1874
1875                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
1876                         intel_opregion_asle_intr(dev);
1877
1879                 /* With MSI, interrupts are only generated when iir
1880                  * transitions from zero to nonzero.  If another bit got
1881                  * set while we were handling the existing iir bits, then
1882                  * we would never get another interrupt.
1883                  *
1884                  * This is fine on non-MSI as well, as if we hit this path
1885                  * we avoid exiting the interrupt handler only to generate
1886                  * another one.
1887                  *
1888                  * Note that for MSI this could cause a stray interrupt report
1889                  * if an interrupt landed in the time between writing IIR and
1890                  * the posting read.  This should be rare enough to never
1891                  * trigger the 99% of 100,000 interrupts test for disabling
1892                  * stray interrupts.
1893                  */
1894                 iir = new_iir;
1895         } while (iir & ~flip_mask);
1896
1897         i915_update_dri1_breadcrumb(dev);
1898 }
1899
1900 static void i915_irq_uninstall(struct drm_device * dev)
1901 {
1902         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1903         int pipe;
1904
1905         if (!dev_priv)
1906                 return;
1907
1908         if (I915_HAS_HOTPLUG(dev)) {
1909                 I915_WRITE(PORT_HOTPLUG_EN, 0);
1910                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1911         }
1912
1913         I915_WRITE16(HWSTAM, 0xffff);
1914         for_each_pipe(pipe) {
1915                 /* Clear enable bits; then clear status bits */
1916                 I915_WRITE(PIPESTAT(pipe), 0);
1917                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
1918         }
1919         I915_WRITE(IMR, 0xffffffff);
1920         I915_WRITE(IER, 0x0);
1921
1922         I915_WRITE(IIR, I915_READ(IIR));
1923 }
1924
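/*
 * Gen4 (i965/G4X) interrupt support: full 32-bit registers, hotplug, and
 * on G4X a second (BSD) ring user interrupt.
 */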
1925 static void i965_irq_preinstall(struct drm_device * dev)
1926 {
1927         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1928         int pipe;
1929
1930         atomic_set(&dev_priv->irq_received, 0);
1931
1932         if (I915_HAS_HOTPLUG(dev)) {
1933                 I915_WRITE(PORT_HOTPLUG_EN, 0);
1934                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1935         }
1936
1937         I915_WRITE(HWSTAM, 0xeffe);
1938         for_each_pipe(pipe)
1939                 I915_WRITE(PIPESTAT(pipe), 0);
1940         I915_WRITE(IMR, 0xffffffff);
1941         I915_WRITE(IER, 0x0);
1942         POSTING_READ(IER);
1943 }
1944
1945 static int i965_irq_postinstall(struct drm_device *dev)
1946 {
1947         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1948         u32 enable_mask;
1949         u32 error_mask;
1950
1951         /* Unmask the interrupts that we always want on. */
1952         dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
1953                                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1954                                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1955                                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
1956                                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
1957                                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1958
1959         enable_mask = ~dev_priv->irq_mask;
1960         enable_mask |= I915_USER_INTERRUPT;
1961
1962         if (IS_G4X(dev))
1963                 enable_mask |= I915_BSD_USER_INTERRUPT;
1964
1965         dev_priv->pipestat[0] = 0;
1966         dev_priv->pipestat[1] = 0;
1967
1968         if (I915_HAS_HOTPLUG(dev)) {
1969                 /* Enable in IER... */
1970                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
1971                 /* and unmask in IMR */
1972                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
1973         }
1974
1975         /*
1976          * Enable some error detection, note the instruction error mask
1977          * bit is reserved, so we leave it masked.
1978          */
1979         if (IS_G4X(dev)) {
1980                 error_mask = ~(GM45_ERROR_PAGE_TABLE |
1981                                GM45_ERROR_MEM_PRIV |
1982                                GM45_ERROR_CP_PRIV |
1983                                I915_ERROR_MEMORY_REFRESH);
1984         } else {
1985                 error_mask = ~(I915_ERROR_PAGE_TABLE |
1986                                I915_ERROR_MEMORY_REFRESH);
1987         }
1988         I915_WRITE(EMR, error_mask);
1989
1990         I915_WRITE(IMR, dev_priv->irq_mask);
1991         I915_WRITE(IER, enable_mask);
1992         POSTING_READ(IER);
1993
1994         if (I915_HAS_HOTPLUG(dev)) {
1995                 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1996
1997                 /* Note HDMI and DP share bits */
1998                 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
1999                         hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2000                 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2001                         hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2002                 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2003                         hotplug_en |= HDMID_HOTPLUG_INT_EN;
2004                 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
2005                         hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2006                 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
2007                         hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2008                 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2009                         hotplug_en |= CRT_HOTPLUG_INT_EN;
2010
2011                         /* Programming the CRT detection parameters tends
2012                            to generate a spurious hotplug event about three
2013                            seconds later.  So just do it once.
2014                         */
2015                         if (IS_G4X(dev))
2016                                 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
2017                         hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2018                 }
2019
2020                 /* Ignore TV since it's buggy */
2021
2022                 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2023         }
2024
2025         intel_opregion_enable_asle(dev);
2026
2027         return 0;
2028 }
2029
2030 static void i965_irq_handler(void *arg)
2031 {
2032         struct drm_device *dev = (struct drm_device *) arg;
2033         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2034         u32 iir, new_iir;
2035         u32 pipe_stats[I915_MAX_PIPES];
2036         int irq_received;
2037         int pipe;
2038
2039         atomic_inc(&dev_priv->irq_received);
2040
2041         iir = I915_READ(IIR);
2042
2043         for (;;) {
2044                 bool blc_event = false;
2045
2046                 irq_received = iir != 0;
2047
2048                 /* Can't rely on pipestat interrupt bit in iir as it might
2049                  * have been cleared after the pipestat interrupt was received.
2050                  * It doesn't set the bit in iir again, but it still produces
2051                  * interrupts (for non-MSI).
2052                  */
2053                 mtx_lock(&dev_priv->irq_lock);
2054                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2055                         i915_handle_error(dev, false);
2056
2057                 for_each_pipe(pipe) {
2058                         int reg = PIPESTAT(pipe);
2059                         pipe_stats[pipe] = I915_READ(reg);
2060
2061                         /*
2062                          * Clear the PIPE*STAT regs before the IIR
2063                          */
2064                         if (pipe_stats[pipe] & 0x8000ffff) {
2065                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2066                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
2067                                                          pipe_name(pipe));
2068                                 I915_WRITE(reg, pipe_stats[pipe]);
2069                                 irq_received = 1;
2070                         }
2071                 }
2072                 mtx_unlock(&dev_priv->irq_lock);
2073
2074                 if (!irq_received)
2075                         break;
2076
2077                 /* Consume port.  Then clear IIR or we'll miss events */
2078                 if ((I915_HAS_HOTPLUG(dev)) &&
2079                     (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2080                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2081
2082                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2083                                   hotplug_status);
2084                         if (hotplug_status & dev_priv->hotplug_supported_mask)
2085                                 taskqueue_enqueue(dev_priv->tq,
2086                                     &dev_priv->hotplug_task);
2087
2088                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2089                         I915_READ(PORT_HOTPLUG_STAT);
2090                 }
2091
2092                 I915_WRITE(IIR, iir);
2093                 new_iir = I915_READ(IIR); /* Flush posted writes */
2094
2095                 if (iir & I915_USER_INTERRUPT)
2096                         notify_ring(dev, &dev_priv->rings[RCS]);
2097                 if (iir & I915_BSD_USER_INTERRUPT)
2098                         notify_ring(dev, &dev_priv->rings[VCS]);
2099
2100                 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
2101                         intel_prepare_page_flip(dev, 0);
2102
2103                 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
2104                         intel_prepare_page_flip(dev, 1);
2105
2106                 for_each_pipe(pipe) {
2107                         if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
2108                             drm_handle_vblank(dev, pipe)) {
2109                                 i915_pageflip_stall_check(dev, pipe);
2110                                 intel_finish_page_flip(dev, pipe);
2111                         }
2112
2113                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2114                                 blc_event = true;
2115                 }
2116
2118                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2119                         intel_opregion_asle_intr(dev);
2120
2121                 /* With MSI, interrupts are only generated when iir
2122                  * transitions from zero to nonzero.  If another bit got
2123                  * set while we were handling the existing iir bits, then
2124                  * we would never get another interrupt.
2125                  *
2126                  * This is fine on non-MSI as well, as if we hit this path
2127                  * we avoid exiting the interrupt handler only to generate
2128                  * another one.
2129                  *
2130                  * Note that for MSI this could cause a stray interrupt report
2131                  * if an interrupt landed in the time between writing IIR and
2132                  * the posting read.  This should be rare enough to never
2133                  * trigger the 99% of 100,000 interrupts test for disabling
2134                  * stray interrupts.
2135                  */
2136                 iir = new_iir;
2137         }
2138
2139         i915_update_dri1_breadcrumb(dev);
2140 }
2141
2142 static void i965_irq_uninstall(struct drm_device * dev)
2143 {
2144         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2145         int pipe;
2146
2147         if (I915_HAS_HOTPLUG(dev)) {
2148                 I915_WRITE(PORT_HOTPLUG_EN, 0);
2149                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2150         }
2151
2152         I915_WRITE(HWSTAM, 0xffffffff);
2153         for_each_pipe(pipe)
2154                 I915_WRITE(PIPESTAT(pipe), 0);
2155         I915_WRITE(IMR, 0xffffffff);
2156         I915_WRITE(IER, 0x0);
2157
2158         for_each_pipe(pipe)
2159                 I915_WRITE(PIPESTAT(pipe),
2160                            I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
2161         I915_WRITE(IIR, I915_READ(IIR));
2162 }
2163
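/*
 * Hook up the generation-specific IRQ entry points and vblank callbacks,
 * and set up the deferred-work tasks they enqueue.  Runs at driver load,
 * before the interrupt handler is installed.
 */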
2164 void intel_irq_init(struct drm_device *dev)
2165 {
2166         struct drm_i915_private *dev_priv = dev->dev_private;
2167
2168         TASK_INIT(&dev_priv->hotplug_task, 0, i915_hotplug_work_func,
2169             dev->dev_private);
2170         TASK_INIT(&dev_priv->error_task, 0, i915_error_work_func,
2171             dev->dev_private);
2172         TASK_INIT(&dev_priv->rps_task, 0, gen6_pm_rps_work_func,
2173             dev->dev_private);
2174
2175         dev->driver->get_vblank_counter = i915_get_vblank_counter;
2176         dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
2177         if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
2178                 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
2179                 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
2180         }
2181         if (drm_core_check_feature(dev, DRIVER_MODESET))
2182                 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
2183         else
2184                 dev->driver->get_vblank_timestamp = NULL;
2185         dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
2186
2187         if (IS_VALLEYVIEW(dev)) {
2188                 dev->driver->irq_handler = valleyview_irq_handler;
2189                 dev->driver->irq_preinstall = valleyview_irq_preinstall;
2190                 dev->driver->irq_postinstall = valleyview_irq_postinstall;
2191                 dev->driver->irq_uninstall = valleyview_irq_uninstall;
2192                 dev->driver->enable_vblank = valleyview_enable_vblank;
2193                 dev->driver->disable_vblank = valleyview_disable_vblank;
2194         } else if (IS_IVYBRIDGE(dev)) {
2195                 /* Share pre & uninstall handlers with ILK/SNB */
2196                 dev->driver->irq_handler = ivybridge_irq_handler;
2197                 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2198                 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
2199                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2200                 dev->driver->enable_vblank = ivybridge_enable_vblank;
2201                 dev->driver->disable_vblank = ivybridge_disable_vblank;
2202         } else if (IS_HASWELL(dev)) {
2203                 /* Share interrupt handling with IVB */
2204                 dev->driver->irq_handler = ivybridge_irq_handler;
2205                 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2206                 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
2207                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2208                 dev->driver->enable_vblank = ivybridge_enable_vblank;
2209                 dev->driver->disable_vblank = ivybridge_disable_vblank;
2210         } else if (HAS_PCH_SPLIT(dev)) {
2211                 dev->driver->irq_handler = ironlake_irq_handler;
2212                 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2213                 dev->driver->irq_postinstall = ironlake_irq_postinstall;
2214                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2215                 dev->driver->enable_vblank = ironlake_enable_vblank;
2216                 dev->driver->disable_vblank = ironlake_disable_vblank;
2217         } else {
2218                 if (INTEL_INFO(dev)->gen == 2) {
2219                         dev->driver->irq_preinstall = i8xx_irq_preinstall;
2220                         dev->driver->irq_postinstall = i8xx_irq_postinstall;
2221                         dev->driver->irq_handler = i8xx_irq_handler;
2222                         dev->driver->irq_uninstall = i8xx_irq_uninstall;
2223                 } else if (INTEL_INFO(dev)->gen == 3) {
2224                         /* IIR "flip pending" means done if this bit is set */
2225                         I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
2226
2227                         dev->driver->irq_preinstall = i915_irq_preinstall;
2228                         dev->driver->irq_postinstall = i915_irq_postinstall;
2229                         dev->driver->irq_uninstall = i915_irq_uninstall;
2230                         dev->driver->irq_handler = i915_irq_handler;
2231                 } else {
2232                         dev->driver->irq_preinstall = i965_irq_preinstall;
2233                         dev->driver->irq_postinstall = i965_irq_postinstall;
2234                         dev->driver->irq_uninstall = i965_irq_uninstall;
2235                         dev->driver->irq_handler = i965_irq_handler;
2236                 }
2237                 dev->driver->enable_vblank = i915_enable_vblank;
2238                 dev->driver->disable_vblank = i915_disable_vblank;
2239         }
2240 }
2241
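/*
 * Snapshot a GEM object into malloc'ed memory for the error state.  Pages
 * reachable through the mappable GTT are copied via a transient
 * write-combining device mapping; anything else is copied through an
 * sf_buf mapping of the backing page, with clflushes on either side.
 */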
2242 static struct drm_i915_error_object *
2243 i915_error_object_create(struct drm_i915_private *dev_priv,
2244     struct drm_i915_gem_object *src)
2245 {
2246         struct drm_i915_error_object *dst;
2247         struct sf_buf *sf;
2248         void *d, *s;
2249         int page, page_count;
2250         u32 reloc_offset;
2251
2252         if (src == NULL || src->pages == NULL)
2253                 return NULL;
2254
2255         page_count = src->base.size / PAGE_SIZE;
2256
2257         dst = malloc(sizeof(*dst) + page_count * sizeof(u32 *), DRM_I915_GEM,
2258             M_NOWAIT);
2259         if (dst == NULL)
2260                 return (NULL);
2261
2262         reloc_offset = src->gtt_offset;
2263         for (page = 0; page < page_count; page++) {
2264                 d = malloc(PAGE_SIZE, DRM_I915_GEM, M_NOWAIT);
2265                 if (d == NULL)
2266                         goto unwind;
2267
2268                 if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
2269                     src->has_global_gtt_mapping) {
2270                         /* Simply ignore tiling or any overlapping fence.
2271                          * It's part of the error state, and this hopefully
2272                          * captures what the GPU read.
2273                          */
2274                         s = pmap_mapdev_attr(src->base.dev->agp->base +
2275                             reloc_offset, PAGE_SIZE, PAT_WRITE_COMBINING);
2276                         memcpy(d, s, PAGE_SIZE);
2277                         pmap_unmapdev((vm_offset_t)s, PAGE_SIZE);
2278                 } else {
2279                         drm_clflush_pages(&src->pages[page], 1);
2280
2281                         sched_pin();
2282                         sf = sf_buf_alloc(src->pages[page], SFB_CPUPRIVATE |
2283                             SFB_NOWAIT);
2284                         if (sf != NULL) {
2285                                 s = (void *)(uintptr_t)sf_buf_kva(sf);
2286                                 memcpy(d, s, PAGE_SIZE);
2287                                 sf_buf_free(sf);
2288                         } else {
2289                                 bzero(d, PAGE_SIZE);
2290                                 strcpy(d, "XXXKIB");
2291                         }
2292                         sched_unpin();
2293
2294                         drm_clflush_pages(&src->pages[page], 1);
2295                 }
2296
2297                 dst->pages[page] = d;
2298
2299                 reloc_offset += PAGE_SIZE;
2300         }
2301         dst->page_count = page_count;
2302         dst->gtt_offset = src->gtt_offset;
2303
2304         return (dst);
2305
2306 unwind:
2307         while (page--)
2308                 free(dst->pages[page], DRM_I915_GEM);
2309         free(dst, DRM_I915_GEM);
2310         return (NULL);
2311 }
2312
2313 static void
2314 i915_error_object_free(struct drm_i915_error_object *obj)
2315 {
2316         int page;
2317
2318         if (obj == NULL)
2319                 return;
2320
2321         for (page = 0; page < obj->page_count; page++)
2322                 free(obj->pages[page], DRM_I915_GEM);
2323
2324         free(obj, DRM_I915_GEM);
2325 }
2326
2327 void
2328 i915_error_state_free(struct drm_i915_error_state *error)
2329 {
2330         int i;
2331
2332         for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
2333                 i915_error_object_free(error->ring[i].batchbuffer);
2334                 i915_error_object_free(error->ring[i].ringbuffer);
2335                 free(error->ring[i].requests, DRM_I915_GEM);
2336         }
2337
2338         free(error->active_bo, DRM_I915_GEM);
2339         free(error->overlay, DRM_I915_GEM);
2340         free(error, DRM_I915_GEM);
2341 }
2342
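/* Record one GEM object's metadata into an error-state buffer entry. */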
2343 static void capture_bo(struct drm_i915_error_buffer *err,
2344                        struct drm_i915_gem_object *obj)
2345 {
2346         err->size = obj->base.size;
2347         err->name = obj->base.name;
2348         err->seqno = obj->last_rendering_seqno;
2349         err->gtt_offset = obj->gtt_offset;
2350         err->read_domains = obj->base.read_domains;
2351         err->write_domain = obj->base.write_domain;
2352         err->fence_reg = obj->fence_reg;
2353         err->pinned = 0;
2354         if (obj->pin_count > 0)
2355                 err->pinned = 1;
2356         if (obj->user_pin_count > 0)
2357                 err->pinned = -1;
2358         err->tiling = obj->tiling_mode;
2359         err->dirty = obj->dirty;
2360         err->purgeable = obj->madv != I915_MADV_WILLNEED;
2361         err->ring = obj->ring ? obj->ring->id : -1;
2362         err->cache_level = obj->cache_level;
2363 }
2364
2365 static u32 capture_active_bo(struct drm_i915_error_buffer *err,
2366                              int count, struct list_head *head)
2367 {
2368         struct drm_i915_gem_object *obj;
2369         int i = 0;
2370
2371         list_for_each_entry(obj, head, mm_list) {
2372                 capture_bo(err++, obj);
2373                 if (++i == count)
2374                         break;
2375         }
2376
2377         return i;
2378 }
2379
2380 static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
2381                              int count, struct list_head *head)
2382 {
2383         struct drm_i915_gem_object *obj;
2384         int i = 0;
2385
2386         list_for_each_entry(obj, head, gtt_list) {
2387                 if (obj->pin_count == 0)
2388                         continue;
2389
2390                 capture_bo(err++, obj);
2391                 if (++i == count)
2392                         break;
2393         }
2394
2395         return i;
2396 }
2397
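/*
 * Dump the fence registers.  Gen4+ fences are 64-bit and gen2/3 fences are
 * 32-bit; 945-class chips add eight extra fence registers on top of the
 * common eight, hence the fall-through from case 3 into case 2.
 */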
2398 static void
2399 i915_gem_record_fences(struct drm_device *dev,
2400     struct drm_i915_error_state *error)
2401 {
2402         struct drm_i915_private *dev_priv = dev->dev_private;
2403         int i;
2404
2405         /* Fences */
2406         switch (INTEL_INFO(dev)->gen) {
2407         case 7:
2408         case 6:
2409                 for (i = 0; i < 16; i++)
2410                         error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
2411                 break;
2412         case 5:
2413         case 4:
2414                 for (i = 0; i < 16; i++)
2415                         error->fence[i] = I915_READ64(FENCE_REG_965_0 +
2416                             (i * 8));
2417                 break;
2418         case 3:
2419                 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
2420                         for (i = 0; i < 8; i++)
2421                                 error->fence[i+8] = I915_READ(FENCE_REG_945_8 +
2422                                     (i * 4));
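                /* FALLTHROUGH */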
2423         case 2:
2424                 for (i = 0; i < 8; i++)
2425                         error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
2426                 break;
2428         }
2429 }
2430
2431 static struct drm_i915_error_object *
2432 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
2433                              struct intel_ring_buffer *ring)
2434 {
2435         struct drm_i915_gem_object *obj;
2436         u32 seqno;
2437
2438         if (!ring->get_seqno)
2439                 return (NULL);
2440
2441         seqno = ring->get_seqno(ring);
2442         list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
2443                 if (obj->ring != ring)
2444                         continue;
2445
2446                 if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
2447                         continue;
2448
2449                 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
2450                         continue;
2451
2452                 /* We need to copy these to an anonymous buffer as the simplest
2453                  * method to avoid being overwritten by userspace.
2454                  */
2455                 return (i915_error_object_create(dev_priv, obj));
2456         }
2457
2458         return NULL;
2459 }
2460
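/*
 * Capture the per-ring register state (head/tail, IPEIR/IPEHR, waiters,
 * etc.) at the time of the hang.
 */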
2461 static void
2462 i915_record_ring_state(struct drm_device *dev,
2463     struct drm_i915_error_state *error,
2464     struct intel_ring_buffer *ring)
2465 {
2466         struct drm_i915_private *dev_priv = dev->dev_private;
2467
2468         if (INTEL_INFO(dev)->gen >= 6) {
2469                 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
2470                 error->semaphore_mboxes[ring->id][0]
2471                         = I915_READ(RING_SYNC_0(ring->mmio_base));
2472                 error->semaphore_mboxes[ring->id][1]
2473                         = I915_READ(RING_SYNC_1(ring->mmio_base));
2474         }
2475
2476         if (INTEL_INFO(dev)->gen >= 4) {
2477                 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
2478                 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
2479                 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
2480                 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
2481                 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
2482                 if (ring->id == RCS) {
2483                         error->instdone1 = I915_READ(INSTDONE1);
2484                         error->bbaddr = I915_READ64(BB_ADDR);
2485                 }
2486         } else {
2487                 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
2488                 error->ipeir[ring->id] = I915_READ(IPEIR);
2489                 error->ipehr[ring->id] = I915_READ(IPEHR);
2490                 error->instdone[ring->id] = I915_READ(INSTDONE);
2491         }
2492
2493         sleepq_lock(ring);
2494         error->waiting[ring->id] = sleepq_sleepcnt(ring, 0) != 0;
2495         sleepq_release(ring);
2496         error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
2497         error->seqno[ring->id] = ring->get_seqno(ring);
2498         error->acthd[ring->id] = intel_ring_get_active_head(ring);
2499         error->head[ring->id] = I915_READ_HEAD(ring);
2500         error->tail[ring->id] = I915_READ_TAIL(ring);
2501
2502         error->cpu_ring_head[ring->id] = ring->head;
2503         error->cpu_ring_tail[ring->id] = ring->tail;
2504 }
2505
2506 static void
2507 i915_gem_record_rings(struct drm_device *dev,
2508     struct drm_i915_error_state *error)
2509 {
2510         struct drm_i915_private *dev_priv = dev->dev_private;
2511         struct intel_ring_buffer *ring;
2512         struct drm_i915_gem_request *request;
2513         int i, count;
2514
2515         for_each_ring(ring, dev_priv, i) {
2516                 i915_record_ring_state(dev, error, ring);
2517
2518                 error->ring[i].batchbuffer =
2519                         i915_error_first_batchbuffer(dev_priv, ring);
2520
2521                 error->ring[i].ringbuffer =
2522                         i915_error_object_create(dev_priv, ring->obj);
2523
2524                 count = 0;
2525                 list_for_each_entry(request, &ring->request_list, list)
2526                         count++;
2527
2528                 error->ring[i].num_requests = count;
2529                 error->ring[i].requests = malloc(count *
2530                     sizeof(struct drm_i915_error_request), DRM_I915_GEM,
2531                     M_NOWAIT); /* don't sleep while capturing error state */
2532                 if (error->ring[i].requests == NULL) {
2533                         error->ring[i].num_requests = 0;
2534                         continue;
2535                 }
2536
2537                 count = 0;
2538                 list_for_each_entry(request, &ring->request_list, list) {
2539                         struct drm_i915_error_request *erq;
2540
2541                         erq = &error->ring[i].requests[count++];
2542                         erq->seqno = request->seqno;
2543                         erq->jiffies = request->emitted_jiffies;
2544                         erq->tail = request->tail;
2545                 }
2546         }
2547 }
2548
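/*
 * Capture a snapshot of the GPU state when a hang or error is detected.
 * Only the first error since the last reset/clear is kept; the handoff to
 * dev_priv->first_error happens under error_lock at the end.
 */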
2549 static void
2550 i915_capture_error_state(struct drm_device *dev)
2551 {
2552         struct drm_i915_private *dev_priv = dev->dev_private;
2553         struct drm_i915_gem_object *obj;
2554         struct drm_i915_error_state *error;
2555         int i, pipe;
2556
2557         mtx_lock(&dev_priv->error_lock);
2558         error = dev_priv->first_error;
2559         mtx_unlock(&dev_priv->error_lock);
2560         if (error != NULL)
2561                 return;
2562
2563         /* Account for pipe specific data like PIPE*STAT */
2564         error = malloc(sizeof(*error), DRM_I915_GEM, M_NOWAIT | M_ZERO);
2565         if (error == NULL) {
2566                 DRM_DEBUG("out of memory, not capturing error state\n");
2567                 return;
2568         }
2569
2570         DRM_INFO("capturing error event; look for more information in "
2571             "sysctl hw.dri.%d.info.i915_error_state\n", dev->sysctl_node_idx);
2572
2573         refcount_init(&error->ref, 1);
2574         error->eir = I915_READ(EIR);
2575         error->pgtbl_er = I915_READ(PGTBL_ER);
2576
2577         if (HAS_PCH_SPLIT(dev))
2578                 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
2579         else if (IS_VALLEYVIEW(dev))
2580                 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
2581         else if (IS_GEN2(dev))
2582                 error->ier = I915_READ16(IER);
2583         else
2584                 error->ier = I915_READ(IER);
2585
2586         for_each_pipe(pipe)
2587                 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
2588
2589         if (INTEL_INFO(dev)->gen >= 6) {
2590                 error->error = I915_READ(ERROR_GEN6);
2591                 error->done_reg = I915_READ(DONE_REG);
2592         }
2593
2594         i915_gem_record_fences(dev, error);
2595         i915_gem_record_rings(dev, error);
2596
2597         /* Record buffers on the active and pinned lists. */
2598         error->active_bo = NULL;
2599         error->pinned_bo = NULL;
2600
2601         i = 0;
2602         list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
2603                 i++;
2604         error->active_bo_count = i;
2605         list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
2606                 if (obj->pin_count)
2607                         i++;
2608         error->pinned_bo_count = i - error->active_bo_count;
2609
2610         error->active_bo = NULL;
2611         error->pinned_bo = NULL;
2612         if (i) {
2613                 error->active_bo = malloc(sizeof(*error->active_bo) * i,
2614                     DRM_I915_GEM, M_NOWAIT);
2615                 if (error->active_bo)
2616                         error->pinned_bo = error->active_bo +
2617                             error->active_bo_count;
2618         }
2619
2620         if (error->active_bo)
2621                 error->active_bo_count =
2622                         capture_active_bo(error->active_bo,
2623                                           error->active_bo_count,
2624                                           &dev_priv->mm.active_list);
2625
2626         if (error->pinned_bo)
2627                 error->pinned_bo_count =
2628                         capture_pinned_bo(error->pinned_bo,
2629                                           error->pinned_bo_count,
2630                                           &dev_priv->mm.gtt_list);
2631
2632         microtime(&error->time);
2633
2634         error->overlay = intel_overlay_capture_error_state(dev);
2635         error->display = intel_display_capture_error_state(dev);
2636
2637         mtx_lock(&dev_priv->error_lock);
2638         if (dev_priv->first_error == NULL) {
2639                 dev_priv->first_error = error;
2640                 error = NULL;
2641         }
2642         mtx_unlock(&dev_priv->error_lock);
2643
2644         if (error != NULL)
2645                 i915_error_state_free(error);
2646 }
2647
2648 void
2649 i915_destroy_error_state(struct drm_device *dev)
2650 {
2651         struct drm_i915_private *dev_priv = dev->dev_private;
2652         struct drm_i915_error_state *error;
2653
2654         mtx_lock(&dev_priv->error_lock);
2655         error = dev_priv->first_error;
2656         dev_priv->first_error = NULL;
2657         mtx_unlock(&dev_priv->error_lock);
2658
2659         if (error != NULL && refcount_release(&error->ref))
2660                 i915_error_state_free(error);
2661 }