/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_drv.h>
#include <sys/kdb.h>

static struct drm_i915_private *i915_mch_dev;
/*
 * Lock protecting IPS related data structures
 *   - i915_mch_dev
 *   - dev_priv->max_delay
 *   - dev_priv->min_delay
 *   - dev_priv->fmax
 *   - dev_priv->gpu_busy
 */
static struct mtx mchdev_lock;
MTX_SYSINIT(mchdev, &mchdev_lock, "mchdev", MTX_DEF);

/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, reducing power consumption.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by means of the
 * i915_enable_fbc parameter.
 */
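/*
 * Note (added; not from the original source): on this FreeBSD port the
 * parameter above is wired up as a loader tunable in i915_drv.c.  Assuming
 * the usual drm2 naming convention, something like
 *
 *     # /boot/loader.conf
 *     drm.i915.enable_fbc=1    # force-enable; -1 selects the per-chip default
 *
 * would set it at boot.  Verify the exact tunable name against i915_drv.c
 * on this branch.
 */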

static void i8xx_disable_fbc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 fbc_ctl;

        /* Disable compression */
        fbc_ctl = I915_READ(FBC_CONTROL);
        if ((fbc_ctl & FBC_CTL_EN) == 0)
                return;

        fbc_ctl &= ~FBC_CTL_EN;
        I915_WRITE(FBC_CONTROL, fbc_ctl);

        /* Wait for compressing bit to clear */
        if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
                DRM_DEBUG_KMS("FBC idle timed out\n");
                return;
        }

        DRM_DEBUG_KMS("disabled FBC\n");
}

static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int cfb_pitch;
        int plane, i;
        u32 fbc_ctl, fbc_ctl2;

        cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
        if (fb->pitches[0] < cfb_pitch)
                cfb_pitch = fb->pitches[0];

        /* FBC_CTL wants 64B units */
        cfb_pitch = (cfb_pitch / 64) - 1;
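        /*
         * Illustrative arithmetic (hypothetical value, added for clarity):
         * a cfb_pitch of 2048 bytes encodes as (2048 / 64) - 1 = 31 in the
         * stride field, i.e. the register stores "number of 64-byte units,
         * minus one".
         */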
        plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

        /* Clear old tags */
        for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
                I915_WRITE(FBC_TAG + (i * 4), 0);

        /* Set it up... */
        fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
        fbc_ctl2 |= plane;
        I915_WRITE(FBC_CONTROL2, fbc_ctl2);
        I915_WRITE(FBC_FENCE_OFF, crtc->y);

        /* enable it... */
        fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
        if (IS_I945GM(dev))
                fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
        fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
        fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
        fbc_ctl |= obj->fence_reg;
        I915_WRITE(FBC_CONTROL, fbc_ctl);

        DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d\n",
                      cfb_pitch, crtc->y, intel_crtc->plane);
}

static bool i8xx_fbc_enabled(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
        unsigned long stall_watermark = 200;
        u32 dpfc_ctl;

        dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
        dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
        I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

        I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
                   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
                   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
        I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

        /* enable it... */
        I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

        DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}

static void g4x_disable_fbc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpfc_ctl;

        /* Disable compression */
        dpfc_ctl = I915_READ(DPFC_CONTROL);
        if (dpfc_ctl & DPFC_CTL_EN) {
                dpfc_ctl &= ~DPFC_CTL_EN;
                I915_WRITE(DPFC_CONTROL, dpfc_ctl);

                DRM_DEBUG_KMS("disabled FBC\n");
        }
}

static bool g4x_fbc_enabled(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 blt_ecoskpd;

        /* Make sure blitter notifies FBC of writes */
        gen6_gt_force_wake_get(dev_priv);
        blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
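        /*
         * Explanatory note (added; hedged): ECOSKPD appears to follow the
         * common masked-register convention in which the high half acts as
         * a write-enable mask for the low half.  The sequence below first
         * arms the mask bit (FBC_NOTIFY shifted into the mask field), then
         * sets the bit proper, then disarms the mask again.
         */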
        blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
                GEN6_BLITTER_LOCK_SHIFT;
        I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
        blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
        I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
        blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
                         GEN6_BLITTER_LOCK_SHIFT);
        I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
        POSTING_READ(GEN6_BLITTER_ECOSKPD);
        gen6_gt_force_wake_put(dev_priv);
}

static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
        unsigned long stall_watermark = 200;
        u32 dpfc_ctl;

        dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
        dpfc_ctl &= DPFC_RESERVED;
        dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
        /* Set persistent mode for front-buffer rendering, a la X. */
        dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
        dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
        I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

        I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
                   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
                   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
        I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
        I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
        /* enable it... */
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

        if (IS_GEN6(dev)) {
                I915_WRITE(SNB_DPFC_CTL_SA,
                           SNB_CPU_FENCE_ENABLE | obj->fence_reg);
                I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
                sandybridge_blit_fbc_update(dev);
        }

        DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}

static void ironlake_disable_fbc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpfc_ctl;

        /* Disable compression */
        dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
        if (dpfc_ctl & DPFC_CTL_EN) {
                dpfc_ctl &= ~DPFC_CTL_EN;
                I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

                DRM_DEBUG_KMS("disabled FBC\n");
        }
}

static bool ironlake_fbc_enabled(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

bool intel_fbc_enabled(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!dev_priv->display.fbc_enabled)
                return false;

        return dev_priv->display.fbc_enabled(dev);
}

static void intel_fbc_work_fn(void *arg, int pending)
{
        struct intel_fbc_work *work = arg;
        struct drm_device *dev = work->crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        DRM_LOCK(dev);
        if (work == dev_priv->fbc_work) {
                /* Double check that we haven't switched fb without cancelling
                 * the prior work.
                 */
                if (work->crtc->fb == work->fb) {
                        dev_priv->display.enable_fbc(work->crtc,
                                                     work->interval);

                        dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
                        dev_priv->cfb_fb = work->crtc->fb->base.id;
                        dev_priv->cfb_y = work->crtc->y;
                }

                dev_priv->fbc_work = NULL;
        }
        DRM_UNLOCK(dev);

        free(work, DRM_MEM_KMS);
}

static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
        u_int pending;

        if (dev_priv->fbc_work == NULL)
                return;

        DRM_DEBUG_KMS("cancelling pending FBC enable\n");

        /* Synchronisation is provided by struct_mutex and checking of
         * dev_priv->fbc_work, so we can perform the cancellation
         * entirely asynchronously.
         */
        if (taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->fbc_work->task,
            &pending) == 0)
                /* task was cancelled before it ran, clean up */
                free(dev_priv->fbc_work, DRM_MEM_KMS);

        /* Mark the work as no longer wanted so that if it does
         * wake up (because the work was already running and waiting
         * for our mutex), it will discover that it is no longer
         * necessary to run.
         */
        dev_priv->fbc_work = NULL;
}

void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
        struct intel_fbc_work *work;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!dev_priv->display.enable_fbc)
                return;

        intel_cancel_fbc_work(dev_priv);

        work = malloc(sizeof(*work), DRM_MEM_KMS, M_WAITOK | M_ZERO);

        work->crtc = crtc;
        work->fb = crtc->fb;
        work->interval = interval;
        TIMEOUT_TASK_INIT(dev_priv->tq, &work->task, 0, intel_fbc_work_fn,
            work);

        dev_priv->fbc_work = work;

        DRM_DEBUG_KMS("scheduling delayed FBC enable\n");

        /* Delay the actual enabling to let pageflipping cease and the
         * display to settle before starting the compression. Note that
         * this delay also serves a second purpose: it allows for a
         * vblank to pass after disabling the FBC before we attempt
         * to modify the control registers.
         *
         * A more complicated solution would involve tracking vblanks
         * following the termination of the page-flipping sequence
         * and indeed performing the enable as a co-routine and not
         * waiting synchronously upon the vblank.
         */
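        /*
         * Note (added; assumption about the drm2 compat layer): here
         * msecs_to_jiffies() converts milliseconds to scheduler ticks,
         * roughly (50 * hz) / 1000, so the deferred enable fires about
         * 50 ms from now on the driver taskqueue.
         */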
        taskqueue_enqueue_timeout(dev_priv->tq, &work->task,
            msecs_to_jiffies(50));
}

void intel_disable_fbc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        intel_cancel_fbc_work(dev_priv);

        if (!dev_priv->display.disable_fbc)
                return;

        dev_priv->display.disable_fbc(dev);
        dev_priv->cfb_plane = -1;
}

/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time.  We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= 2048 in width, 1536 in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one.  It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = NULL, *tmp_crtc;
        struct intel_crtc *intel_crtc;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;
        struct drm_i915_gem_object *obj;
        int enable_fbc;

        DRM_DEBUG_KMS("\n");

        if (!i915_powersave)
                return;

        if (!I915_HAS_FBC(dev))
                return;

        /*
         * If FBC is already on, we just have to verify that we can
         * keep it that way...
         * Need to disable if:
         *   - more than one pipe is active
         *   - changing FBC params (stride, fence, mode)
         *   - new fb is too large to fit in compressed buffer
         *   - going to an unsupported config (interlace, pixel multiply, etc.)
         */
        list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
                if (tmp_crtc->enabled && tmp_crtc->fb) {
                        if (crtc) {
                                DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
                                dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
                                goto out_disable;
                        }
                        crtc = tmp_crtc;
                }
        }

        if (!crtc || crtc->fb == NULL) {
                DRM_DEBUG_KMS("no output, disabling\n");
                dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
                goto out_disable;
        }

        intel_crtc = to_intel_crtc(crtc);
        fb = crtc->fb;
        intel_fb = to_intel_framebuffer(fb);
        obj = intel_fb->obj;

        enable_fbc = i915_enable_fbc;
        if (enable_fbc < 0) {
                DRM_DEBUG_KMS("fbc set to per-chip default\n");
                enable_fbc = 1;
                if (INTEL_INFO(dev)->gen <= 6)
                        enable_fbc = 0;
        }
        if (!enable_fbc) {
                DRM_DEBUG_KMS("fbc disabled per module param\n");
                dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
                goto out_disable;
        }
        if (intel_fb->obj->base.size > dev_priv->cfb_size) {
                DRM_DEBUG_KMS("framebuffer too large, disabling "
                              "compression\n");
                dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
                goto out_disable;
        }
        if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
            (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
                DRM_DEBUG_KMS("mode incompatible with compression, "
                              "disabling\n");
                dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
                goto out_disable;
        }
        if ((crtc->mode.hdisplay > 2048) ||
            (crtc->mode.vdisplay > 1536)) {
                DRM_DEBUG_KMS("mode too large for compression, disabling\n");
                dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
                goto out_disable;
        }
        if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
                DRM_DEBUG_KMS("plane not 0, disabling compression\n");
                dev_priv->no_fbc_reason = FBC_BAD_PLANE;
                goto out_disable;
        }

        /* The use of a CPU fence is mandatory in order to detect writes
         * by the CPU to the scanout and trigger updates to the FBC.
         */
        if (obj->tiling_mode != I915_TILING_X ||
            obj->fence_reg == I915_FENCE_REG_NONE) {
                DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
                dev_priv->no_fbc_reason = FBC_NOT_TILED;
                goto out_disable;
        }

        /* If the kernel debugger is active, always disable compression */
        if (kdb_active)
                goto out_disable;

        /* If the scanout has not changed, don't modify the FBC settings.
         * Note that we make the fundamental assumption that the fb->obj
         * cannot be unpinned (and have its GTT offset and fence revoked)
         * without first being decoupled from the scanout and FBC disabled.
         */
        if (dev_priv->cfb_plane == intel_crtc->plane &&
            dev_priv->cfb_fb == fb->base.id &&
            dev_priv->cfb_y == crtc->y)
                return;

        if (intel_fbc_enabled(dev)) {
                /* We update FBC along two paths, after changing fb/crtc
                 * configuration (modeswitching) and after page-flipping
                 * finishes. For the latter, we know that not only did
                 * we disable the FBC at the start of the page-flip
                 * sequence, but also more than one vblank has passed.
                 *
                 * For the former case of modeswitching, it is possible
                 * to switch between two FBC valid configurations
                 * instantaneously so we do need to disable the FBC
                 * before we can modify its control registers. We also
                 * have to wait for the next vblank for that to take
                 * effect. However, since we delay enabling FBC we can
                 * assume that a vblank has passed since disabling and
                 * that we can safely alter the registers in the deferred
                 * callback.
                 *
                 * In the scenario that we go from a valid to invalid
                 * and then back to valid FBC configuration we have
                 * no strict enforcement that a vblank occurred since
                 * disabling the FBC. However, along all current pipe
                 * disabling paths we do need to wait for a vblank at
                 * some point. And we wait before enabling FBC anyway.
                 */
                DRM_DEBUG_KMS("disabling active FBC for update\n");
                intel_disable_fbc(dev);
        }

        intel_enable_fbc(crtc, 500);
        return;

out_disable:
        /* Multiple disables should be harmless */
        if (intel_fbc_enabled(dev)) {
                DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
                intel_disable_fbc(dev);
        }
}

static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 tmp;

        tmp = I915_READ(CLKCFG);

        switch (tmp & CLKCFG_FSB_MASK) {
        case CLKCFG_FSB_533:
                dev_priv->fsb_freq = 533; /* 133*4 */
                break;
        case CLKCFG_FSB_800:
                dev_priv->fsb_freq = 800; /* 200*4 */
                break;
        case CLKCFG_FSB_667:
                dev_priv->fsb_freq = 667; /* 167*4 */
                break;
        case CLKCFG_FSB_400:
                dev_priv->fsb_freq = 400; /* 100*4 */
                break;
        }

        switch (tmp & CLKCFG_MEM_MASK) {
        case CLKCFG_MEM_533:
                dev_priv->mem_freq = 533;
                break;
        case CLKCFG_MEM_667:
                dev_priv->mem_freq = 667;
                break;
        case CLKCFG_MEM_800:
                dev_priv->mem_freq = 800;
                break;
        }

        /* detect pineview DDR3 setting */
        tmp = I915_READ(CSHRDDR3CTL);
        dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u16 ddrpll, csipll;

        ddrpll = I915_READ16(DDRMPLL1);
        csipll = I915_READ16(CSIPLL0);

        switch (ddrpll & 0xff) {
        case 0xc:
                dev_priv->mem_freq = 800;
                break;
        case 0x10:
                dev_priv->mem_freq = 1066;
                break;
        case 0x14:
                dev_priv->mem_freq = 1333;
                break;
        case 0x18:
                dev_priv->mem_freq = 1600;
                break;
        default:
                DRM_DEBUG("unknown memory frequency 0x%02x\n",
                                 ddrpll & 0xff);
                dev_priv->mem_freq = 0;
                break;
        }

        dev_priv->r_t = dev_priv->mem_freq;

        switch (csipll & 0x3ff) {
        case 0x00c:
                dev_priv->fsb_freq = 3200;
                break;
        case 0x00e:
                dev_priv->fsb_freq = 3733;
                break;
        case 0x010:
                dev_priv->fsb_freq = 4266;
                break;
        case 0x012:
                dev_priv->fsb_freq = 4800;
                break;
        case 0x014:
                dev_priv->fsb_freq = 5333;
                break;
        case 0x016:
                dev_priv->fsb_freq = 5866;
                break;
        case 0x018:
                dev_priv->fsb_freq = 6400;
                break;
        default:
                DRM_DEBUG("unknown fsb frequency 0x%04x\n",
                                 csipll & 0x3ff);
                dev_priv->fsb_freq = 0;
                break;
        }

        if (dev_priv->fsb_freq == 3200) {
                dev_priv->c_m = 0;
        } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
                dev_priv->c_m = 1;
        } else {
                dev_priv->c_m = 2;
        }
}

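/*
 * Column key for the table below (added for readability; assumes the field
 * order of struct cxsr_latency as consumed by intel_get_cxsr_latency() and
 * pineview_update_wm()):
 *   { is_desktop, is_ddr3, fsb_freq, mem_freq,
 *     display_sr, display_hpll_disable, cursor_sr, cursor_hpll_disable }
 * The four latency columns feed the latency_ns parameter of
 * intel_calculate_wm(), so they are apparently in nanoseconds.
 */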
static const struct cxsr_latency cxsr_latency_table[] = {
        {1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
        {1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
        {1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
        {1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
        {1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

        {1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
        {1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
        {1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
        {1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
        {1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

        {1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
        {1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
        {1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
        {1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
        {1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

        {0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
        {0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
        {0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
        {0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
        {0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

        {0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
        {0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
        {0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
        {0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
        {0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

        {0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
        {0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
        {0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
        {0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
        {0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
                                                         int is_ddr3,
                                                         int fsb,
                                                         int mem)
{
        const struct cxsr_latency *latency;
        int i;

        if (fsb == 0 || mem == 0)
                return NULL;

        for (i = 0; i < DRM_ARRAY_SIZE(cxsr_latency_table); i++) {
                latency = &cxsr_latency_table[i];
                if (is_desktop == latency->is_desktop &&
                    is_ddr3 == latency->is_ddr3 &&
                    fsb == latency->fsb_freq && mem == latency->mem_freq)
                        return latency;
        }

        DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

        return NULL;
}

static void pineview_disable_cxsr(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* deactivate cxsr */
        I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;

static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dsparb = I915_READ(DSPARB);
        int size;

        size = dsparb & 0x7f;
        if (plane)
                size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

        DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
                      plane ? "B" : "A", size);

        return size;
}
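/*
 * Worked example for i9xx_get_fifo_size() above (hypothetical register
 * value, added for illustration): if the low 7 bits of DSPARB read 0x20
 * (32) and the C-start field reads 0x60 (96), plane A owns 32 FIFO entries
 * and plane B owns 96 - 32 = 64.  DSPARB encodes split points, not sizes,
 * so plane B's share is the distance between the two split points.
 */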

static int i85x_get_fifo_size(struct drm_device *dev, int plane)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dsparb = I915_READ(DSPARB);
        int size;

        size = dsparb & 0x1ff;
        if (plane)
                size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
        size >>= 1; /* Convert to cachelines */

        DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
                      plane ? "B" : "A", size);

        return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dsparb = I915_READ(DSPARB);
        int size;

        size = dsparb & 0x7f;
        size >>= 2; /* Convert to cachelines */

        DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
                      plane ? "B" : "A",
                      size);

        return size;
}

static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dsparb = I915_READ(DSPARB);
        int size;

        size = dsparb & 0x7f;
        size >>= 1; /* Convert to cachelines */

        DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
                      plane ? "B" : "A", size);

        return size;
}

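/*
 * Note on the initializers below (added; assumes the declaration order of
 * struct intel_watermark_params in intel_drv.h, which matches how the
 * fields are consumed in intel_calculate_wm()):
 *   { fifo_size, max_wm, default_wm, guard_size, cacheline_size }
 */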
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
        PINEVIEW_DISPLAY_FIFO,
        PINEVIEW_MAX_WM,
        PINEVIEW_DFT_WM,
        PINEVIEW_GUARD_WM,
        PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
        PINEVIEW_DISPLAY_FIFO,
        PINEVIEW_MAX_WM,
        PINEVIEW_DFT_HPLLOFF_WM,
        PINEVIEW_GUARD_WM,
        PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_cursor_wm = {
        PINEVIEW_CURSOR_FIFO,
        PINEVIEW_CURSOR_MAX_WM,
        PINEVIEW_CURSOR_DFT_WM,
        PINEVIEW_CURSOR_GUARD_WM,
        PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
        PINEVIEW_CURSOR_FIFO,
        PINEVIEW_CURSOR_MAX_WM,
        PINEVIEW_CURSOR_DFT_WM,
        PINEVIEW_CURSOR_GUARD_WM,
        PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params g4x_wm_info = {
        G4X_FIFO_SIZE,
        G4X_MAX_WM,
        G4X_MAX_WM,
        2,
        G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
        I965_CURSOR_FIFO,
        I965_CURSOR_MAX_WM,
        I965_CURSOR_DFT_WM,
        2,
        G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
        VALLEYVIEW_FIFO_SIZE,
        VALLEYVIEW_MAX_WM,
        VALLEYVIEW_MAX_WM,
        2,
        G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
        I965_CURSOR_FIFO,
        VALLEYVIEW_CURSOR_MAX_WM,
        I965_CURSOR_DFT_WM,
        2,
        G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
        I965_CURSOR_FIFO,
        I965_CURSOR_MAX_WM,
        I965_CURSOR_DFT_WM,
        2,
        I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
        I945_FIFO_SIZE,
        I915_MAX_WM,
        1,
        2,
        I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i915_wm_info = {
        I915_FIFO_SIZE,
        I915_MAX_WM,
        1,
        2,
        I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i855_wm_info = {
        I855GM_FIFO_SIZE,
        I915_MAX_WM,
        1,
        2,
        I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i830_wm_info = {
        I830_FIFO_SIZE,
        I915_MAX_WM,
        1,
        2,
        I830_FIFO_LINE_SIZE
};

static const struct intel_watermark_params ironlake_display_wm_info = {
        ILK_DISPLAY_FIFO,
        ILK_DISPLAY_MAXWM,
        ILK_DISPLAY_DFTWM,
        2,
        ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_wm_info = {
        ILK_CURSOR_FIFO,
        ILK_CURSOR_MAXWM,
        ILK_CURSOR_DFTWM,
        2,
        ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_display_srwm_info = {
        ILK_DISPLAY_SR_FIFO,
        ILK_DISPLAY_MAX_SRWM,
        ILK_DISPLAY_DFT_SRWM,
        2,
        ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
        ILK_CURSOR_SR_FIFO,
        ILK_CURSOR_MAX_SRWM,
        ILK_CURSOR_DFT_SRWM,
        2,
        ILK_FIFO_LINE_SIZE
};

static const struct intel_watermark_params sandybridge_display_wm_info = {
        SNB_DISPLAY_FIFO,
        SNB_DISPLAY_MAXWM,
        SNB_DISPLAY_DFTWM,
        2,
        SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
        SNB_CURSOR_FIFO,
        SNB_CURSOR_MAXWM,
        SNB_CURSOR_DFTWM,
        2,
        SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_display_srwm_info = {
        SNB_DISPLAY_SR_FIFO,
        SNB_DISPLAY_MAX_SRWM,
        SNB_DISPLAY_DFT_SRWM,
        2,
        SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
        SNB_CURSOR_SR_FIFO,
        SNB_CURSOR_MAX_SRWM,
        SNB_CURSOR_DFT_SRWM,
        2,
        SNB_FIFO_LINE_SIZE
};

/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO available to this plane
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size.  When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills
 * past the watermark point.  If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
                                        const struct intel_watermark_params *wm,
                                        int fifo_size,
                                        int pixel_size,
                                        unsigned long latency_ns)
{
        long entries_required, wm_size;

        /*
         * Note: we need to make sure we don't overflow for various clock &
         * latency values.
         * clocks go from a few thousand to several hundred thousand.
         * latency is usually a few thousand
         */
        entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
                1000;
        entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

        DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

        wm_size = fifo_size - (entries_required + wm->guard_size);

        DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

        /* Don't promote wm_size to unsigned... */
        if (wm_size > (long)wm->max_wm)
                wm_size = wm->max_wm;
        if (wm_size <= 0)
                wm_size = wm->default_wm;
        return wm_size;
}
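/*
 * Worked example for intel_calculate_wm() above (hypothetical numbers,
 * added for illustration): for a 148500 kHz pixel clock, 4 bytes per
 * pixel, the default 5000 ns latency, a 64-byte cacheline, a 96-entry
 * FIFO and a guard of 2:
 *   bytes fetched during the latency window
 *     = (148500 / 1000) * 4 * 5000 / 1000 = 2960
 *       (note the integer truncation of the kHz-to-MHz division)
 *   entries_required = DIV_ROUND_UP(2960, 64) = 47
 *   wm_size = 96 - (47 + 2) = 47
 * i.e. the plane starts refilling once the FIFO drops to 47 entries.
 */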

static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
        struct drm_crtc *crtc, *enabled = NULL;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                if (crtc->enabled && crtc->fb) {
                        if (enabled)
                                return NULL;
                        enabled = crtc;
                }
        }

        return enabled;
}

static void pineview_update_wm(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        const struct cxsr_latency *latency;
        u32 reg;
        unsigned long wm;

        latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
                                         dev_priv->fsb_freq, dev_priv->mem_freq);
        if (!latency) {
                DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
                pineview_disable_cxsr(dev);
                return;
        }

        crtc = single_enabled_crtc(dev);
        if (crtc) {
                int clock = crtc->mode.clock;
                int pixel_size = crtc->fb->bits_per_pixel / 8;

                /* Display SR */
                wm = intel_calculate_wm(clock, &pineview_display_wm,
                                        pineview_display_wm.fifo_size,
                                        pixel_size, latency->display_sr);
                reg = I915_READ(DSPFW1);
                reg &= ~DSPFW_SR_MASK;
                reg |= wm << DSPFW_SR_SHIFT;
                I915_WRITE(DSPFW1, reg);
                DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

                /* cursor SR */
                wm = intel_calculate_wm(clock, &pineview_cursor_wm,
                                        pineview_display_wm.fifo_size,
                                        pixel_size, latency->cursor_sr);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_CURSOR_SR_MASK;
                reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
                I915_WRITE(DSPFW3, reg);

                /* Display HPLL off SR */
                wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
                                        pineview_display_hplloff_wm.fifo_size,
                                        pixel_size, latency->display_hpll_disable);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_HPLL_SR_MASK;
                reg |= wm & DSPFW_HPLL_SR_MASK;
                I915_WRITE(DSPFW3, reg);

                /* cursor HPLL off SR */
                wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
                                        pineview_display_hplloff_wm.fifo_size,
                                        pixel_size, latency->cursor_hpll_disable);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_HPLL_CURSOR_MASK;
                reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
                I915_WRITE(DSPFW3, reg);
                DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

                /* activate cxsr */
                I915_WRITE(DSPFW3,
                           I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
                DRM_DEBUG_KMS("Self-refresh is enabled\n");
        } else {
                pineview_disable_cxsr(dev);
                DRM_DEBUG_KMS("Self-refresh is disabled\n");
        }
}

static bool g4x_compute_wm0(struct drm_device *dev,
                            int plane,
                            const struct intel_watermark_params *display,
                            int display_latency_ns,
                            const struct intel_watermark_params *cursor,
                            int cursor_latency_ns,
                            int *plane_wm,
                            int *cursor_wm)
{
        struct drm_crtc *crtc;
        int htotal, hdisplay, clock, pixel_size;
        int line_time_us, line_count;
        int entries, tlb_miss;

        crtc = intel_get_crtc_for_plane(dev, plane);
        if (crtc->fb == NULL || !crtc->enabled) {
                *cursor_wm = cursor->guard_size;
                *plane_wm = display->guard_size;
                return false;
        }

        htotal = crtc->mode.htotal;
        hdisplay = crtc->mode.hdisplay;
        clock = crtc->mode.clock;
        pixel_size = crtc->fb->bits_per_pixel / 8;

        /* Use the small buffer method to calculate plane watermark */
        entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
        tlb_miss = display->fifo_size * display->cacheline_size - hdisplay * 8;
        if (tlb_miss > 0)
                entries += tlb_miss;
        entries = DIV_ROUND_UP(entries, display->cacheline_size);
        *plane_wm = entries + display->guard_size;
        if (*plane_wm > (int)display->max_wm)
                *plane_wm = display->max_wm;

        /* Use the large buffer method to calculate cursor watermark */
        line_time_us = ((htotal * 1000) / clock);
        line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
        entries = line_count * 64 * pixel_size;
        tlb_miss = cursor->fifo_size * cursor->cacheline_size - hdisplay * 8;
        if (tlb_miss > 0)
                entries += tlb_miss;
        entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
        *cursor_wm = entries + cursor->guard_size;
        if (*cursor_wm > (int)cursor->max_wm)
                *cursor_wm = (int)cursor->max_wm;

        return true;
}
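/*
 * Illustrative numbers for the "large buffer" cursor path in
 * g4x_compute_wm0() above (hypothetical mode, added for clarity): with
 * htotal = 2200 and clock = 148500 kHz,
 * line_time_us = (2200 * 1000) / 148500 = 14 (integer math).  With a
 * 5000 ns latency, line_count = (5000 / 14 + 1000) / 1000 = 1, so a
 * 64-pixel-wide, 4-byte-per-pixel cursor needs 1 * 64 * 4 = 256 bytes
 * before rounding up to cachelines.
 */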

/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
                           int display_wm, int cursor_wm,
                           const struct intel_watermark_params *display,
                           const struct intel_watermark_params *cursor)
{
        DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
                      display_wm, cursor_wm);

        if (display_wm > display->max_wm) {
                DRM_DEBUG_KMS("display watermark is too large (%d/%ld), disabling\n",
                              display_wm, display->max_wm);
                return false;
        }

        if (cursor_wm > cursor->max_wm) {
                DRM_DEBUG_KMS("cursor watermark is too large (%d/%ld), disabling\n",
                              cursor_wm, cursor->max_wm);
                return false;
        }

        if (!(display_wm || cursor_wm)) {
                DRM_DEBUG_KMS("SR latency is 0, disabling\n");
                return false;
        }

        return true;
}

static bool g4x_compute_srwm(struct drm_device *dev,
                             int plane,
                             int latency_ns,
                             const struct intel_watermark_params *display,
                             const struct intel_watermark_params *cursor,
                             int *display_wm, int *cursor_wm)
{
        struct drm_crtc *crtc;
        int hdisplay, htotal, pixel_size, clock;
        unsigned long line_time_us;
        int line_count, line_size;
        int small, large;
        int entries;

        if (!latency_ns) {
                *display_wm = *cursor_wm = 0;
                return false;
        }

        crtc = intel_get_crtc_for_plane(dev, plane);
        hdisplay = crtc->mode.hdisplay;
        htotal = crtc->mode.htotal;
        clock = crtc->mode.clock;
        pixel_size = crtc->fb->bits_per_pixel / 8;

        line_time_us = (htotal * 1000) / clock;
        line_count = (latency_ns / line_time_us + 1000) / 1000;
        line_size = hdisplay * pixel_size;

        /* Use the minimum of the small and large buffer method for primary */
        small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
        large = line_count * line_size;

        entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
        *display_wm = entries + display->guard_size;

        /* calculate the self-refresh watermark for display cursor */
        entries = line_count * pixel_size * 64;
        entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
        *cursor_wm = entries + cursor->guard_size;

        return g4x_check_srwm(dev,
                              *display_wm, *cursor_wm,
                              display, cursor);
}

static bool vlv_compute_drain_latency(struct drm_device *dev,
                                      int plane,
                                      int *plane_prec_mult,
                                      int *plane_dl,
                                      int *cursor_prec_mult,
                                      int *cursor_dl)
{
        struct drm_crtc *crtc;
        int clock, pixel_size;
        int entries;

        crtc = intel_get_crtc_for_plane(dev, plane);
        if (crtc->fb == NULL || !crtc->enabled)
                return false;

        clock = crtc->mode.clock;       /* VESA DOT Clock */
        pixel_size = crtc->fb->bits_per_pixel / 8;      /* BPP */

        entries = (clock / 1000) * pixel_size;
        *plane_prec_mult = (entries > 256) ?
                DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
        *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
                                                     pixel_size);

        entries = (clock / 1000) * 4;   /* BPP is always 4 for cursor */
        *cursor_prec_mult = (entries > 256) ?
                DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
        *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);

        return true;
}
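/*
 * Illustrative numbers for the drain-latency formula in
 * vlv_compute_drain_latency() above (hypothetical mode, added for
 * clarity): at clock = 148500 kHz with 4 bytes per pixel,
 * entries = 148 * 4 = 592 > 256, so the 32x precision multiplier is
 * selected and plane_dl = (64 * 32 * 4) / 592 = 13.
 */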

/*
 * Update drain latency registers of memory arbiter
 *
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
 * to be programmed. Each plane has a drain latency multiplier and a drain
 * latency value.
 */

static void vlv_update_drain_latency(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int planea_prec, planea_dl, planeb_prec, planeb_dl;
        int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
        int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
                                                  either 16 or 32 */

        /* For plane A, Cursor A */
        if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
                                      &cursor_prec_mult, &cursora_dl)) {
                cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
                        DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
                planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
                        DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;

                I915_WRITE(VLV_DDL1, cursora_prec |
                                (cursora_dl << DDL_CURSORA_SHIFT) |
                                planea_prec | planea_dl);
        }

        /* For plane B, Cursor B */
        if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
                                      &cursor_prec_mult, &cursorb_dl)) {
                cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
                        DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
                planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
                        DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;

                I915_WRITE(VLV_DDL2, cursorb_prec |
                                (cursorb_dl << DDL_CURSORB_SHIFT) |
                                planeb_prec | planeb_dl);
        }
}

#define single_plane_enabled(mask) ((mask) != 0 && powerof2(mask))
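/*
 * E.g. (added): a mask of 1 (plane A only) or 2 (plane B only) qualifies
 * as "single plane enabled"; 3 (both planes) and 0 (none) do not.
 */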
1295
1296 static void valleyview_update_wm(struct drm_device *dev)
1297 {
1298         static const int sr_latency_ns = 12000;
1299         struct drm_i915_private *dev_priv = dev->dev_private;
1300         int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1301         int plane_sr, cursor_sr;
1302         unsigned int enabled = 0;
1303
1304         vlv_update_drain_latency(dev);
1305
1306         if (g4x_compute_wm0(dev, 0,
1307                             &valleyview_wm_info, latency_ns,
1308                             &valleyview_cursor_wm_info, latency_ns,
1309                             &planea_wm, &cursora_wm))
1310                 enabled |= 1;
1311
1312         if (g4x_compute_wm0(dev, 1,
1313                             &valleyview_wm_info, latency_ns,
1314                             &valleyview_cursor_wm_info, latency_ns,
1315                             &planeb_wm, &cursorb_wm))
1316                 enabled |= 2;
1317
1318         plane_sr = cursor_sr = 0;
1319         if (single_plane_enabled(enabled) &&
1320             g4x_compute_srwm(dev, ffs(enabled) - 1,
1321                              sr_latency_ns,
1322                              &valleyview_wm_info,
1323                              &valleyview_cursor_wm_info,
1324                              &plane_sr, &cursor_sr))
1325                 I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
1326         else
1327                 I915_WRITE(FW_BLC_SELF_VLV,
1328                            I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
1329
1330         DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1331                       planea_wm, cursora_wm,
1332                       planeb_wm, cursorb_wm,
1333                       plane_sr, cursor_sr);
1334
1335         I915_WRITE(DSPFW1,
1336                    (plane_sr << DSPFW_SR_SHIFT) |
1337                    (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1338                    (planeb_wm << DSPFW_PLANEB_SHIFT) |
1339                    planea_wm);
1340         I915_WRITE(DSPFW2,
1341                    (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
1342                    (cursora_wm << DSPFW_CURSORA_SHIFT));
1343         I915_WRITE(DSPFW3,
1344                    (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
1345 }
1346
1347 static void g4x_update_wm(struct drm_device *dev)
1348 {
1349         static const int sr_latency_ns = 12000;
1350         struct drm_i915_private *dev_priv = dev->dev_private;
1351         int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1352         int plane_sr, cursor_sr;
1353         unsigned int enabled = 0;
1354
1355         if (g4x_compute_wm0(dev, 0,
1356                             &g4x_wm_info, latency_ns,
1357                             &g4x_cursor_wm_info, latency_ns,
1358                             &planea_wm, &cursora_wm))
1359                 enabled |= 1;
1360
1361         if (g4x_compute_wm0(dev, 1,
1362                             &g4x_wm_info, latency_ns,
1363                             &g4x_cursor_wm_info, latency_ns,
1364                             &planeb_wm, &cursorb_wm))
1365                 enabled |= 2;
1366
1367         plane_sr = cursor_sr = 0;
1368         if (single_plane_enabled(enabled) &&
1369             g4x_compute_srwm(dev, ffs(enabled) - 1,
1370                              sr_latency_ns,
1371                              &g4x_wm_info,
1372                              &g4x_cursor_wm_info,
1373                              &plane_sr, &cursor_sr))
1374                 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1375         else
1376                 I915_WRITE(FW_BLC_SELF,
1377                            I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
1378
1379         DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1380                       planea_wm, cursora_wm,
1381                       planeb_wm, cursorb_wm,
1382                       plane_sr, cursor_sr);
1383
1384         I915_WRITE(DSPFW1,
1385                    (plane_sr << DSPFW_SR_SHIFT) |
1386                    (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1387                    (planeb_wm << DSPFW_PLANEB_SHIFT) |
1388                    planea_wm);
1389         I915_WRITE(DSPFW2,
1390                    (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
1391                    (cursora_wm << DSPFW_CURSORA_SHIFT));
1392         /* HPLL off in SR has some issues on G4x... disable it */
1393         I915_WRITE(DSPFW3,
1394                    (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
1395                    (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1396 }
1397
1398 static void i965_update_wm(struct drm_device *dev)
1399 {
1400         struct drm_i915_private *dev_priv = dev->dev_private;
1401         struct drm_crtc *crtc;
1402         int srwm = 1;
1403         int cursor_sr = 16;
1404
1405         /* Calc SR entries for single-plane configs */
1406         crtc = single_enabled_crtc(dev);
1407         if (crtc) {
1408                 /* self-refresh has much higher latency */
1409                 static const int sr_latency_ns = 12000;
1410                 int clock = crtc->mode.clock;
1411                 int htotal = crtc->mode.htotal;
1412                 int hdisplay = crtc->mode.hdisplay;
1413                 int pixel_size = crtc->fb->bits_per_pixel / 8;
1414                 unsigned long line_time_us;
1415                 int entries;
1416
1417                 line_time_us = ((htotal * 1000) / clock);
1418
1419                 /* Use ns/us then divide to preserve precision */
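                /*
                 * Illustrative arithmetic (hypothetical 1920-wide mode:
                 * clock = 148500 kHz, htotal = 2200, 32bpp), with
                 * I965_FIFO_SIZE = 512 and I915_FIFO_LINE_SIZE = 64:
                 *   line_time_us = (2200 * 1000) / 148500 = 14
                 *   lines needed = (12000 / 14 + 1000) / 1000 = 1
                 *   entries      = 1 * 4 * 1920 = 7680 bytes -> 7680 / 64 = 120
                 *   srwm         = 512 - 120 = 392
                 */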
1420                 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1421                         pixel_size * hdisplay;
1422                 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
1423                 srwm = I965_FIFO_SIZE - entries;
1424                 if (srwm < 0)
1425                         srwm = 1;
1426                 srwm &= 0x1ff;
1427                 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
1428                               entries, srwm);
1429
1430                 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1431                         pixel_size * 64;
1432                 entries = DIV_ROUND_UP(entries,
1433                                           i965_cursor_wm_info.cacheline_size);
1434                 cursor_sr = i965_cursor_wm_info.fifo_size -
1435                         (entries + i965_cursor_wm_info.guard_size);
1436
1437                 if (cursor_sr > i965_cursor_wm_info.max_wm)
1438                         cursor_sr = i965_cursor_wm_info.max_wm;
1439
1440                 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
1441                               "cursor %d\n", srwm, cursor_sr);
1442
1443                 if (IS_CRESTLINE(dev))
1444                         I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1445         } else {
1446                 /* Turn off self refresh if both pipes are enabled */
1447                 if (IS_CRESTLINE(dev))
1448                         I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
1449                                    & ~FW_BLC_SELF_EN);
1450         }
1451
1452         DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
1453                       srwm);
1454
1455         /* 965 has limitations... */
1456         I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
1457                    (8 << 16) | (8 << 8) | (8 << 0));
1458         I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
1459         /* update cursor SR watermark */
1460         I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1461 }
1462
1463 static void i9xx_update_wm(struct drm_device *dev)
1464 {
1465         struct drm_i915_private *dev_priv = dev->dev_private;
1466         const struct intel_watermark_params *wm_info;
1467         uint32_t fwater_lo;
1468         uint32_t fwater_hi;
1469         int cwm, srwm = 1;
1470         int fifo_size;
1471         int planea_wm, planeb_wm;
1472         struct drm_crtc *crtc, *enabled = NULL;
1473
1474         if (IS_I945GM(dev))
1475                 wm_info = &i945_wm_info;
1476         else if (!IS_GEN2(dev))
1477                 wm_info = &i915_wm_info;
1478         else
1479                 wm_info = &i855_wm_info;
1480
1481         fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1482         crtc = intel_get_crtc_for_plane(dev, 0);
1483         if (crtc->enabled && crtc->fb) {
1484                 planea_wm = intel_calculate_wm(crtc->mode.clock,
1485                                                wm_info, fifo_size,
1486                                                crtc->fb->bits_per_pixel / 8,
1487                                                latency_ns);
1488                 enabled = crtc;
1489         } else
1490                 planea_wm = fifo_size - wm_info->guard_size;
1491
1492         fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1493         crtc = intel_get_crtc_for_plane(dev, 1);
1494         if (crtc->enabled && crtc->fb) {
1495                 planeb_wm = intel_calculate_wm(crtc->mode.clock,
1496                                                wm_info, fifo_size,
1497                                                crtc->fb->bits_per_pixel / 8,
1498                                                latency_ns);
1499                 if (enabled == NULL)
1500                         enabled = crtc;
1501                 else
1502                         enabled = NULL;
1503         } else
1504                 planeb_wm = fifo_size - wm_info->guard_size;
1505
1506         DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
1507
1508         /*
1509          * Overlay gets an aggressive default since video jitter is bad.
1510          */
1511         cwm = 2;
1512
1513         /* Play safe and disable self-refresh before adjusting watermarks. */
1514         if (IS_I945G(dev) || IS_I945GM(dev))
1515                 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
1516         else if (IS_I915GM(dev))
1517                 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
1518
1519         /* Calc SR entries for single-plane configs */
1520         if (HAS_FW_BLC(dev) && enabled) {
1521                 /* self-refresh has much higher latency */
1522                 static const int sr_latency_ns = 6000;
1523                 int clock = enabled->mode.clock;
1524                 int htotal = enabled->mode.htotal;
1525                 int hdisplay = enabled->mode.hdisplay;
1526                 int pixel_size = enabled->fb->bits_per_pixel / 8;
1527                 unsigned long line_time_us;
1528                 int entries;
1529
1530                 line_time_us = (htotal * 1000) / clock;
1531
1532                 /* Use ns/us then divide to preserve precision */
1533                 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1534                         pixel_size * hdisplay;
1535                 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
1536                 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
1537                 srwm = wm_info->fifo_size - entries;
1538                 if (srwm < 0)
1539                         srwm = 1;
1540
1541                 if (IS_I945G(dev) || IS_I945GM(dev))
1542                         I915_WRITE(FW_BLC_SELF,
1543                                    FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
1544                 else if (IS_I915GM(dev))
1545                         I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
1546         }
1547
1548         DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
1549                       planea_wm, planeb_wm, cwm, srwm);
1550
1551         fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
1552         fwater_hi = (cwm & 0x1f);
1553
1554         /* Set request length to 8 cachelines per fetch */
1555         fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
1556         fwater_hi = fwater_hi | (1 << 8);
1557
1558         I915_WRITE(FW_BLC, fwater_lo);
1559         I915_WRITE(FW_BLC2, fwater_hi);
1560
1561         if (HAS_FW_BLC(dev)) {
1562                 if (enabled) {
1563                         if (IS_I945G(dev) || IS_I945GM(dev))
1564                                 I915_WRITE(FW_BLC_SELF,
1565                                            FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
1566                         else if (IS_I915GM(dev))
1567                                 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
1568                         DRM_DEBUG_KMS("memory self refresh enabled\n");
1569                 } else
1570                         DRM_DEBUG_KMS("memory self refresh disabled\n");
1571         }
1572 }
1573
1574 static void i830_update_wm(struct drm_device *dev)
1575 {
1576         struct drm_i915_private *dev_priv = dev->dev_private;
1577         struct drm_crtc *crtc;
1578         uint32_t fwater_lo;
1579         int planea_wm;
1580
1581         crtc = single_enabled_crtc(dev);
1582         if (crtc == NULL)
1583                 return;
1584
1585         planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
1586                                        dev_priv->display.get_fifo_size(dev, 0),
1587                                        crtc->fb->bits_per_pixel / 8,
1588                                        latency_ns);
1589         fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1590         fwater_lo |= (3<<8) | planea_wm;
1591
1592         DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
1593
1594         I915_WRITE(FW_BLC, fwater_lo);
1595 }
1596
1597 #define ILK_LP0_PLANE_LATENCY           700
1598 #define ILK_LP0_CURSOR_LATENCY          1300
1599
1600 /*
1601  * Check the wm result.
1602  *
1603  * If any calculated watermark value is larger than the maximum value that
1604  * can be programmed into the associated watermark register, that watermark
1605  * must be disabled.
1606  */
1607 static bool ironlake_check_srwm(struct drm_device *dev, int level,
1608                                 int fbc_wm, int display_wm, int cursor_wm,
1609                                 const struct intel_watermark_params *display,
1610                                 const struct intel_watermark_params *cursor)
1611 {
1612         struct drm_i915_private *dev_priv = dev->dev_private;
1613
1614         DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
1615                       " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
1616
1617         if (fbc_wm > SNB_FBC_MAX_SRWM) {
1618                 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
1619                               fbc_wm, SNB_FBC_MAX_SRWM, level);
1620
1621                 /* the FBC watermark has its own dedicated disable bit */
1622                 I915_WRITE(DISP_ARB_CTL,
1623                            I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
1624                 return false;
1625         }
1626
1627         if (display_wm > display->max_wm) {
1628                 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
1629                               display_wm, (int)display->max_wm, level);
1630                 return false;
1631         }
1632
1633         if (cursor_wm > cursor->max_wm) {
1634                 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
1635                               cursor_wm, (int)cursor->max_wm, level);
1636                 return false;
1637         }
1638
1639         if (!(fbc_wm || display_wm || cursor_wm)) {
1640                 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
1641                 return false;
1642         }
1643
1644         return true;
1645 }
1646
1647 /*
1648  * Compute the watermark values for WM[1-3].
1649  */
1650 static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1651                                   int latency_ns,
1652                                   const struct intel_watermark_params *display,
1653                                   const struct intel_watermark_params *cursor,
1654                                   int *fbc_wm, int *display_wm, int *cursor_wm)
1655 {
1656         struct drm_crtc *crtc;
1657         unsigned long line_time_us;
1658         int hdisplay, htotal, pixel_size, clock;
1659         int line_count, line_size;
1660         int small, large;
1661         int entries;
1662
1663         if (!latency_ns) {
1664                 *fbc_wm = *display_wm = *cursor_wm = 0;
1665                 return false;
1666         }
1667
1668         crtc = intel_get_crtc_for_plane(dev, plane);
1669         hdisplay = crtc->mode.hdisplay;
1670         htotal = crtc->mode.htotal;
1671         clock = crtc->mode.clock;
1672         pixel_size = crtc->fb->bits_per_pixel / 8;
1673
1674         line_time_us = (htotal * 1000) / clock;
1675         line_count = (latency_ns / line_time_us + 1000) / 1000;
1676         line_size = hdisplay * pixel_size;
1677
1678         /* Use the minimum of the small and large buffer method for primary */
1679         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1680         large = line_count * line_size;
1681
1682         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1683         *display_wm = entries + display->guard_size;
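        /*
         * Illustrative arithmetic (hypothetical numbers: 148500 kHz clock,
         * 32bpp, 1920 wide, latency_ns = 2000, one line of latency,
         * 64-byte cachelines, and an assumed guard_size of 2):
         *   small   = ((148500 * 4 / 1000) * 2000) / 1000 = 1188 bytes
         *   large   = 1 * (1920 * 4) = 7680 bytes
         *   entries = DIV_ROUND_UP(min(1188, 7680), 64) = 19
         *   *display_wm = 19 + 2 = 21
         */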
1684
1685         /*
1686          * Spec says:
1687          * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
1688          */
1689         *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
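        /*
         * Continuing the hypothetical numbers above: with *display_wm = 21
         * and line_size = 7680, *fbc_wm = DIV_ROUND_UP(21 * 64, 7680) + 2
         * = 1 + 2 = 3 lines.
         */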
1690
1691         /* calculate the self-refresh watermark for display cursor */
1692         entries = line_count * pixel_size * 64;
1693         entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1694         *cursor_wm = entries + cursor->guard_size;
1695
1696         return ironlake_check_srwm(dev, level,
1697                                    *fbc_wm, *display_wm, *cursor_wm,
1698                                    display, cursor);
1699 }
1700
1701 static void ironlake_update_wm(struct drm_device *dev)
1702 {
1703         struct drm_i915_private *dev_priv = dev->dev_private;
1704         int fbc_wm, plane_wm, cursor_wm;
1705         unsigned int enabled;
1706
1707         enabled = 0;
1708         if (g4x_compute_wm0(dev, 0,
1709                             &ironlake_display_wm_info,
1710                             ILK_LP0_PLANE_LATENCY,
1711                             &ironlake_cursor_wm_info,
1712                             ILK_LP0_CURSOR_LATENCY,
1713                             &plane_wm, &cursor_wm)) {
1714                 I915_WRITE(WM0_PIPEA_ILK,
1715                            (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1716                 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1717                               " plane %d, " "cursor: %d\n",
1718                               plane_wm, cursor_wm);
1719                 enabled |= 1;
1720         }
1721
1722         if (g4x_compute_wm0(dev, 1,
1723                             &ironlake_display_wm_info,
1724                             ILK_LP0_PLANE_LATENCY,
1725                             &ironlake_cursor_wm_info,
1726                             ILK_LP0_CURSOR_LATENCY,
1727                             &plane_wm, &cursor_wm)) {
1728                 I915_WRITE(WM0_PIPEB_ILK,
1729                            (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1730                 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1731                               " plane %d, cursor: %d\n",
1732                               plane_wm, cursor_wm);
1733                 enabled |= 2;
1734         }
1735
1736         /*
1737          * Calculate and update the self-refresh watermark only when one
1738          * display plane is used.
1739          */
1740         I915_WRITE(WM3_LP_ILK, 0);
1741         I915_WRITE(WM2_LP_ILK, 0);
1742         I915_WRITE(WM1_LP_ILK, 0);
1743
1744         if (!single_plane_enabled(enabled))
1745                 return;
1746         enabled = ffs(enabled) - 1;
1747
1748         /* WM1 */
1749         if (!ironlake_compute_srwm(dev, 1, enabled,
1750                                    ILK_READ_WM1_LATENCY() * 500,
1751                                    &ironlake_display_srwm_info,
1752                                    &ironlake_cursor_srwm_info,
1753                                    &fbc_wm, &plane_wm, &cursor_wm))
1754                 return;
1755
1756         I915_WRITE(WM1_LP_ILK,
1757                    WM1_LP_SR_EN |
1758                    (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1759                    (fbc_wm << WM1_LP_FBC_SHIFT) |
1760                    (plane_wm << WM1_LP_SR_SHIFT) |
1761                    cursor_wm);
1762
1763         /* WM2 */
1764         if (!ironlake_compute_srwm(dev, 2, enabled,
1765                                    ILK_READ_WM2_LATENCY() * 500,
1766                                    &ironlake_display_srwm_info,
1767                                    &ironlake_cursor_srwm_info,
1768                                    &fbc_wm, &plane_wm, &cursor_wm))
1769                 return;
1770
1771         I915_WRITE(WM2_LP_ILK,
1772                    WM2_LP_EN |
1773                    (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1774                    (fbc_wm << WM1_LP_FBC_SHIFT) |
1775                    (plane_wm << WM1_LP_SR_SHIFT) |
1776                    cursor_wm);
1777
1778         /*
1779          * WM3 is unsupported on ILK, probably because we don't have latency
1780          * data for that power state
1781          */
1782 }
1783
1784 static void sandybridge_update_wm(struct drm_device *dev)
1785 {
1786         struct drm_i915_private *dev_priv = dev->dev_private;
1787         int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
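        /* The latency register value is in 0.1us units, so multiplying by
         * 100 converts it to nanoseconds. */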
1788         u32 val;
1789         int fbc_wm, plane_wm, cursor_wm;
1790         unsigned int enabled;
1791
1792         enabled = 0;
1793         if (g4x_compute_wm0(dev, 0,
1794                             &sandybridge_display_wm_info, latency,
1795                             &sandybridge_cursor_wm_info, latency,
1796                             &plane_wm, &cursor_wm)) {
1797                 val = I915_READ(WM0_PIPEA_ILK);
1798                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1799                 I915_WRITE(WM0_PIPEA_ILK, val |
1800                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1801                 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1802                               " plane %d, " "cursor: %d\n",
1803                               plane_wm, cursor_wm);
1804                 enabled |= 1;
1805         }
1806
1807         if (g4x_compute_wm0(dev, 1,
1808                             &sandybridge_display_wm_info, latency,
1809                             &sandybridge_cursor_wm_info, latency,
1810                             &plane_wm, &cursor_wm)) {
1811                 val = I915_READ(WM0_PIPEB_ILK);
1812                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1813                 I915_WRITE(WM0_PIPEB_ILK, val |
1814                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1815                 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1816                               " plane %d, cursor: %d\n",
1817                               plane_wm, cursor_wm);
1818                 enabled |= 2;
1819         }
1820
1821         if ((dev_priv->num_pipe == 3) &&
1822             g4x_compute_wm0(dev, 2,
1823                             &sandybridge_display_wm_info, latency,
1824                             &sandybridge_cursor_wm_info, latency,
1825                             &plane_wm, &cursor_wm)) {
1826                 val = I915_READ(WM0_PIPEC_IVB);
1827                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1828                 I915_WRITE(WM0_PIPEC_IVB, val |
1829                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1830                 DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
1831                               " plane %d, cursor: %d\n",
1832                               plane_wm, cursor_wm);
1833                 enabled |= 1 << 2;      /* pipe C is bit 2, not bits 0-1 */
1834         }
1835
1836         /*
1837          * Calculate and update the self-refresh watermark only when one
1838          * display plane is used.
1839          *
1840          * SNB supports 3 levels of watermarks.
1841          *
1842          * The WM1/WM2/WM3 watermarks have to be enabled in ascending order,
1843          * and disabled in descending order.
1844          *
1845          */
1846         I915_WRITE(WM3_LP_ILK, 0);
1847         I915_WRITE(WM2_LP_ILK, 0);
1848         I915_WRITE(WM1_LP_ILK, 0);
1849
1850         if (!single_plane_enabled(enabled) ||
1851             dev_priv->sprite_scaling_enabled)
1852                 return;
1853         enabled = ffs(enabled) - 1;
1854
1855         /* WM1 */
1856         if (!ironlake_compute_srwm(dev, 1, enabled,
1857                                    SNB_READ_WM1_LATENCY() * 500,
1858                                    &sandybridge_display_srwm_info,
1859                                    &sandybridge_cursor_srwm_info,
1860                                    &fbc_wm, &plane_wm, &cursor_wm))
1861                 return;
1862
1863         I915_WRITE(WM1_LP_ILK,
1864                    WM1_LP_SR_EN |
1865                    (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1866                    (fbc_wm << WM1_LP_FBC_SHIFT) |
1867                    (plane_wm << WM1_LP_SR_SHIFT) |
1868                    cursor_wm);
1869
1870         /* WM2 */
1871         if (!ironlake_compute_srwm(dev, 2, enabled,
1872                                    SNB_READ_WM2_LATENCY() * 500,
1873                                    &sandybridge_display_srwm_info,
1874                                    &sandybridge_cursor_srwm_info,
1875                                    &fbc_wm, &plane_wm, &cursor_wm))
1876                 return;
1877
1878         I915_WRITE(WM2_LP_ILK,
1879                    WM2_LP_EN |
1880                    (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1881                    (fbc_wm << WM1_LP_FBC_SHIFT) |
1882                    (plane_wm << WM1_LP_SR_SHIFT) |
1883                    cursor_wm);
1884
1885         /* WM3 */
1886         if (!ironlake_compute_srwm(dev, 3, enabled,
1887                                    SNB_READ_WM3_LATENCY() * 500,
1888                                    &sandybridge_display_srwm_info,
1889                                    &sandybridge_cursor_srwm_info,
1890                                    &fbc_wm, &plane_wm, &cursor_wm))
1891                 return;
1892
1893         I915_WRITE(WM3_LP_ILK,
1894                    WM3_LP_EN |
1895                    (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1896                    (fbc_wm << WM1_LP_FBC_SHIFT) |
1897                    (plane_wm << WM1_LP_SR_SHIFT) |
1898                    cursor_wm);
1899 }
1900
1901 static void
1902 haswell_update_linetime_wm(struct drm_device *dev, int pipe,
1903                                  struct drm_display_mode *mode)
1904 {
1905         struct drm_i915_private *dev_priv = dev->dev_private;
1906         u32 temp;
1907
1908         temp = I915_READ(PIPE_WM_LINETIME(pipe));
1909         temp &= ~PIPE_WM_LINETIME_MASK;
1910
1911         /* The watermarks are computed based on how long it takes to fill a
1912          * single row at the given clock rate, multiplied by 8.
1913          */
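        /* Illustrative arithmetic (hypothetical 1920-wide mode at
         * 148500 kHz): (1920 * 1000 / 148500) * 8 = 12 * 8 = 96, i.e. the
         * field is programmed in 1/8th-microsecond units. */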
1914         temp |= PIPE_WM_LINETIME_TIME(
1915                 ((mode->crtc_hdisplay * 1000) / mode->clock) * 8);
1916
1917         /* IPS watermarks are only used by pipe A, and are ignored by
1918          * pipes B and C.  They are calculated similarly to the common
1919          * linetime values, except that we are using CD clock frequency
1920          * in MHz instead of pixel rate for the division.
1921          *
1922          * This is a placeholder for the IPS watermark calculation code.
1923          */
1924
1925         I915_WRITE(PIPE_WM_LINETIME(pipe), temp);
1926 }
1927
1928 static bool
1929 sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
1930                               uint32_t sprite_width, int pixel_size,
1931                               const struct intel_watermark_params *display,
1932                               int display_latency_ns, int *sprite_wm)
1933 {
1934         struct drm_crtc *crtc;
1935         int clock;
1936         int entries, tlb_miss;
1937
1938         crtc = intel_get_crtc_for_plane(dev, plane);
1939         if (crtc->fb == NULL || !crtc->enabled) {
1940                 *sprite_wm = display->guard_size;
1941                 return false;
1942         }
1943
1944         clock = crtc->mode.clock;
1945
1946         /* Use the small buffer method to calculate the sprite watermark */
1947         entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
1948         tlb_miss = display->fifo_size*display->cacheline_size -
1949                 sprite_width * 8;
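        /* Any FIFO space beyond 8 bytes per sprite-width pixel is counted
         * as additional fill needed to cover TLB miss latency (cf. the TLB
         * note in the intel_update_watermarks() comment below). */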
1950         if (tlb_miss > 0)
1951                 entries += tlb_miss;
1952         entries = DIV_ROUND_UP(entries, display->cacheline_size);
1953         *sprite_wm = entries + display->guard_size;
1954         if (*sprite_wm > (int)display->max_wm)
1955                 *sprite_wm = display->max_wm;
1956
1957         return true;
1958 }
1959
1960 static bool
1961 sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
1962                                 uint32_t sprite_width, int pixel_size,
1963                                 const struct intel_watermark_params *display,
1964                                 int latency_ns, int *sprite_wm)
1965 {
1966         struct drm_crtc *crtc;
1967         unsigned long line_time_us;
1968         int clock;
1969         int line_count, line_size;
1970         int small, large;
1971         int entries;
1972
1973         if (!latency_ns) {
1974                 *sprite_wm = 0;
1975                 return false;
1976         }
1977
1978         crtc = intel_get_crtc_for_plane(dev, plane);
1979         clock = crtc->mode.clock;
1980         if (!clock) {
1981                 *sprite_wm = 0;
1982                 return false;
1983         }
1984
1985         line_time_us = (sprite_width * 1000) / clock;
1986         if (!line_time_us) {
1987                 *sprite_wm = 0;
1988                 return false;
1989         }
1990
1991         line_count = (latency_ns / line_time_us + 1000) / 1000;
1992         line_size = sprite_width * pixel_size;
1993
1994         /* Use the minimum of the small and large buffer methods for the sprite */
1995         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1996         large = line_count * line_size;
1997
1998         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1999         *sprite_wm = entries + display->guard_size;
2000
2001         return (*sprite_wm <= 0x3ff);
2002 }
2003
2004 static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
2005                                          uint32_t sprite_width, int pixel_size)
2006 {
2007         struct drm_i915_private *dev_priv = dev->dev_private;
2008         int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
2009         u32 val;
2010         int sprite_wm, reg;
2011         int ret;
2012
2013         switch (pipe) {
2014         case 0:
2015                 reg = WM0_PIPEA_ILK;
2016                 break;
2017         case 1:
2018                 reg = WM0_PIPEB_ILK;
2019                 break;
2020         case 2:
2021                 reg = WM0_PIPEC_IVB;
2022                 break;
2023         default:
2024                 return; /* bad pipe */
2025         }
2026
2027         ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
2028                                             &sandybridge_display_wm_info,
2029                                             latency, &sprite_wm);
2030         if (!ret) {
2031                 DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
2032                               pipe);
2033                 return;
2034         }
2035
2036         val = I915_READ(reg);
2037         val &= ~WM0_PIPE_SPRITE_MASK;
2038         I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
2039         DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
2040
2041
2042         ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
2043                                               pixel_size,
2044                                               &sandybridge_display_srwm_info,
2045                                               SNB_READ_WM1_LATENCY() * 500,
2046                                               &sprite_wm);
2047         if (!ret) {
2048                 DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
2049                               pipe);
2050                 return;
2051         }
2052         I915_WRITE(WM1S_LP_ILK, sprite_wm);
2053
2054         /* Only IVB has two more LP watermarks for sprite */
2055         if (!IS_IVYBRIDGE(dev))
2056                 return;
2057
2058         ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
2059                                               pixel_size,
2060                                               &sandybridge_display_srwm_info,
2061                                               SNB_READ_WM2_LATENCY() * 500,
2062                                               &sprite_wm);
2063         if (!ret) {
2064                 DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
2065                               pipe);
2066                 return;
2067         }
2068         I915_WRITE(WM2S_LP_IVB, sprite_wm);
2069
2070         ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
2071                                               pixel_size,
2072                                               &sandybridge_display_srwm_info,
2073                                               SNB_READ_WM3_LATENCY() * 500,
2074                                               &sprite_wm);
2075         if (!ret) {
2076                 DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
2077                               pipe);
2078                 return;
2079         }
2080         I915_WRITE(WM3S_LP_IVB, sprite_wm);
2081 }
2082
2083 /**
2084  * intel_update_watermarks - update FIFO watermark values based on current modes
2085  *
2086  * Calculate watermark values for the various WM regs based on current mode
2087  * and plane configuration.
2088  *
2089  * There are several cases to deal with here:
2090  *   - normal (i.e. non-self-refresh)
2091  *   - self-refresh (SR) mode
2092  *   - lines are large relative to FIFO size (buffer can hold up to 2)
2093  *   - lines are small relative to FIFO size (buffer can hold more than 2
2094  *     lines), so need to account for TLB latency
2095  *
2096  *   The normal calculation is:
2097  *     watermark = dotclock * bytes per pixel * latency
2098  *   where latency is platform & configuration dependent (we assume pessimal
2099  *   values here).
2100  *
2101  *   The SR calculation is:
2102  *     watermark = (trunc(latency/line time)+1) * surface width *
2103  *       bytes per pixel
2104  *   where
2105  *     line time = htotal / dotclock
2106  *     surface width = hdisplay for normal plane and 64 for cursor
2107  *   and latency is assumed to be high, as above.
2108  *
2109  * The final value programmed to the register should always be rounded up,
2110  * and include an extra 2 entries to account for clock crossings.
2111  *
2112  * We don't use the sprite, so we can ignore that.  And on Crestline we have
2113  * to set the non-SR watermarks to 8.
2114  */
2115 void intel_update_watermarks(struct drm_device *dev)
2116 {
2117         struct drm_i915_private *dev_priv = dev->dev_private;
2118
2119         if (dev_priv->display.update_wm)
2120                 dev_priv->display.update_wm(dev);
2121 }
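/*
 * Worked example of the formulas above (hypothetical numbers: 1920x1080
 * plane, dotclock = 148500 kHz, htotal = 2200, 32bpp, 2us latency):
 *
 *   normal: watermark = 148.5 MHz * 4 B/px * 2 us ~= 1188 bytes
 *
 *   SR:     line time = 2200 / 148500 kHz ~= 14.8 us
 *           watermark = (trunc(2 / 14.8) + 1) * 1920 * 4 = 7680 bytes
 *
 * Each result is then divided into FIFO cachelines, rounded up, and given
 * the extra 2-entry guard before being programmed.
 */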
2122
2123 void intel_update_linetime_watermarks(struct drm_device *dev,
2124                 int pipe, struct drm_display_mode *mode)
2125 {
2126         struct drm_i915_private *dev_priv = dev->dev_private;
2127
2128         if (dev_priv->display.update_linetime_wm)
2129                 dev_priv->display.update_linetime_wm(dev, pipe, mode);
2130 }
2131
2132 void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
2133                                     uint32_t sprite_width, int pixel_size)
2134 {
2135         struct drm_i915_private *dev_priv = dev->dev_private;
2136
2137         if (dev_priv->display.update_sprite_wm)
2138                 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
2139                                                    pixel_size);
2140 }
2141
2142 static struct drm_i915_gem_object *
2143 intel_alloc_context_page(struct drm_device *dev)
2144 {
2145         struct drm_i915_gem_object *ctx;
2146         int ret;
2147
2148         DRM_LOCK_ASSERT(dev);
2149
2150         ctx = i915_gem_alloc_object(dev, 4096);
2151         if (!ctx) {
2152                 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
2153                 return NULL;
2154         }
2155
2156         ret = i915_gem_object_pin(ctx, 4096, true);
2157         if (ret) {
2158                 DRM_ERROR("failed to pin power context: %d\n", ret);
2159                 goto err_unref;
2160         }
2161
2162         ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
2163         if (ret) {
2164                 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
2165                 goto err_unpin;
2166         }
2167
2168         return ctx;
2169
2170 err_unpin:
2171         i915_gem_object_unpin(ctx);
2172 err_unref:
2173         drm_gem_object_unreference(&ctx->base);
2174         /* Keep the DRM lock held; it was asserted above and the caller drops it. */
2175         return NULL;
2176 }
2177
2178 bool ironlake_set_drps(struct drm_device *dev, u8 val)
2179 {
2180         struct drm_i915_private *dev_priv = dev->dev_private;
2181         u16 rgvswctl;
2182
2183         rgvswctl = I915_READ16(MEMSWCTL);
2184         if (rgvswctl & MEMCTL_CMD_STS) {
2185                 DRM_DEBUG("gpu busy, RCS change rejected\n");
2186                 return false; /* still busy with another command */
2187         }
2188
2189         rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
2190                 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
2191         I915_WRITE16(MEMSWCTL, rgvswctl);
2192         POSTING_READ16(MEMSWCTL);
2193
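        /* Writing the command back with the STS bit set submits it; the
         * hardware clears STS once the frequency change completes, which
         * is what the busy check above tests for. */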
2194         rgvswctl |= MEMCTL_CMD_STS;
2195         I915_WRITE16(MEMSWCTL, rgvswctl);
2196
2197         return true;
2198 }
2199
2200 void ironlake_enable_drps(struct drm_device *dev)
2201 {
2202         struct drm_i915_private *dev_priv = dev->dev_private;
2203         u32 rgvmodectl = I915_READ(MEMMODECTL);
2204         u8 fmax, fmin, fstart, vstart;
2205
2206         /* Enable temp reporting */
2207         I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
2208         I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
2209
2210         /* 100ms RC evaluation intervals */
2211         I915_WRITE(RCUPEI, 100000);
2212         I915_WRITE(RCDNEI, 100000);
2213
2214         /* Set max/min thresholds to 90ms and 80ms respectively */
2215         I915_WRITE(RCBMAXAVG, 90000);
2216         I915_WRITE(RCBMINAVG, 80000);
2217
2218         I915_WRITE(MEMIHYST, 1);
2219
2220         /* Set up min, max, and cur for interrupt handling */
2221         fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
2222         fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
2223         fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
2224                 MEMMODE_FSTART_SHIFT;
2225
2226         vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
2227                 PXVFREQ_PX_SHIFT;
2228
2229         dev_priv->fmax = fmax; /* IPS callback will increase this */
2230         dev_priv->fstart = fstart;
2231
2232         dev_priv->max_delay = fstart;
2233         dev_priv->min_delay = fmin;
2234         dev_priv->cur_delay = fstart;
2235
2236         DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
2237                          fmax, fmin, fstart);
2238
2239         I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
2240
2241         /*
2242          * Interrupts will be enabled in ironlake_irq_postinstall
2243          */
2244
2245         I915_WRITE(VIDSTART, vstart);
2246         POSTING_READ(VIDSTART);
2247
2248         rgvmodectl |= MEMMODE_SWMODE_EN;
2249         I915_WRITE(MEMMODECTL, rgvmodectl);
2250
2251         if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
2252                 DRM_ERROR("stuck trying to change perf mode\n");
2253         pause("915dsp", 1);
2254
2255         ironlake_set_drps(dev, fstart);
2256
2257         dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
2258                 I915_READ(0x112e0);
2259         dev_priv->last_time1 = jiffies_to_msecs(jiffies);
2260         dev_priv->last_count2 = I915_READ(0x112f4);
2261         nanotime(&dev_priv->last_time2);
2262 }
2263
2264 void ironlake_disable_drps(struct drm_device *dev)
2265 {
2266         struct drm_i915_private *dev_priv = dev->dev_private;
2267         u16 rgvswctl = I915_READ16(MEMSWCTL);
2268
2269         /* Ack interrupts, disable EFC interrupt */
2270         I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
2271         I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
2272         I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
2273         I915_WRITE(DEIIR, DE_PCU_EVENT);
2274         I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
2275
2276         /* Go back to the starting frequency */
2277         ironlake_set_drps(dev, dev_priv->fstart);
2278         pause("915dsp", 1);
2279         rgvswctl |= MEMCTL_CMD_STS;
2280         I915_WRITE(MEMSWCTL, rgvswctl);
2281         pause("915dsp", 1);
2282
2283 }
2284
2285 void gen6_set_rps(struct drm_device *dev, u8 val)
2286 {
2287         struct drm_i915_private *dev_priv = dev->dev_private;
2288         u32 swreq;
2289
2290         swreq = (val & 0x3ff) << 25;
2291         I915_WRITE(GEN6_RPNSWREQ, swreq);
2292 }
2293
2294 void gen6_disable_rps(struct drm_device *dev)
2295 {
2296         struct drm_i915_private *dev_priv = dev->dev_private;
2297
2298         I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
2299         I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
2300         I915_WRITE(GEN6_PMIER, 0);
2301         /* Completely masking the PM interrupts here doesn't race with the rps
2302          * work item unmasking them again, because that path uses a different
2303          * register (PMIMR) to mask PM interrupts. The only risk is leaving
2304          * stale bits in PMIIR and PMIMR, which gen6_enable_rps will clean up. */
2305
2306         mtx_lock(&dev_priv->rps_lock);
2307         dev_priv->pm_iir = 0;
2308         mtx_unlock(&dev_priv->rps_lock);
2309
2310         I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2311 }
2312
2313 int intel_enable_rc6(const struct drm_device *dev)
2314 {
2315         /*
2316          * Respect the kernel parameter if it is set
2317          */
2318         if (i915_enable_rc6 >= 0)
2319                 return i915_enable_rc6;
2320
2321         /*
2322          * Disable RC6 on Ironlake
2323          */
2324         if (INTEL_INFO(dev)->gen == 5)
2325                 return 0;
2326
2327         /* Sorry Haswell, no RC6 for you for now. */
2328         if (IS_HASWELL(dev))
2329                 return 0;
2330
2331         /*
2332          * On Sandybridge, enable plain RC6 only; deep RC6 states are disabled
2333          */
2334         if (INTEL_INFO(dev)->gen == 6) {
2335                 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
2336                 return INTEL_RC6_ENABLE;
2337         }
2338         DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
2339         return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
2340 }
2341
2342 void gen6_enable_rps(struct drm_i915_private *dev_priv)
2343 {
2344         struct intel_ring_buffer *ring;
2345         u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
2346         u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
2347         u32 pcu_mbox, rc6_mask = 0;
2348         u32 gtfifodbg;
2349         int cur_freq, min_freq, max_freq;
2350         int rc6_mode;
2351         int i;
2352
2353         /* Here begins a magic sequence of register writes to enable
2354          * auto-downclocking.
2355          *
2356          * Perhaps there might be some value in exposing these to
2357          * userspace...
2358          */
2359         I915_WRITE(GEN6_RC_STATE, 0);
2360         DRM_LOCK(dev_priv->dev);
2361
2362         /* Clear the DBG now so we don't confuse earlier errors */
2363         if ((gtfifodbg = I915_READ(GTFIFODBG))) {
2364                 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
2365                 I915_WRITE(GTFIFODBG, gtfifodbg);
2366         }
2367
2368         gen6_gt_force_wake_get(dev_priv);
2369
2370         /* disable the counters and set deterministic thresholds */
2371         I915_WRITE(GEN6_RC_CONTROL, 0);
2372
2373         I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
2374         I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
2375         I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
2376         I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
2377         I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
2378
2379         for_each_ring(ring, dev_priv, i)
2380                 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
2381
2382         I915_WRITE(GEN6_RC_SLEEP, 0);
2383         I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
2384         I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
2385         I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
2386         I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
2387
2388         rc6_mode = intel_enable_rc6(dev_priv->dev);
2389         if (rc6_mode & INTEL_RC6_ENABLE)
2390                 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
2391
2392         if (rc6_mode & INTEL_RC6p_ENABLE)
2393                 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
2394
2395         if (rc6_mode & INTEL_RC6pp_ENABLE)
2396                 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
2397
2398         DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
2399                         (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
2400                         (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
2401                         (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
2402
2403         I915_WRITE(GEN6_RC_CONTROL,
2404                    rc6_mask |
2405                    GEN6_RC_CTL_EI_MODE(1) |
2406                    GEN6_RC_CTL_HW_ENABLE);
2407
2408         I915_WRITE(GEN6_RPNSWREQ,
2409                    GEN6_FREQUENCY(10) |
2410                    GEN6_OFFSET(0) |
2411                    GEN6_AGGRESSIVE_TURBO);
2412         I915_WRITE(GEN6_RC_VIDEO_FREQ,
2413                    GEN6_FREQUENCY(12));
2414
2415         I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
2416         I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
2417                    18 << 24 |
2418                    6 << 16);
2419         I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
2420         I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
2421         I915_WRITE(GEN6_RP_UP_EI, 100000);
2422         I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
2423         I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
2424         I915_WRITE(GEN6_RP_CONTROL,
2425                    GEN6_RP_MEDIA_TURBO |
2426                    GEN6_RP_MEDIA_HW_MODE |
2427                    GEN6_RP_MEDIA_IS_GFX |
2428                    GEN6_RP_ENABLE |
2429                    GEN6_RP_UP_BUSY_AVG |
2430                    GEN6_RP_DOWN_IDLE_CONT);
2431
2432         if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
2433                      500))
2434                 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
2435
2436         I915_WRITE(GEN6_PCODE_DATA, 0);
2437         I915_WRITE(GEN6_PCODE_MAILBOX,
2438                    GEN6_PCODE_READY |
2439                    GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
2440         if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
2441                      500))
2442                 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
2443
2444         min_freq = (rp_state_cap & 0xff0000) >> 16;
2445         max_freq = rp_state_cap & 0xff;
2446         cur_freq = (gt_perf_status & 0xff00) >> 8;
2447
2448         /* Check for overclock support */
2449         if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
2450                      500))
2451                 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
2452         I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
2453         pcu_mbox = I915_READ(GEN6_PCODE_DATA);
2454         if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
2455                      500))
2456                 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
2457         if (pcu_mbox & (1<<31)) { /* OC supported */
2458                 max_freq = pcu_mbox & 0xff;
2459                 DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
2460         }
2461
2462         /* In units of 100MHz */
2463         dev_priv->max_delay = max_freq;
2464         dev_priv->min_delay = min_freq;
2465         dev_priv->cur_delay = cur_freq;
2466
2467         /* requires MSI enabled */
2468         I915_WRITE(GEN6_PMIER,
2469                    GEN6_PM_MBOX_EVENT |
2470                    GEN6_PM_THERMAL_EVENT |
2471                    GEN6_PM_RP_DOWN_TIMEOUT |
2472                    GEN6_PM_RP_UP_THRESHOLD |
2473                    GEN6_PM_RP_DOWN_THRESHOLD |
2474                    GEN6_PM_RP_UP_EI_EXPIRED |
2475                    GEN6_PM_RP_DOWN_EI_EXPIRED);
2476         mtx_lock(&dev_priv->rps_lock);
2477         if (dev_priv->pm_iir != 0)
2478                 printf("KMS: pm_iir %x\n", dev_priv->pm_iir);
2479         I915_WRITE(GEN6_PMIMR, 0);
2480         mtx_unlock(&dev_priv->rps_lock);
2481         /* enable all PM interrupts */
2482         I915_WRITE(GEN6_PMINTRMSK, 0);
2483
2484         gen6_gt_force_wake_put(dev_priv);
2485         DRM_UNLOCK(dev_priv->dev);
2486 }
2487
2488 void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
2489 {
2490         int min_freq = 15;
2491         int gpu_freq, ia_freq, max_ia_freq;
2492         int scaling_factor = 180;
2493         uint64_t tsc;   /* don't shadow the kernel's global tsc_freq */
2494
2495 #if 0
2496         max_ia_freq = cpufreq_quick_get_max(0);
2497         /*
2498          * Default to measured freq if none found, PCU will ensure we don't go
2499          * over
2500          */
2501         if (!max_ia_freq)
2502                 max_ia_freq = tsc_khz;
2503
2504         /* Convert from kHz to MHz */
2505         max_ia_freq /= 1000;
2506 #else
2507         tsc = atomic_load_acq_64(&tsc_freq);
2508         max_ia_freq = tsc / 1000 / 1000;        /* Hz -> MHz */
2509 #endif
2510
2511         DRM_LOCK(dev_priv->dev);
2512
2513         /*
2514          * For each potential GPU frequency, load a ring frequency we'd like
2515          * to use for memory access.  We do this by specifying the IA frequency
2516          * the PCU should use as a reference to determine the ring frequency.
2517          */
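        /*
         * Illustrative arithmetic (hypothetical: max_ia_freq = 3400 MHz,
         * max_delay = 22, gpu_freq = 18, so diff = 4):
         *   ia_freq = 3400 - (4 * 180) / 2 = 3040 MHz
         *   rounded into 100 MHz units: (3040 + 50) / 100 = 30
         */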
2518         for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
2519              gpu_freq--) {
2520                 int diff = dev_priv->max_delay - gpu_freq;
2521                 int d;
2522
2523                 /*
2524                  * For GPU frequencies less than 750MHz, just use the lowest
2525                  * ring freq.
2526                  */
2527                 if (gpu_freq < min_freq)
2528                         ia_freq = 800;
2529                 else
2530                         ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
2531                 d = 100;
2532                 ia_freq = (ia_freq + d / 2) / d;
2533
2534                 I915_WRITE(GEN6_PCODE_DATA,
2535                            (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
2536                            gpu_freq);
2537                 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
2538                            GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
2539                 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
2540                               GEN6_PCODE_READY) == 0, 10)) {
2541                         DRM_ERROR("pcode write of freq table timed out\n");
2542                         continue;
2543                 }
2544         }
2545
2546         DRM_UNLOCK(dev_priv->dev);
2547 }
2548
2549 static void ironlake_teardown_rc6(struct drm_device *dev)
2550 {
2551         struct drm_i915_private *dev_priv = dev->dev_private;
2552
2553         if (dev_priv->renderctx) {
2554                 i915_gem_object_unpin(dev_priv->renderctx);
2555                 drm_gem_object_unreference(&dev_priv->renderctx->base);
2556                 dev_priv->renderctx = NULL;
2557         }
2558
2559         if (dev_priv->pwrctx) {
2560                 i915_gem_object_unpin(dev_priv->pwrctx);
2561                 drm_gem_object_unreference(&dev_priv->pwrctx->base);
2562                 dev_priv->pwrctx = NULL;
2563         }
2564 }
2565
2566 void ironlake_disable_rc6(struct drm_device *dev)
2567 {
2568         struct drm_i915_private *dev_priv = dev->dev_private;
2569
2570         if (I915_READ(PWRCTXA)) {
2571                 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
2572                 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
2573                 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
2574                          50);
2575
2576                 I915_WRITE(PWRCTXA, 0);
2577                 POSTING_READ(PWRCTXA);
2578
2579                 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
2580                 POSTING_READ(RSTDBYCTL);
2581         }
2582
2583         ironlake_teardown_rc6(dev);
2584 }
2585
2586 static int ironlake_setup_rc6(struct drm_device *dev)
2587 {
2588         struct drm_i915_private *dev_priv = dev->dev_private;
2589
2590         if (dev_priv->renderctx == NULL)
2591                 dev_priv->renderctx = intel_alloc_context_page(dev);
2592         if (!dev_priv->renderctx)
2593                 return -ENOMEM;
2594
2595         if (dev_priv->pwrctx == NULL)
2596                 dev_priv->pwrctx = intel_alloc_context_page(dev);
2597         if (!dev_priv->pwrctx) {
2598                 ironlake_teardown_rc6(dev);
2599                 return -ENOMEM;
2600         }
2601
2602         return 0;
2603 }
2604
2605 void ironlake_enable_rc6(struct drm_device *dev)
2606 {
2607         struct drm_i915_private *dev_priv = dev->dev_private;
2608         struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
2609         int ret;
2610
2611         /* rc6 disabled by default due to repeated reports of hanging during
2612          * boot and resume.
2613          */
2614         if (!intel_enable_rc6(dev))
2615                 return;
2616
2617         DRM_LOCK(dev);
2618         ret = ironlake_setup_rc6(dev);
2619         if (ret) {
2620                 DRM_UNLOCK(dev);
2621                 return;
2622         }
2623
2624         /*
2625          * GPU can automatically power down the render unit if given a page
2626          * to save state.
2627          */
2628         ret = intel_ring_begin(ring, 6);
2629         if (ret) {
2630                 ironlake_teardown_rc6(dev);
2631                 DRM_UNLOCK(dev);
2632                 return;
2633         }
2634
2635         intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
2636         intel_ring_emit(ring, MI_SET_CONTEXT);
2637         intel_ring_emit(ring, dev_priv->renderctx->gtt_offset |
2638                         MI_MM_SPACE_GTT |
2639                         MI_SAVE_EXT_STATE_EN |
2640                         MI_RESTORE_EXT_STATE_EN |
2641                         MI_RESTORE_INHIBIT);
2642         intel_ring_emit(ring, MI_SUSPEND_FLUSH);
2643         intel_ring_emit(ring, MI_NOOP);
2644         intel_ring_emit(ring, MI_FLUSH);
2645         intel_ring_advance(ring);
2646
2647         /*
2648          * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
2649          * does an implicit flush; combined with the MI_FLUSH above, it should
2650          * be safe to assume that renderctx is valid.
2651          */
2652         ret = intel_wait_ring_idle(ring);
2653         if (ret) {
2654                 DRM_ERROR("failed to enable ironlake power power savings\n");
2655                 ironlake_teardown_rc6(dev);
2656                 DRM_UNLOCK(dev);
2657                 return;
2658         }
2659
2660         I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
2661         I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
2662         DRM_UNLOCK(dev);
2663 }
2664
2665 static unsigned long intel_pxfreq(u32 vidfreq)
2666 {
2667         unsigned long freq;
2668         int div = (vidfreq & 0x3f0000) >> 16;
2669         int post = (vidfreq & 0x3000) >> 12;
2670         int pre = (vidfreq & 0x7);
2671
2672         if (!pre)
2673                 return 0;
2674
2675         freq = ((div * 133333) / ((1<<post) * pre));
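        /* E.g. (hypothetical encoding) div = 12, post = 1, pre = 3:
         * freq = (12 * 133333) / (2 * 3) = 266666; the divider scales a
         * 133.333 MHz reference, so the result is presumably in kHz. */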
2676
2677         return freq;
2678 }
2679
2680 static const struct cparams {
2681         u16 i;
2682         u16 t;
2683         u16 m;
2684         u16 c;
2685 } cparams[] = {
2686         { 1, 1333, 301, 28664 },
2687         { 1, 1066, 294, 24460 },
2688         { 1, 800, 294, 25192 },
2689         { 0, 1333, 276, 27605 },
2690         { 0, 1066, 276, 27605 },
2691         { 0, 800, 231, 23784 },
2692 };
2693
2694 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
2695 {
2696         u64 total_count, diff, ret;
2697         u32 count1, count2, count3, m = 0, c = 0;
2698         unsigned long now = jiffies_to_msecs(jiffies), diff1;
2699         int i;
2700
2701         diff1 = now - dev_priv->last_time1;
2702         /*
2703          * sysctl(8) reads this value twice in rapid succession.
2704          * There is a high chance that both reads happen in the same
2705          * timer tick.  Use the cached value to avoid dividing by
2706          * zero and to give the hw a chance to gather more samples.
2707          */
2708         if (diff1 <= 10)
2709                 return (dev_priv->chipset_power);
2710
2711         count1 = I915_READ(DMIEC);
2712         count2 = I915_READ(DDREC);
2713         count3 = I915_READ(CSIEC);
2714
2715         total_count = count1 + count2 + count3;
2716
2717         /* FIXME: handle per-counter overflow */
2718         if (total_count < dev_priv->last_count1) {
2719                 diff = ~0UL - dev_priv->last_count1;
2720                 diff += total_count;
2721         } else {
2722                 diff = total_count - dev_priv->last_count1;
2723         }
2724
2725         for (i = 0; i < DRM_ARRAY_SIZE(cparams); i++) {
2726                 if (cparams[i].i == dev_priv->c_m &&
2727                     cparams[i].t == dev_priv->r_t) {
2728                         m = cparams[i].m;
2729                         c = cparams[i].c;
2730                         break;
2731                 }
2732         }
2733
2734         diff = diff / diff1;
2735         ret = ((m * diff) + c);
2736         ret = ret / 10;
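        /* diff is now energy counts per millisecond; m and c come from the
         * cparams table above and form a linear fit, and the final /10
         * scales the result into the units the caller expects. */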
2737
2738         dev_priv->last_count1 = total_count;
2739         dev_priv->last_time1 = now;
2740
2741         dev_priv->chipset_power = ret;
2742         return (ret);
2743 }
2744
2745 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
2746 {
2747         unsigned long m, x, b;
2748         u32 tsfs;
2749
2750         tsfs = I915_READ(TSFS);
2751
2752         m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
2753         x = I915_READ8(I915_TR1);
2754
2755         b = tsfs & TSFS_INTR_MASK;
2756
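        /* Linear thermal fit: slope m (scaled by 1/127) applied to the raw
         * TR1 reading x, minus the intercept b, both derived from TSFS. */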
2757         return ((m * x) / 127) - b;
2758 }
2759
2760 static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
2761 {
2762         static const struct v_table {
2763                 u16 vd; /* in .1 mil */
2764                 u16 vm; /* in .1 mil */
2765         } v_table[] = {
2766                 { 0, 0, },
2767                 { 375, 0, },
2768                 { 500, 0, },
2769                 { 625, 0, },
2770                 { 750, 0, },
2771                 { 875, 0, },
2772                 { 1000, 0, },
2773                 { 1125, 0, },
2774                 { 4125, 3000, },
2775                 { 4125, 3000, },
2776                 { 4125, 3000, },
2777                 { 4125, 3000, },
2778                 { 4125, 3000, },
2779                 { 4125, 3000, },
2780                 { 4125, 3000, },
2781                 { 4125, 3000, },
2782                 { 4125, 3000, },
2783                 { 4125, 3000, },
2784                 { 4125, 3000, },
2785                 { 4125, 3000, },
2786                 { 4125, 3000, },
2787                 { 4125, 3000, },
2788                 { 4125, 3000, },
2789                 { 4125, 3000, },
2790                 { 4125, 3000, },
2791                 { 4125, 3000, },
2792                 { 4125, 3000, },
2793                 { 4125, 3000, },
2794                 { 4125, 3000, },
2795                 { 4125, 3000, },
2796                 { 4125, 3000, },
2797                 { 4125, 3000, },
2798                 { 4250, 3125, },
2799                 { 4375, 3250, },
2800                 { 4500, 3375, },
2801                 { 4625, 3500, },
2802                 { 4750, 3625, },
2803                 { 4875, 3750, },
2804                 { 5000, 3875, },
2805                 { 5125, 4000, },
2806                 { 5250, 4125, },
2807                 { 5375, 4250, },
2808                 { 5500, 4375, },
2809                 { 5625, 4500, },
2810                 { 5750, 4625, },
2811                 { 5875, 4750, },
2812                 { 6000, 4875, },
2813                 { 6125, 5000, },
2814                 { 6250, 5125, },
2815                 { 6375, 5250, },
2816                 { 6500, 5375, },
2817                 { 6625, 5500, },
2818                 { 6750, 5625, },
2819                 { 6875, 5750, },
2820                 { 7000, 5875, },
2821                 { 7125, 6000, },
2822                 { 7250, 6125, },
2823                 { 7375, 6250, },
2824                 { 7500, 6375, },
2825                 { 7625, 6500, },
2826                 { 7750, 6625, },
2827                 { 7875, 6750, },
2828                 { 8000, 6875, },
2829                 { 8125, 7000, },
2830                 { 8250, 7125, },
2831                 { 8375, 7250, },
2832                 { 8500, 7375, },
2833                 { 8625, 7500, },
2834                 { 8750, 7625, },
2835                 { 8875, 7750, },
2836                 { 9000, 7875, },
2837                 { 9125, 8000, },
2838                 { 9250, 8125, },
2839                 { 9375, 8250, },
2840                 { 9500, 8375, },
2841                 { 9625, 8500, },
2842                 { 9750, 8625, },
2843                 { 9875, 8750, },
2844                 { 10000, 8875, },
2845                 { 10125, 9000, },
2846                 { 10250, 9125, },
2847                 { 10375, 9250, },
2848                 { 10500, 9375, },
2849                 { 10625, 9500, },
2850                 { 10750, 9625, },
2851                 { 10875, 9750, },
2852                 { 11000, 9875, },
2853                 { 11125, 10000, },
2854                 { 11250, 10125, },
2855                 { 11375, 10250, },
2856                 { 11500, 10375, },
2857                 { 11625, 10500, },
2858                 { 11750, 10625, },
2859                 { 11875, 10750, },
2860                 { 12000, 10875, },
2861                 { 12125, 11000, },
2862                 { 12250, 11125, },
2863                 { 12375, 11250, },
2864                 { 12500, 11375, },
2865                 { 12625, 11500, },
2866                 { 12750, 11625, },
2867                 { 12875, 11750, },
2868                 { 13000, 11875, },
2869                 { 13125, 12000, },
2870                 { 13250, 12125, },
2871                 { 13375, 12250, },
2872                 { 13500, 12375, },
2873                 { 13625, 12500, },
2874                 { 13750, 12625, },
2875                 { 13875, 12750, },
2876                 { 14000, 12875, },
2877                 { 14125, 13000, },
2878                 { 14250, 13125, },
2879                 { 14375, 13250, },
2880                 { 14500, 13375, },
2881                 { 14625, 13500, },
2882                 { 14750, 13625, },
2883                 { 14875, 13750, },
2884                 { 15000, 13875, },
2885                 { 15125, 14000, },
2886                 { 15250, 14125, },
2887                 { 15375, 14250, },
2888                 { 15500, 14375, },
2889                 { 15625, 14500, },
2890                 { 15750, 14625, },
2891                 { 15875, 14750, },
2892                 { 16000, 14875, },
2893                 { 16125, 15000, },
2894         };
2895         if (dev_priv->info->is_mobile)
2896                 return v_table[pxvid].vm;
2897         else
2898                 return v_table[pxvid].vd;
2899 }
2900
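     /*
      * Refresh the cached graphics power estimate (dev_priv->gfx_power)
      * from the GFXEC energy counter; only meaningful on Ironlake (gen 5).
      */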
2901 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
2902 {
2903         struct timespec now, diff1;
2904         u64 diff;
2905         unsigned long diffms;
2906         u32 count;
2907
2908         if (dev_priv->info->gen != 5)
2909                 return;
2910
2911         nanotime(&now);
2912         diff1 = now;
2913         timespecsub(&diff1, &dev_priv->last_time2);
2914
2915         /* Don't divide by 0 */
2916         diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
2917         if (!diffms)
2918                 return;
2919
2920         count = I915_READ(GFXEC);
2921
2922         if (count < dev_priv->last_count2) {
2923                 diff = (u32)~0 - dev_priv->last_count2; /* GFXEC wraps at 2^32 */
2924                 diff += count;
2925         } else {
2926                 diff = count - dev_priv->last_count2;
2927         }
2928
2929         dev_priv->last_count2 = count;
2930         dev_priv->last_time2 = now;
2931
2932         /* More magic constants... */
2933         diff = diff * 1181;
2934         diff = diff / (diffms * 10);
2935         dev_priv->gfx_power = diff;
2936 }
2937
2938 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
2939 {
2940         unsigned long t, corr, state1, corr2, state2;
2941         u32 pxvid, ext_v;
2942
2943         pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
2944         pxvid = (pxvid >> 24) & 0x7f;
2945         ext_v = pvid_to_extvid(dev_priv, pxvid);
2946
2947         state1 = ext_v;
2948
2949         t = i915_mch_val(dev_priv);
2950
2951         /* Revel in the empirically derived constants */
2952
2953         /* Correction factor in 1/100000 units */
2954         if (t > 80)
2955                 corr = ((t * 2349) + 135940);
2956         else if (t >= 50)
2957                 corr = ((t * 964) + 29317);
2958         else /* < 50 */
2959                 corr = ((t * 301) + 1004);
2960
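             /*
              * Fold the voltage-dependent term and the per-chip fuse
              * correction (dev_priv->corr, read in intel_init_emon()) into
              * the correction factor before scaling state2 down to mW.
              */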
2961         corr = corr * ((150142 * state1) / 10000 - 78642);
2962         corr /= 100000;
2963         corr2 = (corr * dev_priv->corr);
2964
2965         state2 = (corr2 * state1) / 10000;
2966         state2 /= 100; /* convert to mW */
2967
2968         i915_update_gfx_val(dev_priv);
2969
2970         return dev_priv->gfx_power + state2;
2971 }
2972
2973 /**
2974  * i915_read_mch_val - return value for IPS use
2975  *
2976  * Calculate and return a value for the IPS driver to use when deciding whether
2977  * we have thermal and power headroom to increase CPU or GPU power budget.
2978  */
2979 unsigned long i915_read_mch_val(void)
2980 {
2981         struct drm_i915_private *dev_priv;
2982         unsigned long chipset_val, graphics_val, ret = 0;
2983
2984         mtx_lock(&mchdev_lock);
2985         if (!i915_mch_dev)
2986                 goto out_unlock;
2987         dev_priv = i915_mch_dev;
2988
2989         chipset_val = i915_chipset_val(dev_priv);
2990         graphics_val = i915_gfx_val(dev_priv);
2991
2992         ret = chipset_val + graphics_val;
2993
2994 out_unlock:
2995         mtx_unlock(&mchdev_lock);
2996
2997         return ret;
2998 }
2999
3000 /**
3001  * i915_gpu_raise - raise GPU frequency limit
3002  *
3003  * Raise the limit; IPS indicates we have thermal headroom.
3004  */
3005 bool i915_gpu_raise(void)
3006 {
3007         struct drm_i915_private *dev_priv;
3008         bool ret = true;
3009
3010         mtx_lock(&mchdev_lock);
3011         if (!i915_mch_dev) {
3012                 ret = false;
3013                 goto out_unlock;
3014         }
3015         dev_priv = i915_mch_dev;
3016
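             /* A smaller delay value means a higher frequency ceiling. */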
3017         if (dev_priv->max_delay > dev_priv->fmax)
3018                 dev_priv->max_delay--;
3019
3020 out_unlock:
3021         mtx_unlock(&mchdev_lock);
3022
3023         return ret;
3024 }
3025
3026 /**
3027  * i915_gpu_lower - lower GPU frequency limit
3028  *
3029  * IPS indicates we're close to a thermal limit, so throttle back the GPU
3030  * frequency maximum.
3031  */
3032 bool i915_gpu_lower(void)
3033 {
3034         struct drm_i915_private *dev_priv;
3035         bool ret = true;
3036
3037         mtx_lock(&mchdev_lock);
3038         if (!i915_mch_dev) {
3039                 ret = false;
3040                 goto out_unlock;
3041         }
3042         dev_priv = i915_mch_dev;
3043
3044         if (dev_priv->max_delay < dev_priv->min_delay)
3045                 dev_priv->max_delay++;
3046
3047 out_unlock:
3048         mtx_unlock(&mchdev_lock);
3049
3050         return ret;
3051 }
3052
3053 /**
3054  * i915_gpu_busy - indicate GPU busyness to IPS
3055  *
3056  * Tell the IPS driver whether or not the GPU is busy.
3057  */
3058 bool i915_gpu_busy(void)
3059 {
3060         struct drm_i915_private *dev_priv;
3061         bool ret = false;
3062
3063         mtx_lock(&mchdev_lock);
3064         if (!i915_mch_dev)
3065                 goto out_unlock;
3066         dev_priv = i915_mch_dev;
3067
3068         ret = dev_priv->busy;
3069
3070 out_unlock:
3071         mtx_unlock(&mchdev_lock);
3072
3073         return ret;
3074 }
3075
3076 /**
3077  * i915_gpu_turbo_disable - disable graphics turbo
3078  *
3079  * Disable graphics turbo by resetting the max frequency and setting the
3080  * current frequency to the default.
3081  */
3082 bool i915_gpu_turbo_disable(void)
3083 {
3084         struct drm_i915_private *dev_priv;
3085         bool ret = true;
3086
3087         mtx_lock(&mchdev_lock);
3088         if (!i915_mch_dev) {
3089                 ret = false;
3090                 goto out_unlock;
3091         }
3092         dev_priv = i915_mch_dev;
3093
3094         dev_priv->max_delay = dev_priv->fstart;
3095
3096         if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
3097                 ret = false;
3098
3099 out_unlock:
3100         mtx_unlock(&mchdev_lock);
3101
3102         return ret;
3103 }
3104
3105 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
3106 {
3107         mtx_lock(&mchdev_lock);
3108         i915_mch_dev = dev_priv;
3109         dev_priv->mchdev_lock = &mchdev_lock;
3110         mtx_unlock(&mchdev_lock);
3111
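             /* ips_ping_for_i915_load() has no counterpart in this port,
              * so the call below stays disabled. */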
3112 #if 0
3113         ips_ping_for_i915_load();
3114 #endif
3115 }
3116
3117 void intel_gpu_ips_teardown(void)
3118 {
3119         mtx_lock(&mchdev_lock);
3120         i915_mch_dev = NULL;
3121         mtx_unlock(&mchdev_lock);
3122 }
3123
3124 void intel_init_emon(struct drm_device *dev)
3125 {
3126         struct drm_i915_private *dev_priv = dev->dev_private;
3127         u32 lcfuse;
3128         u8 pxw[16];
3129         int i;
3130
3131         /* Disable the energy counters while we program them */
3132         I915_WRITE(ECR, 0);
3133         POSTING_READ(ECR);
3134
3135         /* Program energy weights for various events */
3136         I915_WRITE(SDEW, 0x15040d00);
3137         I915_WRITE(CSIEW0, 0x007f0000);
3138         I915_WRITE(CSIEW1, 0x1e220004);
3139         I915_WRITE(CSIEW2, 0x04000004);
3140
3141         for (i = 0; i < 5; i++)
3142                 I915_WRITE(PEW + (i * 4), 0);
3143         for (i = 0; i < 3; i++)
3144                 I915_WRITE(DEW + (i * 4), 0);
3145
3146         /* Program P-state weights to account for frequency power adjustment */
3147         for (i = 0; i < 16; i++) {
3148                 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
3149                 unsigned long freq = intel_pxfreq(pxvidfreq);
3150                 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
3151                         PXVFREQ_PX_SHIFT;
3152                 unsigned long val;
3153
3154                 val = vid * vid;
3155                 val *= (freq / 1000);
3156                 val *= 255;
3157                 val /= (127*127*900);
3158                 if (val > 0xff)
3159                         DRM_ERROR("bad pxval: %lu\n", val);
3160                 pxw[i] = val;
3161         }
3162         /* Render standby states get 0 weight */
3163         pxw[14] = 0;
3164         pxw[15] = 0;
3165
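             /* Pack the 16 one-byte weights into the four PXW registers,
              * four weights per register, most significant byte first. */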
3166         for (i = 0; i < 4; i++) {
3167                 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
3168                         (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
3169                 I915_WRITE(PXW + (i * 4), val);
3170         }
3171
3172         /* Adjust magic regs to magic values (more experimental results) */
3173         I915_WRITE(OGW0, 0);
3174         I915_WRITE(OGW1, 0);
3175         I915_WRITE(EG0, 0x00007f00);
3176         I915_WRITE(EG1, 0x0000000e);
3177         I915_WRITE(EG2, 0x000e0000);
3178         I915_WRITE(EG3, 0x68000300);
3179         I915_WRITE(EG4, 0x42000000);
3180         I915_WRITE(EG5, 0x00140031);
3181         I915_WRITE(EG6, 0);
3182         I915_WRITE(EG7, 0);
3183
3184         for (i = 0; i < 8; i++)
3185                 I915_WRITE(PXWL + (i * 4), 0);
3186
3187         /* Enable PMON + select events */
3188         I915_WRITE(ECR, 0x80000019);
3189
3190         lcfuse = I915_READ(LCFUSE02);
3191
3192         dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
3193 }
3194
3195 static void ironlake_init_clock_gating(struct drm_device *dev)
3196 {
3197         struct drm_i915_private *dev_priv = dev->dev_private;
3198         uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
3199
3200         /* Required for FBC */
3201         dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
3202                 DPFCRUNIT_CLOCK_GATE_DISABLE |
3203                 DPFDUNIT_CLOCK_GATE_DISABLE;
3204         /* Required for CxSR */
3205         dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
3206
3207         I915_WRITE(PCH_3DCGDIS0,
3208                    MARIUNIT_CLOCK_GATE_DISABLE |
3209                    SVSMUNIT_CLOCK_GATE_DISABLE);
3210         I915_WRITE(PCH_3DCGDIS1,
3211                    VFMUNIT_CLOCK_GATE_DISABLE);
3212
3213         I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
3214
3215         /*
3216          * According to the spec, the following bits must be set to
3217          * enable memory self-refresh:
3218          *   bits 22/21 of 0x42004 (ILK_DISPLAY_CHICKEN2)
3219          *   bit 5 of 0x42020 (ILK_DSPCLK_GATE)
3220          *   bit 15 of 0x45000 (DISP_ARB_CTL)
3221          */
3222         I915_WRITE(ILK_DISPLAY_CHICKEN2,
3223                    (I915_READ(ILK_DISPLAY_CHICKEN2) |
3224                     ILK_DPARB_GATE | ILK_VSDPFD_FULL));
3225         I915_WRITE(ILK_DSPCLK_GATE,
3226                    (I915_READ(ILK_DSPCLK_GATE) |
3227                     ILK_DPARB_CLK_GATE));
3228         I915_WRITE(DISP_ARB_CTL,
3229                    (I915_READ(DISP_ARB_CTL) |
3230                     DISP_FBC_WM_DIS));
3231         I915_WRITE(WM3_LP_ILK, 0);
3232         I915_WRITE(WM2_LP_ILK, 0);
3233         I915_WRITE(WM1_LP_ILK, 0);
3234
3235         /*
3236          * Per the hardware documentation, the following bits must be
3237          * set unconditionally to enable FBC:
3238          *   bit 22 of 0x42000 (ILK_DISPLAY_CHICKEN1)
3239          *   bit 22 of 0x42004 (ILK_DISPLAY_CHICKEN2)
3240          *   bits 7, 8 and 9 of 0x42020 (ILK_DSPCLK_GATE)
3241          */
3242         if (IS_IRONLAKE_M(dev)) {
3243                 I915_WRITE(ILK_DISPLAY_CHICKEN1,
3244                            I915_READ(ILK_DISPLAY_CHICKEN1) |
3245                            ILK_FBCQ_DIS);
3246                 I915_WRITE(ILK_DISPLAY_CHICKEN2,
3247                            I915_READ(ILK_DISPLAY_CHICKEN2) |
3248                            ILK_DPARB_GATE);
3249                 I915_WRITE(ILK_DSPCLK_GATE,
3250                            I915_READ(ILK_DSPCLK_GATE) |
3251                            ILK_DPFC_DIS1 |
3252                            ILK_DPFC_DIS2 |
3253                            ILK_CLK_FBC);
3254         }
3255
3256         I915_WRITE(ILK_DISPLAY_CHICKEN2,
3257                    I915_READ(ILK_DISPLAY_CHICKEN2) |
3258                    ILK_ELPIN_409_SELECT);
3259         I915_WRITE(_3D_CHICKEN2,
3260                    _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
3261                    _3D_CHICKEN2_WM_READ_PIPELINED);
3262 }
3263
3264 static void gen6_init_clock_gating(struct drm_device *dev)
3265 {
3266         struct drm_i915_private *dev_priv = dev->dev_private;
3267         int pipe;
3268         uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
3269
3270         I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
3271
3272         I915_WRITE(ILK_DISPLAY_CHICKEN2,
3273                    I915_READ(ILK_DISPLAY_CHICKEN2) |
3274                    ILK_ELPIN_409_SELECT);
3275
3276         I915_WRITE(WM3_LP_ILK, 0);
3277         I915_WRITE(WM2_LP_ILK, 0);
3278         I915_WRITE(WM1_LP_ILK, 0);
3279
3280         I915_WRITE(CACHE_MODE_0,
3281                    _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
3282
3283         I915_WRITE(GEN6_UCGCTL1,
3284                    I915_READ(GEN6_UCGCTL1) |
3285                    GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
3286                    GEN6_CSUNIT_CLOCK_GATE_DISABLE);
3287
3288         /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
3289          * gating disable must be set.  Failure to set it results in
3290          * flickering pixels due to Z write ordering failures after
3291          * some amount of runtime in the Mesa "fire" demo, and Unigine
3292          * Sanctuary and Tropics, and apparently anything else with
3293          * alpha test or pixel discard.
3294          *
3295          * According to the spec, bit 11 (RCCUNIT) must also be set,
3296          * but we have not tracked down a test case that requires it.
3297          */
3298         I915_WRITE(GEN6_UCGCTL2,
3299                    GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
3300                    GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
3301
3302         /* Bspec says we need to always set all mask bits. */
3303         I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) |
3304                    _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL);
3305
3306         /*
3307          * According to the spec, the following bits must be set to
3308          * enable memory self-refresh and FBC:
3309          *   bits 21 and 22 of 0x42000 (ILK_DISPLAY_CHICKEN1)
3310          *   bits 21 and 22 of 0x42004 (ILK_DISPLAY_CHICKEN2)
3311          *   bits 5 and 7 of 0x42020 (ILK_DSPCLK_GATE)
3312          *   bit 14 of 0x70180 (DSPACNTR)
3313          *   bit 14 of 0x71180 (DSPBCNTR)
3314          */
3315         I915_WRITE(ILK_DISPLAY_CHICKEN1,
3316                    I915_READ(ILK_DISPLAY_CHICKEN1) |
3317                    ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
3318         I915_WRITE(ILK_DISPLAY_CHICKEN2,
3319                    I915_READ(ILK_DISPLAY_CHICKEN2) |
3320                    ILK_DPARB_GATE | ILK_VSDPFD_FULL);
3321         I915_WRITE(ILK_DSPCLK_GATE,
3322                    I915_READ(ILK_DSPCLK_GATE) |
3323                    ILK_DPARB_CLK_GATE  |
3324                    ILK_DPFD_CLK_GATE);
3325
3326         for_each_pipe(pipe) {
3327                 I915_WRITE(DSPCNTR(pipe),
3328                            I915_READ(DSPCNTR(pipe)) |
3329                            DISPPLANE_TRICKLE_FEED_DISABLE);
3330                 intel_flush_display_plane(dev_priv, pipe);
3331         }
3332 }
3333
3334 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
3335 {
3336         uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
3337
3338         reg &= ~GEN7_FF_SCHED_MASK;
3339         reg |= GEN7_FF_TS_SCHED_HW;
3340         reg |= GEN7_FF_VS_SCHED_HW;
3341         reg |= GEN7_FF_DS_SCHED_HW;
3342
3343         I915_WRITE(GEN7_FF_THREAD_MODE, reg);
3344 }
3345
3346 static void ivybridge_init_clock_gating(struct drm_device *dev)
3347 {
3348         struct drm_i915_private *dev_priv = dev->dev_private;
3349         int pipe;
3350         uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
3351
3352         I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
3353
3354         I915_WRITE(WM3_LP_ILK, 0);
3355         I915_WRITE(WM2_LP_ILK, 0);
3356         I915_WRITE(WM1_LP_ILK, 0);
3357
3358         /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
3359          * This implements the WaDisableRCZUnitClockGating workaround.
3360          */
3361         I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
3362
3363         I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
3364
3365         I915_WRITE(IVB_CHICKEN3,
3366                    CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
3367                    CHICKEN3_DGMG_DONE_FIX_DISABLE);
3368
3369         /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
3370         I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
3371                    GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
3372
3373         /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
3374         I915_WRITE(GEN7_L3CNTLREG1,
3375                         GEN7_WA_FOR_GEN7_L3_CONTROL);
3376         I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
3377                         GEN7_WA_L3_CHICKEN_MODE);
3378
3379         /* This is required by WaCatErrorRejectionIssue */
3380         I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
3381                         I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
3382                         GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
3383
3384         for_each_pipe(pipe) {
3385                 I915_WRITE(DSPCNTR(pipe),
3386                            I915_READ(DSPCNTR(pipe)) |
3387                            DISPPLANE_TRICKLE_FEED_DISABLE);
3388                 intel_flush_display_plane(dev_priv, pipe);
3389         }
3390
3391         gen7_setup_fixed_func_scheduler(dev_priv);
3392
3393         /* WaDisable4x2SubspanOptimization */
3394         I915_WRITE(CACHE_MODE_1,
3395                    _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
3396 }
3397
3398 static void valleyview_init_clock_gating(struct drm_device *dev)
3399 {
3400         struct drm_i915_private *dev_priv = dev->dev_private;
3401         int pipe;
3402         uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
3403
3404         I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
3405
3406         I915_WRITE(WM3_LP_ILK, 0);
3407         I915_WRITE(WM2_LP_ILK, 0);
3408         I915_WRITE(WM1_LP_ILK, 0);
3409
3410         /* Bit 13 (RCZUNIT) must be set on IVB; apply the same
3411          * WaDisableRCZUnitClockGating workaround here on Valley View.
3412          */
3413         I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
3414
3415         I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
3416
3417         I915_WRITE(IVB_CHICKEN3,
3418                    CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
3419                    CHICKEN3_DGMG_DONE_FIX_DISABLE);
3420
3421         /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
3422         I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
3423                    GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
3424
3425         /* WaApplyL3ControlAndL3ChickenMode requires these two writes (carried over from Ivy Bridge) */
3426         I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
3427         I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
3428
3429         /* This is required by WaCatErrorRejectionIssue */
3430         I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
3431                    I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
3432                    GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
3433
3434         for_each_pipe(pipe) {
3435                 I915_WRITE(DSPCNTR(pipe),
3436                            I915_READ(DSPCNTR(pipe)) |
3437                            DISPPLANE_TRICKLE_FEED_DISABLE);
3438                 intel_flush_display_plane(dev_priv, pipe);
3439         }
3440
3441         I915_WRITE(CACHE_MODE_1,
3442                    _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
3443 }
3444
3445 static void g4x_init_clock_gating(struct drm_device *dev)
3446 {
3447         struct drm_i915_private *dev_priv = dev->dev_private;
3448         uint32_t dspclk_gate;
3449
3450         I915_WRITE(RENCLK_GATE_D1, 0);
3451         I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
3452                    GS_UNIT_CLOCK_GATE_DISABLE |
3453                    CL_UNIT_CLOCK_GATE_DISABLE);
3454         I915_WRITE(RAMCLK_GATE_D, 0);
3455         dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
3456                 OVRUNIT_CLOCK_GATE_DISABLE |
3457                 OVCUNIT_CLOCK_GATE_DISABLE;
3458         if (IS_GM45(dev))
3459                 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
3460         I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
3461 }
3462
3463 static void crestline_init_clock_gating(struct drm_device *dev)
3464 {
3465         struct drm_i915_private *dev_priv = dev->dev_private;
3466
3467         I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
3468         I915_WRITE(RENCLK_GATE_D2, 0);
3469         I915_WRITE(DSPCLK_GATE_D, 0);
3470         I915_WRITE(RAMCLK_GATE_D, 0);
3471         I915_WRITE16(DEUC, 0);
3472 }
3473
3474 static void broadwater_init_clock_gating(struct drm_device *dev)
3475 {
3476         struct drm_i915_private *dev_priv = dev->dev_private;
3477
3478         I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
3479                    I965_RCC_CLOCK_GATE_DISABLE |
3480                    I965_RCPB_CLOCK_GATE_DISABLE |
3481                    I965_ISC_CLOCK_GATE_DISABLE |
3482                    I965_FBC_CLOCK_GATE_DISABLE);
3483         I915_WRITE(RENCLK_GATE_D2, 0);
3484 }
3485
3486 static void gen3_init_clock_gating(struct drm_device *dev)
3487 {
3488         struct drm_i915_private *dev_priv = dev->dev_private;
3489         u32 dstate = I915_READ(D_STATE);
3490
3491         dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
3492                 DSTATE_DOT_CLOCK_GATING;
3493         I915_WRITE(D_STATE, dstate);
3494
3495         if (IS_PINEVIEW(dev))
3496                 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
3497 }
3498
3499 static void i85x_init_clock_gating(struct drm_device *dev)
3500 {
3501         struct drm_i915_private *dev_priv = dev->dev_private;
3502
3503         I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
3504 }
3505
3506 static void i830_init_clock_gating(struct drm_device *dev)
3507 {
3508         struct drm_i915_private *dev_priv = dev->dev_private;
3509
3510         I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
3511 }
3512
3513 static void ibx_init_clock_gating(struct drm_device *dev)
3514 {
3515         struct drm_i915_private *dev_priv = dev->dev_private;
3516
3517         /*
3518          * On Ibex Peak and Cougar Point, we need to disable clock
3519          * gating for the panel power sequencer or it will fail to
3520          * start up when no ports are active.
3521          */
3522         I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
3523 }
3524
3525 static void cpt_init_clock_gating(struct drm_device *dev)
3526 {
3527         struct drm_i915_private *dev_priv = dev->dev_private;
3528         int pipe;
3529
3530         /*
3531          * On Ibex Peak and Cougar Point, we need to disable clock
3532          * gating for the panel power sequencer or it will fail to
3533          * start up when no ports are active.
3534          */
3535         I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
3536         I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
3537                    DPLS_EDP_PPS_FIX_DIS);
3538         /* Without this, mode sets may fail silently on FDI */
3539         for_each_pipe(pipe)
3540                 I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
3541 }
3542
3543 void intel_init_clock_gating(struct drm_device *dev)
3544 {
3545         struct drm_i915_private *dev_priv = dev->dev_private;
3546
3547         dev_priv->display.init_clock_gating(dev);
3548
3549         if (dev_priv->display.init_pch_clock_gating)
3550                 dev_priv->display.init_pch_clock_gating(dev);
3551 }
3552
3553 static void gen6_sanitize_pm(struct drm_device *dev)
3554 {
3555         struct drm_i915_private *dev_priv = dev->dev_private;
3556         u32 limits, delay, old;
3557
3558         gen6_gt_force_wake_get(dev_priv);
3559
3560         old = limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS);
3561         /* Make sure we continue to get interrupts
3562          * until we hit the minimum or maximum frequencies.
3563          */
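             /* 6-bit limit fields: dev_priv->max_delay lives at bits 29:24,
              * min_delay at bits 21:16. */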
3564         limits &= ~(0x3f << 16 | 0x3f << 24);
3565         delay = dev_priv->cur_delay;
3566         if (delay < dev_priv->max_delay)
3567                 limits |= (dev_priv->max_delay & 0x3f) << 24;
3568         if (delay > dev_priv->min_delay)
3569                 limits |= (dev_priv->min_delay & 0x3f) << 16;
3570
3571         if (old != limits) {
3572                 DRM_ERROR("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS expected %08x, was %08x\n",
3573                           limits, old);
3574                 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
3575         }
3576
3577         gen6_gt_force_wake_put(dev_priv);
3578 }
3579
3580 void intel_sanitize_pm(struct drm_device *dev)
3581 {
3582         struct drm_i915_private *dev_priv = dev->dev_private;
3583
3584         if (dev_priv->display.sanitize_pm)
3585                 dev_priv->display.sanitize_pm(dev);
3586 }
3587
3588 /* Starting with Haswell, we have different power wells for
3589  * different parts of the GPU. This attempts to enable them all.
3590  */
3591 static void intel_init_power_wells(struct drm_device *dev)
3592 {
3593         struct drm_i915_private *dev_priv = dev->dev_private;
3594         unsigned long power_wells[] = {
3595                 HSW_PWR_WELL_CTL1,
3596                 HSW_PWR_WELL_CTL2,
3597                 HSW_PWR_WELL_CTL4
3598         };
3599         int i;
3600
3601         if (!IS_HASWELL(dev))
3602                 return;
3603
3604         DRM_LOCK(dev);
3605
3606         for (i = 0; i < DRM_ARRAY_SIZE(power_wells); i++) {
3607                 int well = I915_READ(power_wells[i]);
3608
3609                 if ((well & HSW_PWR_WELL_STATE) == 0) {
3610                         I915_WRITE(power_wells[i], well | HSW_PWR_WELL_ENABLE);
3611                         if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
3612                                 DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
3613                 }
3614         }
3615
3616         printf("XXXKIB HACK: HSW RC OFF\n");
3617         I915_WRITE(GEN6_RC_STATE, 0);
3618         I915_WRITE(GEN6_RC_CONTROL, 0);
3619         DRM_UNLOCK(dev);
3620 }
3621
3622 /* Set up chip specific power management-related functions */
3623 void intel_init_pm(struct drm_device *dev)
3624 {
3625         struct drm_i915_private *dev_priv = dev->dev_private;
3626
3627         if (I915_HAS_FBC(dev)) {
3628                 if (HAS_PCH_SPLIT(dev)) {
3629                         dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
3630                         dev_priv->display.enable_fbc = ironlake_enable_fbc;
3631                         dev_priv->display.disable_fbc = ironlake_disable_fbc;
3632                 } else if (IS_GM45(dev)) {
3633                         dev_priv->display.fbc_enabled = g4x_fbc_enabled;
3634                         dev_priv->display.enable_fbc = g4x_enable_fbc;
3635                         dev_priv->display.disable_fbc = g4x_disable_fbc;
3636                 } else if (IS_CRESTLINE(dev)) {
3637                         dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
3638                         dev_priv->display.enable_fbc = i8xx_enable_fbc;
3639                         dev_priv->display.disable_fbc = i8xx_disable_fbc;
3640                 }
3641                 /* 855GM needs testing */
3642         }
3643
3644         /* For cxsr */
3645         if (IS_PINEVIEW(dev))
3646                 i915_pineview_get_mem_freq(dev);
3647         else if (IS_GEN5(dev))
3648                 i915_ironlake_get_mem_freq(dev);
3649
3650         /* For FIFO watermark updates */
3651         if (HAS_PCH_SPLIT(dev)) {
3652                 dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
3653                 dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
3654
3655                 /* IVB configs may use multi-threaded forcewake */
3656                 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
3657                         u32     ecobus;
3658
3659                         /* A small trick here: if the BIOS hasn't configured MT forcewake,
3660                          * and the device is in RC6, then force_wake_mt_get will not wake
3661                          * the device and the ECOBUS read will return zero, which the
3662                          * test below (correctly) interprets as MT forcewake being
3663                          * disabled.
3664                          */
3665                         DRM_LOCK(dev);
3666                         __gen6_gt_force_wake_mt_get(dev_priv);
3667                         ecobus = I915_READ_NOTRACE(ECOBUS);
3668                         __gen6_gt_force_wake_mt_put(dev_priv);
3669                         DRM_UNLOCK(dev);
3670
3671                         if (ecobus & FORCEWAKE_MT_ENABLE) {
3672                                 DRM_DEBUG_KMS("Using MT version of forcewake\n");
3673                                 dev_priv->display.force_wake_get =
3674                                         __gen6_gt_force_wake_mt_get;
3675                                 dev_priv->display.force_wake_put =
3676                                         __gen6_gt_force_wake_mt_put;
3677                         }
3678                 }
3679
3680                 if (HAS_PCH_IBX(dev))
3681                         dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
3682                 else if (HAS_PCH_CPT(dev))
3683                         dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
3684
3685                 if (IS_GEN5(dev)) {
3686                         if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
3687                                 dev_priv->display.update_wm = ironlake_update_wm;
3688                         else {
3689                                 DRM_DEBUG_KMS("Failed to get proper latency. "
3690                                               "Disable CxSR\n");
3691                                 dev_priv->display.update_wm = NULL;
3692                         }
3693                         dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
3694                 } else if (IS_GEN6(dev)) {
3695                         if (SNB_READ_WM0_LATENCY()) {
3696                                 dev_priv->display.update_wm = sandybridge_update_wm;
3697                                 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
3698                         } else {
3699                                 DRM_DEBUG_KMS("Failed to read display plane latency. "
3700                                               "Disable CxSR\n");
3701                                 dev_priv->display.update_wm = NULL;
3702                         }
3703                         dev_priv->display.init_clock_gating = gen6_init_clock_gating;
3704                         dev_priv->display.sanitize_pm = gen6_sanitize_pm;
3705                 } else if (IS_IVYBRIDGE(dev)) {
3706                         /* FIXME: detect B0+ stepping and use auto training */
3707                         if (SNB_READ_WM0_LATENCY()) {
3708                                 dev_priv->display.update_wm = sandybridge_update_wm;
3709                                 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
3710                         } else {
3711                                 DRM_DEBUG_KMS("Failed to read display plane latency. "
3712                                               "Disable CxSR\n");
3713                                 dev_priv->display.update_wm = NULL;
3714                         }
3715                         dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
3716                         dev_priv->display.sanitize_pm = gen6_sanitize_pm;
3717                 } else if (IS_HASWELL(dev)) {
3718                         if (SNB_READ_WM0_LATENCY()) {
3719                                 dev_priv->display.update_wm = sandybridge_update_wm;
3720                                 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
3721                                 dev_priv->display.update_linetime_wm = haswell_update_linetime_wm;
3722                         } else {
3723                                 DRM_DEBUG_KMS("Failed to read display plane latency. "
3724                                               "Disable CxSR\n");
3725                                 dev_priv->display.update_wm = NULL;
3726                         }
3727                         dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
3728                         dev_priv->display.sanitize_pm = gen6_sanitize_pm;
3729                 } else
3730                         dev_priv->display.update_wm = NULL;
3731         } else if (IS_VALLEYVIEW(dev)) {
3732                 dev_priv->display.update_wm = valleyview_update_wm;
3733                 dev_priv->display.init_clock_gating =
3734                         valleyview_init_clock_gating;
3735                 dev_priv->display.force_wake_get = vlv_force_wake_get;
3736                 dev_priv->display.force_wake_put = vlv_force_wake_put;
3737         } else if (IS_PINEVIEW(dev)) {
3738                 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
3739                                             dev_priv->is_ddr3,
3740                                             dev_priv->fsb_freq,
3741                                             dev_priv->mem_freq)) {
3742                         DRM_INFO("failed to find known CxSR latency "
3743                                  "(found ddr%s fsb freq %d, mem freq %d), "
3744                                  "disabling CxSR\n",
3745                                  (dev_priv->is_ddr3 == 1) ? "3" : "2",
3746                                  dev_priv->fsb_freq, dev_priv->mem_freq);
3747                         /* Disable CxSR and never update its watermark again */
3748                         pineview_disable_cxsr(dev);
3749                         dev_priv->display.update_wm = NULL;
3750                 } else
3751                         dev_priv->display.update_wm = pineview_update_wm;
3752                 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
3753         } else if (IS_G4X(dev)) {
3754                 dev_priv->display.update_wm = g4x_update_wm;
3755                 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
3756         } else if (IS_GEN4(dev)) {
3757                 dev_priv->display.update_wm = i965_update_wm;
3758                 if (IS_CRESTLINE(dev))
3759                         dev_priv->display.init_clock_gating = crestline_init_clock_gating;
3760                 else if (IS_BROADWATER(dev))
3761                         dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
3762         } else if (IS_GEN3(dev)) {
3763                 dev_priv->display.update_wm = i9xx_update_wm;
3764                 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
3765                 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
3766         } else if (IS_I865G(dev)) {
3767                 dev_priv->display.update_wm = i830_update_wm;
3768                 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
3769                 dev_priv->display.get_fifo_size = i830_get_fifo_size;
3770         } else if (IS_I85X(dev)) {
3771                 dev_priv->display.update_wm = i9xx_update_wm;
3772                 dev_priv->display.get_fifo_size = i85x_get_fifo_size;
3773                 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
3774         } else {
3775                 dev_priv->display.update_wm = i830_update_wm;
3776                 dev_priv->display.init_clock_gating = i830_init_clock_gating;
3777                 if (IS_845G(dev))
3778                         dev_priv->display.get_fifo_size = i845_get_fifo_size;
3779                 else
3780                         dev_priv->display.get_fifo_size = i830_get_fifo_size;
3781         }
3782
3783         /* We attempt to initialize the necessary power wells early in
3784          * initialization, so that subsystems expecting power to be enabled can work.
3785          */
3786         intel_init_power_wells(dev);
3787 }
3788