]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/drm2/i915/intel_dp.c
MFV r337220: 8375 Kernel memory leak in nvpair code
[FreeBSD/FreeBSD.git] / sys / dev / drm2 / i915 / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 #include <dev/drm2/drmP.h>
32 #include <dev/drm2/drm_crtc.h>
33 #include <dev/drm2/drm_crtc_helper.h>
34 #include <dev/drm2/drm_edid.h>
35 #include <dev/drm2/i915/intel_drv.h>
36 #include <dev/drm2/i915/i915_drm.h>
37 #include <dev/drm2/i915/i915_drv.h>
38
39 #define DP_LINK_CHECK_TIMEOUT   (10 * 1000)
40
41 /**
42  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
43  * @intel_dp: DP struct
44  *
45  * If a CPU or PCH DP output is attached to an eDP panel, this function
46  * will return true, and false otherwise.
47  */
48 static bool is_edp(struct intel_dp *intel_dp)
49 {
50         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
51
52         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
53 }
54
/**
 * is_pch_edp - is the port on the PCH and attached to an eDP panel?
 * @intel_dp: DP struct
 *
 * Returns true if the given DP struct corresponds to a PCH DP port attached
 * to an eDP panel, false otherwise.  Helpful for determining whether we
 * may need FDI resources for a given DP output or not.
 */
static bool is_pch_edp(struct intel_dp *intel_dp)
{
	/* Flag is set at init time; simple accessor. */
	return intel_dp->is_pch_edp;
}
67
68 /**
69  * is_cpu_edp - is the port on the CPU and attached to an eDP panel?
70  * @intel_dp: DP struct
71  *
72  * Returns true if the given DP struct corresponds to a CPU eDP port.
73  */
74 static bool is_cpu_edp(struct intel_dp *intel_dp)
75 {
76         return is_edp(intel_dp) && !is_pch_edp(intel_dp);
77 }
78
79 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
80 {
81         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
82
83         return intel_dig_port->base.base.dev;
84 }
85
86 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
87 {
88         return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
89 }
90
91 /**
92  * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
93  * @encoder: DRM encoder
94  *
95  * Return true if @encoder corresponds to a PCH attached eDP panel.  Needed
96  * by intel_display.c.
97  */
98 bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
99 {
100         struct intel_dp *intel_dp;
101
102         if (!encoder)
103                 return false;
104
105         intel_dp = enc_to_intel_dp(encoder);
106
107         return is_pch_edp(intel_dp);
108 }
109
110 static void intel_dp_link_down(struct intel_dp *intel_dp);
111
112 void
113 intel_edp_link_config(struct intel_encoder *intel_encoder,
114                        int *lane_num, int *link_bw)
115 {
116         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
117
118         *lane_num = intel_dp->lane_count;
119         *link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
120 }
121
122 int
123 intel_edp_target_clock(struct intel_encoder *intel_encoder,
124                        struct drm_display_mode *mode)
125 {
126         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
127         struct intel_connector *intel_connector = intel_dp->attached_connector;
128
129         if (intel_connector->panel.fixed_mode)
130                 return intel_connector->panel.fixed_mode->clock;
131         else
132                 return mode->clock;
133 }
134
135 static int
136 intel_dp_max_link_bw(struct intel_dp *intel_dp)
137 {
138         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
139
140         switch (max_link_bw) {
141         case DP_LINK_BW_1_62:
142         case DP_LINK_BW_2_7:
143                 break;
144         default:
145                 max_link_bw = DP_LINK_BW_1_62;
146                 break;
147         }
148         return max_link_bw;
149 }
150
151 static int
152 intel_dp_link_clock(uint8_t link_bw)
153 {
154         if (link_bw == DP_LINK_BW_2_7)
155                 return 270000;
156         else
157                 return 162000;
158 }
159
160 /*
161  * The units on the numbers in the next two are... bizarre.  Examples will
162  * make it clearer; this one parallels an example in the eDP spec.
163  *
164  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
165  *
166  *     270000 * 1 * 8 / 10 == 216000
167  *
168  * The actual data capacity of that configuration is 2.16Gbit/s, so the
169  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
170  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
171  * 119000.  At 18bpp that's 2142000 kilobits per second.
172  *
173  * Thus the strange-looking division by 10 in intel_dp_link_required, to
174  * get the result in decakilobits instead of kilobits.
175  */
176
/*
 * Bandwidth needed for @pixel_clock (kHz) at @bpp, in decakilobits
 * per second (see the unit discussion above); rounds up.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int total_kbits = pixel_clock * bpp;

	/* Round up to whole decakilobits. */
	return (total_kbits + 9) / 10;
}
182
/*
 * Usable data capacity of the link in decakilobits/s: raw symbol rate
 * times lane count, scaled by 8/10 for the 8b/10b line coding.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int raw = max_link_clock * max_lanes;

	return raw * 8 / 10;
}
188
189 static bool
190 intel_dp_adjust_dithering(struct intel_dp *intel_dp,
191                           struct drm_display_mode *mode,
192                           bool adjust_mode)
193 {
194         int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
195         int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
196         int max_rate, mode_rate;
197
198         mode_rate = intel_dp_link_required(mode->clock, 24);
199         max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
200
201         if (mode_rate > max_rate) {
202                 mode_rate = intel_dp_link_required(mode->clock, 18);
203                 if (mode_rate > max_rate)
204                         return false;
205
206                 if (adjust_mode)
207                         mode->private_flags
208                                 |= INTEL_MODE_DP_FORCE_6BPC;
209
210                 return true;
211         }
212
213         return true;
214 }
215
216 static int
217 intel_dp_mode_valid(struct drm_connector *connector,
218                     struct drm_display_mode *mode)
219 {
220         struct intel_dp *intel_dp = intel_attached_dp(connector);
221         struct intel_connector *intel_connector = to_intel_connector(connector);
222         struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
223
224         if (is_edp(intel_dp) && fixed_mode) {
225                 if (mode->hdisplay > fixed_mode->hdisplay)
226                         return MODE_PANEL;
227
228                 if (mode->vdisplay > fixed_mode->vdisplay)
229                         return MODE_PANEL;
230         }
231
232         if (!intel_dp_adjust_dithering(intel_dp, mode, false))
233                 return MODE_CLOCK_HIGH;
234
235         if (mode->clock < 10000)
236                 return MODE_CLOCK_LOW;
237
238         if (mode->flags & DRM_MODE_FLAG_DBLCLK)
239                 return MODE_H_ILLEGAL;
240
241         return MODE_OK;
242 }
243
244 static uint32_t
245 pack_aux(uint8_t *src, int src_bytes)
246 {
247         int     i;
248         uint32_t v = 0;
249
250         if (src_bytes > 4)
251                 src_bytes = 4;
252         for (i = 0; i < src_bytes; i++)
253                 v |= ((uint32_t) src[i]) << ((3-i) * 8);
254         return v;
255 }
256
257 static void
258 unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
259 {
260         int i;
261         if (dst_bytes > 4)
262                 dst_bytes = 4;
263         for (i = 0; i < dst_bytes; i++)
264                 dst[i] = src >> ((3-i) * 8);
265 }
266
/* hrawclock is 1/4 the FSB frequency */
/*
 * Decode CLKCFG to report the raw clock in MHz; used below to derive
 * the AUX channel clock divider.  Valleyview has no CLKCFG register
 * and uses a fixed 200 MHz.
 */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		/* Unknown encoding: fall back to the 533 FSB value. */
		return 133;
	}
}
300
301 static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
302 {
303         struct drm_device *dev = intel_dp_to_dev(intel_dp);
304         struct drm_i915_private *dev_priv = dev->dev_private;
305
306         return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
307 }
308
309 static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
310 {
311         struct drm_device *dev = intel_dp_to_dev(intel_dp);
312         struct drm_i915_private *dev_priv = dev->dev_private;
313
314         return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
315 }
316
/*
 * Sanity check run before AUX traffic on eDP ports: warn (once per
 * call) if the panel has neither power nor the VDD override asserted,
 * and dump the panel-power status/control registers.  No-op for
 * non-eDP ports; does not abort the transaction.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;
	if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(PCH_PP_STATUS),
			      I915_READ(PCH_PP_CONTROL));
	}
}
332
/*
 * Perform one raw transaction on this port's DP AUX channel.
 *
 * @send/@send_bytes: request packet, written 4 bytes at a time into
 *	the AUX data registers via pack_aux().
 * @recv/@recv_size: reply buffer; at most @recv_size bytes of the
 *	hardware-reported reply are unpacked.
 *
 * Returns the number of reply bytes received, or a negative errno:
 * -EBUSY if the channel never went idle or never signalled DONE,
 * -EIO on a receive error, -ETIMEDOUT when the sink did not answer
 * (normal for a disconnected port, so only logged at debug level).
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	uint32_t output_reg = intel_dp->output_reg;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* Default register layout: control at output_reg + 0x10, data after. */
	uint32_t ch_ctl = output_reg + 0x10;
	uint32_t ch_data = ch_ctl + 4;
	int i;
	int recv_bytes;
	uint32_t status;
	uint32_t aux_clock_divider;
	int try, precharge;

	/* Haswell uses dedicated per-port AUX register addresses instead. */
	if (IS_HASWELL(dev)) {
		switch (intel_dig_port->port) {
		case PORT_A:
			ch_ctl = DPA_AUX_CH_CTL;
			ch_data = DPA_AUX_CH_DATA1;
			break;
		case PORT_B:
			ch_ctl = PCH_DPB_AUX_CH_CTL;
			ch_data = PCH_DPB_AUX_CH_DATA1;
			break;
		case PORT_C:
			ch_ctl = PCH_DPC_AUX_CH_CTL;
			ch_data = PCH_DPC_AUX_CH_DATA1;
			break;
		case PORT_D:
			ch_ctl = PCH_DPD_AUX_CH_CTL;
			ch_data = PCH_DPD_AUX_CH_DATA1;
			break;
		default:
			BUG();
		}
	}

	intel_dp_check_edp(intel_dp);
	/* The clock divider is based off the hrawclk,
	 * and would like to run at 2MHz. So, take the
	 * hrawclk value and divide by 2 and use that
	 *
	 * Note that PCH attached eDP panels should use a 125MHz input
	 * clock divider.
	 */
	if (is_cpu_edp(intel_dp)) {
		if (IS_HASWELL(dev))
			aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
		else if (IS_VALLEYVIEW(dev))
			aux_clock_divider = 100;
		else if (IS_GEN6(dev) || IS_GEN7(dev))
			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			aux_clock_divider = 225; /* eDP input clock at 450Mhz */
	} else if (HAS_PCH_SPLIT(dev))
		aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	else
		aux_clock_divider = intel_hrawclk(dev) / 2;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		DRM_MSLEEP(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		return -EBUSY;
	}

	/* Must try at least 3 times according to DP spec */
	for (try = 0; try < 5; try++) {
		/* Load the send data into the aux channel data registers */
		for (i = 0; i < send_bytes; i += 4)
			I915_WRITE(ch_data + i,
				   pack_aux(send + i, send_bytes - i));

		/* Send the command and wait for it to complete */
		I915_WRITE(ch_ctl,
			   DP_AUX_CH_CTL_SEND_BUSY |
			   DP_AUX_CH_CTL_TIME_OUT_400us |
			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);
		/* Busy-poll until the hardware drops SEND_BUSY. */
		for (;;) {
			status = I915_READ(ch_ctl);
			if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
				break;
			udelay(100);
		}

		/* Clear done status and any errors */
		I915_WRITE(ch_ctl,
			   status |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		/* Retry the whole send on timeout/receive error. */
		if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
			      DP_AUX_CH_CTL_RECEIVE_ERROR))
			continue;
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		return -EBUSY;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		return -EIO;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		return -ETIMEDOUT;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	return recv_bytes;
}
484
/* Write data to the aux channel in native mode */
/*
 * Send up to 16 bytes to DPCD register @address.  The 20-byte msg
 * buffer holds a 4-byte native-write header plus the payload, hence
 * the 16-byte limit (returns -1 if exceeded).  Returns @send_bytes on
 * ACK, a negative errno from intel_dp_aux_ch(), or -EIO on NACK.
 *
 * NOTE(review): the retry loop is unbounded — a sink that answers
 * DEFER indefinitely will spin here forever; confirm whether a retry
 * cap is wanted.
 */
static int
intel_dp_aux_native_write(struct intel_dp *intel_dp,
			  uint16_t address, uint8_t *send, int send_bytes)
{
	int ret;
	uint8_t msg[20];
	int msg_bytes;
	uint8_t ack;

	intel_dp_check_edp(intel_dp);
	if (send_bytes > 16)
		return -1;
	msg[0] = AUX_NATIVE_WRITE << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = send_bytes - 1;
	memcpy(&msg[4], send, send_bytes);
	msg_bytes = send_bytes + 4;
	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
		if (ret < 0)
			return ret;
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
			break;
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);
		else
			return -EIO;
	}
	return send_bytes;
}
517
518 /* Write a single byte to the aux channel in native mode */
519 static int
520 intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
521                             uint16_t address, uint8_t byte)
522 {
523         return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
524 }
525
/* read bytes from a native aux channel */
/*
 * Read @recv_bytes bytes from DPCD register @address into @recv.
 * The reply carries a 1-byte status header before the data, so the
 * on-wire reply is recv_bytes + 1 long.  Returns the number of data
 * bytes copied, -EPROTO on an empty reply, a negative errno from
 * intel_dp_aux_ch(), or -EIO on NACK.  Retries indefinitely on DEFER
 * (same unbounded-loop caveat as the native write above).
 *
 * NOTE(review): reply[] is 20 bytes, so @recv_bytes must be <= 19;
 * all visible callers use small fixed sizes — confirm before reuse.
 */
static int
intel_dp_aux_native_read(struct intel_dp *intel_dp,
			 uint16_t address, uint8_t *recv, int recv_bytes)
{
	uint8_t msg[4];
	int msg_bytes;
	uint8_t reply[20];
	int reply_bytes;
	uint8_t ack;
	int ret;

	intel_dp_check_edp(intel_dp);
	msg[0] = AUX_NATIVE_READ << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = recv_bytes - 1;

	msg_bytes = 4;
	reply_bytes = recv_bytes + 1;

	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
				      reply, reply_bytes);
		if (ret == 0)
			return -EPROTO;
		if (ret < 0)
			return ret;
		ack = reply[0];
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
			/* Strip the status byte; return data only. */
			memcpy(recv, reply + 1, ret - 1);
			return ret - 1;
		}
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);
		else
			return -EIO;
	}
}
565
/*
 * i2c-over-AUX transfer callback registered with the iic_dp_aux bus.
 *
 * Translates one I2C byte operation into an AUX transaction: @mode is
 * MODE_I2C_READ or MODE_I2C_WRITE (with MODE_I2C_STOP clear, the
 * AUX_I2C_MOT bit keeps the I2C transaction open); any other @mode
 * sends an address-only message.  Retries up to 5 times on DEFER at
 * either reply level.  Returns the data byte count on ACK (storing
 * the byte through @read_byte for reads), -EREMOTEIO on NACK or an
 * invalid reply, or the error from intel_dp_aux_ch().
 */
static int
intel_dp_i2c_aux_ch(device_t adapter, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct iic_dp_aux_data *algo_data = device_get_softc(adapter);
	struct intel_dp *intel_dp = algo_data->priv;
	uint16_t address = algo_data->address;
	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	intel_dp_check_edp(intel_dp);
	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = AUX_I2C_READ << 4;
	else
		msg[0] = AUX_I2C_WRITE << 4;

	if (!(mode & MODE_I2C_STOP))
		msg[0] |= AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	/* Message/reply sizes depend on the operation kind. */
	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;
		break;
	default:
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

	for (retry = 0; retry < 5; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			return ret;
		}

		/* First check the native (AUX-level) reply field. */
		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
		case AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			return -EREMOTEIO;
		case AUX_NATIVE_REPLY_DEFER:
			udelay(100);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			return -EREMOTEIO;
		}

		/* Then the I2C-level reply field. */
		switch (reply[0] & AUX_I2C_REPLY_MASK) {
		case AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			return reply_bytes - 1;
		case AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			return -EREMOTEIO;
		case AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			return -EREMOTEIO;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EREMOTEIO;
}
660
/*
 * Register the i2c-over-AUX bus for this DP port, using
 * intel_dp_i2c_aux_ch() as the transfer callback.  VDD is forced on
 * around iic_dp_aux_add_bus() — presumably because bus registration
 * performs AUX transactions on an eDP panel that may be powered off;
 * confirm against the bus code before changing the ordering.
 * Returns the iic_dp_aux_add_bus() result.
 */
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
		  struct intel_connector *intel_connector, const char *name)
{
	int	ret;

	DRM_DEBUG_KMS("i2c_init %s\n", name);

	ironlake_edp_panel_vdd_on(intel_dp);
	ret = iic_dp_aux_add_bus(intel_connector->base.dev->dev, name,
	    intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus,
	    &intel_dp->adapter);
	ironlake_edp_panel_vdd_off(intel_dp, false);
	return ret;
}
676
/*
 * Encoder mode_fixup hook: choose the cheapest link bandwidth / lane
 * count combination that can carry @adjusted_mode, storing the choice
 * in intel_dp->link_bw / ->lane_count and replacing
 * adjusted_mode->clock with the resulting link clock.  For eDP, the
 * panel's fixed mode and panel-fitting mode are applied first.
 * Returns false when the mode is double-clocked or no combination
 * provides enough bandwidth (even with 6bpc dithering).
 */
bool
intel_dp_mode_fixup(struct drm_encoder *encoder,
		    const struct drm_display_mode *mode,
		    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
	/* max_clock is an index into bws[], not a frequency. */
	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
	int bpp, mode_rate;
	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		intel_pch_panel_fitting(dev,
					intel_connector->panel.fitting_mode,
					mode, adjusted_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock], adjusted_mode->clock);

	if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true))
		return false;

	/* 18bpp only if adjust_dithering flagged the mode for 6bpc. */
	bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
	mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);

	/* Search lowest bandwidth first, lanes doubling 1 -> 2 -> 4. */
	for (clock = 0; clock <= max_clock; clock++) {
		for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);

			if (mode_rate <= link_avail) {
				intel_dp->link_bw = bws[clock];
				intel_dp->lane_count = lane_count;
				adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
				DRM_DEBUG_KMS("DP link bw %02x lane "
						"count %d clock %d bpp %d\n",
				       intel_dp->link_bw, intel_dp->lane_count,
				       adjusted_mode->clock, bpp);
				DRM_DEBUG_KMS("DP link bw required %i available %i\n",
					      mode_rate, link_avail);
				return true;
			}
		}
	}

	return false;
}
733
/*
 * M/N divider pairs programmed into the pipe/transcoder data and link
 * registers for DP timing (see intel_dp_set_m_n()).
 */
struct intel_dp_m_n {
	uint32_t	tu;	/* transfer unit size, written via TU_SIZE() */
	uint32_t	gmch_m;	/* data M: pixel_clock * bytes per pixel */
	uint32_t	gmch_n;	/* data N: link_clock * lane count */
	uint32_t	link_m;	/* link M: pixel clock */
	uint32_t	link_n;	/* link N: link clock */
};
741
742 static void
743 intel_reduce_ratio(uint32_t *num, uint32_t *den)
744 {
745         while (*num > 0xffffff || *den > 0xffffff) {
746                 *num >>= 1;
747                 *den >>= 1;
748         }
749 }
750
751 static void
752 intel_dp_compute_m_n(int bpp,
753                      int nlanes,
754                      int pixel_clock,
755                      int link_clock,
756                      struct intel_dp_m_n *m_n)
757 {
758         m_n->tu = 64;
759         m_n->gmch_m = (pixel_clock * bpp) >> 3;
760         m_n->gmch_n = link_clock * nlanes;
761         intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
762         m_n->link_m = pixel_clock;
763         m_n->link_n = link_clock;
764         intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
765 }
766
/*
 * Compute and program the data/link M/N dividers for DP output on
 * @crtc.  The lane count comes from the DP/eDP encoder attached to
 * the crtc (defaulting to 4 if none is found); for eDP the target
 * pixel clock is taken from the panel's fixed mode.  Which register
 * block gets written depends on the platform: Haswell transcoder,
 * PCH-split transcoder, Valleyview pipe, or GMCH pipe registers.
 */
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
		 struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	struct intel_dp *intel_dp;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int lane_count = 4;
	struct intel_dp_m_n m_n;
	int pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
	int target_clock;

	/*
	 * Find the lane count in the intel_encoder private
	 */
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		intel_dp = enc_to_intel_dp(&intel_encoder->base);

		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_encoder->type == INTEL_OUTPUT_EDP)
		{
			lane_count = intel_dp->lane_count;
			break;
		}
	}

	/* eDP panels dictate the pixel clock via their fixed mode. */
	target_clock = mode->clock;
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
			target_clock = intel_edp_target_clock(intel_encoder,
							      mode);
			break;
		}
	}

	/*
	 * Compute the GMCH and Link ratios. The '3' here is
	 * the number of bytes_per_pixel post-LUT, which we always
	 * set up for 8-bits of R/G/B, or 3 bytes total.
	 */
	intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
			     target_clock, adjusted_mode->clock, &m_n);

	if (IS_HASWELL(dev)) {
		I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
			   TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
		I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
		I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
	} else if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
		I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
		I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
	} else if (IS_VALLEYVIEW(dev)) {
		I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
		I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
		I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
	} else {
		I915_WRITE(PIPE_GMCH_DATA_M(pipe),
			   TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
		I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
		I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
	}
}
837
838 void intel_dp_init_link_config(struct intel_dp *intel_dp)
839 {
840         memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
841         intel_dp->link_configuration[0] = intel_dp->link_bw;
842         intel_dp->link_configuration[1] = intel_dp->lane_count;
843         intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
844         /*
845          * Check for DPCD version > 1.1 and enhanced framing support
846          */
847         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
848             (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
849                 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
850         }
851 }
852
/*
 * Compute the DP port control register value for the given mode and
 * cache it in intel_dp->DP.  Nothing is written to the port register
 * here; the actual enable happens later in the modeset sequence.
 * Also writes the audio ELD and fills in the cached DPCD link config.
 */
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                  struct drm_display_mode *adjusted_mode)
{
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        struct drm_crtc *crtc = encoder->crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

        /*
         * There are four kinds of DP registers:
         *
         *      IBX PCH
         *      SNB CPU
         *      IVB CPU
         *      CPT PCH
         *
         * IBX PCH and CPU are the same for almost everything,
         * except that the CPU DP PLL is configured in this
         * register
         *
         * CPT PCH is quite different, having many bits moved
         * to the TRANS_DP_CTL register instead. That
         * configuration happens (oddly) in ironlake_pch_enable
         */

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

        /* Handle DP bits in common between all three register formats */
        intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;

        /* Port width encoding: only 1, 2 and 4 lanes are valid in DP. */
        switch (intel_dp->lane_count) {
        case 1:
                intel_dp->DP |= DP_PORT_WIDTH_1;
                break;
        case 2:
                intel_dp->DP |= DP_PORT_WIDTH_2;
                break;
        case 4:
                intel_dp->DP |= DP_PORT_WIDTH_4;
                break;
        }
        if (intel_dp->has_audio) {
                DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
                                 pipe_name(intel_crtc->pipe));
                intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
                intel_write_eld(encoder, adjusted_mode);
        }

        intel_dp_init_link_config(intel_dp);

        /* Split out the IBX/CPU vs CPT settings */

        /* IVB CPU eDP: CPT-style training bits plus the CPU eDP PLL. */
        if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                /* Pipe select lives at bits 30:29 on IVB CPU eDP. */
                intel_dp->DP |= intel_crtc->pipe << 29;

                /* don't miss out required setting for eDP */
                if (adjusted_mode->clock < 200000)
                        intel_dp->DP |= DP_PLL_FREQ_160MHZ;
                else
                        intel_dp->DP |= DP_PLL_FREQ_270MHZ;
        } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
                /* IBX PCH / SNB CPU / CPU eDP: classic register layout. */
                intel_dp->DP |= intel_dp->color_range;

                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF;

                if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                if (intel_crtc->pipe == 1)
                        intel_dp->DP |= DP_PIPEB_SELECT;

                if (is_cpu_edp(intel_dp)) {
                        /* don't miss out required setting for eDP */
                        if (adjusted_mode->clock < 200000)
                                intel_dp->DP |= DP_PLL_FREQ_160MHZ;
                        else
                                intel_dp->DP |= DP_PLL_FREQ_270MHZ;
                }
        } else {
                /* CPT PCH: most bits live in TRANS_DP_CTL instead. */
                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
        }
}
953
/*
 * Mask/value pairs for ironlake_wait_panel_status(): what the panel
 * power sequencer's PCH_PP_STATUS register should read once it has
 * settled into the on, off, or end-of-power-cycle state.
 */
#define IDLE_ON_MASK            (PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE           (PP_ON | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK           (PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_OFF_VALUE          (0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

#define IDLE_CYCLE_MASK         (PP_ON | 0        | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE        (0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
962
/*
 * Poll PCH_PP_STATUS until (status & mask) == value.  Only logs on
 * timeout; the caller is not informed of failure.
 */
static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
                                       u32 mask,
                                       u32 value)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
                      mask, value,
                      I915_READ(PCH_PP_STATUS),
                      I915_READ(PCH_PP_CONTROL));

        /* 5000ms total, polling every 10ms; "915iwp" is the sleep wmesg. */
        if (_intel_wait_for(dev, (I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10, "915iwp")) {
                DRM_ERROR("Panel status timeout: status %08x control %08x\n",
                          I915_READ(PCH_PP_STATUS),
                          I915_READ(PCH_PP_CONTROL));
        }
}
981
/* Block until the power sequencer reports the panel fully on. */
static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power on\n");
        ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
987
/* Block until the power sequencer reports the panel fully off. */
static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power off time\n");
        ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
993
/* Block until the mandatory off-to-on power cycle delay has elapsed. */
static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power cycle\n");
        ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
999
1000
1001 /* Read the current pp_control value, unlocking the register if it
1002  * is locked
1003  */
1004
1005 static  u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
1006 {
1007         u32     control = I915_READ(PCH_PP_CONTROL);
1008
1009         control &= ~PANEL_UNLOCK_MASK;
1010         control |= PANEL_UNLOCK_REGS;
1011         return control;
1012 }
1013
/*
 * Force the eDP panel's VDD rail on so the AUX channel (and hence DPCD
 * access) works without fully powering the panel.  Must be balanced by
 * ironlake_edp_panel_vdd_off().  No-op on non-eDP ports.
 */
void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;

        if (!is_edp(intel_dp))
                return;
        DRM_DEBUG_KMS("Turn eDP VDD on\n");

        /* Nesting VDD requests is a caller bug; flag it loudly. */
        WARN(intel_dp->want_panel_vdd,
             "eDP VDD already requested on\n");

        intel_dp->want_panel_vdd = true;

        if (ironlake_edp_have_panel_vdd(intel_dp)) {
                DRM_DEBUG_KMS("eDP VDD already on\n");
                return;
        }

        /* Respect the mandatory power-cycle delay before re-powering. */
        if (!ironlake_edp_have_panel_power(intel_dp))
                ironlake_wait_panel_power_cycle(intel_dp);

        pp = ironlake_get_pp_control(dev_priv);
        pp |= EDP_FORCE_VDD;
        I915_WRITE(PCH_PP_CONTROL, pp);
        POSTING_READ(PCH_PP_CONTROL);
        DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
                      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

        /*
         * If the panel wasn't on, delay before accessing aux channel
         */
        if (!ironlake_edp_have_panel_power(intel_dp)) {
                DRM_DEBUG_KMS("eDP was not running\n");
                DRM_MSLEEP(intel_dp->panel_power_up_delay);
        }
}
1052
/*
 * Actually drop the forced-VDD bit, but only if nobody has re-requested
 * VDD in the meantime (want_panel_vdd) and the hardware still has it
 * forced on.  Called either synchronously or from the deferred task.
 */
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;

        if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
                pp = ironlake_get_pp_control(dev_priv);
                pp &= ~EDP_FORCE_VDD;
                I915_WRITE(PCH_PP_CONTROL, pp);
                POSTING_READ(PCH_PP_CONTROL);

                /* Make sure sequencer is idle before allowing subsequent activity */
                DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
                              I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

                DRM_MSLEEP(intel_dp->panel_power_down_delay);
        }
}
1072
/*
 * Deferred VDD-off handler, run from the driver taskqueue after the
 * delay scheduled in ironlake_edp_panel_vdd_off().  Takes the modeset
 * lock because the sync routine touches panel power state.
 */
static void ironlake_panel_vdd_work(void *arg, int pending __unused)
{
        struct intel_dp *intel_dp = arg;
        struct drm_device *dev = intel_dp_to_dev(intel_dp);

        sx_xlock(&dev->mode_config.mutex);
        ironlake_panel_vdd_off_sync(intel_dp);
        sx_xunlock(&dev->mode_config.mutex);
}
1082
/*
 * Release a VDD reference taken with ironlake_edp_panel_vdd_on().
 * With sync=true VDD is dropped immediately; otherwise the drop is
 * deferred so a burst of AUX transactions doesn't bounce the rail.
 */
void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
        WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

        intel_dp->want_panel_vdd = false;

        if (sync) {
                ironlake_panel_vdd_off_sync(intel_dp);
        } else {
                /*
                 * Queue the timer to fire a long
                 * time from now (relative to the power down delay)
                 * to keep the panel power up across a sequence of operations
                 */
                struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
                struct drm_device *dev = intel_dig_port->base.base.dev;
                struct drm_i915_private *dev_priv = dev->dev_private;
                /* 5x the cycle delay: generous margin before VDD drops. */
                taskqueue_enqueue_timeout(dev_priv->wq,
                    &intel_dp->panel_vdd_work,
                    msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
        }
}
1109
/*
 * Run the power sequencer to turn the eDP panel fully on, honoring the
 * required power-cycle delay and the ILK (gen5) reset workaround.
 * No-op for non-eDP ports or if the panel is already powered.
 */
void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP power on\n");

        if (ironlake_edp_have_panel_power(intel_dp)) {
                DRM_DEBUG_KMS("eDP power already on\n");
                return;
        }

        ironlake_wait_panel_power_cycle(intel_dp);

        pp = ironlake_get_pp_control(dev_priv);
        if (IS_GEN5(dev)) {
                /* ILK workaround: disable reset around power sequence */
                pp &= ~PANEL_POWER_RESET;
                I915_WRITE(PCH_PP_CONTROL, pp);
                POSTING_READ(PCH_PP_CONTROL);
        }

        pp |= POWER_TARGET_ON;
        if (!IS_GEN5(dev))
                pp |= PANEL_POWER_RESET;

        I915_WRITE(PCH_PP_CONTROL, pp);
        POSTING_READ(PCH_PP_CONTROL);

        ironlake_wait_panel_on(intel_dp);

        if (IS_GEN5(dev)) {
                pp |= PANEL_POWER_RESET; /* restore panel reset bit */
                I915_WRITE(PCH_PP_CONTROL, pp);
                POSTING_READ(PCH_PP_CONTROL);
        }
}
1151
/*
 * Power the eDP panel fully off.  Expects the caller to hold a VDD
 * reference (see WARN below); clears forced VDD as part of the same
 * register write, so want_panel_vdd is reset here too.
 */
void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP power off\n");

        WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

        pp = ironlake_get_pp_control(dev_priv);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
         * panels get very unhappy and cease to work. */
        pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
        I915_WRITE(PCH_PP_CONTROL, pp);
        POSTING_READ(PCH_PP_CONTROL);

        /* VDD went down with the write above; keep bookkeeping in sync. */
        intel_dp->want_panel_vdd = false;

        ironlake_wait_panel_off(intel_dp);
}
1176
/*
 * Enable the eDP backlight via the power sequencer, after waiting the
 * configured backlight-on delay, then hand off to the generic panel
 * backlight code for the PWM side.
 */
void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
        u32 pp;

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");
        /*
         * If we enable the backlight right away following a panel power
         * on, we may see slight flicker as the panel syncs with the eDP
         * link.  So delay a bit to make sure the image is solid before
         * allowing it to appear.
         */
        DRM_MSLEEP(intel_dp->backlight_on_delay);
        pp = ironlake_get_pp_control(dev_priv);
        pp |= EDP_BLC_ENABLE;
        I915_WRITE(PCH_PP_CONTROL, pp);
        POSTING_READ(PCH_PP_CONTROL);

        intel_panel_enable_backlight(dev, pipe);
}
1203
/*
 * Disable the eDP backlight: PWM side first, then the power sequencer
 * enable bit, then wait the configured backlight-off delay so the
 * panel is dark before any subsequent power-down steps.
 */
void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;

        if (!is_edp(intel_dp))
                return;

        intel_panel_disable_backlight(dev);

        DRM_DEBUG_KMS("\n");
        pp = ironlake_get_pp_control(dev_priv);
        pp &= ~EDP_BLC_ENABLE;
        I915_WRITE(PCH_PP_CONTROL, pp);
        POSTING_READ(PCH_PP_CONTROL);
        DRM_MSLEEP(intel_dp->backlight_off_delay);
}
1222
/*
 * Spin up the CPU eDP PLL (via DP_A).  The pipe must already be
 * disabled and both the PLL and port must currently be off; the WARNs
 * below enforce that ordering.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        DRM_DEBUG_KMS("\n");
        dpa_ctl = I915_READ(DP_A);
        WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

        /* We don't adjust intel_dp->DP while tearing down the link, to
         * facilitate link retraining (e.g. after hotplug). Hence clear all
         * enable bits here to ensure that we don't enable too much. */
        intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
        intel_dp->DP |= DP_PLL_ENABLE;
        I915_WRITE(DP_A, intel_dp->DP);
        POSTING_READ(DP_A);
        /* Allow the PLL to lock before anything uses it. */
        udelay(200);
}
1248
/*
 * Shut down the CPU eDP PLL.  The pipe and port must already be off;
 * the PLL is expected to still be running.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        dpa_ctl = I915_READ(DP_A);
        WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
             "dp pll off, should be on\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

        /* We can't rely on the value tracked for the DP register in
         * intel_dp->DP because link_down must not change that (otherwise link
         * re-training will fail. */
        dpa_ctl &= ~DP_PLL_ENABLE;
        I915_WRITE(DP_A, dpa_ctl);
        POSTING_READ(DP_A);
        /* Let the PLL settle before it may be re-enabled. */
        udelay(200);
}
1273
1274 /* If the sink supports it, try to set the power state appropriately */
1275 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
1276 {
1277         int ret, i;
1278
1279         /* Should have a valid DPCD by this point */
1280         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
1281                 return;
1282
1283         if (mode != DRM_MODE_DPMS_ON) {
1284                 ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
1285                                                   DP_SET_POWER_D3);
1286                 if (ret != 1)
1287                         DRM_DEBUG_DRIVER("failed to write sink power state\n");
1288         } else {
1289                 /*
1290                  * When turning on, we need to retry for 1ms to give the sink
1291                  * time to wake up.
1292                  */
1293                 for (i = 0; i < 3; i++) {
1294                         ret = intel_dp_aux_native_write_1(intel_dp,
1295                                                           DP_SET_POWER,
1296                                                           DP_SET_POWER_D0);
1297                         if (ret == 1)
1298                                 break;
1299                         DRM_MSLEEP(1);
1300                 }
1301         }
1302 }
1303
/*
 * Read back the current hardware state of the DP port: returns false
 * if the port is disabled, otherwise true with *pipe set to the pipe
 * the port is driving (when it can be determined).
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
                                  enum pipe *pipe)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 tmp = I915_READ(intel_dp->output_reg);

        if (!(tmp & DP_PORT_EN))
                return false;

        if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
                /* IVB CPU eDP encodes the pipe CPT-style in the port reg. */
                *pipe = PORT_TO_PIPE_CPT(tmp);
        } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
                *pipe = PORT_TO_PIPE(tmp);
        } else {
                /*
                 * CPT PCH ports: the pipe association lives in the
                 * per-transcoder TRANS_DP_CTL port-select field, so scan
                 * every pipe for the one selecting this port.
                 */
                u32 trans_sel;
                u32 trans_dp;
                int i;

                switch (intel_dp->output_reg) {
                case PCH_DP_B:
                        trans_sel = TRANS_DP_PORT_SEL_B;
                        break;
                case PCH_DP_C:
                        trans_sel = TRANS_DP_PORT_SEL_C;
                        break;
                case PCH_DP_D:
                        trans_sel = TRANS_DP_PORT_SEL_D;
                        break;
                default:
                        /* Unknown register: port is on, pipe undetermined. */
                        return true;
                }

                for_each_pipe(i) {
                        trans_dp = I915_READ(TRANS_DP_CTL(i));
                        if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
                                *pipe = i;
                                return true;
                        }
                }

                DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
                              intel_dp->output_reg);
        }

        return true;
}
1352
/*
 * Encoder disable hook: back off backlight, sink power and panel power
 * in the order the eDP spec requires, keeping VDD up throughout so the
 * AUX writes to the sink succeed.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

        /* Make sure the panel is off before trying to change the mode. But also
         * ensure that we have vdd while we switch off the panel. */
        ironlake_edp_panel_vdd_on(intel_dp);
        ironlake_edp_backlight_off(intel_dp);
        /* Sink stays in D0 here so the upcoming link teardown can talk to it. */
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        ironlake_edp_panel_off(intel_dp);

        /* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled;
         * its link is taken down in intel_post_disable_dp() instead. */
        if (!is_cpu_edp(intel_dp))
                intel_dp_link_down(intel_dp);
}
1368
1369 static void intel_post_disable_dp(struct intel_encoder *encoder)
1370 {
1371         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1372
1373         if (is_cpu_edp(intel_dp)) {
1374                 intel_dp_link_down(intel_dp);
1375                 ironlake_edp_pll_off(intel_dp);
1376         }
1377 }
1378
/*
 * Encoder enable hook: wake the sink, train the link and bring panel
 * power and backlight up.  The sequence order matters: VDD must be up
 * for the AUX traffic, and panel power must be on before VDD is
 * released.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dp_reg = I915_READ(intel_dp->output_reg);

        /* Enabling an already-enabled port indicates a sequencing bug. */
        if (WARN_ON(dp_reg & DP_PORT_EN))
                return;

        ironlake_edp_panel_vdd_on(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_start_link_train(intel_dp);
        ironlake_edp_panel_on(intel_dp);
        /* Panel power is on now; safe to drop the forced VDD (sync). */
        ironlake_edp_panel_vdd_off(intel_dp, true);
        intel_dp_complete_link_train(intel_dp);
        ironlake_edp_backlight_on(intel_dp);
}
1397
1398 static void intel_pre_enable_dp(struct intel_encoder *encoder)
1399 {
1400         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1401
1402         if (is_cpu_edp(intel_dp))
1403                 ironlake_edp_pll_on(intel_dp);
1404 }
1405
/*
 * Native read with retry for link status and receiver capability reads for
 * cases where the sink may still be asleep.
 *
 * Returns true only when the full recv_bytes were read; short reads
 * count as failures and are retried.
 */
static bool
intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
                               uint8_t *recv, int recv_bytes)
{
        int ret, i;

        /*
         * Sinks are *supposed* to come up within 1ms from an off state,
         * but we're also supposed to retry 3 times per the spec.
         */
        for (i = 0; i < 3; i++) {
                ret = intel_dp_aux_native_read(intel_dp, address, recv,
                                               recv_bytes);
                if (ret == recv_bytes)
                        return true;
                DRM_MSLEEP(1);
        }

        return false;
}
1430
/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information
 *
 * Returns true on a complete read, false if the sink never responded.
 */
static bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
        return intel_dp_aux_native_read_retry(intel_dp,
                                              DP_LANE0_1_STATUS,
                                              link_status,
                                              DP_LINK_STATUS_SIZE);
}
1443
#if 0
/*
 * Human-readable names for DP training values.  Deliberately compiled
 * out; kept only as a convenience for ad-hoc link-training debugging.
 */
static char     *voltage_names[] = {
        "0.4V", "0.6V", "0.8V", "1.2V"
};
static char     *pre_emph_names[] = {
        "0dB", "3.5dB", "6dB", "9.5dB"
};
static char     *link_train_names[] = {
        "pattern 1", "pattern 2", "idle", "off"
};
#endif
1455
1456 /*
1457  * These are source-specific values; current Intel hardware supports
1458  * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
1459  */
1460
1461 static uint8_t
1462 intel_dp_voltage_max(struct intel_dp *intel_dp)
1463 {
1464         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1465
1466         if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
1467                 return DP_TRAIN_VOLTAGE_SWING_800;
1468         else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
1469                 return DP_TRAIN_VOLTAGE_SWING_1200;
1470         else
1471                 return DP_TRAIN_VOLTAGE_SWING_800;
1472 }
1473
1474 static uint8_t
1475 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
1476 {
1477         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1478
1479         if (IS_HASWELL(dev)) {
1480                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1481                 case DP_TRAIN_VOLTAGE_SWING_400:
1482                         return DP_TRAIN_PRE_EMPHASIS_9_5;
1483                 case DP_TRAIN_VOLTAGE_SWING_600:
1484                         return DP_TRAIN_PRE_EMPHASIS_6;
1485                 case DP_TRAIN_VOLTAGE_SWING_800:
1486                         return DP_TRAIN_PRE_EMPHASIS_3_5;
1487                 case DP_TRAIN_VOLTAGE_SWING_1200:
1488                 default:
1489                         return DP_TRAIN_PRE_EMPHASIS_0;
1490                 }
1491         } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
1492                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1493                 case DP_TRAIN_VOLTAGE_SWING_400:
1494                         return DP_TRAIN_PRE_EMPHASIS_6;
1495                 case DP_TRAIN_VOLTAGE_SWING_600:
1496                 case DP_TRAIN_VOLTAGE_SWING_800:
1497                         return DP_TRAIN_PRE_EMPHASIS_3_5;
1498                 default:
1499                         return DP_TRAIN_PRE_EMPHASIS_0;
1500                 }
1501         } else {
1502                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1503                 case DP_TRAIN_VOLTAGE_SWING_400:
1504                         return DP_TRAIN_PRE_EMPHASIS_6;
1505                 case DP_TRAIN_VOLTAGE_SWING_600:
1506                         return DP_TRAIN_PRE_EMPHASIS_6;
1507                 case DP_TRAIN_VOLTAGE_SWING_800:
1508                         return DP_TRAIN_PRE_EMPHASIS_3_5;
1509                 case DP_TRAIN_VOLTAGE_SWING_1200:
1510                 default:
1511                         return DP_TRAIN_PRE_EMPHASIS_0;
1512                 }
1513         }
1514 }
1515
1516 static void
1517 intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
1518 {
1519         uint8_t v = 0;
1520         uint8_t p = 0;
1521         int lane;
1522         uint8_t voltage_max;
1523         uint8_t preemph_max;
1524
1525         for (lane = 0; lane < intel_dp->lane_count; lane++) {
1526                 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
1527                 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
1528
1529                 if (this_v > v)
1530                         v = this_v;
1531                 if (this_p > p)
1532                         p = this_p;
1533         }
1534
1535         voltage_max = intel_dp_voltage_max(intel_dp);
1536         if (v >= voltage_max)
1537                 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
1538
1539         preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
1540         if (p >= preemph_max)
1541                 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
1542
1543         for (lane = 0; lane < 4; lane++)
1544                 intel_dp->train_set[lane] = v | p;
1545 }
1546
1547 static uint32_t
1548 intel_dp_signal_levels(uint8_t train_set)
1549 {
1550         uint32_t        signal_levels = 0;
1551
1552         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
1553         case DP_TRAIN_VOLTAGE_SWING_400:
1554         default:
1555                 signal_levels |= DP_VOLTAGE_0_4;
1556                 break;
1557         case DP_TRAIN_VOLTAGE_SWING_600:
1558                 signal_levels |= DP_VOLTAGE_0_6;
1559                 break;
1560         case DP_TRAIN_VOLTAGE_SWING_800:
1561                 signal_levels |= DP_VOLTAGE_0_8;
1562                 break;
1563         case DP_TRAIN_VOLTAGE_SWING_1200:
1564                 signal_levels |= DP_VOLTAGE_1_2;
1565                 break;
1566         }
1567         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
1568         case DP_TRAIN_PRE_EMPHASIS_0:
1569         default:
1570                 signal_levels |= DP_PRE_EMPHASIS_0;
1571                 break;
1572         case DP_TRAIN_PRE_EMPHASIS_3_5:
1573                 signal_levels |= DP_PRE_EMPHASIS_3_5;
1574                 break;
1575         case DP_TRAIN_PRE_EMPHASIS_6:
1576                 signal_levels |= DP_PRE_EMPHASIS_6;
1577                 break;
1578         case DP_TRAIN_PRE_EMPHASIS_9_5:
1579                 signal_levels |= DP_PRE_EMPHASIS_9_5;
1580                 break;
1581         }
1582         return signal_levels;
1583 }
1584
/* Gen6's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)
{
        /*
         * Map the combined swing+pre-emphasis request onto SNB's DDI
         * buffer translation entries; several requests share one entry,
         * hence the grouped cases.
         */
        int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
                                         DP_TRAIN_PRE_EMPHASIS_MASK);
        switch (signal_levels) {
        case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
        case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
                return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
        case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
                return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
        case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
        case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
                return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
        case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
        case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
                return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
        case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
        case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
                return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
        default:
                /* Unsupported combination: fall back to the weakest entry. */
                DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
                              "0x%x\n", signal_levels);
                return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
        }
}
1612
/* Gen7's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen7_edp_signal_levels(uint8_t train_set)
{
        /* Map the combined swing+pre-emphasis request onto IVB entries. */
        int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
                                         DP_TRAIN_PRE_EMPHASIS_MASK);
        switch (signal_levels) {
        case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
                return EDP_LINK_TRAIN_400MV_0DB_IVB;
        case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
                return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
        case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
                return EDP_LINK_TRAIN_400MV_6DB_IVB;

        case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
                return EDP_LINK_TRAIN_600MV_0DB_IVB;
        case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
                return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

        case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
                return EDP_LINK_TRAIN_800MV_0DB_IVB;
        case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
                return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

        default:
                /* Unsupported combination: fall back to the mid-range
                 * 500mV/0dB entry (intentional, matches upstream). */
                DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
                              "0x%x\n", signal_levels);
                return EDP_LINK_TRAIN_500MV_0DB_IVB;
        }
}
1643
1644 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
1645 static uint32_t
1646 intel_dp_signal_levels_hsw(uint8_t train_set)
1647 {
1648         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
1649                                          DP_TRAIN_PRE_EMPHASIS_MASK);
1650         switch (signal_levels) {
1651         case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
1652                 return DDI_BUF_EMP_400MV_0DB_HSW;
1653         case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
1654                 return DDI_BUF_EMP_400MV_3_5DB_HSW;
1655         case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
1656                 return DDI_BUF_EMP_400MV_6DB_HSW;
1657         case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
1658                 return DDI_BUF_EMP_400MV_9_5DB_HSW;
1659
1660         case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
1661                 return DDI_BUF_EMP_600MV_0DB_HSW;
1662         case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
1663                 return DDI_BUF_EMP_600MV_3_5DB_HSW;
1664         case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
1665                 return DDI_BUF_EMP_600MV_6DB_HSW;
1666
1667         case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
1668                 return DDI_BUF_EMP_800MV_0DB_HSW;
1669         case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
1670                 return DDI_BUF_EMP_800MV_3_5DB_HSW;
1671         default:
1672                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
1673                               "0x%x\n", signal_levels);
1674                 return DDI_BUF_EMP_400MV_0DB_HSW;
1675         }
1676 }
1677
/*
 * Program a DP training pattern on both ends of the link: write the
 * pattern selection into the source's port/DDI registers, tell the sink
 * over AUX via DPCD TRAINING_PATTERN_SET, and (unless training is being
 * disabled) upload the per-lane drive settings from intel_dp->train_set.
 *
 * dp_reg_value is the port register value to write (link-train bits are
 * rewritten here); dp_train_pat combines a DP_TRAINING_PATTERN_* value
 * with optional DP_LINK_SCRAMBLING_DISABLE.
 *
 * Returns false if the AUX write of the lane settings transfers fewer
 * bytes than intel_dp->lane_count, true otherwise.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
                        uint32_t dp_reg_value,
                        uint8_t dp_train_pat)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private; /* for I915_READ/WRITE */
        enum port port = intel_dig_port->port;
        int ret;
        uint32_t temp;

        if (IS_HASWELL(dev)) {
                /* On HSW the pattern is selected in DP_TP_CTL, not in the
                 * port register itself. */
                temp = I915_READ(DP_TP_CTL(port));

                if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
                        temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
                else
                        temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

                temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:

                        if (port != PORT_A) {
                                /* Non-eDP ports must emit the idle pattern
                                 * and wait for it to complete before going
                                 * back to normal pixel output. */
                                temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
                                I915_WRITE(DP_TP_CTL(port), temp);

                                if (wait_for((I915_READ(DP_TP_STATUS(port)) &
                                              DP_TP_STATUS_IDLE_DONE), 1))
                                        DRM_ERROR("Timed out waiting for DP idle patterns\n");

                                temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
                        }

                        temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

                        break;
                case DP_TRAINING_PATTERN_1:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
                        break;
                }
                I915_WRITE(DP_TP_CTL(port), temp);

        } else if (HAS_PCH_CPT(dev) &&
                   (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
                /* CPT (and GEN7 CPU ports) use the _CPT encoding of the
                 * link-train bits in the port register. */
                dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
                        break;
                case DP_TRAINING_PATTERN_1:
                        dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
                        break;
                case DP_TRAINING_PATTERN_2:
                        dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* Pattern 3 is unsupported here; degrade to 2. */
                        DRM_ERROR("DP training pattern 3 not supported\n");
                        dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                }

        } else {
                /* Legacy encoding of the link-train bits. */
                dp_reg_value &= ~DP_LINK_TRAIN_MASK;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        dp_reg_value |= DP_LINK_TRAIN_OFF;
                        break;
                case DP_TRAINING_PATTERN_1:
                        dp_reg_value |= DP_LINK_TRAIN_PAT_1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        dp_reg_value |= DP_LINK_TRAIN_PAT_2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* Pattern 3 is unsupported here; degrade to 2. */
                        DRM_ERROR("DP training pattern 3 not supported\n");
                        dp_reg_value |= DP_LINK_TRAIN_PAT_2;
                        break;
                }
        }

        I915_WRITE(intel_dp->output_reg, dp_reg_value);
        POSTING_READ(intel_dp->output_reg);

        /* Mirror the pattern selection to the sink over AUX. */
        intel_dp_aux_native_write_1(intel_dp,
                                    DP_TRAINING_PATTERN_SET,
                                    dp_train_pat);

        if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
            DP_TRAINING_PATTERN_DISABLE) {
                /* Push the per-lane voltage swing / pre-emphasis values. */
                ret = intel_dp_aux_native_write(intel_dp,
                                                DP_TRAINING_LANE0_SET,
                                                intel_dp->train_set,
                                                intel_dp->lane_count);
                if (ret != intel_dp->lane_count)
                        return false;
        }

        return true;
}
1787
/* Enable corresponding port and start training pattern 1 */
/*
 * Clock-recovery phase of DP link training (DP spec 2.5.3.3 / 3.5.1.3):
 * write the link configuration to the sink, enable the port, then loop
 * driving training pattern 1 while adjusting per-lane voltage swing and
 * pre-emphasis as requested by the sink, until clock recovery succeeds
 * or the retry budgets (5 same-voltage tries, 5 full restarts) run out.
 * The final port register value is left in intel_dp->DP for the caller.
 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
        struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
        struct drm_device *dev = encoder->dev;
        int i;
        uint8_t voltage;        /* last tried swing level, 0xff = none yet */
        bool clock_recovery = false;
        int voltage_tries, loop_tries;
        uint32_t DP = intel_dp->DP;

        if (IS_HASWELL(dev))
                intel_ddi_prepare_link_retrain(encoder);

        /* Write the link configuration data */
        intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
                                  intel_dp->link_configuration,
                                  DP_LINK_CONFIGURATION_SIZE);

        DP |= DP_PORT_EN;

        memset(intel_dp->train_set, 0, 4);
        voltage = 0xff;
        voltage_tries = 0;
        loop_tries = 0;
        clock_recovery = false;
        for (;;) {
                /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
                uint8_t     link_status[DP_LINK_STATUS_SIZE];
                uint32_t    signal_levels;

                /* Fold the requested drive levels into the platform's
                 * encoding of the port register. */
                if (IS_HASWELL(dev)) {
                        signal_levels = intel_dp_signal_levels_hsw(
                                                        intel_dp->train_set[0]);
                        DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
                } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
                        signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
                        DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
                } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
                        signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
                        DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
                } else {
                        signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
                        DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
                }
                DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
                              signal_levels);

                /* Set training pattern 1 */
                if (!intel_dp_set_link_train(intel_dp, DP,
                                             DP_TRAINING_PATTERN_1 |
                                             DP_LINK_SCRAMBLING_DISABLE))
                        break;

                drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
                if (!intel_dp_get_link_status(intel_dp, link_status)) {
                        DRM_ERROR("failed to get link status\n");
                        break;
                }

                if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
                        DRM_DEBUG_KMS("clock recovery OK\n");
                        clock_recovery = true;
                        break;
                }

                /* Check to see if we've tried the max voltage */
                for (i = 0; i < intel_dp->lane_count; i++)
                        if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
                                break;
                if (i == intel_dp->lane_count) {
                        /* All lanes are at max swing: restart from the
                         * lowest settings, up to 5 full restarts. */
                        ++loop_tries;
                        if (loop_tries == 5) {
                                DRM_DEBUG_KMS("too many full retries, give up\n");
                                break;
                        }
                        memset(intel_dp->train_set, 0, 4);
                        voltage_tries = 0;
                        continue;
                }

                /* Check to see if we've tried the same voltage 5 times */
                if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
                        ++voltage_tries;
                        if (voltage_tries == 5) {
                                DRM_DEBUG_KMS("too many voltage retries, give up\n");
                                break;
                        }
                } else
                        voltage_tries = 0;
                voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

                /* Compute new intel_dp->train_set as requested by target */
                intel_get_adjust_train(intel_dp, link_status);
        }

        intel_dp->DP = DP;
}
1887
/*
 * Channel-equalization phase of DP link training: drive training
 * pattern 2, re-running clock recovery if it is lost, until the sink
 * reports channel EQ done or the retry budgets (6 EQ attempts per CR
 * pass, 6 CR passes total) are exhausted.  Training is always disabled
 * (normal operation restored) on the way out, success or not.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        bool channel_eq = false;
        int tries, cr_tries;
        uint32_t DP = intel_dp->DP;

        /* channel equalization */
        tries = 0;
        cr_tries = 0;
        channel_eq = false;
        for (;;) {
                /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
                uint32_t    signal_levels;
                uint8_t     link_status[DP_LINK_STATUS_SIZE];

                /* Give up entirely after too many clock-recovery restarts. */
                if (cr_tries > 5) {
                        DRM_ERROR("failed to train DP, aborting\n");
                        intel_dp_link_down(intel_dp);
                        break;
                }

                /* Fold the requested drive levels into the platform's
                 * encoding of the port register. */
                if (IS_HASWELL(dev)) {
                        signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
                        DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
                } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
                        signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
                        DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
                } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
                        signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
                        DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
                } else {
                        signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
                        DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
                }

                /* channel eq pattern */
                if (!intel_dp_set_link_train(intel_dp, DP,
                                             DP_TRAINING_PATTERN_2 |
                                             DP_LINK_SCRAMBLING_DISABLE))
                        break;

                drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
                if (!intel_dp_get_link_status(intel_dp, link_status))
                        break;

                /* Make sure clock is still ok */
                if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
                        intel_dp_start_link_train(intel_dp);
                        cr_tries++;
                        continue;
                }

                if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
                        channel_eq = true;
                        break;
                }

                /* Try 5 times, then try clock recovery if that fails */
                if (tries > 5) {
                        intel_dp_link_down(intel_dp);
                        intel_dp_start_link_train(intel_dp);
                        tries = 0;
                        cr_tries++;
                        continue;
                }

                /* Compute new intel_dp->train_set as requested by target */
                intel_get_adjust_train(intel_dp, link_status);
                ++tries;
        }

        if (channel_eq)
                DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");

        /* Leave training mode and resume normal operation. */
        intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
}
1966
/*
 * Take the DP link down: switch the port to the idle training pattern,
 * apply the IBX transcoder-B workaround if needed, then disable the
 * port.  No-op on Haswell (see the comment below) and when the port is
 * already disabled.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private; /* for I915_READ/WRITE */
        uint32_t DP = intel_dp->DP;

        /*
         * DDI code has a strict mode set sequence and we should try to respect
         * it, otherwise we might hang the machine in many different ways. So we
         * really should be disabling the port only on a complete crtc_disable
         * sequence. This function is just called under two conditions on DDI
         * code:
         * - Link train failed while doing crtc_enable, and on this case we
         *   really should respect the mode set sequence and wait for a
         *   crtc_disable.
         * - Someone turned the monitor off and intel_dp_check_link_status
         *   called us. We don't need to disable the whole port on this case, so
         *   when someone turns the monitor on again,
         *   intel_ddi_prepare_link_retrain will take care of redoing the link
         *   train.
         */
        if (IS_HASWELL(dev))
                return;

        /* Port already off: nothing to do. */
        if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
                return;

        DRM_DEBUG_KMS("\n");

        /* Put the link into the idle training pattern (encoding differs
         * between CPT and legacy ports). */
        if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
                DP &= ~DP_LINK_TRAIN_MASK_CPT;
                I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
        } else {
                DP &= ~DP_LINK_TRAIN_MASK;
                I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
        }
        POSTING_READ(intel_dp->output_reg);

        DRM_MSLEEP(17);

        if (HAS_PCH_IBX(dev) &&
            I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
                struct drm_crtc *crtc = intel_dig_port->base.base.crtc;

                /* Hardware workaround: leaving our transcoder select
                 * set to transcoder B while it's off will prevent the
                 * corresponding HDMI output on transcoder A.
                 *
                 * Combine this with another hardware workaround:
                 * transcoder select bit can only be cleared while the
                 * port is enabled.
                 */
                DP &= ~DP_PIPEB_SELECT;
                I915_WRITE(intel_dp->output_reg, DP);

                /* Changes to enable or select take place the vblank
                 * after being written.
                 */
                if (crtc == NULL) {
                        /* We can arrive here never having been attached
                         * to a CRTC, for instance, due to inheriting
                         * random state from the BIOS.
                         *
                         * If the pipe is not running, play safe and
                         * wait for the clocks to stabilise before
                         * continuing.
                         */
                        POSTING_READ(intel_dp->output_reg);
                        DRM_MSLEEP(50);
                } else
                        intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
        }

        /* Finally kill audio and the port itself. */
        DP &= ~DP_AUDIO_OUTPUT_ENABLE;
        I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);
        DRM_MSLEEP(intel_dp->panel_power_down_delay);
}
2047
2048 static bool
2049 intel_dp_get_dpcd(struct intel_dp *intel_dp)
2050 {
2051         if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
2052                                            sizeof(intel_dp->dpcd)) == 0)
2053                 return false; /* aux transfer failed */
2054
2055         if (intel_dp->dpcd[DP_DPCD_REV] == 0)
2056                 return false; /* DPCD not present */
2057
2058         if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
2059               DP_DWN_STRM_PORT_PRESENT))
2060                 return true; /* native DP sink */
2061
2062         if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
2063                 return true; /* no per-port downstream info */
2064
2065         if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
2066                                            intel_dp->downstream_ports,
2067                                            DP_MAX_DOWNSTREAM_PORTS) == 0)
2068                 return false; /* downstream port status fetch failed */
2069
2070         return true;
2071 }
2072
2073 static void
2074 intel_dp_probe_oui(struct intel_dp *intel_dp)
2075 {
2076         u8 buf[3];
2077
2078         if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
2079                 return;
2080
2081         ironlake_edp_panel_vdd_on(intel_dp);
2082
2083         if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
2084                 DRM_DEBUG_KMS("Sink OUI: %02x%02x%02x\n",
2085                               buf[0], buf[1], buf[2]);
2086
2087         if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
2088                 DRM_DEBUG_KMS("Branch OUI: %02x%02x%02x\n",
2089                               buf[0], buf[1], buf[2]);
2090
2091         ironlake_edp_panel_vdd_off(intel_dp, false);
2092 }
2093
2094 static bool
2095 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
2096 {
2097         int ret;
2098
2099         ret = intel_dp_aux_native_read_retry(intel_dp,
2100                                              DP_DEVICE_SERVICE_IRQ_VECTOR,
2101                                              sink_irq_vector, 1);
2102         if (!ret)
2103                 return false;
2104
2105         return true;
2106 }
2107
/*
 * Respond to a sink automated-test request.  Automated testing is not
 * implemented, so every request is refused via DP_TEST_RESPONSE.
 */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
        /* NAK by default */
        intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
}
2114
2115 /*
2116  * According to DP spec
2117  * 5.1.2:
2118  *  1. Read DPCD
2119  *  2. Configure link according to Receiver Capabilities
2120  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
2121  *  4. Check link status on receipt of hot-plug interrupt
2122  */
2123
/*
 * Hot-plug / periodic link check: if the connector is active, read the
 * sink's link status and DPCD, service any sink IRQ, and retrain the
 * link when channel equalization is no longer OK.  Takes the link down
 * when the status or DPCD reads fail.
 */
void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
        struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
        u8 sink_irq_vector;
        u8 link_status[DP_LINK_STATUS_SIZE];

        if (!intel_encoder->connectors_active)
                return;

        if (WARN_ON(!intel_encoder->base.crtc))
                return;

        /* Try to read receiver status if the link appears to be up */
        if (!intel_dp_get_link_status(intel_dp, link_status)) {
                intel_dp_link_down(intel_dp);
                return;
        }

        /* Now read the DPCD to see if it's actually running */
        if (!intel_dp_get_dpcd(intel_dp)) {
                intel_dp_link_down(intel_dp);
                return;
        }

        /* Try to read the source of the interrupt */
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
            intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
                /* Clear interrupt source */
                intel_dp_aux_native_write_1(intel_dp,
                                            DP_DEVICE_SERVICE_IRQ_VECTOR,
                                            sink_irq_vector);

                if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
                        intel_dp_handle_test_request(intel_dp);
                if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
                        DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
        }

        /* Link degraded: run both training phases again. */
        if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
                DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
                              drm_get_encoder_name(&intel_encoder->base));
                intel_dp_start_link_train(intel_dp);
                intel_dp_complete_link_train(intel_dp);
        }
}
2170
2171 /* XXX this is probably wrong for multiple downstream ports */
2172 static enum drm_connector_status
2173 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
2174 {
2175         uint8_t *dpcd = intel_dp->dpcd;
2176         bool hpd;
2177         uint8_t type;
2178
2179         if (!intel_dp_get_dpcd(intel_dp))
2180                 return connector_status_disconnected;
2181
2182         /* if there's no downstream port, we're done */
2183         if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
2184                 return connector_status_connected;
2185
2186         /* If we're HPD-aware, SINK_COUNT changes dynamically */
2187         hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
2188         if (hpd) {
2189                 uint8_t reg;
2190                 if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
2191                                                     &reg, 1))
2192                         return connector_status_unknown;
2193                 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
2194                                               : connector_status_disconnected;
2195         }
2196
2197         /* If no HPD, poke DDC gently */
2198         if (drm_probe_ddc(intel_dp->adapter))
2199                 return connector_status_connected;
2200
2201         /* Well we tried, say unknown for unreliable port types */
2202         type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
2203         if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
2204                 return connector_status_unknown;
2205
2206         /* Anything else is out of spec, warn and ignore */
2207         DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
2208         return connector_status_disconnected;
2209 }
2210
2211 static enum drm_connector_status
2212 ironlake_dp_detect(struct intel_dp *intel_dp)
2213 {
2214         struct drm_device *dev = intel_dp_to_dev(intel_dp);
2215         enum drm_connector_status status;
2216
2217         /* Can't disconnect eDP, but you can close the lid... */
2218         if (is_edp(intel_dp)) {
2219                 status = intel_panel_detect(dev);
2220                 if (status == connector_status_unknown)
2221                         status = connector_status_connected;
2222                 return status;
2223         }
2224
2225         return intel_dp_detect_dpcd(intel_dp);
2226 }
2227
2228 static enum drm_connector_status
2229 g4x_dp_detect(struct intel_dp *intel_dp)
2230 {
2231         struct drm_device *dev = intel_dp_to_dev(intel_dp);
2232         struct drm_i915_private *dev_priv = dev->dev_private;
2233         uint32_t bit;
2234
2235         switch (intel_dp->output_reg) {
2236         case DP_B:
2237                 bit = DPB_HOTPLUG_LIVE_STATUS;
2238                 break;
2239         case DP_C:
2240                 bit = DPC_HOTPLUG_LIVE_STATUS;
2241                 break;
2242         case DP_D:
2243                 bit = DPD_HOTPLUG_LIVE_STATUS;
2244                 break;
2245         default:
2246                 return connector_status_unknown;
2247         }
2248
2249         if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2250                 return connector_status_disconnected;
2251
2252         return intel_dp_detect_dpcd(intel_dp);
2253 }
2254
2255 static struct edid *
2256 intel_dp_get_edid(struct drm_connector *connector, device_t adapter)
2257 {
2258         struct intel_connector *intel_connector = to_intel_connector(connector);
2259
2260         /* use cached edid if we have one */
2261         if (intel_connector->edid) {
2262                 struct edid *edid;
2263                 int size;
2264
2265                 /* invalid edid */
2266                 if (intel_connector->edid_err)
2267                         return NULL;
2268
2269                 size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
2270                 edid = malloc(size, DRM_MEM_KMS, M_WAITOK);
2271                 if (!edid)
2272                         return NULL;
2273
2274                 memcpy(edid, intel_connector->edid, size);
2275                 return edid;
2276         }
2277
2278         return drm_get_edid(connector, adapter);
2279 }
2280
2281 static int
2282 intel_dp_get_edid_modes(struct drm_connector *connector, device_t adapter)
2283 {
2284         struct intel_connector *intel_connector = to_intel_connector(connector);
2285
2286         /* use cached edid if we have one */
2287         if (intel_connector->edid) {
2288                 /* invalid edid */
2289                 if (intel_connector->edid_err)
2290                         return 0;
2291
2292                 return intel_connector_update_modes(connector,
2293                                                     intel_connector->edid);
2294         }
2295
2296         return intel_ddc_get_modes(connector, adapter);
2297 }
2298
2299
2300 /**
2301  * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
2302  *
2303  * \return true if DP port is connected.
2304  * \return false if DP port is disconnected.
2305  */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = connector->dev;
        enum drm_connector_status status;
        struct edid *edid = NULL;
        /* 3 chars per DPCD byte: two hex digits plus separator/NUL. */
        char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];

        intel_dp->has_audio = false;

        /* Platform-specific presence probe (also refreshes the DPCD). */
        if (HAS_PCH_SPLIT(dev))
                status = ironlake_dp_detect(intel_dp);
        else
                status = g4x_dp_detect(intel_dp);

        hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
                           32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
        DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);

        if (status != connector_status_connected)
                return status;

        intel_dp_probe_oui(intel_dp);

        /* Audio: honour a forced setting, else detect it from the EDID. */
        if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
                intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
        } else {
                edid = intel_dp_get_edid(connector, intel_dp->adapter);
                if (edid) {
                        intel_dp->has_audio = drm_detect_monitor_audio(edid);
                        free(edid, DRM_MEM_KMS);
                }
        }

        if (intel_encoder->type != INTEL_OUTPUT_EDP)
                intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
        return connector_status_connected;
}
2347
2348 static int intel_dp_get_modes(struct drm_connector *connector)
2349 {
2350         struct intel_dp *intel_dp = intel_attached_dp(connector);
2351         struct intel_connector *intel_connector = to_intel_connector(connector);
2352         struct drm_device *dev = connector->dev;
2353         int ret;
2354
2355         /* We should parse the EDID data and find out if it has an audio sink
2356          */
2357
2358         ret = intel_dp_get_edid_modes(connector, intel_dp->adapter);
2359         if (ret)
2360                 return ret;
2361
2362         /* if eDP has no EDID, fall back to fixed mode */
2363         if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
2364                 struct drm_display_mode *mode;
2365                 mode = drm_mode_duplicate(dev,
2366                                           intel_connector->panel.fixed_mode);
2367                 if (mode) {
2368                         drm_mode_probed_add(connector, mode);
2369                         return 1;
2370                 }
2371         }
2372         return 0;
2373 }
2374
2375 static bool
2376 intel_dp_detect_audio(struct drm_connector *connector)
2377 {
2378         struct intel_dp *intel_dp = intel_attached_dp(connector);
2379         struct edid *edid;
2380         bool has_audio = false;
2381
2382         edid = intel_dp_get_edid(connector, intel_dp->adapter);
2383         if (edid) {
2384                 has_audio = drm_detect_monitor_audio(edid);
2385                 free(edid, DRM_MEM_KMS);
2386         }
2387
2388         return has_audio;
2389 }
2390
/*
 * Connector ->set_property hook.  Handles the force-audio and
 * broadcast-RGB properties, plus the scaling-mode property for eDP.
 * Returns 0 on success (including no-op changes), a negative errno on
 * failure, and -EINVAL for unknown properties or unsupported values.
 * Any effective change triggers a full modeset on the active CRTC.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
                      struct drm_property *property,
                      uint64_t val)
{
        struct drm_i915_private *dev_priv = connector->dev->dev_private;
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
        struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
        int ret;

        /* Record the new value on the connector object first. */
        ret = drm_object_property_set_value(&connector->base, property, val);
        if (ret)
                return ret;

        if (property == dev_priv->force_audio_property) {
                int i = val;
                bool has_audio;

                if (i == intel_dp->force_audio)
                        return 0;

                intel_dp->force_audio = i;

                /* AUTO re-probes the sink's EDID; ON/OFF force the state. */
                if (i == HDMI_AUDIO_AUTO)
                        has_audio = intel_dp_detect_audio(connector);
                else
                        has_audio = (i == HDMI_AUDIO_ON);

                /* Effective audio state unchanged: no modeset needed. */
                if (has_audio == intel_dp->has_audio)
                        return 0;

                intel_dp->has_audio = has_audio;
                goto done;
        }

        if (property == dev_priv->broadcast_rgb_property) {
                /* !!color_range collapses the current range to the same
                 * 0/1 domain as the property value. */
                if (val == !!intel_dp->color_range)
                        return 0;

                intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
                goto done;
        }

        if (is_edp(intel_dp) &&
            property == connector->dev->mode_config.scaling_mode_property) {
                if (val == DRM_MODE_SCALE_NONE) {
                        DRM_DEBUG_KMS("no scaling not supported\n");
                        return -EINVAL;
                }

                if (intel_connector->panel.fitting_mode == val) {
                        /* the eDP scaling property is not changed */
                        return 0;
                }
                intel_connector->panel.fitting_mode = val;

                goto done;
        }

        return -EINVAL;

done:
        /* A property actually changed: apply it by redoing the modeset on
         * the CRTC this encoder currently drives (if any). */
        if (intel_encoder->base.crtc) {
                struct drm_crtc *crtc = intel_encoder->base.crtc;
                intel_set_mode(crtc, &crtc->mode,
                               crtc->x, crtc->y, crtc->fb);
        }

        return 0;
}
2462
2463 static void
2464 intel_dp_destroy(struct drm_connector *connector)
2465 {
2466         struct intel_dp *intel_dp = intel_attached_dp(connector);
2467         struct intel_connector *intel_connector = to_intel_connector(connector);
2468
2469         free(intel_connector->edid, DRM_MEM_KMS);
2470
2471         if (is_edp(intel_dp))
2472                 intel_panel_fini(&intel_connector->panel);
2473
2474         drm_connector_cleanup(connector);
2475         free(connector, DRM_MEM_KMS);
2476 }
2477
/*
 * Encoder ->destroy hook: tear down the DP AUX i2c devices, cancel any
 * pending panel-VDD work for eDP, and free the digital port wrapper.
 * The VDD work must be cancelled and drained before intel_dig_port is
 * freed, since the work item lives inside it.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct drm_device *dev = intel_dig_port->base.base.dev;

        /* Delete the iic adapter child before the bus that owns it. */
        if (intel_dp->dp_iic_bus != NULL) {
                if (intel_dp->adapter != NULL) {
                        device_delete_child(intel_dp->dp_iic_bus,
                            intel_dp->adapter);
                }
                device_delete_child(dev->dev, intel_dp->dp_iic_bus);
        }
        drm_encoder_cleanup(encoder);
        if (is_edp(intel_dp)) {
                struct drm_i915_private *dev_priv = dev->dev_private;

                /* Stop the delayed VDD-off task and wait for any running
                 * instance, then drop VDD synchronously. */
                taskqueue_cancel_timeout(dev_priv->wq,
                    &intel_dp->panel_vdd_work, NULL);
                taskqueue_drain_timeout(dev_priv->wq,
                    &intel_dp->panel_vdd_work);
                ironlake_panel_vdd_off_sync(intel_dp);
        }
        free(intel_dig_port, DRM_MEM_KMS);
}
2503
/* Encoder helper vtable shared by all DP/eDP encoders. */
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
        .mode_fixup = intel_dp_mode_fixup,
        .mode_set = intel_dp_mode_set,
        .disable = intel_encoder_noop,
};
2509
/* Connector vtable: DPMS, hotplug detection, property handling, teardown. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
        .dpms = intel_connector_dpms,
        .detect = intel_dp_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = intel_dp_set_property,
        .destroy = intel_dp_destroy,
};
2517
/* Connector helper vtable: mode enumeration/validation, encoder lookup. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
        .get_modes = intel_dp_get_modes,
        .mode_valid = intel_dp_mode_valid,
        .best_encoder = intel_best_encoder,
};
2523
/* Encoder vtable: destruction frees the whole digital port wrapper. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
        .destroy = intel_dp_encoder_destroy,
};
2527
2528 static void
2529 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
2530 {
2531         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2532
2533         intel_dp_check_link_status(intel_dp);
2534 }
2535
2536 /* Return which DP Port should be selected for Transcoder DP control */
2537 int
2538 intel_trans_dp_port_sel(struct drm_crtc *crtc)
2539 {
2540         struct drm_device *dev = crtc->dev;
2541         struct intel_encoder *intel_encoder;
2542         struct intel_dp *intel_dp;
2543
2544         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
2545                 intel_dp = enc_to_intel_dp(&intel_encoder->base);
2546
2547                 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
2548                     intel_encoder->type == INTEL_OUTPUT_EDP)
2549                         return intel_dp->output_reg;
2550         }
2551
2552         return -1;
2553 }
2554
2555 /* check the VBT to see whether the eDP is on DP-D port */
2556 bool intel_dpd_is_edp(struct drm_device *dev)
2557 {
2558         struct drm_i915_private *dev_priv = dev->dev_private;
2559         struct child_device_config *p_child;
2560         int i;
2561
2562         if (!dev_priv->child_dev_num)
2563                 return false;
2564
2565         for (i = 0; i < dev_priv->child_dev_num; i++) {
2566                 p_child = dev_priv->child_dev + i;
2567
2568                 if (p_child->dvo_port == PORT_IDPD &&
2569                     p_child->device_type == DEVICE_TYPE_eDP)
2570                         return true;
2571         }
2572         return false;
2573 }
2574
2575 static void
2576 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
2577 {
2578         struct intel_connector *intel_connector = to_intel_connector(connector);
2579
2580         intel_attach_force_audio_property(connector);
2581         intel_attach_broadcast_rgb_property(connector);
2582
2583         if (is_edp(intel_dp)) {
2584                 drm_mode_create_scaling_mode_property(connector->dev);
2585                 drm_object_attach_property(
2586                         &connector->base,
2587                         connector->dev->mode_config.scaling_mode_property,
2588                         DRM_MODE_SCALE_ASPECT);
2589                 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
2590         }
2591 }
2592
/*
 * Derive the eDP panel power sequencing delays.  The current hardware
 * register values, the VBT-provided values and the eDP spec limits are
 * merged: the max of registers and VBT wins, with the spec limit as a
 * fallback when both are zero.  The merged delays are stored in
 * @intel_dp (converted to ms) and, via @out, handed back for later
 * programming by intel_dp_init_panel_power_sequencer_registers().
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
                                    struct intel_dp *intel_dp,
                                    struct edp_power_seq *out)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct edp_power_seq cur, vbt, spec, final;
        u32 pp_on, pp_off, pp_div, pp;

        /* Workaround: Need to write PP_CONTROL with the unlock key as
         * the very first thing. */
        pp = ironlake_get_pp_control(dev_priv);
        I915_WRITE(PCH_PP_CONTROL, pp);

        pp_on = I915_READ(PCH_PP_ON_DELAYS);
        pp_off = I915_READ(PCH_PP_OFF_DELAYS);
        pp_div = I915_READ(PCH_PP_DIVISOR);

        /* Pull timing values out of registers */
        cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
                PANEL_POWER_UP_DELAY_SHIFT;

        cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
                PANEL_LIGHT_ON_DELAY_SHIFT;

        cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
                PANEL_LIGHT_OFF_DELAY_SHIFT;

        cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
                PANEL_POWER_DOWN_DELAY_SHIFT;

        /* t11_t12 register units are 100 ms; scale to the common
         * 100 usec units used by the other fields. */
        cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
                       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

        DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
                      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

        vbt = dev_priv->edp.pps;

        /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
         * our hw here, which are all in 100usec. */
        spec.t1_t3 = 210 * 10;
        spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
        spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
        spec.t10 = 500 * 10;
        /* This one is special and actually in units of 100ms, but zero
         * based in the hw (so we need to add 100 ms). But the sw vbt
         * table multiplies it with 1000 to make it in units of 100usec,
         * too. */
        spec.t11_t12 = (510 + 100) * 10;

        DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
                      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

        /* Use the max of the register settings and vbt. If both are
         * unset, fall back to the spec limits. */
#define assign_final(field)     final.field = (max(cur.field, vbt.field) == 0 ? \
                                       spec.field : \
                                       max(cur.field, vbt.field))
        assign_final(t1_t3);
        assign_final(t8);
        assign_final(t9);
        assign_final(t10);
        assign_final(t11_t12);
#undef assign_final

        /* get_delay converts the 100 usec hw units to milliseconds,
         * rounding up. */
#define get_delay(field)        (DIV_ROUND_UP(final.field, 10))
        intel_dp->panel_power_up_delay = get_delay(t1_t3);
        intel_dp->backlight_on_delay = get_delay(t8);
        intel_dp->backlight_off_delay = get_delay(t9);
        intel_dp->panel_power_down_delay = get_delay(t10);
        intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

        DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
                      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
                      intel_dp->panel_power_cycle_delay);

        DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
                      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

        if (out)
                *out = final;
}
2677
/*
 * Program the merged power sequence @seq (as produced by
 * intel_dp_init_panel_power_sequencer()) into the PCH panel power
 * sequencer registers: on/off delays, power cycle delay, the pp clock
 * divisor, and — on IBX/CPT only — the panel port select bits.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
                                              struct intel_dp *intel_dp,
                                              struct edp_power_seq *seq)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_on, pp_off, pp_div;

        /* And finally store the new values in the power sequencer. */
        pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
                (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
        pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
                 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
        /* Compute the divisor for the pp clock, simply match the Bspec
         * formula. */
        pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
                        << PP_REFERENCE_DIVIDER_SHIFT;
        /* t11_t12 is carried in 100 usec units; the register field wants
         * 100 ms units, hence the divide by 1000 (rounded up). */
        pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
                        << PANEL_POWER_CYCLE_DELAY_SHIFT);

        /* Haswell doesn't have any port selection bits for the panel
         * power sequencer any more. */
        if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
                if (is_cpu_edp(intel_dp))
                        pp_on |= PANEL_POWER_PORT_DP_A;
                else
                        pp_on |= PANEL_POWER_PORT_DP_D;
        }

        I915_WRITE(PCH_PP_ON_DELAYS, pp_on);
        I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
        I915_WRITE(PCH_PP_DIVISOR, pp_div);

        DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
                      I915_READ(PCH_PP_ON_DELAYS),
                      I915_READ(PCH_PP_OFF_DELAYS),
                      I915_READ(PCH_PP_DIVISOR));
}
2716
/*
 * Finish bringing up a DP/eDP connector: classify the port as DP or
 * eDP, register the DRM connector, set up the DDC/AUX i2c bus, and for
 * eDP additionally read the panel power sequence, cache the DPCD and
 * EDID, and initialize the fixed panel mode and backlight.  If the eDP
 * DPCD read fails, the encoder and connector are torn down ("ghost"
 * device) and the function returns early.
 */
void
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                        struct intel_connector *intel_connector)
{
        struct drm_connector *connector = &intel_connector->base;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *fixed_mode = NULL;
        struct edp_power_seq power_seq = { 0 };
        enum port port = intel_dig_port->port;
        const char *name = NULL;
        int type;

        /* Preserve the current hw state. */
        intel_dp->DP = I915_READ(intel_dp->output_reg);
        intel_dp->attached_connector = intel_connector;

        /* On PCH-split parts the VBT decides whether DP-D is really eDP. */
        if (HAS_PCH_SPLIT(dev) && port == PORT_D)
                if (intel_dpd_is_edp(dev))
                        intel_dp->is_pch_edp = true;

        /*
         * FIXME : We need to initialize built-in panels before external panels.
         * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
         */
        if (IS_VALLEYVIEW(dev) && port == PORT_C) {
                type = DRM_MODE_CONNECTOR_eDP;
                intel_encoder->type = INTEL_OUTPUT_EDP;
        } else if (port == PORT_A || is_pch_edp(intel_dp)) {
                type = DRM_MODE_CONNECTOR_eDP;
                intel_encoder->type = INTEL_OUTPUT_EDP;
        } else {
                /* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for
                 * DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't
                 * rewrite it.
                 */
                type = DRM_MODE_CONNECTOR_DisplayPort;
        }

        drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
        drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

        connector->polled = DRM_CONNECTOR_POLL_HPD;
        connector->interlace_allowed = true;
        connector->doublescan_allowed = 0;

        /* Deferred work item for panel VDD handling (see
         * ironlake_panel_vdd_work); cancelled in intel_dp_encoder_destroy. */
        TIMEOUT_TASK_INIT(dev_priv->wq, &intel_dp->panel_vdd_work, 0,
            ironlake_panel_vdd_work, intel_dp);

        intel_connector_attach_encoder(intel_connector, intel_encoder);

        if (IS_HASWELL(dev))
                intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
        else
                intel_connector->get_hw_state = intel_connector_get_hw_state;


        /* Set up the DDC bus.  Ports B-D also enable their hotplug
         * interrupt sources; port A (CPU eDP) has none to enable here. */
        switch (port) {
        case PORT_A:
                name = "DPDDC-A";
                break;
        case PORT_B:
                dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
                name = "DPDDC-B";
                break;
        case PORT_C:
                dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
                name = "DPDDC-C";
                break;
        case PORT_D:
                dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
                name = "DPDDC-D";
                break;
        default:
                WARN(1, "Invalid port %c\n", port_name(port));
                break;
        }

        /* Read the power sequence before the panel is touched below; it
         * is only written back once the panel is known not to be a ghost. */
        if (is_edp(intel_dp))
                intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);

        intel_dp_i2c_init(intel_dp, intel_connector, name);

        /* Cache DPCD and EDID for edp. */
        if (is_edp(intel_dp)) {
                bool ret;
                struct drm_display_mode *scan;
                struct edid *edid;
                int edid_err = 0;

                /* DPCD access needs panel VDD; drop it again right after. */
                ironlake_edp_panel_vdd_on(intel_dp);
                ret = intel_dp_get_dpcd(intel_dp);
                ironlake_edp_panel_vdd_off(intel_dp, false);

                if (ret) {
                        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
                                dev_priv->no_aux_handshake =
                                        intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
                                        DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
                } else {
                        /* if this fails, presume the device is a ghost */
                        DRM_INFO("failed to retrieve link info, disabling eDP\n");
                        intel_dp_encoder_destroy(&intel_encoder->base);
                        intel_dp_destroy(connector);
                        return;
                }

                /* We now know it's not a ghost, init power sequence regs. */
                intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
                                                              &power_seq);

                ironlake_edp_panel_vdd_on(intel_dp);
                edid = drm_get_edid(connector, intel_dp->adapter);
                if (edid) {
                        if (drm_add_edid_modes(connector, edid)) {
                                drm_mode_connector_update_edid_property(connector, edid);
                                drm_edid_to_eld(connector, edid);
                        } else {
                                /* EDID read fine but yielded no usable modes. */
                                free(edid, DRM_MEM_KMS);
                                edid = NULL;
                                edid_err = -EINVAL;
                        }
                } else {
                        edid = NULL;
                        edid_err = -ENOENT;
                }
                /* The cached EDID is owned by the connector and freed in
                 * intel_dp_destroy(). */
                intel_connector->edid = edid;
                intel_connector->edid_err = edid_err;

                /* prefer fixed mode from EDID if available */
                list_for_each_entry(scan, &connector->probed_modes, head) {
                        if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
                                fixed_mode = drm_mode_duplicate(dev, scan);
                                break;
                        }
                }

                /* fallback to VBT if available for eDP */
                if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
                        fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
                        if (fixed_mode)
                                fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
                }

                ironlake_edp_panel_vdd_off(intel_dp, false);
        }

        if (is_edp(intel_dp)) {
                intel_panel_init(&intel_connector->panel, fixed_mode);
                intel_panel_setup_backlight(connector);
        }

        intel_dp_add_properties(intel_dp, connector);

        /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
         * 0xd.  Failure to do so will result in spurious interrupts being
         * generated on the port when a cable is not attached.
         */
        if (IS_G4X(dev) && !IS_GM45(dev)) {
                u32 temp = I915_READ(PEG_BAND_GAP_DATA);
                I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
        }
}
2883
/*
 * Top-level entry point: allocate the digital port and connector
 * structures for the DP output at @output_reg on @port, wire up the
 * encoder callbacks, and hand off to intel_dp_init_connector() for the
 * rest of the setup.
 */
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
        struct intel_digital_port *intel_dig_port;
        struct intel_encoder *intel_encoder;
        struct drm_encoder *encoder;
        struct intel_connector *intel_connector;

        /* M_WAITOK allocations sleep rather than fail, so the NULL checks
         * below are purely defensive. */
        intel_dig_port = malloc(sizeof(struct intel_digital_port), DRM_MEM_KMS, M_WAITOK | M_ZERO);
        if (!intel_dig_port)
                return;

        intel_connector = malloc(sizeof(struct intel_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO);
        if (!intel_connector) {
                free(intel_dig_port, DRM_MEM_KMS);
                return;
        }

        intel_encoder = &intel_dig_port->base;
        encoder = &intel_encoder->base;

        drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
                         DRM_MODE_ENCODER_TMDS);
        drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);

        /* Modeset state-transition callbacks. */
        intel_encoder->enable = intel_enable_dp;
        intel_encoder->pre_enable = intel_pre_enable_dp;
        intel_encoder->disable = intel_disable_dp;
        intel_encoder->post_disable = intel_post_disable_dp;
        intel_encoder->get_hw_state = intel_dp_get_hw_state;

        intel_dig_port->port = port;
        intel_dig_port->dp.output_reg = output_reg;

        intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
        /* Usable on any of the first three pipes; never cloned with
         * other outputs. */
        intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
        intel_encoder->cloneable = false;
        intel_encoder->hot_plug = intel_dp_hot_plug;

        intel_dp_init_connector(intel_dig_port, intel_connector);
}