/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

struct dp_link_dpll {
	int link_bw;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
/*
 * CHV supports eDP 1.4, which has more link rates.
 * Only the fixed rates are provided below; the variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
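/*
 * Worked example of the fixed point encoding above: for the 1.62 entry,
 * (32 << 22) | 1677722 == 134217728 + 1677722 == 0x819999a, which matches
 * the m2 value in the table.
 */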
/* Skylake supports the following rates */
static const int gen9_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
				 243000, 270000, 324000, 405000,
				 420000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
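/*
 * Note: the DP_LINK_BW_* codes used elsewhere in this file encode the link
 * rate in units of 0.27 GHz, i.e. rate_khz = bw_code * 27000; e.g.
 * DP_LINK_BW_1_62 (0x06) * 27000 == 162000, matching default_rates[0].
 */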

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * Returns true if a CPU or PCH DP output is attached to an eDP panel,
 * false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}
/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}
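/*
 * Continuing the example above: 119000 kHz * 18 bpp = 2142000 kbit/s, and
 * (2142000 + 9) / 10 = 214200 decakilobits. The "+ 9" rounds the division
 * up, so the required link bandwidth is never under-reported.
 */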

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
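/*
 * Pack up to 4 bytes MSB-first into one 32-bit AUX data register,
 * e.g. src = { 0x12, 0x34 } yields 0x12340000. intel_dp_unpack_aux()
 * below performs the inverse.
 */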
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);
static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled;
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled)
		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled)
		vlv_force_pll_off(dev, pipe);
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so they
	 * should always be used.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power so that the T12 timing
 * is guaranteed. Only applicable when the panel PM state is not tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based on hrawclk and should run at 2MHz,
	 * so take the hrawclk value and divide by 2 and use that.
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		if (IS_GEN6(dev) || IS_GEN7(dev))
			return 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			return 225; /* eDP input clock at 450Mhz */
	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
				      DP_AUX_CH_CTL_RECEIVE_ERROR))
				continue;
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
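/*
 * AUX message header layout, as assembled into txbuf[] below:
 *   byte 0: request type in the high nibble, address bits 19:16 in the low
 *   byte 1: address bits 15:8
 *   byte 2: address bits 7:0
 *   byte 3: message length minus one
 */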
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int ret;

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
{
	u32 ctrl1;

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (link_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640, which
	 * results in a CDCLK change. Need to handle the CDCLK change by
	 * disabling pipes and re-enabling them. */
	case 108000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;
	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}

static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
{
	switch (link_bw) {
	case DP_LINK_BW_1_62:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case DP_LINK_BW_2_7:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case DP_LINK_BW_5_4:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

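	/*
	 * Link BW codes 0x06, 0x0a and 0x14 shifted right by 3 give 0, 1
	 * and 2, so this computes how many leading entries of default_rates
	 * the sink supports (1, 2 or 3).
	 */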
	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}

static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
{
	/* WaDisableHBR2:skl */
	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
		return false;

	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
	    (INTEL_INFO(dev)->gen >= 9))
		return true;
	else
		return false;
}

static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
	if (INTEL_INFO(dev)->gen >= 9) {
		*source_rates = gen9_rates;
		return ARRAY_SIZE(gen9_rates);
	} else if (IS_CHERRYVIEW(dev)) {
		*source_rates = chv_rates;
		return ARRAY_SIZE(chv_rates);
	}

	*source_rates = default_rates;

	/* This depends on the fact that 5.4 is the last value in the array */
	if (intel_dp_source_supports_hbr2(dev))
		return (DP_LINK_BW_5_4 >> 3) + 1;
	else
		return (DP_LINK_BW_2_7 >> 3) + 1;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config, int link_bw)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (link_bw == divisor[i].link_bw) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

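/*
 * Merge-intersect two ascending-sorted rate arrays, writing the rates
 * common to both into common_rates[] (at most DP_MAX_SUPPORTED_RATES
 * entries) and returning how many were written.
 */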
static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(dev, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%d,", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(dev, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}

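/*
 * Return the index of 'find' in rates[], or DP_MAX_SUPPORTED_RATES if it
 * isn't there. On a zero-padded array, rate_to_index(0, rates) therefore
 * yields the number of populated entries; intel_dp_max_link_rate() below
 * relies on that to pick the highest (last) common rate.
 */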
static int rate_to_index(int find, const int *rates)
{
	int i = 0;

	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
		if (find == rates[i])
			break;

	return i;
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	return rates[rate_to_index(0, rates) - 1];
}

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}

bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
				lane_count <= max_lane_count;
				lane_count <<= 1) {

				link_clock = common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->lane_count = lane_count;

	if (intel_dp->num_sink_rates) {
		intel_dp->link_bw = 0;
		intel_dp->rate_select =
			intel_dp_rate_select(intel_dp, common_rates[clock]);
	} else {
		intel_dp->link_bw =
			drm_dp_link_rate_to_bw_code(common_rates[clock]);
		intel_dp->rate_select = 0;
	}

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = common_rates[clock];

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	if (intel_connector->panel.downclock_mode != NULL &&
		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
			pipe_config->has_drrs = true;
			intel_link_compute_m_n(bpp, lane_count,
				intel_connector->panel.downclock_mode->clock,
				pipe_config->port_clock,
				&pipe_config->dp_m2_n2);
	}

	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config, common_rates[clock]);
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
	else
		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}

static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around an ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}

static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (!IS_CHERRYVIEW(dev)) {
			if (crtc->pipe == 1)
				intel_dp->DP |= DP_PIPEB_SELECT;
		} else {
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		}
	} else {
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}
}

#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
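/*
 * Each MASK/VALUE pair above is consumed by wait_panel_status() below:
 * the PP status register is polled until (status & mask) == value, i.e.
 * until the panel power sequencer reaches the named idle state.
 */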

static void wait_panel_status(struct intel_dp *intel_dp,
			      u32 mask,
			      u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}
1626 
wait_panel_on(struct intel_dp * intel_dp)1627 static void wait_panel_on(struct intel_dp *intel_dp)
1628 {
1629 	DRM_DEBUG_KMS("Wait for panel power on\n");
1630 	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1631 }
1632 
wait_panel_off(struct intel_dp * intel_dp)1633 static void wait_panel_off(struct intel_dp *intel_dp)
1634 {
1635 	DRM_DEBUG_KMS("Wait for panel power off time\n");
1636 	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1637 }
1638 
wait_panel_power_cycle(struct intel_dp * intel_dp)1639 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1640 {
1641 	DRM_DEBUG_KMS("Wait for panel power cycle\n");
1642 
1643 	/* When we disable the VDD override bit last, we have to do the
1644 	 * manual wait. */
1645 	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1646 				       intel_dp->panel_power_cycle_delay);
1647 
1648 	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1649 }
1650 
1651 static void wait_backlight_on(struct intel_dp *intel_dp)
1652 {
1653 	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1654 				       intel_dp->backlight_on_delay);
1655 }
1656 
1657 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1658 {
1659 	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1660 				       intel_dp->backlight_off_delay);
1661 }
1662 
1663 /* Read the current pp_control value, unlocking the register if it
1664  * is locked
1665  */
1666 
1667 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1668 {
1669 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1670 	struct drm_i915_private *dev_priv = dev->dev_private;
1671 	u32 control;
1672 
1673 	lockdep_assert_held(&dev_priv->pps_mutex);
1674 
1675 	control = I915_READ(_pp_ctrl_reg(intel_dp));
1676 	control &= ~PANEL_UNLOCK_MASK;
1677 	control |= PANEL_UNLOCK_REGS;
1678 	return control;
1679 }
1680 
1681 /*
1682  * Must be paired with edp_panel_vdd_off().
1683  * Must hold pps_mutex around the whole on/off sequence.
1684  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1685  */
1686 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1687 {
1688 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1689 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1690 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1691 	struct drm_i915_private *dev_priv = dev->dev_private;
1692 	enum intel_display_power_domain power_domain;
1693 	u32 pp;
1694 	u32 pp_stat_reg, pp_ctrl_reg;
1695 	bool need_to_disable = !intel_dp->want_panel_vdd;
1696 
1697 	lockdep_assert_held(&dev_priv->pps_mutex);
1698 
1699 	if (!is_edp(intel_dp))
1700 		return false;
1701 
1702 	cancel_delayed_work(&intel_dp->panel_vdd_work);
1703 	intel_dp->want_panel_vdd = true;
1704 
1705 	if (edp_have_panel_vdd(intel_dp))
1706 		return need_to_disable;
1707 
1708 	power_domain = intel_display_port_power_domain(intel_encoder);
1709 	intel_display_power_get(dev_priv, power_domain);
1710 
1711 	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1712 		      port_name(intel_dig_port->port));
1713 
1714 	if (!edp_have_panel_power(intel_dp))
1715 		wait_panel_power_cycle(intel_dp);
1716 
1717 	pp = ironlake_get_pp_control(intel_dp);
1718 	pp |= EDP_FORCE_VDD;
1719 
1720 	pp_stat_reg = _pp_stat_reg(intel_dp);
1721 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1722 
1723 	I915_WRITE(pp_ctrl_reg, pp);
1724 	POSTING_READ(pp_ctrl_reg);
1725 	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1726 			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1727 	/*
1728 	 * If the panel wasn't on, delay before accessing aux channel
1729 	 */
1730 	if (!edp_have_panel_power(intel_dp)) {
1731 		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1732 			      port_name(intel_dig_port->port));
1733 		msleep(intel_dp->panel_power_up_delay);
1734 	}
1735 
1736 	return need_to_disable;
1737 }
1738 
1739 /*
1740  * Must be paired with intel_edp_panel_vdd_off() or
1741  * intel_edp_panel_off().
1742  * Nested calls to these functions are not allowed since
1743  * we drop the lock. Caller must use some higher level
1744  * locking to prevent nested calls from other threads.
1745  */
1746 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1747 {
1748 	bool vdd;
1749 
1750 	if (!is_edp(intel_dp))
1751 		return;
1752 
1753 	pps_lock(intel_dp);
1754 	vdd = edp_panel_vdd_on(intel_dp);
1755 	pps_unlock(intel_dp);
1756 
1757 	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1758 	     port_name(dp_to_dig_port(intel_dp)->port));
1759 }
1760 
1761 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1762 {
1763 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1764 	struct drm_i915_private *dev_priv = dev->dev_private;
1765 	struct intel_digital_port *intel_dig_port =
1766 		dp_to_dig_port(intel_dp);
1767 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1768 	enum intel_display_power_domain power_domain;
1769 	u32 pp;
1770 	u32 pp_stat_reg, pp_ctrl_reg;
1771 
1772 	lockdep_assert_held(&dev_priv->pps_mutex);
1773 
1774 	WARN_ON(intel_dp->want_panel_vdd);
1775 
1776 	if (!edp_have_panel_vdd(intel_dp))
1777 		return;
1778 
1779 	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1780 		      port_name(intel_dig_port->port));
1781 
1782 	pp = ironlake_get_pp_control(intel_dp);
1783 	pp &= ~EDP_FORCE_VDD;
1784 
1785 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1786 	pp_stat_reg = _pp_stat_reg(intel_dp);
1787 
1788 	I915_WRITE(pp_ctrl_reg, pp);
1789 	POSTING_READ(pp_ctrl_reg);
1790 
1791 	/* Make sure sequencer is idle before allowing subsequent activity */
1792 	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1793 			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1794 
1795 	if ((pp & POWER_TARGET_ON) == 0)
1796 		intel_dp->last_power_cycle = jiffies;
1797 
1798 	power_domain = intel_display_port_power_domain(intel_encoder);
1799 	intel_display_power_put(dev_priv, power_domain);
1800 }
1801 
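/*
 * Delayed work handler that turns VDD back off, unless someone
 * re-requested it (want_panel_vdd) while the work was queued.
 */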
1802 static void edp_panel_vdd_work(struct work_struct *__work)
1803 {
1804 	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1805 						 struct intel_dp, panel_vdd_work);
1806 
1807 	pps_lock(intel_dp);
1808 	if (!intel_dp->want_panel_vdd)
1809 		edp_panel_vdd_off_sync(intel_dp);
1810 	pps_unlock(intel_dp);
1811 }
1812 
1813 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1814 {
1815 	unsigned long delay;
1816 
1817 	/*
1818 	 * Queue the timer to fire a long time from now (relative to the power
1819 	 * down delay) to keep the panel power up across a sequence of
1820 	 * operations.
1821 	 */
1822 	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1823 	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1824 }
1825 
1826 /*
1827  * Must be paired with edp_panel_vdd_on().
1828  * Must hold pps_mutex around the whole on/off sequence.
1829  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1830  */
1831 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1832 {
1833 	struct drm_i915_private *dev_priv =
1834 		intel_dp_to_dev(intel_dp)->dev_private;
1835 
1836 	lockdep_assert_held(&dev_priv->pps_mutex);
1837 
1838 	if (!is_edp(intel_dp))
1839 		return;
1840 
1841 	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1842 	     port_name(dp_to_dig_port(intel_dp)->port));
1843 
1844 	intel_dp->want_panel_vdd = false;
1845 
1846 	if (sync)
1847 		edp_panel_vdd_off_sync(intel_dp);
1848 	else
1849 		edp_panel_vdd_schedule_off(intel_dp);
1850 }
1851 
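/*
 * Turn panel power on through PP_CONTROL. On ILK the panel reset bit
 * has to be dropped around the power-on sequence as a workaround and
 * is restored afterwards. Caller must hold pps_mutex.
 */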
1852 static void edp_panel_on(struct intel_dp *intel_dp)
1853 {
1854 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1855 	struct drm_i915_private *dev_priv = dev->dev_private;
1856 	u32 pp;
1857 	u32 pp_ctrl_reg;
1858 
1859 	lockdep_assert_held(&dev_priv->pps_mutex);
1860 
1861 	if (!is_edp(intel_dp))
1862 		return;
1863 
1864 	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1865 		      port_name(dp_to_dig_port(intel_dp)->port));
1866 
1867 	if (WARN(edp_have_panel_power(intel_dp),
1868 		 "eDP port %c panel power already on\n",
1869 		 port_name(dp_to_dig_port(intel_dp)->port)))
1870 		return;
1871 
1872 	wait_panel_power_cycle(intel_dp);
1873 
1874 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1875 	pp = ironlake_get_pp_control(intel_dp);
1876 	if (IS_GEN5(dev)) {
1877 		/* ILK workaround: disable reset around power sequence */
1878 		pp &= ~PANEL_POWER_RESET;
1879 		I915_WRITE(pp_ctrl_reg, pp);
1880 		POSTING_READ(pp_ctrl_reg);
1881 	}
1882 
1883 	pp |= POWER_TARGET_ON;
1884 	if (!IS_GEN5(dev))
1885 		pp |= PANEL_POWER_RESET;
1886 
1887 	I915_WRITE(pp_ctrl_reg, pp);
1888 	POSTING_READ(pp_ctrl_reg);
1889 
1890 	wait_panel_on(intel_dp);
1891 	intel_dp->last_power_on = jiffies;
1892 
1893 	if (IS_GEN5(dev)) {
1894 		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1895 		I915_WRITE(pp_ctrl_reg, pp);
1896 		POSTING_READ(pp_ctrl_reg);
1897 	}
1898 }
1899 
1900 void intel_edp_panel_on(struct intel_dp *intel_dp)
1901 {
1902 	if (!is_edp(intel_dp))
1903 		return;
1904 
1905 	pps_lock(intel_dp);
1906 	edp_panel_on(intel_dp);
1907 	pps_unlock(intel_dp);
1908 }
1909 
1910 
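/*
 * Turn panel power off. The caller must have forced VDD on beforehand;
 * panel power, the VDD force and the backlight are all dropped in a
 * single PP_CONTROL write, and the power domain reference taken when
 * VDD was enabled is released at the end.
 */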
1911 static void edp_panel_off(struct intel_dp *intel_dp)
1912 {
1913 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1914 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1915 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1916 	struct drm_i915_private *dev_priv = dev->dev_private;
1917 	enum intel_display_power_domain power_domain;
1918 	u32 pp;
1919 	u32 pp_ctrl_reg;
1920 
1921 	lockdep_assert_held(&dev_priv->pps_mutex);
1922 
1923 	if (!is_edp(intel_dp))
1924 		return;
1925 
1926 	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1927 		      port_name(dp_to_dig_port(intel_dp)->port));
1928 
1929 	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1930 	     port_name(dp_to_dig_port(intel_dp)->port));
1931 
1932 	pp = ironlake_get_pp_control(intel_dp);
1933 	/* We need to switch off panel power _and_ force vdd, for otherwise some
1934 	 * panels get very unhappy and cease to work. */
1935 	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1936 		EDP_BLC_ENABLE);
1937 
1938 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1939 
1940 	intel_dp->want_panel_vdd = false;
1941 
1942 	I915_WRITE(pp_ctrl_reg, pp);
1943 	POSTING_READ(pp_ctrl_reg);
1944 
1945 	intel_dp->last_power_cycle = jiffies;
1946 	wait_panel_off(intel_dp);
1947 
1948 	/* We got a reference when we enabled the VDD. */
1949 	power_domain = intel_display_port_power_domain(intel_encoder);
1950 	intel_display_power_put(dev_priv, power_domain);
1951 }
1952 
1953 void intel_edp_panel_off(struct intel_dp *intel_dp)
1954 {
1955 	if (!is_edp(intel_dp))
1956 		return;
1957 
1958 	pps_lock(intel_dp);
1959 	edp_panel_off(intel_dp);
1960 	pps_unlock(intel_dp);
1961 }
1962 
1963 /* Enable backlight in the panel power control. */
1964 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
1965 {
1966 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1967 	struct drm_device *dev = intel_dig_port->base.base.dev;
1968 	struct drm_i915_private *dev_priv = dev->dev_private;
1969 	u32 pp;
1970 	u32 pp_ctrl_reg;
1971 
1972 	/*
1973 	 * If we enable the backlight right away following a panel power
1974 	 * on, we may see slight flicker as the panel syncs with the eDP
1975 	 * link.  So delay a bit to make sure the image is solid before
1976 	 * allowing it to appear.
1977 	 */
1978 	wait_backlight_on(intel_dp);
1979 
1980 	pps_lock(intel_dp);
1981 
1982 	pp = ironlake_get_pp_control(intel_dp);
1983 	pp |= EDP_BLC_ENABLE;
1984 
1985 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1986 
1987 	I915_WRITE(pp_ctrl_reg, pp);
1988 	POSTING_READ(pp_ctrl_reg);
1989 
1990 	pps_unlock(intel_dp);
1991 }
1992 
1993 /* Enable backlight PWM and backlight PP control. */
1994 void intel_edp_backlight_on(struct intel_dp *intel_dp)
1995 {
1996 	if (!is_edp(intel_dp))
1997 		return;
1998 
1999 	DRM_DEBUG_KMS("\n");
2000 
2001 	intel_panel_enable_backlight(intel_dp->attached_connector);
2002 	_intel_edp_backlight_on(intel_dp);
2003 }
2004 
2005 /* Disable backlight in the panel power control. */
2006 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2007 {
2008 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2009 	struct drm_i915_private *dev_priv = dev->dev_private;
2010 	u32 pp;
2011 	u32 pp_ctrl_reg;
2012 
2013 	if (!is_edp(intel_dp))
2014 		return;
2015 
2016 	pps_lock(intel_dp);
2017 
2018 	pp = ironlake_get_pp_control(intel_dp);
2019 	pp &= ~EDP_BLC_ENABLE;
2020 
2021 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2022 
2023 	I915_WRITE(pp_ctrl_reg, pp);
2024 	POSTING_READ(pp_ctrl_reg);
2025 
2026 	pps_unlock(intel_dp);
2027 
2028 	intel_dp->last_backlight_off = jiffies;
2029 	edp_wait_backlight_off(intel_dp);
2030 }
2031 
2032 /* Disable backlight PP control and backlight PWM. */
2033 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2034 {
2035 	if (!is_edp(intel_dp))
2036 		return;
2037 
2038 	DRM_DEBUG_KMS("\n");
2039 
2040 	_intel_edp_backlight_off(intel_dp);
2041 	intel_panel_disable_backlight(intel_dp->attached_connector);
2042 }
2043 
2044 /*
2045  * Hook for controlling the panel power control backlight through the bl_power
2046  * sysfs attribute. Take care to handle multiple calls.
2047  */
2048 static void intel_edp_backlight_power(struct intel_connector *connector,
2049 				      bool enable)
2050 {
2051 	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2052 	bool is_enabled;
2053 
2054 	pps_lock(intel_dp);
2055 	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2056 	pps_unlock(intel_dp);
2057 
2058 	if (is_enabled == enable)
2059 		return;
2060 
2061 	DRM_DEBUG_KMS("panel power control backlight %s\n",
2062 		      enable ? "enable" : "disable");
2063 
2064 	if (enable)
2065 		_intel_edp_backlight_on(intel_dp);
2066 	else
2067 		_intel_edp_backlight_off(intel_dp);
2068 }
2069 
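/*
 * Turn on the CPU eDP PLL. The pipe must already be disabled, and the
 * WARNs sanity-check that neither the PLL nor the port is enabled yet.
 */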
2070 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2071 {
2072 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2073 	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2074 	struct drm_device *dev = crtc->dev;
2075 	struct drm_i915_private *dev_priv = dev->dev_private;
2076 	u32 dpa_ctl;
2077 
2078 	assert_pipe_disabled(dev_priv,
2079 			     to_intel_crtc(crtc)->pipe);
2080 
2081 	DRM_DEBUG_KMS("\n");
2082 	dpa_ctl = I915_READ(DP_A);
2083 	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2084 	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2085 
2086 	/* We don't adjust intel_dp->DP while tearing down the link, to
2087 	 * facilitate link retraining (e.g. after hotplug). Hence clear all
2088 	 * enable bits here to ensure that we don't enable too much. */
2089 	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2090 	intel_dp->DP |= DP_PLL_ENABLE;
2091 	I915_WRITE(DP_A, intel_dp->DP);
2092 	POSTING_READ(DP_A);
2093 	udelay(200);
2094 }
2095 
2096 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2097 {
2098 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2099 	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2100 	struct drm_device *dev = crtc->dev;
2101 	struct drm_i915_private *dev_priv = dev->dev_private;
2102 	u32 dpa_ctl;
2103 
2104 	assert_pipe_disabled(dev_priv,
2105 			     to_intel_crtc(crtc)->pipe);
2106 
2107 	dpa_ctl = I915_READ(DP_A);
2108 	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2109 	     "dp pll off, should be on\n");
2110 	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2111 
2112 	/* We can't rely on the value tracked for the DP register in
2113 	 * intel_dp->DP because link_down must not change that (otherwise link
2114 	 * re-training will fail). */
2115 	dpa_ctl &= ~DP_PLL_ENABLE;
2116 	I915_WRITE(DP_A, dpa_ctl);
2117 	POSTING_READ(DP_A);
2118 	udelay(200);
2119 }
2120 
2121 /* If the sink supports it, try to set the power state appropriately */
2122 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2123 {
2124 	int ret, i;
2125 
2126 	/* Should have a valid DPCD by this point */
2127 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2128 		return;
2129 
2130 	if (mode != DRM_MODE_DPMS_ON) {
2131 		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2132 					 DP_SET_POWER_D3);
2133 	} else {
2134 		/*
2135 		 * When turning on, we need to retry for 1ms to give the sink
2136 		 * time to wake up.
2137 		 */
2138 		for (i = 0; i < 3; i++) {
2139 			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2140 						 DP_SET_POWER_D0);
2141 			if (ret == 1)
2142 				break;
2143 			msleep(1);
2144 		}
2145 	}
2146 
2147 	if (ret != 1)
2148 		DRM_DEBUG_KMS("failed to %s sink power state\n",
2149 			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2150 }
2151 
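/*
 * Read out the current hardware state: returns true iff the port is
 * powered and enabled, and sets *pipe to the pipe driving it. On CPT
 * the pipe has to be found via the transcoder's DP port select bits.
 */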
2152 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2153 				  enum pipe *pipe)
2154 {
2155 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2156 	enum port port = dp_to_dig_port(intel_dp)->port;
2157 	struct drm_device *dev = encoder->base.dev;
2158 	struct drm_i915_private *dev_priv = dev->dev_private;
2159 	enum intel_display_power_domain power_domain;
2160 	u32 tmp;
2161 
2162 	power_domain = intel_display_port_power_domain(encoder);
2163 	if (!intel_display_power_is_enabled(dev_priv, power_domain))
2164 		return false;
2165 
2166 	tmp = I915_READ(intel_dp->output_reg);
2167 
2168 	if (!(tmp & DP_PORT_EN))
2169 		return false;
2170 
2171 	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
2172 		*pipe = PORT_TO_PIPE_CPT(tmp);
2173 	} else if (IS_CHERRYVIEW(dev)) {
2174 		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
2175 	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
2176 		*pipe = PORT_TO_PIPE(tmp);
2177 	} else {
2178 		u32 trans_sel;
2179 		u32 trans_dp;
2180 		int i;
2181 
2182 		switch (intel_dp->output_reg) {
2183 		case PCH_DP_B:
2184 			trans_sel = TRANS_DP_PORT_SEL_B;
2185 			break;
2186 		case PCH_DP_C:
2187 			trans_sel = TRANS_DP_PORT_SEL_C;
2188 			break;
2189 		case PCH_DP_D:
2190 			trans_sel = TRANS_DP_PORT_SEL_D;
2191 			break;
2192 		default:
2193 			return true;
2194 		}
2195 
2196 		for_each_pipe(dev_priv, i) {
2197 			trans_dp = I915_READ(TRANS_DP_CTL(i));
2198 			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2199 				*pipe = i;
2200 				return true;
2201 			}
2202 		}
2203 
2204 		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2205 			      intel_dp->output_reg);
2206 	}
2207 
2208 	return true;
2209 }
2210 
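/*
 * Reconstruct the pipe configuration (sync polarity, audio, color
 * range, m/n values, port clock and dotclock) from the hardware
 * registers.
 */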
2211 static void intel_dp_get_config(struct intel_encoder *encoder,
2212 				struct intel_crtc_state *pipe_config)
2213 {
2214 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2215 	u32 tmp, flags = 0;
2216 	struct drm_device *dev = encoder->base.dev;
2217 	struct drm_i915_private *dev_priv = dev->dev_private;
2218 	enum port port = dp_to_dig_port(intel_dp)->port;
2219 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2220 	int dotclock;
2221 
2222 	tmp = I915_READ(intel_dp->output_reg);
2223 
2224 	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2225 
2226 	if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
2227 		if (tmp & DP_SYNC_HS_HIGH)
2228 			flags |= DRM_MODE_FLAG_PHSYNC;
2229 		else
2230 			flags |= DRM_MODE_FLAG_NHSYNC;
2231 
2232 		if (tmp & DP_SYNC_VS_HIGH)
2233 			flags |= DRM_MODE_FLAG_PVSYNC;
2234 		else
2235 			flags |= DRM_MODE_FLAG_NVSYNC;
2236 	} else {
2237 		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2238 		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2239 			flags |= DRM_MODE_FLAG_PHSYNC;
2240 		else
2241 			flags |= DRM_MODE_FLAG_NHSYNC;
2242 
2243 		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2244 			flags |= DRM_MODE_FLAG_PVSYNC;
2245 		else
2246 			flags |= DRM_MODE_FLAG_NVSYNC;
2247 	}
2248 
2249 	pipe_config->base.adjusted_mode.flags |= flags;
2250 
2251 	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2252 	    tmp & DP_COLOR_RANGE_16_235)
2253 		pipe_config->limited_color_range = true;
2254 
2255 	pipe_config->has_dp_encoder = true;
2256 
2257 	intel_dp_get_m_n(crtc, pipe_config);
2258 
2259 	if (port == PORT_A) {
2260 		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2261 			pipe_config->port_clock = 162000;
2262 		else
2263 			pipe_config->port_clock = 270000;
2264 	}
2265 
2266 	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2267 					    &pipe_config->dp_m_n);
2268 
2269 	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2270 		ironlake_check_encoder_dotclock(pipe_config, dotclock);
2271 
2272 	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2273 
2274 	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2275 	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2276 		/*
2277 		 * This is a big fat ugly hack.
2278 		 *
2279 		 * Some machines in UEFI boot mode provide us a VBT that has 18
2280 		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2281 		 * unknown we fail to light up. Yet the same BIOS boots up with
2282 		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2283 		 * max, not what it tells us to use.
2284 		 *
2285 		 * Note: This will still be broken if the eDP panel is not lit
2286 		 * up by the BIOS, and thus we can't get the mode at module
2287 		 * load.
2288 		 */
2289 		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2290 			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2291 		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2292 	}
2293 }
2294 
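/*
 * Disable sequence common to all platforms: audio and PSR first, then
 * the eDP backlight/sink/panel (with VDD forced on across the panel
 * off), and on gen4 and earlier also the port, which there must go
 * down before the pipe.
 */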
2295 static void intel_disable_dp(struct intel_encoder *encoder)
2296 {
2297 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2298 	struct drm_device *dev = encoder->base.dev;
2299 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2300 
2301 	if (crtc->config->has_audio)
2302 		intel_audio_codec_disable(encoder);
2303 
2304 	if (HAS_PSR(dev) && !HAS_DDI(dev))
2305 		intel_psr_disable(intel_dp);
2306 
2307 	/* Make sure the panel is off before trying to change the mode. But also
2308 	 * ensure that we have vdd while we switch off the panel. */
2309 	intel_edp_panel_vdd_on(intel_dp);
2310 	intel_edp_backlight_off(intel_dp);
2311 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2312 	intel_edp_panel_off(intel_dp);
2313 
2314 	/* disable the port before the pipe on g4x */
2315 	if (INTEL_INFO(dev)->gen < 5)
2316 		intel_dp_link_down(intel_dp);
2317 }
2318 
2319 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2320 {
2321 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2322 	enum port port = dp_to_dig_port(intel_dp)->port;
2323 
2324 	intel_dp_link_down(intel_dp);
2325 	if (port == PORT_A)
2326 		ironlake_edp_pll_off(intel_dp);
2327 }
2328 
2329 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2330 {
2331 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2332 
2333 	intel_dp_link_down(intel_dp);
2334 }
2335 
2336 static void chv_post_disable_dp(struct intel_encoder *encoder)
2337 {
2338 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2339 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2340 	struct drm_device *dev = encoder->base.dev;
2341 	struct drm_i915_private *dev_priv = dev->dev_private;
2342 	struct intel_crtc *intel_crtc =
2343 		to_intel_crtc(encoder->base.crtc);
2344 	enum dpio_channel ch = vlv_dport_to_channel(dport);
2345 	enum pipe pipe = intel_crtc->pipe;
2346 	u32 val;
2347 
2348 	intel_dp_link_down(intel_dp);
2349 
2350 	mutex_lock(&dev_priv->dpio_lock);
2351 
2352 	/* Propagate soft reset to data lane reset */
2353 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2354 	val |= CHV_PCS_REQ_SOFTRESET_EN;
2355 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2356 
2357 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2358 	val |= CHV_PCS_REQ_SOFTRESET_EN;
2359 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2360 
2361 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2362 	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2363 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2364 
2365 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2366 	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2367 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2368 
2369 	mutex_unlock(&dev_priv->dpio_lock);
2370 }
2371 
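/*
 * Program the requested training pattern (and scrambling control) into
 * the port register image *DP, or directly into DP_TP_CTL on DDI
 * platforms.
 */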
2372 static void
2373 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2374 			 uint32_t *DP,
2375 			 uint8_t dp_train_pat)
2376 {
2377 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2378 	struct drm_device *dev = intel_dig_port->base.base.dev;
2379 	struct drm_i915_private *dev_priv = dev->dev_private;
2380 	enum port port = intel_dig_port->port;
2381 
2382 	if (HAS_DDI(dev)) {
2383 		uint32_t temp = I915_READ(DP_TP_CTL(port));
2384 
2385 		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2386 			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2387 		else
2388 			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2389 
2390 		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2391 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2392 		case DP_TRAINING_PATTERN_DISABLE:
2393 			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2394 
2395 			break;
2396 		case DP_TRAINING_PATTERN_1:
2397 			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2398 			break;
2399 		case DP_TRAINING_PATTERN_2:
2400 			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2401 			break;
2402 		case DP_TRAINING_PATTERN_3:
2403 			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2404 			break;
2405 		}
2406 		I915_WRITE(DP_TP_CTL(port), temp);
2407 
2408 	} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2409 		*DP &= ~DP_LINK_TRAIN_MASK_CPT;
2410 
2411 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2412 		case DP_TRAINING_PATTERN_DISABLE:
2413 			*DP |= DP_LINK_TRAIN_OFF_CPT;
2414 			break;
2415 		case DP_TRAINING_PATTERN_1:
2416 			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
2417 			break;
2418 		case DP_TRAINING_PATTERN_2:
2419 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
2420 			break;
2421 		case DP_TRAINING_PATTERN_3:
2422 			DRM_ERROR("DP training pattern 3 not supported\n");
2423 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
2424 			break;
2425 		}
2426 
2427 	} else {
2428 		if (IS_CHERRYVIEW(dev))
2429 			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
2430 		else
2431 			*DP &= ~DP_LINK_TRAIN_MASK;
2432 
2433 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2434 		case DP_TRAINING_PATTERN_DISABLE:
2435 			*DP |= DP_LINK_TRAIN_OFF;
2436 			break;
2437 		case DP_TRAINING_PATTERN_1:
2438 			*DP |= DP_LINK_TRAIN_PAT_1;
2439 			break;
2440 		case DP_TRAINING_PATTERN_2:
2441 			*DP |= DP_LINK_TRAIN_PAT_2;
2442 			break;
2443 		case DP_TRAINING_PATTERN_3:
2444 			if (IS_CHERRYVIEW(dev)) {
2445 				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
2446 			} else {
2447 				DRM_ERROR("DP training pattern 3 not supported\n");
2448 				*DP |= DP_LINK_TRAIN_PAT_2;
2449 			}
2450 			break;
2451 		}
2452 	}
2453 }
2454 
2455 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2456 {
2457 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2458 	struct drm_i915_private *dev_priv = dev->dev_private;
2459 
2460 	/* enable with pattern 1 (as per spec) */
2461 	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2462 				 DP_TRAINING_PATTERN_1);
2463 
2464 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2465 	POSTING_READ(intel_dp->output_reg);
2466 
2467 	/*
2468 	 * Magic for VLV/CHV. We _must_ first set up the register
2469 	 * without actually enabling the port, and then do another
2470 	 * write to enable the port. Otherwise link training will
2471 	 * fail when the power sequencer is freshly used for this port.
2472 	 */
2473 	intel_dp->DP |= DP_PORT_EN;
2474 
2475 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2476 	POSTING_READ(intel_dp->output_reg);
2477 }
2478 
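/*
 * Common enable path: bring up the port (plus the power sequencer on
 * VLV/CHV), run the eDP power-on sequence under pps_mutex, then link
 * train and finally enable audio if the mode carries it.
 */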
2479 static void intel_enable_dp(struct intel_encoder *encoder)
2480 {
2481 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2482 	struct drm_device *dev = encoder->base.dev;
2483 	struct drm_i915_private *dev_priv = dev->dev_private;
2484 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2485 	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2486 
2487 	if (WARN_ON(dp_reg & DP_PORT_EN))
2488 		return;
2489 
2490 	pps_lock(intel_dp);
2491 
2492 	if (IS_VALLEYVIEW(dev))
2493 		vlv_init_panel_power_sequencer(intel_dp);
2494 
2495 	intel_dp_enable_port(intel_dp);
2496 
2497 	edp_panel_vdd_on(intel_dp);
2498 	edp_panel_on(intel_dp);
2499 	edp_panel_vdd_off(intel_dp, true);
2500 
2501 	pps_unlock(intel_dp);
2502 
2503 	if (IS_VALLEYVIEW(dev))
2504 		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
2505 
2506 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2507 	intel_dp_start_link_train(intel_dp);
2508 	intel_dp_complete_link_train(intel_dp);
2509 	intel_dp_stop_link_train(intel_dp);
2510 
2511 	if (crtc->config->has_audio) {
2512 		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2513 				 pipe_name(crtc->pipe));
2514 		intel_audio_codec_enable(encoder);
2515 	}
2516 }
2517 
2518 static void g4x_enable_dp(struct intel_encoder *encoder)
2519 {
2520 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2521 
2522 	intel_enable_dp(encoder);
2523 	intel_edp_backlight_on(intel_dp);
2524 }
2525 
2526 static void vlv_enable_dp(struct intel_encoder *encoder)
2527 {
2528 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2529 
2530 	intel_edp_backlight_on(intel_dp);
2531 	intel_psr_enable(intel_dp);
2532 }
2533 
2534 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2535 {
2536 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2537 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2538 
2539 	intel_dp_prepare(encoder);
2540 
2541 	/* Only ilk+ has port A */
2542 	if (dport->port == PORT_A) {
2543 		ironlake_set_pll_cpu_edp(intel_dp);
2544 		ironlake_edp_pll_on(intel_dp);
2545 	}
2546 }
2547 
2548 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2549 {
2550 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2551 	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2552 	enum pipe pipe = intel_dp->pps_pipe;
2553 	int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2554 
2555 	edp_panel_vdd_off_sync(intel_dp);
2556 
2557 	/*
2558 	 * VLV seems to get confused when multiple power sequencers
2559 	 * have the same port selected (even if only one has power/vdd
2560 	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2561 	 * CHV, on the other hand, doesn't seem to mind having the same
2562 	 * port selected in multiple power sequencers, but let's always
2563 	 * clear the port select when logically disconnecting a power
2564 	 * sequencer from a port.
2565 	 */
2566 	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2567 		      pipe_name(pipe), port_name(intel_dig_port->port));
2568 	I915_WRITE(pp_on_reg, 0);
2569 	POSTING_READ(pp_on_reg);
2570 
2571 	intel_dp->pps_pipe = INVALID_PIPE;
2572 }
2573 
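/*
 * Make sure no other eDP port is still using the power sequencer of
 * @pipe before we claim it: any such port has its VDD turned off and
 * its sequencer detached.
 */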
2574 static void vlv_steal_power_sequencer(struct drm_device *dev,
2575 				      enum pipe pipe)
2576 {
2577 	struct drm_i915_private *dev_priv = dev->dev_private;
2578 	struct intel_encoder *encoder;
2579 
2580 	lockdep_assert_held(&dev_priv->pps_mutex);
2581 
2582 	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2583 		return;
2584 
2585 	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2586 			    base.head) {
2587 		struct intel_dp *intel_dp;
2588 		enum port port;
2589 
2590 		if (encoder->type != INTEL_OUTPUT_EDP)
2591 			continue;
2592 
2593 		intel_dp = enc_to_intel_dp(&encoder->base);
2594 		port = dp_to_dig_port(intel_dp)->port;
2595 
2596 		if (intel_dp->pps_pipe != pipe)
2597 			continue;
2598 
2599 		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2600 			      pipe_name(pipe), port_name(port));
2601 
2602 		WARN(encoder->connectors_active,
2603 		     "stealing pipe %c power sequencer from active eDP port %c\n",
2604 		     pipe_name(pipe), port_name(port));
2605 
2606 		/* make sure vdd is off before we steal it */
2607 		vlv_detach_power_sequencer(intel_dp);
2608 	}
2609 }
2610 
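/*
 * Bind the power sequencer of the current pipe to this eDP port:
 * release whichever sequencer the port used before, steal this pipe's
 * sequencer from other ports if needed, then (re)initialize it.
 */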
2611 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2612 {
2613 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2614 	struct intel_encoder *encoder = &intel_dig_port->base;
2615 	struct drm_device *dev = encoder->base.dev;
2616 	struct drm_i915_private *dev_priv = dev->dev_private;
2617 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2618 
2619 	lockdep_assert_held(&dev_priv->pps_mutex);
2620 
2621 	if (!is_edp(intel_dp))
2622 		return;
2623 
2624 	if (intel_dp->pps_pipe == crtc->pipe)
2625 		return;
2626 
2627 	/*
2628 	 * If another power sequencer was being used on this
2629 	 * port previously make sure to turn off vdd there while
2630 	 * we still have control of it.
2631 	 */
2632 	if (intel_dp->pps_pipe != INVALID_PIPE)
2633 		vlv_detach_power_sequencer(intel_dp);
2634 
2635 	/*
2636 	 * We may be stealing the power
2637 	 * sequencer from another port.
2638 	 */
2639 	vlv_steal_power_sequencer(dev, crtc->pipe);
2640 
2641 	/* now it's all ours */
2642 	intel_dp->pps_pipe = crtc->pipe;
2643 
2644 	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2645 		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2646 
2647 	/* init power sequencer on this pipe and port */
2648 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
2649 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2650 }
2651 
2652 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2653 {
2654 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2655 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2656 	struct drm_device *dev = encoder->base.dev;
2657 	struct drm_i915_private *dev_priv = dev->dev_private;
2658 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2659 	enum dpio_channel port = vlv_dport_to_channel(dport);
2660 	int pipe = intel_crtc->pipe;
2661 	u32 val;
2662 
2663 	mutex_lock(&dev_priv->dpio_lock);
2664 
2665 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2666 	val = 0;
2667 	if (pipe)
2668 		val |= (1<<21);
2669 	else
2670 		val &= ~(1<<21);
2671 	val |= 0x001000c4;
2672 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2673 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2674 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2675 
2676 	mutex_unlock(&dev_priv->dpio_lock);
2677 
2678 	intel_enable_dp(encoder);
2679 }
2680 
2681 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2682 {
2683 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2684 	struct drm_device *dev = encoder->base.dev;
2685 	struct drm_i915_private *dev_priv = dev->dev_private;
2686 	struct intel_crtc *intel_crtc =
2687 		to_intel_crtc(encoder->base.crtc);
2688 	enum dpio_channel port = vlv_dport_to_channel(dport);
2689 	int pipe = intel_crtc->pipe;
2690 
2691 	intel_dp_prepare(encoder);
2692 
2693 	/* Program Tx lane resets to default */
2694 	mutex_lock(&dev_priv->dpio_lock);
2695 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2696 			 DPIO_PCS_TX_LANE2_RESET |
2697 			 DPIO_PCS_TX_LANE1_RESET);
2698 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2699 			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2700 			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2701 			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2702 				 DPIO_PCS_CLK_SOFT_RESET);
2703 
2704 	/* Fix up inter-pair skew failure */
2705 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2706 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2707 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2708 	mutex_unlock(&dev_priv->dpio_lock);
2709 }
2710 
2711 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2712 {
2713 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2714 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2715 	struct drm_device *dev = encoder->base.dev;
2716 	struct drm_i915_private *dev_priv = dev->dev_private;
2717 	struct intel_crtc *intel_crtc =
2718 		to_intel_crtc(encoder->base.crtc);
2719 	enum dpio_channel ch = vlv_dport_to_channel(dport);
2720 	int pipe = intel_crtc->pipe;
2721 	int data, i;
2722 	u32 val;
2723 
2724 	mutex_lock(&dev_priv->dpio_lock);
2725 
2726 	/* allow hardware to manage TX FIFO reset source */
2727 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2728 	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2729 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2730 
2731 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2732 	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2733 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2734 
2735 	/* Deassert soft data lane reset */
2736 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2737 	val |= CHV_PCS_REQ_SOFTRESET_EN;
2738 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2739 
2740 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2741 	val |= CHV_PCS_REQ_SOFTRESET_EN;
2742 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2743 
2744 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2745 	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2746 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2747 
2748 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2749 	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2750 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2751 
2752 	/* Program Tx lane latency optimal setting */
2753 	for (i = 0; i < 4; i++) {
2754 		/* Set the upar bit */
2755 		data = (i == 1) ? 0x0 : 0x1;
2756 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2757 				data << DPIO_UPAR_SHIFT);
2758 	}
2759 
2760 	/* Data lane stagger programming */
2761 	/* FIXME: Fix up value only after power analysis */
2762 
2763 	mutex_unlock(&dev_priv->dpio_lock);
2764 
2765 	intel_enable_dp(encoder);
2766 }
2767 
2768 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2769 {
2770 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2771 	struct drm_device *dev = encoder->base.dev;
2772 	struct drm_i915_private *dev_priv = dev->dev_private;
2773 	struct intel_crtc *intel_crtc =
2774 		to_intel_crtc(encoder->base.crtc);
2775 	enum dpio_channel ch = vlv_dport_to_channel(dport);
2776 	enum pipe pipe = intel_crtc->pipe;
2777 	u32 val;
2778 
2779 	intel_dp_prepare(encoder);
2780 
2781 	mutex_lock(&dev_priv->dpio_lock);
2782 
2783 	/* program left/right clock distribution */
2784 	if (pipe != PIPE_B) {
2785 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2786 		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2787 		if (ch == DPIO_CH0)
2788 			val |= CHV_BUFLEFTENA1_FORCE;
2789 		if (ch == DPIO_CH1)
2790 			val |= CHV_BUFRIGHTENA1_FORCE;
2791 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2792 	} else {
2793 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2794 		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2795 		if (ch == DPIO_CH0)
2796 			val |= CHV_BUFLEFTENA2_FORCE;
2797 		if (ch == DPIO_CH1)
2798 			val |= CHV_BUFRIGHTENA2_FORCE;
2799 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2800 	}
2801 
2802 	/* program clock channel usage */
2803 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2804 	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2805 	if (pipe != PIPE_B)
2806 		val &= ~CHV_PCS_USEDCLKCHANNEL;
2807 	else
2808 		val |= CHV_PCS_USEDCLKCHANNEL;
2809 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2810 
2811 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2812 	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2813 	if (pipe != PIPE_B)
2814 		val &= ~CHV_PCS_USEDCLKCHANNEL;
2815 	else
2816 		val |= CHV_PCS_USEDCLKCHANNEL;
2817 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2818 
2819 	/*
2820 	 * This is a bit weird since generally CL
2821 	 * matches the pipe, but here we need to
2822 	 * pick the CL based on the port.
2823 	 */
2824 	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2825 	if (pipe != PIPE_B)
2826 		val &= ~CHV_CMN_USEDCLKCHANNEL;
2827 	else
2828 		val |= CHV_CMN_USEDCLKCHANNEL;
2829 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2830 
2831 	mutex_unlock(&dev_priv->dpio_lock);
2832 }
2833 
2834 /*
2835  * Native read with retry for link status and receiver capability reads for
2836  * cases where the sink may still be asleep.
2837  *
2838  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2839  * supposed to retry 3 times per the spec.
2840  */
2841 static ssize_t
2842 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2843 			void *buffer, size_t size)
2844 {
2845 	ssize_t ret;
2846 	int i;
2847 
2848 	/*
2849 	 * Sometimes we just get the same incorrect byte repeated
2850 	 * over the entire buffer. Doing just one throw-away read
2851 	 * initially seems to "solve" it.
2852 	 */
2853 	drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2854 
2855 	for (i = 0; i < 3; i++) {
2856 		ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2857 		if (ret == size)
2858 			return ret;
2859 		msleep(1);
2860 	}
2861 
2862 	return ret;
2863 }
2864 
2865 /*
2866  * Fetch AUX CH registers 0x202 - 0x207 which contain
2867  * link status information
2868  */
2869 static bool
2870 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2871 {
2872 	return intel_dp_dpcd_read_wake(&intel_dp->aux,
2873 				       DP_LANE0_1_STATUS,
2874 				       link_status,
2875 				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2876 }
2877 
2878 /* These are source-specific values. */
2879 static uint8_t
2880 intel_dp_voltage_max(struct intel_dp *intel_dp)
2881 {
2882 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2883 	struct drm_i915_private *dev_priv = dev->dev_private;
2884 	enum port port = dp_to_dig_port(intel_dp)->port;
2885 
2886 	if (INTEL_INFO(dev)->gen >= 9) {
2887 		if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2888 			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2889 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2890 	} else if (IS_VALLEYVIEW(dev))
2891 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2892 	else if (IS_GEN7(dev) && port == PORT_A)
2893 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2894 	else if (HAS_PCH_CPT(dev) && port != PORT_A)
2895 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2896 	else
2897 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2898 }
2899 
2900 static uint8_t
2901 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2902 {
2903 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2904 	enum port port = dp_to_dig_port(intel_dp)->port;
2905 
2906 	if (INTEL_INFO(dev)->gen >= 9) {
2907 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2908 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2909 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
2910 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2911 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
2912 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2913 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
2914 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2915 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
2916 		default:
2917 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
2918 		}
2919 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2920 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2921 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2922 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
2923 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2924 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
2925 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2926 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
2927 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2928 		default:
2929 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
2930 		}
2931 	} else if (IS_VALLEYVIEW(dev)) {
2932 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2933 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2934 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
2935 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2936 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
2937 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2938 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
2939 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2940 		default:
2941 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
2942 		}
2943 	} else if (IS_GEN7(dev) && port == PORT_A) {
2944 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2945 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2946 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
2947 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2948 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2949 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
2950 		default:
2951 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
2952 		}
2953 	} else {
2954 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2955 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2956 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
2957 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2958 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
2959 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2960 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
2961 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2962 		default:
2963 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
2964 		}
2965 	}
2966 }
2967 
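/*
 * Translate the negotiated vswing/pre-emphasis in train_set[0] into
 * VLV DPIO values and program the PHY. Returns 0: on VLV the drive
 * levels live entirely in the PHY, so no port register bits are
 * reported back.
 */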
2968 static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2969 {
2970 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2971 	struct drm_i915_private *dev_priv = dev->dev_private;
2972 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2973 	struct intel_crtc *intel_crtc =
2974 		to_intel_crtc(dport->base.base.crtc);
2975 	unsigned long demph_reg_value, preemph_reg_value,
2976 		uniqtranscale_reg_value;
2977 	uint8_t train_set = intel_dp->train_set[0];
2978 	enum dpio_channel port = vlv_dport_to_channel(dport);
2979 	int pipe = intel_crtc->pipe;
2980 
2981 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2982 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
2983 		preemph_reg_value = 0x0004000;
2984 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2985 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2986 			demph_reg_value = 0x2B405555;
2987 			uniqtranscale_reg_value = 0x552AB83A;
2988 			break;
2989 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2990 			demph_reg_value = 0x2B404040;
2991 			uniqtranscale_reg_value = 0x5548B83A;
2992 			break;
2993 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2994 			demph_reg_value = 0x2B245555;
2995 			uniqtranscale_reg_value = 0x5560B83A;
2996 			break;
2997 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2998 			demph_reg_value = 0x2B405555;
2999 			uniqtranscale_reg_value = 0x5598DA3A;
3000 			break;
3001 		default:
3002 			return 0;
3003 		}
3004 		break;
3005 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3006 		preemph_reg_value = 0x0002000;
3007 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3008 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3009 			demph_reg_value = 0x2B404040;
3010 			uniqtranscale_reg_value = 0x5552B83A;
3011 			break;
3012 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3013 			demph_reg_value = 0x2B404848;
3014 			uniqtranscale_reg_value = 0x5580B83A;
3015 			break;
3016 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3017 			demph_reg_value = 0x2B404040;
3018 			uniqtranscale_reg_value = 0x55ADDA3A;
3019 			break;
3020 		default:
3021 			return 0;
3022 		}
3023 		break;
3024 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3025 		preemph_reg_value = 0x0000000;
3026 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3027 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3028 			demph_reg_value = 0x2B305555;
3029 			uniqtranscale_reg_value = 0x5570B83A;
3030 			break;
3031 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3032 			demph_reg_value = 0x2B2B4040;
3033 			uniqtranscale_reg_value = 0x55ADDA3A;
3034 			break;
3035 		default:
3036 			return 0;
3037 		}
3038 		break;
3039 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3040 		preemph_reg_value = 0x0006000;
3041 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3042 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3043 			demph_reg_value = 0x1B405555;
3044 			uniqtranscale_reg_value = 0x55ADDA3A;
3045 			break;
3046 		default:
3047 			return 0;
3048 		}
3049 		break;
3050 	default:
3051 		return 0;
3052 	}
3053 
3054 	mutex_lock(&dev_priv->dpio_lock);
3055 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3056 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3057 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3058 			 uniqtranscale_reg_value);
3059 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3060 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3061 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3062 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3063 	mutex_unlock(&dev_priv->dpio_lock);
3064 
3065 	return 0;
3066 }
3067 
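/*
 * CHV counterpart of intel_vlv_signal_levels(): program deemphasis and
 * swing margin for all four lanes and kick off the swing calculation.
 * Also returns 0 for the same reason.
 */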
3068 static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3069 {
3070 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3071 	struct drm_i915_private *dev_priv = dev->dev_private;
3072 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3073 	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3074 	u32 deemph_reg_value, margin_reg_value, val;
3075 	uint8_t train_set = intel_dp->train_set[0];
3076 	enum dpio_channel ch = vlv_dport_to_channel(dport);
3077 	enum pipe pipe = intel_crtc->pipe;
3078 	int i;
3079 
3080 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3081 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3082 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3083 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3084 			deemph_reg_value = 128;
3085 			margin_reg_value = 52;
3086 			break;
3087 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3088 			deemph_reg_value = 128;
3089 			margin_reg_value = 77;
3090 			break;
3091 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3092 			deemph_reg_value = 128;
3093 			margin_reg_value = 102;
3094 			break;
3095 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3096 			deemph_reg_value = 128;
3097 			margin_reg_value = 154;
3098 			/* FIXME extra to set for 1200 */
3099 			break;
3100 		default:
3101 			return 0;
3102 		}
3103 		break;
3104 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3105 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3106 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3107 			deemph_reg_value = 85;
3108 			margin_reg_value = 78;
3109 			break;
3110 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3111 			deemph_reg_value = 85;
3112 			margin_reg_value = 116;
3113 			break;
3114 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3115 			deemph_reg_value = 85;
3116 			margin_reg_value = 154;
3117 			break;
3118 		default:
3119 			return 0;
3120 		}
3121 		break;
3122 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3123 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3124 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3125 			deemph_reg_value = 64;
3126 			margin_reg_value = 104;
3127 			break;
3128 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3129 			deemph_reg_value = 64;
3130 			margin_reg_value = 154;
3131 			break;
3132 		default:
3133 			return 0;
3134 		}
3135 		break;
3136 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3137 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3138 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3139 			deemph_reg_value = 43;
3140 			margin_reg_value = 154;
3141 			break;
3142 		default:
3143 			return 0;
3144 		}
3145 		break;
3146 	default:
3147 		return 0;
3148 	}
3149 
3150 	mutex_lock(&dev_priv->dpio_lock);
3151 
3152 	/* Clear calc init */
3153 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3154 	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3155 	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3156 	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3157 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3158 
3159 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3160 	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3161 	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3162 	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3163 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3164 
3165 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3166 	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3167 	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3168 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3169 
3170 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3171 	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3172 	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3173 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3174 
3175 	/* Program swing deemph */
3176 	for (i = 0; i < 4; i++) {
3177 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3178 		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3179 		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3180 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3181 	}
3182 
3183 	/* Program swing margin */
3184 	for (i = 0; i < 4; i++) {
3185 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3186 		val &= ~DPIO_SWING_MARGIN000_MASK;
3187 		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3188 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3189 	}
3190 
3191 	/* Disable unique transition scale */
3192 	for (i = 0; i < 4; i++) {
3193 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3194 		val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3195 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3196 	}
3197 
3198 	if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3199 			== DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3200 		((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3201 			== DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3202 
3203 		/*
3204 		 * The document says bit 27 needs to be set for ch0 and bit 26
3205 		 * for ch1, which might be a typo in the doc.
3206 		 * For now, for this unique transition scale selection, set bit
3207 		 * 27 for ch0 and ch1.
3208 		 */
3209 		for (i = 0; i < 4; i++) {
3210 			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3211 			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3212 			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3213 		}
3214 
3215 		for (i = 0; i < 4; i++) {
3216 			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3217 			val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3218 			val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3219 			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3220 		}
3221 	}
3222 
3223 	/* Start swing calculation */
3224 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3225 	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3226 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3227 
3228 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3229 	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3230 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3231 
3232 	/* LRC Bypass */
3233 	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3234 	val |= DPIO_LRC_BYPASS;
3235 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3236 
3237 	mutex_unlock(&dev_priv->dpio_lock);
3238 
3239 	return 0;
3240 }
3241 
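/*
 * Compute the next train_set from the sink's per-lane adjustment
 * requests: take the highest vswing and pre-emphasis requested on any
 * lane, clamp both to the source's maxima, and set the
 * DP_TRAIN_MAX_*_REACHED flags when a limit is hit.
 */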
3242 static void
3243 intel_get_adjust_train(struct intel_dp *intel_dp,
3244 		       const uint8_t link_status[DP_LINK_STATUS_SIZE])
3245 {
3246 	uint8_t v = 0;
3247 	uint8_t p = 0;
3248 	int lane;
3249 	uint8_t voltage_max;
3250 	uint8_t preemph_max;
3251 
3252 	for (lane = 0; lane < intel_dp->lane_count; lane++) {
3253 		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3254 		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3255 
3256 		if (this_v > v)
3257 			v = this_v;
3258 		if (this_p > p)
3259 			p = this_p;
3260 	}
3261 
3262 	voltage_max = intel_dp_voltage_max(intel_dp);
3263 	if (v >= voltage_max)
3264 		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3265 
3266 	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3267 	if (p >= preemph_max)
3268 		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3269 
3270 	for (lane = 0; lane < 4; lane++)
3271 		intel_dp->train_set[lane] = v | p;
3272 }
3273 
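/*
 * On gen4 the voltage swing and pre-emphasis are independent bitfields in the
 * DP port register, so the two switch statements below simply OR the selected
 * values together (e.g. swing level 1 with pre-emphasis level 1 yields
 * DP_VOLTAGE_0_6 | DP_PRE_EMPHASIS_3_5).
 */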
3274 static uint32_t
3275 intel_gen4_signal_levels(uint8_t train_set)
3276 {
3277 	uint32_t	signal_levels = 0;
3278 
3279 	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3280 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3281 	default:
3282 		signal_levels |= DP_VOLTAGE_0_4;
3283 		break;
3284 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3285 		signal_levels |= DP_VOLTAGE_0_6;
3286 		break;
3287 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3288 		signal_levels |= DP_VOLTAGE_0_8;
3289 		break;
3290 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3291 		signal_levels |= DP_VOLTAGE_1_2;
3292 		break;
3293 	}
3294 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3295 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3296 	default:
3297 		signal_levels |= DP_PRE_EMPHASIS_0;
3298 		break;
3299 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3300 		signal_levels |= DP_PRE_EMPHASIS_3_5;
3301 		break;
3302 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3303 		signal_levels |= DP_PRE_EMPHASIS_6;
3304 		break;
3305 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3306 		signal_levels |= DP_PRE_EMPHASIS_9_5;
3307 		break;
3308 	}
3309 	return signal_levels;
3310 }
3311 
3312 /* Gen6's DP voltage swing and pre-emphasis control */
3313 static uint32_t
3314 intel_gen6_edp_signal_levels(uint8_t train_set)
3315 {
3316 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3317 					 DP_TRAIN_PRE_EMPHASIS_MASK);
3318 	switch (signal_levels) {
3319 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3320 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3321 		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3322 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3323 		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3324 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3325 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3326 		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3327 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3328 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3329 		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3330 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3331 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3332 		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3333 	default:
3334 		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3335 			      "0x%x\n", signal_levels);
3336 		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3337 	}
3338 }
3339 
3340 /* Gen7's DP voltage swing and pre-emphasis control */
3341 static uint32_t
3342 intel_gen7_edp_signal_levels(uint8_t train_set)
3343 {
3344 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3345 					 DP_TRAIN_PRE_EMPHASIS_MASK);
3346 	switch (signal_levels) {
3347 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3348 		return EDP_LINK_TRAIN_400MV_0DB_IVB;
3349 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3350 		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3351 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3352 		return EDP_LINK_TRAIN_400MV_6DB_IVB;
3353 
3354 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3355 		return EDP_LINK_TRAIN_600MV_0DB_IVB;
3356 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3357 		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3358 
3359 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3360 		return EDP_LINK_TRAIN_800MV_0DB_IVB;
3361 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3362 		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3363 
3364 	default:
3365 		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3366 			      "0x%x\n", signal_levels);
3367 		return EDP_LINK_TRAIN_500MV_0DB_IVB;
3368 	}
3369 }
3370 
3371 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3372 static uint32_t
3373 intel_hsw_signal_levels(uint8_t train_set)
3374 {
3375 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3376 					 DP_TRAIN_PRE_EMPHASIS_MASK);
3377 	switch (signal_levels) {
3378 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3379 		return DDI_BUF_TRANS_SELECT(0);
3380 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3381 		return DDI_BUF_TRANS_SELECT(1);
3382 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3383 		return DDI_BUF_TRANS_SELECT(2);
3384 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3385 		return DDI_BUF_TRANS_SELECT(3);
3386 
3387 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3388 		return DDI_BUF_TRANS_SELECT(4);
3389 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3390 		return DDI_BUF_TRANS_SELECT(5);
3391 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3392 		return DDI_BUF_TRANS_SELECT(6);
3393 
3394 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3395 		return DDI_BUF_TRANS_SELECT(7);
3396 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3397 		return DDI_BUF_TRANS_SELECT(8);
3398 
3399 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3400 		return DDI_BUF_TRANS_SELECT(9);
3401 	default:
3402 		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3403 			      "0x%x\n", signal_levels);
3404 		return DDI_BUF_TRANS_SELECT(0);
3405 	}
3406 }
3407 
3408 /* Properly updates "DP" with the correct signal levels. */
3409 static void
3410 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3411 {
3412 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3413 	enum port port = intel_dig_port->port;
3414 	struct drm_device *dev = intel_dig_port->base.base.dev;
3415 	uint32_t signal_levels, mask;
3416 	uint8_t train_set = intel_dp->train_set[0];
3417 
3418 	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
3419 		signal_levels = intel_hsw_signal_levels(train_set);
3420 		mask = DDI_BUF_EMP_MASK;
3421 	} else if (IS_CHERRYVIEW(dev)) {
3422 		signal_levels = intel_chv_signal_levels(intel_dp);
3423 		mask = 0;
3424 	} else if (IS_VALLEYVIEW(dev)) {
3425 		signal_levels = intel_vlv_signal_levels(intel_dp);
3426 		mask = 0;
3427 	} else if (IS_GEN7(dev) && port == PORT_A) {
3428 		signal_levels = intel_gen7_edp_signal_levels(train_set);
3429 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3430 	} else if (IS_GEN6(dev) && port == PORT_A) {
3431 		signal_levels = intel_gen6_edp_signal_levels(train_set);
3432 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3433 	} else {
3434 		signal_levels = intel_gen4_signal_levels(train_set);
3435 		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3436 	}
3437 
3438 	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3439 
3440 	*DP = (*DP & ~mask) | signal_levels;
3441 }
3442 
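/*
 * Program the new training pattern on the source, then mirror it to the sink
 * in a single AUX burst: buf[0] carries the pattern for
 * DP_TRAINING_PATTERN_SET and, except on disable, buf[1..] carries one
 * DP_TRAINING_LANEx_SET byte per enabled lane.
 */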
3443 static bool
3444 intel_dp_set_link_train(struct intel_dp *intel_dp,
3445 			uint32_t *DP,
3446 			uint8_t dp_train_pat)
3447 {
3448 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3449 	struct drm_device *dev = intel_dig_port->base.base.dev;
3450 	struct drm_i915_private *dev_priv = dev->dev_private;
3451 	uint8_t buf[sizeof(intel_dp->train_set) + 1];
3452 	int ret, len;
3453 
3454 	_intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3455 
3456 	I915_WRITE(intel_dp->output_reg, *DP);
3457 	POSTING_READ(intel_dp->output_reg);
3458 
3459 	buf[0] = dp_train_pat;
3460 	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3461 	    DP_TRAINING_PATTERN_DISABLE) {
3462 		/* don't write DP_TRAINING_LANEx_SET on disable */
3463 		len = 1;
3464 	} else {
3465 		/* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
3466 		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3467 		len = intel_dp->lane_count + 1;
3468 	}
3469 
3470 	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3471 				buf, len);
3472 
3473 	return ret == len;
3474 }
3475 
3476 static bool
3477 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3478 			uint8_t dp_train_pat)
3479 {
3480 	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3481 	intel_dp_set_signal_levels(intel_dp, DP);
3482 	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3483 }
3484 
3485 static bool
3486 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3487 			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
3488 {
3489 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3490 	struct drm_device *dev = intel_dig_port->base.base.dev;
3491 	struct drm_i915_private *dev_priv = dev->dev_private;
3492 	int ret;
3493 
3494 	intel_get_adjust_train(intel_dp, link_status);
3495 	intel_dp_set_signal_levels(intel_dp, DP);
3496 
3497 	I915_WRITE(intel_dp->output_reg, *DP);
3498 	POSTING_READ(intel_dp->output_reg);
3499 
3500 	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3501 				intel_dp->train_set, intel_dp->lane_count);
3502 
3503 	return ret == intel_dp->lane_count;
3504 }
3505 
3506 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3507 {
3508 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3509 	struct drm_device *dev = intel_dig_port->base.base.dev;
3510 	struct drm_i915_private *dev_priv = dev->dev_private;
3511 	enum port port = intel_dig_port->port;
3512 	uint32_t val;
3513 
3514 	if (!HAS_DDI(dev))
3515 		return;
3516 
3517 	val = I915_READ(DP_TP_CTL(port));
3518 	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3519 	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3520 	I915_WRITE(DP_TP_CTL(port), val);
3521 
3522 	/*
3523 	 * On PORT_A we can have only eDP in SST mode. There the only reason
3524 	 * we need to set idle transmission mode is to work around a HW issue
3525 	 * where we enable the pipe while not in idle link-training mode.
3526 	 * In this case there is a requirement to wait for a minimum number of
3527 	 * idle patterns to be sent.
3528 	 */
3529 	if (port == PORT_A)
3530 		return;
3531 
3532 	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3533 		     1))
3534 		DRM_ERROR("Timed out waiting for DP idle patterns\n");
3535 }
3536 
3537 /* Enable corresponding port and start training pattern 1 */
3538 void
3539 intel_dp_start_link_train(struct intel_dp *intel_dp)
3540 {
3541 	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3542 	struct drm_device *dev = encoder->dev;
3543 	int i;
3544 	uint8_t voltage;
3545 	int voltage_tries, loop_tries;
3546 	uint32_t DP = intel_dp->DP;
3547 	uint8_t link_config[2];
3548 
3549 	if (HAS_DDI(dev))
3550 		intel_ddi_prepare_link_retrain(encoder);
3551 
3552 	/* Write the link configuration data */
3553 	link_config[0] = intel_dp->link_bw;
3554 	link_config[1] = intel_dp->lane_count;
3555 	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3556 		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3557 	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3558 	if (intel_dp->num_sink_rates)
3559 		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3560 				&intel_dp->rate_select, 1);
3561 
3562 	link_config[0] = 0;
3563 	link_config[1] = DP_SET_ANSI_8B10B;
3564 	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3565 
3566 	DP |= DP_PORT_EN;
3567 
3568 	/* clock recovery */
3569 	if (!intel_dp_reset_link_train(intel_dp, &DP,
3570 				       DP_TRAINING_PATTERN_1 |
3571 				       DP_LINK_SCRAMBLING_DISABLE)) {
3572 		DRM_ERROR("failed to enable link training\n");
3573 		return;
3574 	}
3575 
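	/*
	 * Clock recovery loop: keep applying the drive settings requested by
	 * the sink until every lane locks, giving up after 5 tries at the
	 * same voltage swing or 5 full resets with all lanes already at
	 * maximum swing.
	 */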
3576 	voltage = 0xff;
3577 	voltage_tries = 0;
3578 	loop_tries = 0;
3579 	for (;;) {
3580 		uint8_t link_status[DP_LINK_STATUS_SIZE];
3581 
3582 		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3583 		if (!intel_dp_get_link_status(intel_dp, link_status)) {
3584 			DRM_ERROR("failed to get link status\n");
3585 			break;
3586 		}
3587 
3588 		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3589 			DRM_DEBUG_KMS("clock recovery OK\n");
3590 			break;
3591 		}
3592 
3593 		/* Check to see if we've tried the max voltage */
3594 		for (i = 0; i < intel_dp->lane_count; i++)
3595 			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3596 				break;
3597 		if (i == intel_dp->lane_count) {
3598 			++loop_tries;
3599 			if (loop_tries == 5) {
3600 				DRM_ERROR("too many full retries, give up\n");
3601 				break;
3602 			}
3603 			intel_dp_reset_link_train(intel_dp, &DP,
3604 						  DP_TRAINING_PATTERN_1 |
3605 						  DP_LINK_SCRAMBLING_DISABLE);
3606 			voltage_tries = 0;
3607 			continue;
3608 		}
3609 
3610 		/* Check to see if we've tried the same voltage 5 times */
3611 		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3612 			++voltage_tries;
3613 			if (voltage_tries == 5) {
3614 				DRM_ERROR("too many voltage retries, give up\n");
3615 				break;
3616 			}
3617 		} else
3618 			voltage_tries = 0;
3619 		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3620 
3621 		/* Update training set as requested by target */
3622 		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3623 			DRM_ERROR("failed to update link training\n");
3624 			break;
3625 		}
3626 	}
3627 
3628 	intel_dp->DP = DP;
3629 }
3630 
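/*
 * Second phase of link training: channel equalization. The source keeps
 * transmitting pattern 2 (or 3 for HBR2/TPS3-capable sinks) and applies the
 * sink's adjust requests until all lanes report symbol lock and interlane
 * alignment, falling back to clock recovery whenever it is lost.
 */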
3631 void
3632 intel_dp_complete_link_train(struct intel_dp *intel_dp)
3633 {
3634 	bool channel_eq = false;
3635 	int tries, cr_tries;
3636 	uint32_t DP = intel_dp->DP;
3637 	uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3638 
3639 	/* Training Pattern 3 for HBR2 or 1.2 devices that support it */
3640 	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3641 		training_pattern = DP_TRAINING_PATTERN_3;
3642 
3643 	/* channel equalization */
3644 	if (!intel_dp_set_link_train(intel_dp, &DP,
3645 				     training_pattern |
3646 				     DP_LINK_SCRAMBLING_DISABLE)) {
3647 		DRM_ERROR("failed to start channel equalization\n");
3648 		return;
3649 	}
3650 
3651 	tries = 0;
3652 	cr_tries = 0;
3653 	channel_eq = false;
3654 	for (;;) {
3655 		uint8_t link_status[DP_LINK_STATUS_SIZE];
3656 
3657 		if (cr_tries > 5) {
3658 			DRM_ERROR("failed to train DP, aborting\n");
3659 			break;
3660 		}
3661 
3662 		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3663 		if (!intel_dp_get_link_status(intel_dp, link_status)) {
3664 			DRM_ERROR("failed to get link status\n");
3665 			break;
3666 		}
3667 
3668 		/* Make sure clock is still ok */
3669 		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3670 			intel_dp_start_link_train(intel_dp);
3671 			intel_dp_set_link_train(intel_dp, &DP,
3672 						training_pattern |
3673 						DP_LINK_SCRAMBLING_DISABLE);
3674 			cr_tries++;
3675 			continue;
3676 		}
3677 
3678 		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3679 			channel_eq = true;
3680 			break;
3681 		}
3682 
3683 		/* Try 5 times, then try clock recovery if that fails */
3684 		if (tries > 5) {
3685 			intel_dp_start_link_train(intel_dp);
3686 			intel_dp_set_link_train(intel_dp, &DP,
3687 						training_pattern |
3688 						DP_LINK_SCRAMBLING_DISABLE);
3689 			tries = 0;
3690 			cr_tries++;
3691 			continue;
3692 		}
3693 
3694 		/* Update training set as requested by target */
3695 		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3696 			DRM_ERROR("failed to update link training\n");
3697 			break;
3698 		}
3699 		++tries;
3700 	}
3701 
3702 	intel_dp_set_idle_link_train(intel_dp);
3703 
3704 	intel_dp->DP = DP;
3705 
3706 	if (channel_eq)
3707 		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3708 
3709 }
3710 
3711 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3712 {
3713 	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3714 				DP_TRAINING_PATTERN_DISABLE);
3715 }
3716 
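/*
 * Tear the link down in order: drop to the idle training pattern, clear the
 * IBX transcoder-B select workaround while the port is still enabled, then
 * disable the port and honour the panel power-down delay.
 */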
3717 static void
3718 intel_dp_link_down(struct intel_dp *intel_dp)
3719 {
3720 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3721 	enum port port = intel_dig_port->port;
3722 	struct drm_device *dev = intel_dig_port->base.base.dev;
3723 	struct drm_i915_private *dev_priv = dev->dev_private;
3724 	uint32_t DP = intel_dp->DP;
3725 
3726 	if (WARN_ON(HAS_DDI(dev)))
3727 		return;
3728 
3729 	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3730 		return;
3731 
3732 	DRM_DEBUG_KMS("\n");
3733 
3734 	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
3735 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
3736 		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
3737 	} else {
3738 		if (IS_CHERRYVIEW(dev))
3739 			DP &= ~DP_LINK_TRAIN_MASK_CHV;
3740 		else
3741 			DP &= ~DP_LINK_TRAIN_MASK;
3742 		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
3743 	}
3744 	POSTING_READ(intel_dp->output_reg);
3745 
3746 	if (HAS_PCH_IBX(dev) &&
3747 	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
3748 		/* Hardware workaround: leaving our transcoder select
3749 		 * set to transcoder B while it's off will prevent the
3750 		 * corresponding HDMI output on transcoder A.
3751 		 *
3752 		 * Combine this with another hardware workaround:
3753 		 * transcoder select bit can only be cleared while the
3754 		 * port is enabled.
3755 		 */
3756 		DP &= ~DP_PIPEB_SELECT;
3757 		I915_WRITE(intel_dp->output_reg, DP);
3758 		POSTING_READ(intel_dp->output_reg);
3759 	}
3760 
3761 	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
3762 	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3763 	POSTING_READ(intel_dp->output_reg);
3764 	msleep(intel_dp->panel_power_down_delay);
3765 }
3766 
3767 static bool
3768 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3769 {
3770 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3771 	struct drm_device *dev = dig_port->base.base.dev;
3772 	struct drm_i915_private *dev_priv = dev->dev_private;
3773 	uint8_t rev;
3774 
3775 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3776 				    sizeof(intel_dp->dpcd)) < 0)
3777 		return false; /* aux transfer failed */
3778 
3779 	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3780 
3781 	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3782 		return false; /* DPCD not present */
3783 
3784 	/* Check if the panel supports PSR */
3785 	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3786 	if (is_edp(intel_dp)) {
3787 		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3788 					intel_dp->psr_dpcd,
3789 					sizeof(intel_dp->psr_dpcd));
3790 		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3791 			dev_priv->psr.sink_support = true;
3792 			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3793 		}
3794 	}
3795 
3796 	/* Training Pattern 3 support: among Intel platforms, only those that
3797 	 * support HBR2 also support TP3, hence the source check is used along
3798 	 * with the DPCD check to ensure TP3 can be enabled.
3799 	 * SKL < B0 is the only exception where, due to WaDisableHBR2, TP3 is
3800 	 * supported but still not enabled.
3801 	 */
3802 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3803 	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3804 	    intel_dp_source_supports_hbr2(dev)) {
3805 		intel_dp->use_tps3 = true;
3806 		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3807 	} else
3808 		intel_dp->use_tps3 = false;
3809 
3810 	/* Intermediate frequency support */
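	/*
	 * eDP 1.4+ sinks may expose up to DP_MAX_SUPPORTED_RATES additional
	 * link rates at DP_SUPPORTED_LINK_RATES as little-endian 16-bit
	 * values, with a zero entry terminating the table; the loop below
	 * converts each entry into the driver's clock units.
	 */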
3811 	if (is_edp(intel_dp) &&
3812 	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] &	DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3813 	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3814 	    (rev >= 0x03)) { /* eDP v1.4 or higher */
3815 		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3816 		int i;
3817 
3818 		intel_dp_dpcd_read_wake(&intel_dp->aux,
3819 				DP_SUPPORTED_LINK_RATES,
3820 				sink_rates,
3821 				sizeof(sink_rates));
3822 
3823 		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3824 			int val = le16_to_cpu(sink_rates[i]);
3825 
3826 			if (val == 0)
3827 				break;
3828 
3829 			/* Value read is in kHz while drm clock is saved in deca-kHz */
3830 			intel_dp->sink_rates[i] = (val * 200) / 10;
3831 		}
3832 		intel_dp->num_sink_rates = i;
3833 	}
3834 
3835 	intel_dp_print_rates(intel_dp);
3836 
3837 	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3838 	      DP_DWN_STRM_PORT_PRESENT))
3839 		return true; /* native DP sink */
3840 
3841 	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3842 		return true; /* no per-port downstream info */
3843 
3844 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3845 				    intel_dp->downstream_ports,
3846 				    DP_MAX_DOWNSTREAM_PORTS) < 0)
3847 		return false; /* downstream port status fetch failed */
3848 
3849 	return true;
3850 }
3851 
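/*
 * Sinks that set DP_OUI_SUPPORT expose 3-byte IEEE OUIs identifying the sink
 * and (if present) branch device vendors; log them for debugging.
 */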
3852 static void
3853 intel_dp_probe_oui(struct intel_dp *intel_dp)
3854 {
3855 	u8 buf[3];
3856 
3857 	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3858 		return;
3859 
3860 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3861 		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3862 			      buf[0], buf[1], buf[2]);
3863 
3864 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3865 		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3866 			      buf[0], buf[1], buf[2]);
3867 }
3868 
3869 static bool
3870 intel_dp_probe_mst(struct intel_dp *intel_dp)
3871 {
3872 	u8 buf[1];
3873 
3874 	if (!intel_dp->can_mst)
3875 		return false;
3876 
3877 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3878 		return false;
3879 
3880 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3881 		if (buf[0] & DP_MST_CAP) {
3882 			DRM_DEBUG_KMS("Sink is MST capable\n");
3883 			intel_dp->is_mst = true;
3884 		} else {
3885 			DRM_DEBUG_KMS("Sink is not MST capable\n");
3886 			intel_dp->is_mst = false;
3887 		}
3888 	}
3889 
3890 	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3891 	return intel_dp->is_mst;
3892 }
3893 
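/*
 * Kick off sink CRC calculation by setting DP_TEST_SINK_START, wait up to
 * 6 vblanks for TEST_CRC_COUNT in DP_TEST_SINK_MISC to advance, then read
 * the 6 CRC bytes starting at DP_TEST_CRC_R_CR and stop the test.
 */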
3894 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3895 {
3896 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3897 	struct drm_device *dev = intel_dig_port->base.base.dev;
3898 	struct intel_crtc *intel_crtc =
3899 		to_intel_crtc(intel_dig_port->base.base.crtc);
3900 	u8 buf;
3901 	int test_crc_count;
3902 	int attempts = 6;
3903 
3904 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3905 		return -EIO;
3906 
3907 	if (!(buf & DP_TEST_CRC_SUPPORTED))
3908 		return -ENOTTY;
3909 
3910 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3911 		return -EIO;
3912 
3913 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3914 				buf | DP_TEST_SINK_START) < 0)
3915 		return -EIO;
3916 
3917 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3918 		return -EIO;
3919 	test_crc_count = buf & DP_TEST_COUNT_MASK;
3920 
3921 	do {
3922 		if (drm_dp_dpcd_readb(&intel_dp->aux,
3923 				      DP_TEST_SINK_MISC, &buf) < 0)
3924 			return -EIO;
3925 		intel_wait_for_vblank(dev, intel_crtc->pipe);
3926 	} while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
3927 
3928 	if (attempts == 0) {
3929 		DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
3930 		return -ETIMEDOUT;
3931 	}
3932 
3933 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
3934 		return -EIO;
3935 
3936 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3937 		return -EIO;
3938 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3939 			       buf & ~DP_TEST_SINK_START) < 0)
3940 		return -EIO;
3941 
3942 	return 0;
3943 }
3944 
3945 static bool
3946 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3947 {
3948 	return intel_dp_dpcd_read_wake(&intel_dp->aux,
3949 				       DP_DEVICE_SERVICE_IRQ_VECTOR,
3950 				       sink_irq_vector, 1) == 1;
3951 }
3952 
3953 static bool
3954 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3955 {
3956 	int ret;
3957 
3958 	ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3959 					     DP_SINK_COUNT_ESI,
3960 					     sink_irq_vector, 14);
3961 	if (ret != 14)
3962 		return false;
3963 
3964 	return true;
3965 }
3966 
3967 static void
3968 intel_dp_handle_test_request(struct intel_dp *intel_dp)
3969 {
3970 	/* NAK by default */
3971 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
3972 }
3973 
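/*
 * Service MST sink events: read the ESI block at DP_SINK_COUNT_ESI, retrain
 * if an active link lost channel EQ, let the topology manager handle the
 * IRQ, ack the handled ESI bytes, and loop while new events keep arriving.
 */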
3974 static int
3975 intel_dp_check_mst_status(struct intel_dp *intel_dp)
3976 {
3977 	bool bret;
3978 
3979 	if (intel_dp->is_mst) {
3980 		u8 esi[16] = { 0 };
3981 		int ret = 0;
3982 		int retry;
3983 		bool handled;
3984 		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3985 go_again:
3986 		if (bret == true) {
3987 
3988 			/* check link status - esi[10] = 0x200c */
3989 			if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
3990 				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
3991 				intel_dp_start_link_train(intel_dp);
3992 				intel_dp_complete_link_train(intel_dp);
3993 				intel_dp_stop_link_train(intel_dp);
3994 			}
3995 
3996 			DRM_DEBUG_KMS("got esi %3ph\n", esi);
3997 			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
3998 
3999 			if (handled) {
4000 				for (retry = 0; retry < 3; retry++) {
4001 					int wret;
4002 					wret = drm_dp_dpcd_write(&intel_dp->aux,
4003 								 DP_SINK_COUNT_ESI+1,
4004 								 &esi[1], 3);
4005 					if (wret == 3) {
4006 						break;
4007 					}
4008 				}
4009 
4010 				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4011 				if (bret == true) {
4012 					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4013 					goto go_again;
4014 				}
4015 			} else
4016 				ret = 0;
4017 
4018 			return ret;
4019 		} else {
4020 			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4021 			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4022 			intel_dp->is_mst = false;
4023 			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4024 			/* send a hotplug event */
4025 			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4026 		}
4027 	}
4028 	return -EINVAL;
4029 }
4030 
4031 /*
4032  * According to DP spec
4033  * 5.1.2:
4034  *  1. Read DPCD
4035  *  2. Configure link according to Receiver Capabilities
4036  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4037  *  4. Check link status on receipt of hot-plug interrupt
4038  */
4039 static void
4040 intel_dp_check_link_status(struct intel_dp *intel_dp)
4041 {
4042 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4043 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4044 	u8 sink_irq_vector;
4045 	u8 link_status[DP_LINK_STATUS_SIZE];
4046 
4047 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4048 
4049 	if (!intel_encoder->connectors_active)
4050 		return;
4051 
4052 	if (WARN_ON(!intel_encoder->base.crtc))
4053 		return;
4054 
4055 	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4056 		return;
4057 
4058 	/* Try to read receiver status if the link appears to be up */
4059 	if (!intel_dp_get_link_status(intel_dp, link_status)) {
4060 		return;
4061 	}
4062 
4063 	/* Now read the DPCD to see if it's actually running */
4064 	if (!intel_dp_get_dpcd(intel_dp)) {
4065 		return;
4066 	}
4067 
4068 	/* Try to read the source of the interrupt */
4069 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4070 	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4071 		/* Clear interrupt source */
4072 		drm_dp_dpcd_writeb(&intel_dp->aux,
4073 				   DP_DEVICE_SERVICE_IRQ_VECTOR,
4074 				   sink_irq_vector);
4075 
4076 		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4077 			intel_dp_handle_test_request(intel_dp);
4078 		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4079 			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4080 	}
4081 
4082 	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4083 		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4084 			      intel_encoder->base.name);
4085 		intel_dp_start_link_train(intel_dp);
4086 		intel_dp_complete_link_train(intel_dp);
4087 		intel_dp_stop_link_train(intel_dp);
4088 	}
4089 }
4090 
4091 /* XXX this is probably wrong for multiple downstream ports */
4092 static enum drm_connector_status
4093 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4094 {
4095 	uint8_t *dpcd = intel_dp->dpcd;
4096 	uint8_t type;
4097 
4098 	if (!intel_dp_get_dpcd(intel_dp))
4099 		return connector_status_disconnected;
4100 
4101 	/* if there's no downstream port, we're done */
4102 	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4103 		return connector_status_connected;
4104 
4105 	/* If we're HPD-aware, SINK_COUNT changes dynamically */
4106 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4107 	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4108 		uint8_t reg;
4109 
4110 		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4111 					    &reg, 1) < 0)
4112 			return connector_status_unknown;
4113 
4114 		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4115 					      : connector_status_disconnected;
4116 	}
4117 
4118 	/* If no HPD, poke DDC gently */
4119 	if (drm_probe_ddc(&intel_dp->aux.ddc))
4120 		return connector_status_connected;
4121 
4122 	/* Well we tried, say unknown for unreliable port types */
4123 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4124 		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4125 		if (type == DP_DS_PORT_TYPE_VGA ||
4126 		    type == DP_DS_PORT_TYPE_NON_EDID)
4127 			return connector_status_unknown;
4128 	} else {
4129 		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4130 			DP_DWN_STRM_PORT_TYPE_MASK;
4131 		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4132 		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
4133 			return connector_status_unknown;
4134 	}
4135 
4136 	/* Anything else is out of spec, warn and ignore */
4137 	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4138 	return connector_status_disconnected;
4139 }
4140 
4141 static enum drm_connector_status
4142 edp_detect(struct intel_dp *intel_dp)
4143 {
4144 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4145 	enum drm_connector_status status;
4146 
4147 	status = intel_panel_detect(dev);
4148 	if (status == connector_status_unknown)
4149 		status = connector_status_connected;
4150 
4151 	return status;
4152 }
4153 
4154 static enum drm_connector_status
4155 ironlake_dp_detect(struct intel_dp *intel_dp)
4156 {
4157 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4158 	struct drm_i915_private *dev_priv = dev->dev_private;
4159 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4160 
4161 	if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4162 		return connector_status_disconnected;
4163 
4164 	return intel_dp_detect_dpcd(intel_dp);
4165 }
4166 
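/*
 * Sample the hotplug live-status bit for the given port in
 * PORT_HOTPLUG_STAT: returns 1 if a sink is present, 0 if not, and
 * -EINVAL for ports without a live-status bit.
 */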
4167 static int g4x_digital_port_connected(struct drm_device *dev,
4168 				       struct intel_digital_port *intel_dig_port)
4169 {
4170 	struct drm_i915_private *dev_priv = dev->dev_private;
4171 	uint32_t bit;
4172 
4173 	if (IS_VALLEYVIEW(dev)) {
4174 		switch (intel_dig_port->port) {
4175 		case PORT_B:
4176 			bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4177 			break;
4178 		case PORT_C:
4179 			bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4180 			break;
4181 		case PORT_D:
4182 			bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4183 			break;
4184 		default:
4185 			return -EINVAL;
4186 		}
4187 	} else {
4188 		switch (intel_dig_port->port) {
4189 		case PORT_B:
4190 			bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4191 			break;
4192 		case PORT_C:
4193 			bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4194 			break;
4195 		case PORT_D:
4196 			bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4197 			break;
4198 		default:
4199 			return -EINVAL;
4200 		}
4201 	}
4202 
4203 	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4204 		return 0;
4205 	return 1;
4206 }
4207 
4208 static enum drm_connector_status
4209 g4x_dp_detect(struct intel_dp *intel_dp)
4210 {
4211 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4212 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4213 	int ret;
4214 
4215 	/* Can't disconnect eDP, but you can close the lid... */
4216 	if (is_edp(intel_dp)) {
4217 		enum drm_connector_status status;
4218 
4219 		status = intel_panel_detect(dev);
4220 		if (status == connector_status_unknown)
4221 			status = connector_status_connected;
4222 		return status;
4223 	}
4224 
4225 	ret = g4x_digital_port_connected(dev, intel_dig_port);
4226 	if (ret == -EINVAL)
4227 		return connector_status_unknown;
4228 	else if (ret == 0)
4229 		return connector_status_disconnected;
4230 
4231 	return intel_dp_detect_dpcd(intel_dp);
4232 }
4233 
4234 static struct edid *
4235 intel_dp_get_edid(struct intel_dp *intel_dp)
4236 {
4237 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4238 
4239 	/* use cached edid if we have one */
4240 	if (intel_connector->edid) {
4241 		/* invalid edid */
4242 		if (IS_ERR(intel_connector->edid))
4243 			return NULL;
4244 
4245 		return drm_edid_duplicate(intel_connector->edid);
4246 	} else
4247 		return drm_get_edid(&intel_connector->base,
4248 				    &intel_dp->aux.ddc);
4249 }
4250 
4251 static void
4252 intel_dp_set_edid(struct intel_dp *intel_dp)
4253 {
4254 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4255 	struct edid *edid;
4256 
4257 	edid = intel_dp_get_edid(intel_dp);
4258 	intel_connector->detect_edid = edid;
4259 
4260 	if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4261 		intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4262 	else
4263 		intel_dp->has_audio = drm_detect_monitor_audio(edid);
4264 }
4265 
4266 static void
4267 intel_dp_unset_edid(struct intel_dp *intel_dp)
4268 {
4269 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4270 
4271 	kfree(intel_connector->detect_edid);
4272 	intel_connector->detect_edid = NULL;
4273 
4274 	intel_dp->has_audio = false;
4275 }
4276 
4277 static enum intel_display_power_domain
4278 intel_dp_power_get(struct intel_dp *dp)
4279 {
4280 	struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4281 	enum intel_display_power_domain power_domain;
4282 
4283 	power_domain = intel_display_port_power_domain(encoder);
4284 	intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4285 
4286 	return power_domain;
4287 }
4288 
4289 static void
4290 intel_dp_power_put(struct intel_dp *dp,
4291 		   enum intel_display_power_domain power_domain)
4292 {
4293 	struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4294 	intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4295 }
4296 
4297 static enum drm_connector_status
4298 intel_dp_detect(struct drm_connector *connector, bool force)
4299 {
4300 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4301 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4302 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4303 	struct drm_device *dev = connector->dev;
4304 	enum drm_connector_status status;
4305 	enum intel_display_power_domain power_domain;
4306 	bool ret;
4307 
4308 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4309 		      connector->base.id, connector->name);
4310 	intel_dp_unset_edid(intel_dp);
4311 
4312 	if (intel_dp->is_mst) {
4313 		/* MST devices are disconnected from a monitor POV */
4314 		if (intel_encoder->type != INTEL_OUTPUT_EDP)
4315 			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4316 		return connector_status_disconnected;
4317 	}
4318 
4319 	power_domain = intel_dp_power_get(intel_dp);
4320 
4321 	/* Can't disconnect eDP, but you can close the lid... */
4322 	if (is_edp(intel_dp))
4323 		status = edp_detect(intel_dp);
4324 	else if (HAS_PCH_SPLIT(dev))
4325 		status = ironlake_dp_detect(intel_dp);
4326 	else
4327 		status = g4x_dp_detect(intel_dp);
4328 	if (status != connector_status_connected)
4329 		goto out;
4330 
4331 	intel_dp_probe_oui(intel_dp);
4332 
4333 	ret = intel_dp_probe_mst(intel_dp);
4334 	if (ret) {
4335 		/* if we are in MST mode then this connector
4336 		   won't appear connected or have an EDID of its own */
4337 		if (intel_encoder->type != INTEL_OUTPUT_EDP)
4338 			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4339 		status = connector_status_disconnected;
4340 		goto out;
4341 	}
4342 
4343 	intel_dp_set_edid(intel_dp);
4344 
4345 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
4346 		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4347 	status = connector_status_connected;
4348 
4349 out:
4350 	intel_dp_power_put(intel_dp, power_domain);
4351 	return status;
4352 }
4353 
4354 static void
4355 intel_dp_force(struct drm_connector *connector)
4356 {
4357 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4358 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4359 	enum intel_display_power_domain power_domain;
4360 
4361 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4362 		      connector->base.id, connector->name);
4363 	intel_dp_unset_edid(intel_dp);
4364 
4365 	if (connector->status != connector_status_connected)
4366 		return;
4367 
4368 	power_domain = intel_dp_power_get(intel_dp);
4369 
4370 	intel_dp_set_edid(intel_dp);
4371 
4372 	intel_dp_power_put(intel_dp, power_domain);
4373 
4374 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
4375 		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4376 }
4377 
4378 static int intel_dp_get_modes(struct drm_connector *connector)
4379 {
4380 	struct intel_connector *intel_connector = to_intel_connector(connector);
4381 	struct edid *edid;
4382 
4383 	edid = intel_connector->detect_edid;
4384 	if (edid) {
4385 		int ret = intel_connector_update_modes(connector, edid);
4386 		if (ret)
4387 			return ret;
4388 	}
4389 
4390 	/* if eDP has no EDID, fall back to fixed mode */
4391 	if (is_edp(intel_attached_dp(connector)) &&
4392 	    intel_connector->panel.fixed_mode) {
4393 		struct drm_display_mode *mode;
4394 
4395 		mode = drm_mode_duplicate(connector->dev,
4396 					  intel_connector->panel.fixed_mode);
4397 		if (mode) {
4398 			drm_mode_probed_add(connector, mode);
4399 			return 1;
4400 		}
4401 	}
4402 
4403 	return 0;
4404 }
4405 
4406 static bool
4407 intel_dp_detect_audio(struct drm_connector *connector)
4408 {
4409 	bool has_audio = false;
4410 	struct edid *edid;
4411 
4412 	edid = to_intel_connector(connector)->detect_edid;
4413 	if (edid)
4414 		has_audio = drm_detect_monitor_audio(edid);
4415 
4416 	return has_audio;
4417 }
4418 
4419 static int
4420 intel_dp_set_property(struct drm_connector *connector,
4421 		      struct drm_property *property,
4422 		      uint64_t val)
4423 {
4424 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
4425 	struct intel_connector *intel_connector = to_intel_connector(connector);
4426 	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4427 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4428 	int ret;
4429 
4430 	ret = drm_object_property_set_value(&connector->base, property, val);
4431 	if (ret)
4432 		return ret;
4433 
4434 	if (property == dev_priv->force_audio_property) {
4435 		int i = val;
4436 		bool has_audio;
4437 
4438 		if (i == intel_dp->force_audio)
4439 			return 0;
4440 
4441 		intel_dp->force_audio = i;
4442 
4443 		if (i == HDMI_AUDIO_AUTO)
4444 			has_audio = intel_dp_detect_audio(connector);
4445 		else
4446 			has_audio = (i == HDMI_AUDIO_ON);
4447 
4448 		if (has_audio == intel_dp->has_audio)
4449 			return 0;
4450 
4451 		intel_dp->has_audio = has_audio;
4452 		goto done;
4453 	}
4454 
4455 	if (property == dev_priv->broadcast_rgb_property) {
4456 		bool old_auto = intel_dp->color_range_auto;
4457 		uint32_t old_range = intel_dp->color_range;
4458 
4459 		switch (val) {
4460 		case INTEL_BROADCAST_RGB_AUTO:
4461 			intel_dp->color_range_auto = true;
4462 			break;
4463 		case INTEL_BROADCAST_RGB_FULL:
4464 			intel_dp->color_range_auto = false;
4465 			intel_dp->color_range = 0;
4466 			break;
4467 		case INTEL_BROADCAST_RGB_LIMITED:
4468 			intel_dp->color_range_auto = false;
4469 			intel_dp->color_range = DP_COLOR_RANGE_16_235;
4470 			break;
4471 		default:
4472 			return -EINVAL;
4473 		}
4474 
4475 		if (old_auto == intel_dp->color_range_auto &&
4476 		    old_range == intel_dp->color_range)
4477 			return 0;
4478 
4479 		goto done;
4480 	}
4481 
4482 	if (is_edp(intel_dp) &&
4483 	    property == connector->dev->mode_config.scaling_mode_property) {
4484 		if (val == DRM_MODE_SCALE_NONE) {
4485 			DRM_DEBUG_KMS("scaling mode NONE not supported\n");
4486 			return -EINVAL;
4487 		}
4488 
4489 		if (intel_connector->panel.fitting_mode == val) {
4490 			/* the eDP scaling property is not changed */
4491 			return 0;
4492 		}
4493 		intel_connector->panel.fitting_mode = val;
4494 
4495 		goto done;
4496 	}
4497 
4498 	return -EINVAL;
4499 
4500 done:
4501 	if (intel_encoder->base.crtc)
4502 		intel_crtc_restore_mode(intel_encoder->base.crtc);
4503 
4504 	return 0;
4505 }
4506 
4507 static void
4508 intel_dp_connector_destroy(struct drm_connector *connector)
4509 {
4510 	struct intel_connector *intel_connector = to_intel_connector(connector);
4511 
4512 	kfree(intel_connector->detect_edid);
4513 
4514 	if (!IS_ERR_OR_NULL(intel_connector->edid))
4515 		kfree(intel_connector->edid);
4516 
4517 	/* Can't call is_edp() since the encoder may have been destroyed
4518 	 * already. */
4519 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4520 		intel_panel_fini(&intel_connector->panel);
4521 
4522 	drm_connector_cleanup(connector);
4523 	kfree(connector);
4524 }
4525 
4526 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4527 {
4528 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4529 	struct intel_dp *intel_dp = &intel_dig_port->dp;
4530 
4531 	drm_dp_aux_unregister(&intel_dp->aux);
4532 	intel_dp_mst_encoder_cleanup(intel_dig_port);
4533 	if (is_edp(intel_dp)) {
4534 		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4535 		/*
4536 		 * vdd might still be enabled due to the delayed vdd off.
4537 		 * Make sure vdd is actually turned off here.
4538 		 */
4539 		pps_lock(intel_dp);
4540 		edp_panel_vdd_off_sync(intel_dp);
4541 		pps_unlock(intel_dp);
4542 
4543 		if (intel_dp->edp_notifier.notifier_call) {
4544 			unregister_reboot_notifier(&intel_dp->edp_notifier);
4545 			intel_dp->edp_notifier.notifier_call = NULL;
4546 		}
4547 	}
4548 	drm_encoder_cleanup(encoder);
4549 	kfree(intel_dig_port);
4550 }
4551 
4552 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4553 {
4554 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4555 
4556 	if (!is_edp(intel_dp))
4557 		return;
4558 
4559 	/*
4560 	 * vdd might still be enabled due to the delayed vdd off.
4561 	 * Make sure vdd is actually turned off here.
4562 	 */
4563 	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4564 	pps_lock(intel_dp);
4565 	edp_panel_vdd_off_sync(intel_dp);
4566 	pps_unlock(intel_dp);
4567 }
4568 
4569 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4570 {
4571 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4572 	struct drm_device *dev = intel_dig_port->base.base.dev;
4573 	struct drm_i915_private *dev_priv = dev->dev_private;
4574 	enum intel_display_power_domain power_domain;
4575 
4576 	lockdep_assert_held(&dev_priv->pps_mutex);
4577 
4578 	if (!edp_have_panel_vdd(intel_dp))
4579 		return;
4580 
4581 	/*
4582 	 * The VDD bit needs a power domain reference, so if the bit is
4583 	 * already enabled when we boot or resume, grab this reference and
4584 	 * schedule a vdd off, so we don't hold on to the reference
4585 	 * indefinitely.
4586 	 */
4587 	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4588 	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4589 	intel_display_power_get(dev_priv, power_domain);
4590 
4591 	edp_panel_vdd_schedule_off(intel_dp);
4592 }
4593 
4594 void intel_dp_encoder_reset(struct drm_encoder *encoder)
4595 {
4596 	struct intel_dp *intel_dp;
4597 
4598 	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4599 		return;
4600 
4601 	intel_dp = enc_to_intel_dp(encoder);
4602 
4603 	pps_lock(intel_dp);
4604 
4605 	/*
4606 	 * Read out the current power sequencer assignment,
4607 	 * in case the BIOS did something with it.
4608 	 */
4609 	if (IS_VALLEYVIEW(encoder->dev))
4610 		vlv_initial_power_sequencer_setup(intel_dp);
4611 
4612 	intel_edp_panel_vdd_sanitize(intel_dp);
4613 
4614 	pps_unlock(intel_dp);
4615 }
4616 
4617 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4618 	.dpms = intel_connector_dpms,
4619 	.detect = intel_dp_detect,
4620 	.force = intel_dp_force,
4621 	.fill_modes = drm_helper_probe_single_connector_modes,
4622 	.set_property = intel_dp_set_property,
4623 	.atomic_get_property = intel_connector_atomic_get_property,
4624 	.destroy = intel_dp_connector_destroy,
4625 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4626 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
4627 };
4628 
4629 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4630 	.get_modes = intel_dp_get_modes,
4631 	.mode_valid = intel_dp_mode_valid,
4632 	.best_encoder = intel_best_encoder,
4633 };
4634 
4635 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4636 	.reset = intel_dp_encoder_reset,
4637 	.destroy = intel_dp_encoder_destroy,
4638 };
4639 
4640 void
4641 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
4642 {
4643 	return;
4644 }
4645 
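/*
 * HPD pulse handler. Long pulses signal connect/disconnect, so re-read the
 * DPCD and probe for MST; short pulses are sink IRQs, so service MST ESIs
 * or, for SST, recheck link status and retrain if needed. Long pulses on
 * eDP are ignored to avoid a vdd on/off feedback loop.
 */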
4646 enum irqreturn
4647 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4648 {
4649 	struct intel_dp *intel_dp = &intel_dig_port->dp;
4650 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4651 	struct drm_device *dev = intel_dig_port->base.base.dev;
4652 	struct drm_i915_private *dev_priv = dev->dev_private;
4653 	enum intel_display_power_domain power_domain;
4654 	enum irqreturn ret = IRQ_NONE;
4655 
4656 	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4657 		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4658 
4659 	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4660 		/*
4661 		 * vdd off can generate a long pulse on eDP which
4662 		 * would require vdd on to handle it, and thus we
4663 		 * would end up in an endless cycle of
4664 		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4665 		 */
4666 		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4667 			      port_name(intel_dig_port->port));
4668 		return IRQ_HANDLED;
4669 	}
4670 
4671 	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4672 		      port_name(intel_dig_port->port),
4673 		      long_hpd ? "long" : "short");
4674 
4675 	power_domain = intel_display_port_power_domain(intel_encoder);
4676 	intel_display_power_get(dev_priv, power_domain);
4677 
4678 	if (long_hpd) {
4679 
4680 		if (HAS_PCH_SPLIT(dev)) {
4681 			if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4682 				goto mst_fail;
4683 		} else {
4684 			if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4685 				goto mst_fail;
4686 		}
4687 
4688 		if (!intel_dp_get_dpcd(intel_dp)) {
4689 			goto mst_fail;
4690 		}
4691 
4692 		intel_dp_probe_oui(intel_dp);
4693 
4694 		if (!intel_dp_probe_mst(intel_dp)) {
4695 			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4696 			intel_dp_check_link_status(intel_dp);
4697 			drm_modeset_unlock(&dev->mode_config.connection_mutex);
4698 			goto mst_fail;
4699 		}
4700 	} else {
4701 		if (intel_dp->is_mst) {
4702 			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
4703 				goto mst_fail;
4704 		}
4705 
4706 		if (!intel_dp->is_mst) {
4707 			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4708 			intel_dp_check_link_status(intel_dp);
4709 			drm_modeset_unlock(&dev->mode_config.connection_mutex);
4710 		}
4711 	}
4712 
4713 	ret = IRQ_HANDLED;
4714 
4715 	goto put_power;
4716 mst_fail:
4717 	/* if we were in MST mode and the device is not there, get out of MST mode */
4718 	if (intel_dp->is_mst) {
4719 		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4720 		intel_dp->is_mst = false;
4721 		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4722 	}
4723 put_power:
4724 	intel_display_power_put(dev_priv, power_domain);
4725 
4726 	return ret;
4727 }
4728 
4729 /* Return which DP Port should be selected for Transcoder DP control */
4730 int
4731 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4732 {
4733 	struct drm_device *dev = crtc->dev;
4734 	struct intel_encoder *intel_encoder;
4735 	struct intel_dp *intel_dp;
4736 
4737 	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4738 		intel_dp = enc_to_intel_dp(&intel_encoder->base);
4739 
4740 		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4741 		    intel_encoder->type == INTEL_OUTPUT_EDP)
4742 			return intel_dp->output_reg;
4743 	}
4744 
4745 	return -1;
4746 }
4747 
4748 /* check the VBT to see whether the eDP is on DP-D port */
4749 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
4750 {
4751 	struct drm_i915_private *dev_priv = dev->dev_private;
4752 	union child_device_config *p_child;
4753 	int i;
4754 	static const short port_mapping[] = {
4755 		[PORT_B] = PORT_IDPB,
4756 		[PORT_C] = PORT_IDPC,
4757 		[PORT_D] = PORT_IDPD,
4758 	};
4759 
4760 	if (port == PORT_A)
4761 		return true;
4762 
4763 	if (!dev_priv->vbt.child_dev_num)
4764 		return false;
4765 
4766 	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4767 		p_child = dev_priv->vbt.child_dev + i;
4768 
4769 		if (p_child->common.dvo_port == port_mapping[port] &&
4770 		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4771 		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
4772 			return true;
4773 	}
4774 	return false;
4775 }
4776 
4777 void
4778 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4779 {
4780 	struct intel_connector *intel_connector = to_intel_connector(connector);
4781 
4782 	intel_attach_force_audio_property(connector);
4783 	intel_attach_broadcast_rgb_property(connector);
4784 	intel_dp->color_range_auto = true;
4785 
4786 	if (is_edp(intel_dp)) {
4787 		drm_mode_create_scaling_mode_property(connector->dev);
4788 		drm_object_attach_property(
4789 			&connector->base,
4790 			connector->dev->mode_config.scaling_mode_property,
4791 			DRM_MODE_SCALE_ASPECT);
4792 		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
4793 	}
4794 }
4795 
4796 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4797 {
4798 	intel_dp->last_power_cycle = jiffies;
4799 	intel_dp->last_power_on = jiffies;
4800 	intel_dp->last_backlight_off = jiffies;
4801 }
4802 
4803 static void
4804 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
4805 				    struct intel_dp *intel_dp)
4806 {
4807 	struct drm_i915_private *dev_priv = dev->dev_private;
4808 	struct edp_power_seq cur, vbt, spec,
4809 		*final = &intel_dp->pps_delays;
4810 	u32 pp_on, pp_off, pp_div, pp;
4811 	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
4812 
4813 	lockdep_assert_held(&dev_priv->pps_mutex);
4814 
4815 	/* already initialized? */
4816 	if (final->t11_t12 != 0)
4817 		return;
4818 
4819 	if (HAS_PCH_SPLIT(dev)) {
4820 		pp_ctrl_reg = PCH_PP_CONTROL;
4821 		pp_on_reg = PCH_PP_ON_DELAYS;
4822 		pp_off_reg = PCH_PP_OFF_DELAYS;
4823 		pp_div_reg = PCH_PP_DIVISOR;
4824 	} else {
4825 		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4826 
4827 		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4828 		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4829 		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4830 		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
4831 	}
4832 
4833 	/* Workaround: Need to write PP_CONTROL with the unlock key as
4834 	 * the very first thing. */
4835 	pp = ironlake_get_pp_control(intel_dp);
4836 	I915_WRITE(pp_ctrl_reg, pp);
4837 
4838 	pp_on = I915_READ(pp_on_reg);
4839 	pp_off = I915_READ(pp_off_reg);
4840 	pp_div = I915_READ(pp_div_reg);
4841 
4842 	/* Pull timing values out of registers */
4843 	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4844 		PANEL_POWER_UP_DELAY_SHIFT;
4845 
4846 	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4847 		PANEL_LIGHT_ON_DELAY_SHIFT;
4848 
4849 	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4850 		PANEL_LIGHT_OFF_DELAY_SHIFT;
4851 
4852 	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4853 		PANEL_POWER_DOWN_DELAY_SHIFT;
4854 
4855 	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4856 		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4857 
4858 	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4859 		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4860 
4861 	vbt = dev_priv->vbt.edp_pps;
4862 
4863 	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4864 	 * our hw here, which are all in 100usec. */
4865 	spec.t1_t3 = 210 * 10;
4866 	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4867 	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4868 	spec.t10 = 500 * 10;
4869 	/* This one is special and actually in units of 100ms, but zero
4870 	 * based in the hw (so we need to add 100 ms). But the sw vbt
4871 	 * table multiplies it by 1000 to make it in units of 100usec,
4872 	 * too. */
4873 	spec.t11_t12 = (510 + 100) * 10;
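	/*
	 * Worked example (values illustrative): the spec limit for
	 * t11_t12 is 510 ms. With the 100 ms zero-base added and the
	 * conversion to 100 usec units, (510 + 100) * 10 = 6100, which
	 * is 610 ms expressed in the hw's granularity.
	 */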
4874 
4875 	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4876 		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
4877 
4878 	/* Use the max of the register settings and vbt. If both are
4879 	 * unset, fall back to the spec limits. */
4880 #define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
4881 				       spec.field : \
4882 				       max(cur.field, vbt.field))
4883 	assign_final(t1_t3);
4884 	assign_final(t8);
4885 	assign_final(t9);
4886 	assign_final(t10);
4887 	assign_final(t11_t12);
4888 #undef assign_final
4889 
4890 #define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
4891 	intel_dp->panel_power_up_delay = get_delay(t1_t3);
4892 	intel_dp->backlight_on_delay = get_delay(t8);
4893 	intel_dp->backlight_off_delay = get_delay(t9);
4894 	intel_dp->panel_power_down_delay = get_delay(t10);
4895 	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
4896 #undef get_delay
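	/*
	 * Example of the two macros above (hypothetical values): with
	 * cur.t1_t3 == 0 and vbt.t1_t3 == 2100, assign_final() picks
	 * max(0, 2100) = 2100 (100 usec units); get_delay() then rounds
	 * up to ms, so panel_power_up_delay = DIV_ROUND_UP(2100, 10) =
	 * 210 ms. Only if both cur and vbt are zero does the spec limit
	 * apply.
	 */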
4897 
4898 	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
4899 		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
4900 		      intel_dp->panel_power_cycle_delay);
4901 
4902 	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4903 		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
4904 }
4905 
4906 static void
4907 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
4908 					      struct intel_dp *intel_dp)
4909 {
4910 	struct drm_i915_private *dev_priv = dev->dev_private;
4911 	u32 pp_on, pp_off, pp_div, port_sel = 0;
4912 	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4913 	int pp_on_reg, pp_off_reg, pp_div_reg;
4914 	enum port port = dp_to_dig_port(intel_dp)->port;
4915 	const struct edp_power_seq *seq = &intel_dp->pps_delays;
4916 
4917 	lockdep_assert_held(&dev_priv->pps_mutex);
4918 
4919 	if (HAS_PCH_SPLIT(dev)) {
4920 		pp_on_reg = PCH_PP_ON_DELAYS;
4921 		pp_off_reg = PCH_PP_OFF_DELAYS;
4922 		pp_div_reg = PCH_PP_DIVISOR;
4923 	} else {
4924 		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4925 
4926 		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4927 		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4928 		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
4929 	}
4930 
4931 	/*
4932 	 * And finally store the new values in the power sequencer. The
4933 	 * backlight delays are set to 1 because we do manual waits on them. For
4934 	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
4935 	 * we'll end up waiting for the backlight off delay twice: once when we
4936 	 * do the manual sleep, and once when we disable the panel and wait for
4937 	 * the PP_STATUS bit to become zero.
4938 	 */
4939 	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
4940 		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
4941 	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
4942 		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
4943 	/* Compute the divisor for the pp clock, simply match the Bspec
4944 	 * formula. */
4945 	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
4946 	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
4947 			<< PANEL_POWER_CYCLE_DELAY_SHIFT);
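	/*
	 * Numeric sketch (assuming a hypothetical 125 MHz raw clock):
	 * the reference divider becomes (100 * 125) / 2 - 1 = 6249, and
	 * a 610 ms power cycle delay (t11_t12 = 6100 in 100 usec units)
	 * is stored as DIV_ROUND_UP(6100, 1000) = 7 in the 100 ms
	 * granularity of the PANEL_POWER_CYCLE_DELAY field.
	 */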
4948 
4949 	/* Haswell doesn't have any port selection bits for the panel
4950 	 * power sequencer any more. */
4951 	if (IS_VALLEYVIEW(dev)) {
4952 		port_sel = PANEL_PORT_SELECT_VLV(port);
4953 	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
4954 		if (port == PORT_A)
4955 			port_sel = PANEL_PORT_SELECT_DPA;
4956 		else
4957 			port_sel = PANEL_PORT_SELECT_DPD;
4958 	}
4959 
4960 	pp_on |= port_sel;
4961 
4962 	I915_WRITE(pp_on_reg, pp_on);
4963 	I915_WRITE(pp_off_reg, pp_off);
4964 	I915_WRITE(pp_div_reg, pp_div);
4965 
4966 	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
4967 		      I915_READ(pp_on_reg),
4968 		      I915_READ(pp_off_reg),
4969 		      I915_READ(pp_div_reg));
4970 }
4971 
4972 /**
4973  * intel_dp_set_drrs_state - program registers for RR switch to take effect
4974  * @dev: DRM device
4975  * @refresh_rate: RR to be programmed
4976  *
4977  * This function gets called when refresh rate (RR) has to be changed from
4978  * one frequency to another. Switches can be between high and low RR
4979  * supported by the panel or to any other RR based on media playback (in
4980  * this case, RR value needs to be passed from user space).
4981  *
4982  * The caller of this function needs to take a lock on dev_priv->drrs.
4983  */
4984 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
4985 {
4986 	struct drm_i915_private *dev_priv = dev->dev_private;
4987 	struct intel_encoder *encoder;
4988 	struct intel_digital_port *dig_port = NULL;
4989 	struct intel_dp *intel_dp = dev_priv->drrs.dp;
4990 	struct intel_crtc_state *config = NULL;
4991 	struct intel_crtc *intel_crtc = NULL;
4992 	u32 reg, val;
4993 	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
4994 
4995 	if (refresh_rate <= 0) {
4996 		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
4997 		return;
4998 	}
4999 
5000 	if (intel_dp == NULL) {
5001 		DRM_DEBUG_KMS("DRRS not supported.\n");
5002 		return;
5003 	}
5004 
5005 	/*
5006 	 * FIXME: This needs proper synchronization with psr state for some
5007 	 * platforms that cannot have PSR and DRRS enabled at the same time.
5008 	 */
5009 
5010 	dig_port = dp_to_dig_port(intel_dp);
5011 	encoder = &dig_port->base;
5012 	intel_crtc = to_intel_crtc(encoder->base.crtc);
5013 
5014 	if (!intel_crtc) {
5015 		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5016 		return;
5017 	}
5018 
5019 	config = intel_crtc->config;
5020 
5021 	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5022 		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5023 		return;
5024 	}
5025 
5026 	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5027 			refresh_rate)
5028 		index = DRRS_LOW_RR;
5029 
5030 	if (index == dev_priv->drrs.refresh_rate_type) {
5031 		DRM_DEBUG_KMS(
5032 			"DRRS requested for previously set RR...ignoring\n");
5033 		return;
5034 	}
5035 
5036 	if (!intel_crtc->active) {
5037 		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not active\n");
5038 		return;
5039 	}
5040 
5041 	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5042 		switch (index) {
5043 		case DRRS_HIGH_RR:
5044 			intel_dp_set_m_n(intel_crtc, M1_N1);
5045 			break;
5046 		case DRRS_LOW_RR:
5047 			intel_dp_set_m_n(intel_crtc, M2_N2);
5048 			break;
5049 		case DRRS_MAX_RR:
5050 		default:
5051 			DRM_ERROR("Unsupported refresh rate type\n");
5052 		}
5053 	} else if (INTEL_INFO(dev)->gen > 6) {
5054 		reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5055 		val = I915_READ(reg);
5056 
5057 		if (index > DRRS_HIGH_RR) {
5058 			if (IS_VALLEYVIEW(dev))
5059 				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5060 			else
5061 				val |= PIPECONF_EDP_RR_MODE_SWITCH;
5062 		} else {
5063 			if (IS_VALLEYVIEW(dev))
5064 				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5065 			else
5066 				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5067 		}
5068 		I915_WRITE(reg, val);
5069 	}
5070 
5071 	dev_priv->drrs.refresh_rate_type = index;
5072 
5073 	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5074 }
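/*
 * Illustrative caller sketch (mirrors the in-tree uses below, not a new
 * API): a switch to the panel's low RR is requested with the downclock
 * mode's vrefresh, under dev_priv->drrs.mutex:
 *
 *	mutex_lock(&dev_priv->drrs.mutex);
 *	intel_dp_set_drrs_state(dev_priv->dev,
 *		intel_dp->attached_connector->panel.downclock_mode->vrefresh);
 *	mutex_unlock(&dev_priv->drrs.mutex);
 */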
5075 
5076 /**
5077  * intel_edp_drrs_enable - init drrs struct if supported
5078  * @intel_dp: DP struct
5079  *
5080  * Initializes frontbuffer_bits and drrs.dp
5081  */
5082 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5083 {
5084 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5085 	struct drm_i915_private *dev_priv = dev->dev_private;
5086 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5087 	struct drm_crtc *crtc = dig_port->base.base.crtc;
5088 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5089 
5090 	if (!intel_crtc->config->has_drrs) {
5091 		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5092 		return;
5093 	}
5094 
5095 	mutex_lock(&dev_priv->drrs.mutex);
5096 	if (WARN_ON(dev_priv->drrs.dp)) {
5097 		DRM_ERROR("DRRS already enabled\n");
5098 		goto unlock;
5099 	}
5100 
5101 	dev_priv->drrs.busy_frontbuffer_bits = 0;
5102 
5103 	dev_priv->drrs.dp = intel_dp;
5104 
5105 unlock:
5106 	mutex_unlock(&dev_priv->drrs.mutex);
5107 }
5108 
5109 /**
5110  * intel_edp_drrs_disable - Disable DRRS
5111  * @intel_dp: DP struct
5112  *
5113  */
5114 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5115 {
5116 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5117 	struct drm_i915_private *dev_priv = dev->dev_private;
5118 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5119 	struct drm_crtc *crtc = dig_port->base.base.crtc;
5120 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5121 
5122 	if (!intel_crtc->config->has_drrs)
5123 		return;
5124 
5125 	mutex_lock(&dev_priv->drrs.mutex);
5126 	if (!dev_priv->drrs.dp) {
5127 		mutex_unlock(&dev_priv->drrs.mutex);
5128 		return;
5129 	}
5130 
5131 	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5132 		intel_dp_set_drrs_state(dev_priv->dev,
5133 			intel_dp->attached_connector->panel.
5134 			fixed_mode->vrefresh);
5135 
5136 	dev_priv->drrs.dp = NULL;
5137 	mutex_unlock(&dev_priv->drrs.mutex);
5138 
5139 	cancel_delayed_work_sync(&dev_priv->drrs.work);
5140 }
5141 
5142 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5143 {
5144 	struct drm_i915_private *dev_priv =
5145 		container_of(work, typeof(*dev_priv), drrs.work.work);
5146 	struct intel_dp *intel_dp;
5147 
5148 	mutex_lock(&dev_priv->drrs.mutex);
5149 
5150 	intel_dp = dev_priv->drrs.dp;
5151 
5152 	if (!intel_dp)
5153 		goto unlock;
5154 
5155 	/*
5156 	 * The delayed work can race with an invalidate, hence we need to
5157 	 * recheck.
5158 	 */
5159 
5160 	if (dev_priv->drrs.busy_frontbuffer_bits)
5161 		goto unlock;
5162 
5163 	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5164 		intel_dp_set_drrs_state(dev_priv->dev,
5165 			intel_dp->attached_connector->panel.
5166 			downclock_mode->vrefresh);
5167 
5168 unlock:
5169 	mutex_unlock(&dev_priv->drrs.mutex);
5170 }
5171 
5172 /**
5173  * intel_edp_drrs_invalidate - Invalidate DRRS
5174  * @dev: DRM device
5175  * @frontbuffer_bits: frontbuffer plane tracking bits
5176  *
5177  * When there is a disturbance on screen (due to cursor movement/time
5178  * update etc.), DRRS needs to be invalidated, i.e. we need to switch
5179  * to the high RR.
5180  *
5181  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5182  */
5183 void intel_edp_drrs_invalidate(struct drm_device *dev,
5184 		unsigned frontbuffer_bits)
5185 {
5186 	struct drm_i915_private *dev_priv = dev->dev_private;
5187 	struct drm_crtc *crtc;
5188 	enum pipe pipe;
5189 
5190 	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5191 		return;
5192 
5193 	cancel_delayed_work(&dev_priv->drrs.work);
5194 
5195 	mutex_lock(&dev_priv->drrs.mutex);
5196 	if (!dev_priv->drrs.dp) {
5197 		mutex_unlock(&dev_priv->drrs.mutex);
5198 		return;
5199 	}
5200 
5201 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5202 	pipe = to_intel_crtc(crtc)->pipe;
5203 
5204 	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
5205 		intel_dp_set_drrs_state(dev_priv->dev,
5206 				dev_priv->drrs.dp->attached_connector->panel.
5207 				fixed_mode->vrefresh);
5208 	}
5209 
5210 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5211 
5212 	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5213 	mutex_unlock(&dev_priv->drrs.mutex);
5214 }
5215 
5216 /**
5217  * intel_edp_drrs_flush - Flush DRRS
5218  * @dev: DRM device
5219  * @frontbuffer_bits: frontbuffer plane tracking bits
5220  *
5221  * When there is no movement on screen, DRRS work can be scheduled.
5222  * This DRRS work is responsible for setting relevant registers after a
5223  * timeout of 1 second.
5224  *
5225  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5226  */
5227 void intel_edp_drrs_flush(struct drm_device *dev,
5228 		unsigned frontbuffer_bits)
5229 {
5230 	struct drm_i915_private *dev_priv = dev->dev_private;
5231 	struct drm_crtc *crtc;
5232 	enum pipe pipe;
5233 
5234 	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5235 		return;
5236 
5237 	cancel_delayed_work(&dev_priv->drrs.work);
5238 
5239 	mutex_lock(&dev_priv->drrs.mutex);
5240 	if (!dev_priv->drrs.dp) {
5241 		mutex_unlock(&dev_priv->drrs.mutex);
5242 		return;
5243 	}
5244 
5245 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5246 	pipe = to_intel_crtc(crtc)->pipe;
5247 	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5248 
5249 	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5250 			!dev_priv->drrs.busy_frontbuffer_bits)
5251 		schedule_delayed_work(&dev_priv->drrs.work,
5252 				msecs_to_jiffies(1000));
5253 	mutex_unlock(&dev_priv->drrs.mutex);
5254 }
5255 
5256 /**
5257  * DOC: Display Refresh Rate Switching (DRRS)
5258  *
5259  * Display Refresh Rate Switching (DRRS) is a power conservation feature
5260  * which enables switching between low and high refresh rates,
5261  * dynamically, based on the usage scenario. This feature is applicable
5262  * for internal panels.
5263  *
5264  * Indication that the panel supports DRRS is given by the panel EDID, which
5265  * would list multiple refresh rates for one resolution.
5266  *
5267  * DRRS is of two types: static and seamless.
5268  * Static DRRS involves changing the refresh rate (RR) via a full modeset
5269  * (may appear as a blink on screen) and is used in dock/undock scenarios.
5270  * Seamless DRRS involves changing RR without any visual effect to the user
5271  * and can be used during normal system usage. This is done by programming
5272  * certain registers.
5273  *
5274  * Support for static/seamless DRRS may be indicated in the VBT based on
5275  * inputs from the panel spec.
5276  *
5277  * DRRS saves power by switching to low RR based on usage scenarios.
5278  *
5279  * eDP DRRS:-
5280  *        The implementation is based on frontbuffer tracking implementation.
5281  * When there is a disturbance on the screen triggered by user activity or a
5282  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5283  * When there is no movement on screen, after a timeout of 1 second, a switch
5284  * to low RR is made.
5285  *        For integration with frontbuffer tracking code,
5286  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5287  *
5288  * DRRS can be further extended to support other internal panels and also
5289  * the scenario of video playback wherein RR is set based on the rate
5290  * requested by userspace.
5291  */
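/*
 * Call-flow sketch for the eDP case above (function names are from this
 * file; the frontbuffer-tracking caller is assumed):
 *
 *	frontbuffer write -> intel_edp_drrs_invalidate(dev, bits)
 *	                     (cancels pending work, switches to high RR)
 *	frontbuffer flush -> intel_edp_drrs_flush(dev, bits)
 *	                     (schedules intel_edp_drrs_downclock_work)
 *	1 s of idleness   -> the delayed work switches to low RR via
 *	                     intel_dp_set_drrs_state()
 */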
5292 
5293 /**
5294  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5295  * @intel_connector: eDP connector
5296  * @fixed_mode: preferred mode of panel
5297  *
5298  * This function is called only once at driver load to initialize the
5299  * basic DRRS state.
5300  *
5301  * Returns:
5302  * Downclock mode if panel supports it, else return NULL.
5303  * DRRS support is determined by the presence of downclock mode (apart
5304  * from VBT setting).
5305  */
5306 static struct drm_display_mode *
5307 intel_dp_drrs_init(struct intel_connector *intel_connector,
5308 		struct drm_display_mode *fixed_mode)
5309 {
5310 	struct drm_connector *connector = &intel_connector->base;
5311 	struct drm_device *dev = connector->dev;
5312 	struct drm_i915_private *dev_priv = dev->dev_private;
5313 	struct drm_display_mode *downclock_mode = NULL;
5314 
5315 	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5316 	mutex_init(&dev_priv->drrs.mutex);
5317 
5318 	if (INTEL_INFO(dev)->gen <= 6) {
5319 		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5320 		return NULL;
5321 	}
5322 
5323 	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5324 		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5325 		return NULL;
5326 	}
5327 
5328 	downclock_mode = intel_find_panel_downclock(dev, fixed_mode,
5329 						    connector);
5330 
5331 	if (!downclock_mode) {
5332 		DRM_DEBUG_KMS("Downclock mode not found. DRRS not supported\n");
5333 		return NULL;
5334 	}
5335 
5336 	dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5337 
5338 	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5339 	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5340 	return downclock_mode;
5341 }
5342 
5343 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5344 				     struct intel_connector *intel_connector)
5345 {
5346 	struct drm_connector *connector = &intel_connector->base;
5347 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5348 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5349 	struct drm_device *dev = intel_encoder->base.dev;
5350 	struct drm_i915_private *dev_priv = dev->dev_private;
5351 	struct drm_display_mode *fixed_mode = NULL;
5352 	struct drm_display_mode *downclock_mode = NULL;
5353 	bool has_dpcd;
5354 	struct drm_display_mode *scan;
5355 	struct edid *edid;
5356 	enum pipe pipe = INVALID_PIPE;
5357 
5358 	if (!is_edp(intel_dp))
5359 		return true;
5360 
5361 	pps_lock(intel_dp);
5362 	intel_edp_panel_vdd_sanitize(intel_dp);
5363 	pps_unlock(intel_dp);
5364 
5365 	/* Cache DPCD and EDID for eDP. */
5366 	has_dpcd = intel_dp_get_dpcd(intel_dp);
5367 
5368 	if (has_dpcd) {
5369 		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5370 			dev_priv->no_aux_handshake =
5371 				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5372 				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5373 	} else {
5374 		/* if this fails, presume the device is a ghost */
5375 		DRM_INFO("failed to retrieve link info, disabling eDP\n");
5376 		return false;
5377 	}
5378 
5379 	/* We now know it's not a ghost, init power sequence regs. */
5380 	pps_lock(intel_dp);
5381 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5382 	pps_unlock(intel_dp);
5383 
5384 	mutex_lock(&dev->mode_config.mutex);
5385 	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5386 	if (edid) {
5387 		if (drm_add_edid_modes(connector, edid)) {
5388 			drm_mode_connector_update_edid_property(connector,
5389 								edid);
5390 			drm_edid_to_eld(connector, edid);
5391 		} else {
5392 			kfree(edid);
5393 			edid = ERR_PTR(-EINVAL);
5394 		}
5395 	} else {
5396 		edid = ERR_PTR(-ENOENT);
5397 	}
5398 	intel_connector->edid = edid;
5399 
5400 	/* prefer fixed mode from EDID if available */
5401 	list_for_each_entry(scan, &connector->probed_modes, head) {
5402 		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5403 			fixed_mode = drm_mode_duplicate(dev, scan);
5404 			downclock_mode = intel_dp_drrs_init(
5405 						intel_connector, fixed_mode);
5406 			break;
5407 		}
5408 	}
5409 
5410 	/* fall back to VBT if available for eDP */
5411 	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5412 		fixed_mode = drm_mode_duplicate(dev,
5413 					dev_priv->vbt.lfp_lvds_vbt_mode);
5414 		if (fixed_mode)
5415 			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5416 	}
5417 	mutex_unlock(&dev->mode_config.mutex);
5418 
5419 	if (IS_VALLEYVIEW(dev)) {
5420 		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5421 		register_reboot_notifier(&intel_dp->edp_notifier);
5422 
5423 		/*
5424 		 * Figure out the current pipe for the initial backlight setup.
5425 		 * If the current pipe isn't valid, try the PPS pipe, and if that
5426 		 * fails just assume pipe A.
5427 		 */
5428 		if (IS_CHERRYVIEW(dev))
5429 			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5430 		else
5431 			pipe = PORT_TO_PIPE(intel_dp->DP);
5432 
5433 		if (pipe != PIPE_A && pipe != PIPE_B)
5434 			pipe = intel_dp->pps_pipe;
5435 
5436 		if (pipe != PIPE_A && pipe != PIPE_B)
5437 			pipe = PIPE_A;
5438 
5439 		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5440 			      pipe_name(pipe));
5441 	}
5442 
5443 	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5444 	intel_connector->panel.backlight_power = intel_edp_backlight_power;
5445 	intel_panel_setup_backlight(connector, pipe);
5446 
5447 	return true;
5448 }
5449 
5450 bool
5451 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5452 			struct intel_connector *intel_connector)
5453 {
5454 	struct drm_connector *connector = &intel_connector->base;
5455 	struct intel_dp *intel_dp = &intel_dig_port->dp;
5456 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5457 	struct drm_device *dev = intel_encoder->base.dev;
5458 	struct drm_i915_private *dev_priv = dev->dev_private;
5459 	enum port port = intel_dig_port->port;
5460 	int type;
5461 
5462 	intel_dp->pps_pipe = INVALID_PIPE;
5463 
5464 	/* intel_dp vfuncs */
5465 	if (INTEL_INFO(dev)->gen >= 9)
5466 		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5467 	else if (IS_VALLEYVIEW(dev))
5468 		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5469 	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5470 		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5471 	else if (HAS_PCH_SPLIT(dev))
5472 		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5473 	else
5474 		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5475 
5476 	if (INTEL_INFO(dev)->gen >= 9)
5477 		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5478 	else
5479 		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5480 
5481 	/* Preserve the current hw state. */
5482 	intel_dp->DP = I915_READ(intel_dp->output_reg);
5483 	intel_dp->attached_connector = intel_connector;
5484 
5485 	if (intel_dp_is_edp(dev, port))
5486 		type = DRM_MODE_CONNECTOR_eDP;
5487 	else
5488 		type = DRM_MODE_CONNECTOR_DisplayPort;
5489 
5490 	/*
5491 	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5492 	 * for DP the encoder type can be set by the caller to
5493 	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5494 	 */
5495 	if (type == DRM_MODE_CONNECTOR_eDP)
5496 		intel_encoder->type = INTEL_OUTPUT_EDP;
5497 
5498 	/* eDP only on port B and/or C on vlv/chv */
5499 	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5500 		    port != PORT_B && port != PORT_C))
5501 		return false;
5502 
5503 	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5504 			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5505 			port_name(port));
5506 
5507 	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5508 	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5509 
5510 	connector->interlace_allowed = true;
5511 	connector->doublescan_allowed = 0;
5512 
5513 	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5514 			  edp_panel_vdd_work);
5515 
5516 	intel_connector_attach_encoder(intel_connector, intel_encoder);
5517 	drm_connector_register(connector);
5518 
5519 	if (HAS_DDI(dev))
5520 		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5521 	else
5522 		intel_connector->get_hw_state = intel_connector_get_hw_state;
5523 	intel_connector->unregister = intel_dp_connector_unregister;
5524 
5525 	/* Set up the hotplug pin. */
5526 	switch (port) {
5527 	case PORT_A:
5528 		intel_encoder->hpd_pin = HPD_PORT_A;
5529 		break;
5530 	case PORT_B:
5531 		intel_encoder->hpd_pin = HPD_PORT_B;
5532 		break;
5533 	case PORT_C:
5534 		intel_encoder->hpd_pin = HPD_PORT_C;
5535 		break;
5536 	case PORT_D:
5537 		intel_encoder->hpd_pin = HPD_PORT_D;
5538 		break;
5539 	default:
5540 		BUG();
5541 	}
5542 
5543 	if (is_edp(intel_dp)) {
5544 		pps_lock(intel_dp);
5545 		intel_dp_init_panel_power_timestamps(intel_dp);
5546 		if (IS_VALLEYVIEW(dev))
5547 			vlv_initial_power_sequencer_setup(intel_dp);
5548 		else
5549 			intel_dp_init_panel_power_sequencer(dev, intel_dp);
5550 		pps_unlock(intel_dp);
5551 	}
5552 
5553 	intel_dp_aux_init(intel_dp, intel_connector);
5554 
5555 	/* init MST on ports that can support it */
5556 	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
5557 		if (port == PORT_B || port == PORT_C || port == PORT_D) {
5558 			intel_dp_mst_encoder_init(intel_dig_port,
5559 						  intel_connector->base.base.id);
5560 		}
5561 	}
5562 
5563 	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5564 		drm_dp_aux_unregister(&intel_dp->aux);
5565 		if (is_edp(intel_dp)) {
5566 			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5567 			/*
5568 			 * vdd might still be enabled due to the delayed vdd off.
5569 			 * Make sure vdd is actually turned off here.
5570 			 */
5571 			pps_lock(intel_dp);
5572 			edp_panel_vdd_off_sync(intel_dp);
5573 			pps_unlock(intel_dp);
5574 		}
5575 		drm_connector_unregister(connector);
5576 		drm_connector_cleanup(connector);
5577 		return false;
5578 	}
5579 
5580 	intel_dp_add_properties(intel_dp, connector);
5581 
5582 	/* For G4X desktop chips, PEG_BAND_GAP_DATA 3:0 must first be written
5583 	 * with 0xd. Failure to do so will result in spurious interrupts being
5584 	 * generated on the port when a cable is not attached.
5585 	 */
5586 	if (IS_G4X(dev) && !IS_GM45(dev)) {
5587 		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5588 		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5589 	}
5590 
5591 	return true;
5592 }
5593 
5594 void
5595 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5596 {
5597 	struct drm_i915_private *dev_priv = dev->dev_private;
5598 	struct intel_digital_port *intel_dig_port;
5599 	struct intel_encoder *intel_encoder;
5600 	struct drm_encoder *encoder;
5601 	struct intel_connector *intel_connector;
5602 
5603 	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5604 	if (!intel_dig_port)
5605 		return;
5606 
5607 	intel_connector = intel_connector_alloc();
5608 	if (!intel_connector) {
5609 		kfree(intel_dig_port);
5610 		return;
5611 	}
5612 
5613 	intel_encoder = &intel_dig_port->base;
5614 	encoder = &intel_encoder->base;
5615 
5616 	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5617 			 DRM_MODE_ENCODER_TMDS);
5618 
5619 	intel_encoder->compute_config = intel_dp_compute_config;
5620 	intel_encoder->disable = intel_disable_dp;
5621 	intel_encoder->get_hw_state = intel_dp_get_hw_state;
5622 	intel_encoder->get_config = intel_dp_get_config;
5623 	intel_encoder->suspend = intel_dp_encoder_suspend;
5624 	if (IS_CHERRYVIEW(dev)) {
5625 		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5626 		intel_encoder->pre_enable = chv_pre_enable_dp;
5627 		intel_encoder->enable = vlv_enable_dp;
5628 		intel_encoder->post_disable = chv_post_disable_dp;
5629 	} else if (IS_VALLEYVIEW(dev)) {
5630 		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5631 		intel_encoder->pre_enable = vlv_pre_enable_dp;
5632 		intel_encoder->enable = vlv_enable_dp;
5633 		intel_encoder->post_disable = vlv_post_disable_dp;
5634 	} else {
5635 		intel_encoder->pre_enable = g4x_pre_enable_dp;
5636 		intel_encoder->enable = g4x_enable_dp;
5637 		if (INTEL_INFO(dev)->gen >= 5)
5638 			intel_encoder->post_disable = ilk_post_disable_dp;
5639 	}
5640 
5641 	intel_dig_port->port = port;
5642 	intel_dig_port->dp.output_reg = output_reg;
5643 
5644 	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
5645 	if (IS_CHERRYVIEW(dev)) {
5646 		if (port == PORT_D)
5647 			intel_encoder->crtc_mask = 1 << 2;
5648 		else
5649 			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5650 	} else {
5651 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5652 	}
5653 	intel_encoder->cloneable = 0;
5654 	intel_encoder->hot_plug = intel_dp_hot_plug;
5655 
5656 	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5657 	dev_priv->hpd_irq_port[port] = intel_dig_port;
5658 
5659 	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5660 		drm_encoder_cleanup(encoder);
5661 		kfree(intel_dig_port);
5662 		kfree(intel_connector);
5663 	}
5664 }
5665 
5666 void intel_dp_mst_suspend(struct drm_device *dev)
5667 {
5668 	struct drm_i915_private *dev_priv = dev->dev_private;
5669 	int i;
5670 
5671 	/* disable MST */
5672 	for (i = 0; i < I915_MAX_PORTS; i++) {
5673 		struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5674 		if (!intel_dig_port)
5675 			continue;
5676 
5677 		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5678 			if (!intel_dig_port->dp.can_mst)
5679 				continue;
5680 			if (intel_dig_port->dp.is_mst)
5681 				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5682 		}
5683 	}
5684 }
5685 
5686 void intel_dp_mst_resume(struct drm_device *dev)
5687 {
5688 	struct drm_i915_private *dev_priv = dev->dev_private;
5689 	int i;
5690 
5691 	for (i = 0; i < I915_MAX_PORTS; i++) {
5692 		struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5693 		if (!intel_dig_port)
5694 			continue;
5695 		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5696 			int ret;
5697 
5698 			if (!intel_dig_port->dp.can_mst)
5699 				continue;
5700 
5701 			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5702 			if (ret != 0) {
5703 				intel_dp_check_mst_status(&intel_dig_port->dp);
5704 			}
5705 		}
5706 	}
5707 }
5708