/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Only the fixed rates are provided below; variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming a fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
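	/* e.g. for the 162000 entry below: (32 << 22) | 1677722 == 0x819999a */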
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

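/*
 * Mask of the (up to 4) lanes left unused by a given lane count,
 * e.g. lane_count == 2 yields 0xc (lanes 2 and 3 unused).
 */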
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	return ~((1 << lane_count) - 1) & 0xf;
}

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */
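/*
 * A further worked example (hypothetical numbers, not from the spec):
 * intel_dp_max_data_rate(270000, 2) == 432000 and
 * intel_dp_link_required(119000, 18) == 214200, so a 2-lane 2.7GHz link
 * comfortably carries the 1680x1050R mode described above.
 */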

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

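/*
 * Pack up to 4 bytes, MSB first, into a single 32-bit AUX data register
 * value; e.g. {0x12, 0x34, 0x56, 0x78} becomes 0x12345678.
 */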
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int	i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock onto the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so they
	 * should always be used.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power, guaranteeing T12 timing.
   This function is only applicable when the panel PM state is not being tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		u32 pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based on hrawclk and should run at 2MHz.
	 * So take the hrawclk value, divide by 2, and use that.
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

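	/*
	 * AUX request header: 4-bit command, 20-bit address, length - 1.
	 * E.g. a one-byte native read of DPCD address 0x000 (a made-up
	 * request, purely for illustration) goes out as
	 * {0x90, 0x00, 0x00, 0x00}.
	 */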
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
	const char *name = NULL;
	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
	int ret;

	/* On SKL we don't have AUX for port E, so we rely on VBT to set
	 * a proper alternate aux channel.
	 */
	if (IS_SKYLAKE(dev) && port == PORT_E) {
		switch (info->alternate_aux_channel) {
		case DP_AUX_B:
			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
			break;
		case DP_AUX_C:
			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
			break;
		case DP_AUX_D:
			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
			break;
		case DP_AUX_A:
		default:
			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
		}
	}

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	case PORT_E:
		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
		name = "DPDDC-E";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
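	/*
	 * port_clock is the DP link rate in kHz; the DPLL_CTRL1 link rate
	 * selects are specified at half that, e.g. RBR (162000) hits
	 * case 81000 -> DPLL_CTRL1_LINK_RATE_810.
	 */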
	switch (pipe_config->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/*
	 * TBD: For DP link rates 2.16 GHz and 4.32 GHz, the VCO is 8640,
	 * which results in a CDCLK change. The CDCLK change needs to be
	 * handled by disabling and re-enabling the pipes.
	 */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;
	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}

void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
{
	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	switch (pipe_config->port_clock / 2) {
	case 81000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case 135000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case 270000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

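	/*
	 * DP_LINK_BW_1_62/2_7/5_4 are 0x06/0x0a/0x14; shifting right by 3
	 * maps them to 0/1/2, the index of the sink's max rate in
	 * default_rates, so +1 gives the number of usable entries.
	 */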
	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}

static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
{
	/* WaDisableHBR2:skl */
	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
		return false;

	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
	    (INTEL_INFO(dev)->gen >= 9))
		return true;
	else
		return false;
}

static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
	int size;

	if (IS_BROXTON(dev)) {
		*source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_SKYLAKE(dev)) {
		*source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else {
		*source_rates = default_rates;
		size = ARRAY_SIZE(default_rates);
	}

	/* This depends on the fact that 5.4 is the last value in the array */
	if (!intel_dp_source_supports_hbr2(dev))
		size--;

	return size;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

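/*
 * Intersect two rate arrays with a two-pointer merge; both arrays must
 * be sorted in ascending order for this to find every common entry.
 */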
static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(dev, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(dev, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}

static int rate_to_index(int find, const int *rates)
{
	int i = 0;

	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
		if (find == rates[i])
			break;

	return i;
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

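	/*
	 * rates[] was zero-initialized, so looking up rate 0 finds the
	 * first unused slot, which equals len; len - 1 is thus the index
	 * of the highest common rate.
	 */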
	return rates[rate_to_index(0, rates) - 1];
}

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}

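/*
 * Sinks that expose a DPCD link rate table (num_sink_rates != 0, i.e.
 * eDP 1.4 class panels) are programmed with a table index via
 * rate_select; legacy sinks take a classic link BW code in link_bw
 * instead.
 */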
intel_dp_compute_rate(struct intel_dp * intel_dp,int port_clock,uint8_t * link_bw,uint8_t * rate_select)1365 static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1366 				  uint8_t *link_bw, uint8_t *rate_select)
1367 {
1368 	if (intel_dp->num_sink_rates) {
1369 		*link_bw = 0;
1370 		*rate_select =
1371 			intel_dp_rate_select(intel_dp, port_clock);
1372 	} else {
1373 		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1374 		*rate_select = 0;
1375 	}
1376 }
1377 
1378 bool
intel_dp_compute_config(struct intel_encoder * encoder,struct intel_crtc_state * pipe_config)1379 intel_dp_compute_config(struct intel_encoder *encoder,
1380 			struct intel_crtc_state *pipe_config)
1381 {
1382 	struct drm_device *dev = encoder->base.dev;
1383 	struct drm_i915_private *dev_priv = dev->dev_private;
1384 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1385 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1386 	enum port port = dp_to_dig_port(intel_dp)->port;
1387 	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1388 	struct intel_connector *intel_connector = intel_dp->attached_connector;
1389 	int lane_count, clock;
1390 	int min_lane_count = 1;
1391 	int max_lane_count = intel_dp_max_lane_count(intel_dp);
1392 	/* Conveniently, the link BW constants become indices with a shift...*/
1393 	int min_clock = 0;
1394 	int max_clock;
1395 	int bpp, mode_rate;
1396 	int link_avail, link_clock;
1397 	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1398 	int common_len;
1399 	uint8_t link_bw, rate_select;
1400 
1401 	common_len = intel_dp_common_rates(intel_dp, common_rates);
1402 
1403 	/* No common link rates between source and sink */
1404 	WARN_ON(common_len <= 0);
1405 
1406 	max_clock = common_len - 1;
1407 
1408 	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1409 		pipe_config->has_pch_encoder = true;
1410 
1411 	pipe_config->has_dp_encoder = true;
1412 	pipe_config->has_drrs = false;
1413 	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1414 
1415 	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1416 		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1417 				       adjusted_mode);
1418 
1419 		if (INTEL_INFO(dev)->gen >= 9) {
1420 			int ret;
1421 			ret = skl_update_scaler_crtc(pipe_config);
1422 			if (ret)
1423 				return ret;
1424 		}
1425 
1426 		if (!HAS_PCH_SPLIT(dev))
1427 			intel_gmch_panel_fitting(intel_crtc, pipe_config,
1428 						 intel_connector->panel.fitting_mode);
1429 		else
1430 			intel_pch_panel_fitting(intel_crtc, pipe_config,
1431 						intel_connector->panel.fitting_mode);
1432 	}
1433 
1434 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1435 		return false;
1436 
1437 	DRM_DEBUG_KMS("DP link computation with max lane count %i "
1438 		      "max bw %d pixel clock %iKHz\n",
1439 		      max_lane_count, common_rates[max_clock],
1440 		      adjusted_mode->crtc_clock);
1441 
1442 	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
1443 	 * bpc in between. */
1444 	bpp = pipe_config->pipe_bpp;
1445 	if (is_edp(intel_dp)) {
1446 
1447 		/* Get bpp from vbt only for panels that dont have bpp in edid */
1448 		if (intel_connector->base.display_info.bpc == 0 &&
1449 			(dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1450 			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1451 				      dev_priv->vbt.edp_bpp);
1452 			bpp = dev_priv->vbt.edp_bpp;
1453 		}
1454 
1455 		/*
1456 		 * Use the maximum clock and number of lanes the eDP panel
1457 		 * advertizes being capable of. The panels are generally
1458 		 * designed to support only a single clock and lane
1459 		 * configuration, and typically these values correspond to the
1460 		 * native resolution of the panel.
1461 		 */
1462 		min_lane_count = max_lane_count;
1463 		min_clock = max_clock;
1464 	}
1465 
1466 	for (; bpp >= 6*3; bpp -= 2*3) {
1467 		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1468 						   bpp);
1469 
1470 		for (clock = min_clock; clock <= max_clock; clock++) {
1471 			for (lane_count = min_lane_count;
1472 				lane_count <= max_lane_count;
1473 				lane_count <<= 1) {
1474 
1475 				link_clock = common_rates[clock];
1476 				link_avail = intel_dp_max_data_rate(link_clock,
1477 								    lane_count);
1478 
1479 				if (mode_rate <= link_avail) {
1480 					goto found;
1481 				}
1482 			}
1483 		}
1484 	}
1485 
1486 	return false;
1487 
1488 found:
1489 	if (intel_dp->color_range_auto) {
1490 		/*
1491 		 * See:
1492 		 * CEA-861-E - 5.1 Default Encoding Parameters
1493 		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1494 		 */
1495 		pipe_config->limited_color_range =
1496 			bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1497 	} else {
1498 		pipe_config->limited_color_range =
1499 			intel_dp->limited_color_range;
1500 	}
1501 
1502 	pipe_config->lane_count = lane_count;
1503 
1504 	pipe_config->pipe_bpp = bpp;
1505 	pipe_config->port_clock = common_rates[clock];
1506 
1507 	intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1508 			      &link_bw, &rate_select);
1509 
1510 	DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1511 		      link_bw, rate_select, pipe_config->lane_count,
1512 		      pipe_config->port_clock, bpp);
1513 	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1514 		      mode_rate, link_avail);
1515 
1516 	intel_link_compute_m_n(bpp, lane_count,
1517 			       adjusted_mode->crtc_clock,
1518 			       pipe_config->port_clock,
1519 			       &pipe_config->dp_m_n);
1520 
1521 	if (intel_connector->panel.downclock_mode != NULL &&
1522 		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1523 			pipe_config->has_drrs = true;
1524 			intel_link_compute_m_n(bpp, lane_count,
1525 				intel_connector->panel.downclock_mode->clock,
1526 				pipe_config->port_clock,
1527 				&pipe_config->dp_m2_n2);
1528 	}
1529 
1530 	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1531 		skl_edp_set_pll_config(pipe_config);
1532 	else if (IS_BROXTON(dev))
1533 		/* handled in ddi */;
1534 	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1535 		hsw_dp_set_ddi_pll_sel(pipe_config);
1536 	else
1537 		intel_dp_set_clock(encoder, pipe_config);
1538 
1539 	return true;
1540 }
1541 
ironlake_set_pll_cpu_edp(struct intel_dp * intel_dp)1542 static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
1543 {
1544 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1545 	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1546 	struct drm_device *dev = crtc->base.dev;
1547 	struct drm_i915_private *dev_priv = dev->dev_private;
1548 	u32 dpa_ctl;
1549 
1550 	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1551 		      crtc->config->port_clock);
1552 	dpa_ctl = I915_READ(DP_A);
1553 	dpa_ctl &= ~DP_PLL_FREQ_MASK;
1554 
1555 	if (crtc->config->port_clock == 162000) {
1556 		/* For a long time we've carried around a ILK-DevA w/a for the
1557 		 * 160MHz clock. If we're really unlucky, it's still required.
1558 		 */
1559 		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1560 		dpa_ctl |= DP_PLL_FREQ_160MHZ;
1561 		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
1562 	} else {
1563 		dpa_ctl |= DP_PLL_FREQ_270MHZ;
1564 		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
1565 	}
1566 
1567 	I915_WRITE(DP_A, dpa_ctl);
1568 
1569 	POSTING_READ(DP_A);
1570 	udelay(500);
1571 }
1572 
intel_dp_set_link_params(struct intel_dp * intel_dp,const struct intel_crtc_state * pipe_config)1573 void intel_dp_set_link_params(struct intel_dp *intel_dp,
1574 			      const struct intel_crtc_state *pipe_config)
1575 {
1576 	intel_dp->link_rate = pipe_config->port_clock;
1577 	intel_dp->lane_count = pipe_config->lane_count;
1578 }
1579 
intel_dp_prepare(struct intel_encoder * encoder)1580 static void intel_dp_prepare(struct intel_encoder *encoder)
1581 {
1582 	struct drm_device *dev = encoder->base.dev;
1583 	struct drm_i915_private *dev_priv = dev->dev_private;
1584 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1585 	enum port port = dp_to_dig_port(intel_dp)->port;
1586 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1587 	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1588 
1589 	intel_dp_set_link_params(intel_dp, crtc->config);
1590 
1591 	/*
1592 	 * There are four kinds of DP registers:
1593 	 *
1594 	 * 	IBX PCH
1595 	 * 	SNB CPU
1596 	 *	IVB CPU
1597 	 * 	CPT PCH
1598 	 *
1599 	 * IBX PCH and CPU are the same for almost everything,
1600 	 * except that the CPU DP PLL is configured in this
1601 	 * register
1602 	 *
1603 	 * CPT PCH is quite different, having many bits moved
1604 	 * to the TRANS_DP_CTL register instead. That
1605 	 * configuration happens (oddly) in ironlake_pch_enable
1606 	 */
1607 
1608 	/* Preserve the BIOS-computed detected bit. This is
1609 	 * supposed to be read-only.
1610 	 */
1611 	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1612 
1613 	/* Handle DP bits in common between all three register formats */
1614 	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1615 	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
1616 
1617 	if (crtc->config->has_audio)
1618 		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
1619 
1620 	/* Split out the IBX/CPU vs CPT settings */
1621 
1622 	if (IS_GEN7(dev) && port == PORT_A) {
1623 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1624 			intel_dp->DP |= DP_SYNC_HS_HIGH;
1625 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1626 			intel_dp->DP |= DP_SYNC_VS_HIGH;
1627 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1628 
1629 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1630 			intel_dp->DP |= DP_ENHANCED_FRAMING;
1631 
1632 		intel_dp->DP |= crtc->pipe << 29;
1633 	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
1634 		u32 trans_dp;
1635 
1636 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1637 
1638 		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1639 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1640 			trans_dp |= TRANS_DP_ENH_FRAMING;
1641 		else
1642 			trans_dp &= ~TRANS_DP_ENH_FRAMING;
1643 		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
1644 	} else {
1645 		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1646 		    crtc->config->limited_color_range)
1647 			intel_dp->DP |= DP_COLOR_RANGE_16_235;
1648 
1649 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1650 			intel_dp->DP |= DP_SYNC_HS_HIGH;
1651 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1652 			intel_dp->DP |= DP_SYNC_VS_HIGH;
1653 		intel_dp->DP |= DP_LINK_TRAIN_OFF;
1654 
1655 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1656 			intel_dp->DP |= DP_ENHANCED_FRAMING;
1657 
1658 		if (IS_CHERRYVIEW(dev))
1659 			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1660 		else if (crtc->pipe == PIPE_B)
1661 			intel_dp->DP |= DP_PIPEB_SELECT;
1662 	}
1663 }
1664 
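/*
 * Mask/value pairs for polling PP_STATUS in wait_panel_status() below:
 * each pair selects the panel power sequencer bits of interest and the
 * value they must reach before the wait completes.
 */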
1665 #define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1666 #define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
1667 
1668 #define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
1669 #define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)
1670 
1671 #define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1672 #define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1673 
1674 static void wait_panel_status(struct intel_dp *intel_dp,
1675 				       u32 mask,
1676 				       u32 value)
1677 {
1678 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1679 	struct drm_i915_private *dev_priv = dev->dev_private;
1680 	u32 pp_stat_reg, pp_ctrl_reg;
1681 
1682 	lockdep_assert_held(&dev_priv->pps_mutex);
1683 
1684 	pp_stat_reg = _pp_stat_reg(intel_dp);
1685 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1686 
1687 	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1688 			mask, value,
1689 			I915_READ(pp_stat_reg),
1690 			I915_READ(pp_ctrl_reg));
1691 
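	/* Poll until the masked status bits reach the expected value; give up after 5 seconds. */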
1692 	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1693 		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1694 				I915_READ(pp_stat_reg),
1695 				I915_READ(pp_ctrl_reg));
1696 	}
1697 
1698 	DRM_DEBUG_KMS("Wait complete\n");
1699 }
1700 
1701 static void wait_panel_on(struct intel_dp *intel_dp)
1702 {
1703 	DRM_DEBUG_KMS("Wait for panel power on\n");
1704 	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1705 }
1706 
1707 static void wait_panel_off(struct intel_dp *intel_dp)
1708 {
1709 	DRM_DEBUG_KMS("Wait for panel power off time\n");
1710 	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1711 }
1712 
1713 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1714 {
1715 	DRM_DEBUG_KMS("Wait for panel power cycle\n");
1716 
1717 	/* When we disable the VDD override bit last, we have to do the
1718 	 * manual wait. */
1719 	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1720 				       intel_dp->panel_power_cycle_delay);
1721 
1722 	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1723 }
1724 
1725 static void wait_backlight_on(struct intel_dp *intel_dp)
1726 {
1727 	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1728 				       intel_dp->backlight_on_delay);
1729 }
1730 
1731 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1732 {
1733 	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1734 				       intel_dp->backlight_off_delay);
1735 }
1736 
1737 /* Read the current pp_control value, unlocking the register if it
1738  * is locked
1739  */
1740 
1741 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1742 {
1743 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1744 	struct drm_i915_private *dev_priv = dev->dev_private;
1745 	u32 control;
1746 
1747 	lockdep_assert_held(&dev_priv->pps_mutex);
1748 
1749 	control = I915_READ(_pp_ctrl_reg(intel_dp));
1750 	if (!IS_BROXTON(dev)) {
1751 		control &= ~PANEL_UNLOCK_MASK;
1752 		control |= PANEL_UNLOCK_REGS;
1753 	}
1754 	return control;
1755 }
1756 
1757 /*
1758  * Must be paired with edp_panel_vdd_off().
1759  * Must hold pps_mutex around the whole on/off sequence.
1760  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1761  */
1762 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1763 {
1764 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1765 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1766 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1767 	struct drm_i915_private *dev_priv = dev->dev_private;
1768 	enum intel_display_power_domain power_domain;
1769 	u32 pp;
1770 	u32 pp_stat_reg, pp_ctrl_reg;
1771 	bool need_to_disable = !intel_dp->want_panel_vdd;
1772 
1773 	lockdep_assert_held(&dev_priv->pps_mutex);
1774 
1775 	if (!is_edp(intel_dp))
1776 		return false;
1777 
1778 	cancel_delayed_work(&intel_dp->panel_vdd_work);
1779 	intel_dp->want_panel_vdd = true;
1780 
1781 	if (edp_have_panel_vdd(intel_dp))
1782 		return need_to_disable;
1783 
1784 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
1785 	intel_display_power_get(dev_priv, power_domain);
1786 
1787 	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1788 		      port_name(intel_dig_port->port));
1789 
1790 	if (!edp_have_panel_power(intel_dp))
1791 		wait_panel_power_cycle(intel_dp);
1792 
1793 	pp = ironlake_get_pp_control(intel_dp);
1794 	pp |= EDP_FORCE_VDD;
1795 
1796 	pp_stat_reg = _pp_stat_reg(intel_dp);
1797 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1798 
1799 	I915_WRITE(pp_ctrl_reg, pp);
1800 	POSTING_READ(pp_ctrl_reg);
1801 	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1802 			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1803 	/*
1804 	 * If the panel wasn't on, delay before accessing aux channel
1805 	 */
1806 	if (!edp_have_panel_power(intel_dp)) {
1807 		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1808 			      port_name(intel_dig_port->port));
1809 		msleep(intel_dp->panel_power_up_delay);
1810 	}
1811 
1812 	return need_to_disable;
1813 }
1814 
1815 /*
1816  * Must be paired with intel_edp_panel_vdd_off() or
1817  * intel_edp_panel_off().
1818  * Nested calls to these functions are not allowed since
1819  * we drop the lock. Caller must use some higher level
1820  * locking to prevent nested calls from other threads.
1821  */
1822 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1823 {
1824 	bool vdd;
1825 
1826 	if (!is_edp(intel_dp))
1827 		return;
1828 
1829 	pps_lock(intel_dp);
1830 	vdd = edp_panel_vdd_on(intel_dp);
1831 	pps_unlock(intel_dp);
1832 
1833 	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1834 	     port_name(dp_to_dig_port(intel_dp)->port));
1835 }
1836 
1837 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1838 {
1839 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1840 	struct drm_i915_private *dev_priv = dev->dev_private;
1841 	struct intel_digital_port *intel_dig_port =
1842 		dp_to_dig_port(intel_dp);
1843 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1844 	enum intel_display_power_domain power_domain;
1845 	u32 pp;
1846 	u32 pp_stat_reg, pp_ctrl_reg;
1847 
1848 	lockdep_assert_held(&dev_priv->pps_mutex);
1849 
1850 	WARN_ON(intel_dp->want_panel_vdd);
1851 
1852 	if (!edp_have_panel_vdd(intel_dp))
1853 		return;
1854 
1855 	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1856 		      port_name(intel_dig_port->port));
1857 
1858 	pp = ironlake_get_pp_control(intel_dp);
1859 	pp &= ~EDP_FORCE_VDD;
1860 
1861 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1862 	pp_stat_reg = _pp_stat_reg(intel_dp);
1863 
1864 	I915_WRITE(pp_ctrl_reg, pp);
1865 	POSTING_READ(pp_ctrl_reg);
1866 
1867 	/* Make sure sequencer is idle before allowing subsequent activity */
1868 	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1869 		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1870 
1871 	if ((pp & POWER_TARGET_ON) == 0)
1872 		intel_dp->last_power_cycle = jiffies;
1873 
1874 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
1875 	intel_display_power_put(dev_priv, power_domain);
1876 }
1877 
1878 static void edp_panel_vdd_work(struct work_struct *__work)
1879 {
1880 	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1881 						 struct intel_dp, panel_vdd_work);
1882 
1883 	pps_lock(intel_dp);
1884 	if (!intel_dp->want_panel_vdd)
1885 		edp_panel_vdd_off_sync(intel_dp);
1886 	pps_unlock(intel_dp);
1887 }
1888 
1889 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1890 {
1891 	unsigned long delay;
1892 
1893 	/*
1894 	 * Queue the timer to fire a long time from now (relative to the power
1895 	 * down delay) to keep the panel power up across a sequence of
1896 	 * operations.
1897 	 */
1898 	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1899 	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1900 }
1901 
1902 /*
1903  * Must be paired with edp_panel_vdd_on().
1904  * Must hold pps_mutex around the whole on/off sequence.
1905  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1906  */
1907 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1908 {
1909 	struct drm_i915_private *dev_priv =
1910 		intel_dp_to_dev(intel_dp)->dev_private;
1911 
1912 	lockdep_assert_held(&dev_priv->pps_mutex);
1913 
1914 	if (!is_edp(intel_dp))
1915 		return;
1916 
1917 	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1918 	     port_name(dp_to_dig_port(intel_dp)->port));
1919 
1920 	intel_dp->want_panel_vdd = false;
1921 
1922 	if (sync)
1923 		edp_panel_vdd_off_sync(intel_dp);
1924 	else
1925 		edp_panel_vdd_schedule_off(intel_dp);
1926 }
1927 
1928 static void edp_panel_on(struct intel_dp *intel_dp)
1929 {
1930 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1931 	struct drm_i915_private *dev_priv = dev->dev_private;
1932 	u32 pp;
1933 	u32 pp_ctrl_reg;
1934 
1935 	lockdep_assert_held(&dev_priv->pps_mutex);
1936 
1937 	if (!is_edp(intel_dp))
1938 		return;
1939 
1940 	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1941 		      port_name(dp_to_dig_port(intel_dp)->port));
1942 
1943 	if (WARN(edp_have_panel_power(intel_dp),
1944 		 "eDP port %c panel power already on\n",
1945 		 port_name(dp_to_dig_port(intel_dp)->port)))
1946 		return;
1947 
1948 	wait_panel_power_cycle(intel_dp);
1949 
1950 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1951 	pp = ironlake_get_pp_control(intel_dp);
1952 	if (IS_GEN5(dev)) {
1953 		/* ILK workaround: disable reset around power sequence */
1954 		pp &= ~PANEL_POWER_RESET;
1955 		I915_WRITE(pp_ctrl_reg, pp);
1956 		POSTING_READ(pp_ctrl_reg);
1957 	}
1958 
1959 	pp |= POWER_TARGET_ON;
1960 	if (!IS_GEN5(dev))
1961 		pp |= PANEL_POWER_RESET;
1962 
1963 	I915_WRITE(pp_ctrl_reg, pp);
1964 	POSTING_READ(pp_ctrl_reg);
1965 
1966 	wait_panel_on(intel_dp);
1967 	intel_dp->last_power_on = jiffies;
1968 
1969 	if (IS_GEN5(dev)) {
1970 		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1971 		I915_WRITE(pp_ctrl_reg, pp);
1972 		POSTING_READ(pp_ctrl_reg);
1973 	}
1974 }
1975 
1976 void intel_edp_panel_on(struct intel_dp *intel_dp)
1977 {
1978 	if (!is_edp(intel_dp))
1979 		return;
1980 
1981 	pps_lock(intel_dp);
1982 	edp_panel_on(intel_dp);
1983 	pps_unlock(intel_dp);
1984 }
1985 
1986 
1987 static void edp_panel_off(struct intel_dp *intel_dp)
1988 {
1989 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1990 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1991 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1992 	struct drm_i915_private *dev_priv = dev->dev_private;
1993 	enum intel_display_power_domain power_domain;
1994 	u32 pp;
1995 	u32 pp_ctrl_reg;
1996 
1997 	lockdep_assert_held(&dev_priv->pps_mutex);
1998 
1999 	if (!is_edp(intel_dp))
2000 		return;
2001 
2002 	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2003 		      port_name(dp_to_dig_port(intel_dp)->port));
2004 
2005 	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2006 	     port_name(dp_to_dig_port(intel_dp)->port));
2007 
2008 	pp = ironlake_get_pp_control(intel_dp);
2009 	/* We need to switch off panel power _and_ force vdd, because otherwise
2010 	 * some panels get very unhappy and cease to work. */
2011 	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2012 		EDP_BLC_ENABLE);
2013 
2014 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2015 
2016 	intel_dp->want_panel_vdd = false;
2017 
2018 	I915_WRITE(pp_ctrl_reg, pp);
2019 	POSTING_READ(pp_ctrl_reg);
2020 
2021 	intel_dp->last_power_cycle = jiffies;
2022 	wait_panel_off(intel_dp);
2023 
2024 	/* We got a reference when we enabled the VDD. */
2025 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
2026 	intel_display_power_put(dev_priv, power_domain);
2027 }
2028 
2029 void intel_edp_panel_off(struct intel_dp *intel_dp)
2030 {
2031 	if (!is_edp(intel_dp))
2032 		return;
2033 
2034 	pps_lock(intel_dp);
2035 	edp_panel_off(intel_dp);
2036 	pps_unlock(intel_dp);
2037 }
2038 
2039 /* Enable backlight in the panel power control. */
2040 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2041 {
2042 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2043 	struct drm_device *dev = intel_dig_port->base.base.dev;
2044 	struct drm_i915_private *dev_priv = dev->dev_private;
2045 	u32 pp;
2046 	u32 pp_ctrl_reg;
2047 
2048 	/*
2049 	 * If we enable the backlight right away following a panel power
2050 	 * on, we may see slight flicker as the panel syncs with the eDP
2051 	 * link.  So delay a bit to make sure the image is solid before
2052 	 * allowing it to appear.
2053 	 */
2054 	wait_backlight_on(intel_dp);
2055 
2056 	pps_lock(intel_dp);
2057 
2058 	pp = ironlake_get_pp_control(intel_dp);
2059 	pp |= EDP_BLC_ENABLE;
2060 
2061 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2062 
2063 	I915_WRITE(pp_ctrl_reg, pp);
2064 	POSTING_READ(pp_ctrl_reg);
2065 
2066 	pps_unlock(intel_dp);
2067 }
2068 
2069 /* Enable backlight PWM and backlight PP control. */
2070 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2071 {
2072 	if (!is_edp(intel_dp))
2073 		return;
2074 
2075 	DRM_DEBUG_KMS("\n");
2076 
2077 	intel_panel_enable_backlight(intel_dp->attached_connector);
2078 	_intel_edp_backlight_on(intel_dp);
2079 }
2080 
2081 /* Disable backlight in the panel power control. */
2082 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2083 {
2084 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2085 	struct drm_i915_private *dev_priv = dev->dev_private;
2086 	u32 pp;
2087 	u32 pp_ctrl_reg;
2088 
2089 	if (!is_edp(intel_dp))
2090 		return;
2091 
2092 	pps_lock(intel_dp);
2093 
2094 	pp = ironlake_get_pp_control(intel_dp);
2095 	pp &= ~EDP_BLC_ENABLE;
2096 
2097 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2098 
2099 	I915_WRITE(pp_ctrl_reg, pp);
2100 	POSTING_READ(pp_ctrl_reg);
2101 
2102 	pps_unlock(intel_dp);
2103 
2104 	intel_dp->last_backlight_off = jiffies;
2105 	edp_wait_backlight_off(intel_dp);
2106 }
2107 
2108 /* Disable backlight PP control and backlight PWM. */
2109 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2110 {
2111 	if (!is_edp(intel_dp))
2112 		return;
2113 
2114 	DRM_DEBUG_KMS("\n");
2115 
2116 	_intel_edp_backlight_off(intel_dp);
2117 	intel_panel_disable_backlight(intel_dp->attached_connector);
2118 }
2119 
2120 /*
2121  * Hook for controlling the panel power control backlight through the bl_power
2122  * sysfs attribute. Take care to handle multiple calls.
2123  */
2124 static void intel_edp_backlight_power(struct intel_connector *connector,
2125 				      bool enable)
2126 {
2127 	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2128 	bool is_enabled;
2129 
2130 	pps_lock(intel_dp);
2131 	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2132 	pps_unlock(intel_dp);
2133 
2134 	if (is_enabled == enable)
2135 		return;
2136 
2137 	DRM_DEBUG_KMS("panel power control backlight %s\n",
2138 		      enable ? "enable" : "disable");
2139 
2140 	if (enable)
2141 		_intel_edp_backlight_on(intel_dp);
2142 	else
2143 		_intel_edp_backlight_off(intel_dp);
2144 }
2145 
2146 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2147 {
2148 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2149 	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2150 	struct drm_device *dev = crtc->dev;
2151 	struct drm_i915_private *dev_priv = dev->dev_private;
2152 	u32 dpa_ctl;
2153 
2154 	assert_pipe_disabled(dev_priv,
2155 			     to_intel_crtc(crtc)->pipe);
2156 
2157 	DRM_DEBUG_KMS("\n");
2158 	dpa_ctl = I915_READ(DP_A);
2159 	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2160 	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2161 
2162 	/* We don't adjust intel_dp->DP while tearing down the link, to
2163 	 * facilitate link retraining (e.g. after hotplug). Hence clear all
2164 	 * enable bits here to ensure that we don't enable too much. */
2165 	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2166 	intel_dp->DP |= DP_PLL_ENABLE;
2167 	I915_WRITE(DP_A, intel_dp->DP);
2168 	POSTING_READ(DP_A);
2169 	udelay(200);
2170 }
2171 
2172 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2173 {
2174 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2175 	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2176 	struct drm_device *dev = crtc->dev;
2177 	struct drm_i915_private *dev_priv = dev->dev_private;
2178 	u32 dpa_ctl;
2179 
2180 	assert_pipe_disabled(dev_priv,
2181 			     to_intel_crtc(crtc)->pipe);
2182 
2183 	dpa_ctl = I915_READ(DP_A);
2184 	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2185 	     "dp pll off, should be on\n");
2186 	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2187 
2188 	/* We can't rely on the value tracked for the DP register in
2189 	 * intel_dp->DP because link_down must not change that (otherwise link
2190 	 * re-training will fail). */
2191 	dpa_ctl &= ~DP_PLL_ENABLE;
2192 	I915_WRITE(DP_A, dpa_ctl);
2193 	POSTING_READ(DP_A);
2194 	udelay(200);
2195 }
2196 
2197 /* If the sink supports it, try to set the power state appropriately */
2198 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2199 {
2200 	int ret, i;
2201 
2202 	/* Should have a valid DPCD by this point */
2203 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2204 		return;
2205 
2206 	if (mode != DRM_MODE_DPMS_ON) {
2207 		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2208 					 DP_SET_POWER_D3);
2209 	} else {
2210 		/*
2211 		 * When turning on, we need to retry a few times, sleeping
2212 		 * 1ms between attempts, to give the sink time to wake up.
2213 		 */
2214 		for (i = 0; i < 3; i++) {
2215 			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2216 						 DP_SET_POWER_D0);
2217 			if (ret == 1)
2218 				break;
2219 			msleep(1);
2220 		}
2221 	}
2222 
2223 	if (ret != 1)
2224 		DRM_DEBUG_KMS("failed to %s sink power state\n",
2225 			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2226 }
2227 
2228 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2229 				  enum pipe *pipe)
2230 {
2231 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2232 	enum port port = dp_to_dig_port(intel_dp)->port;
2233 	struct drm_device *dev = encoder->base.dev;
2234 	struct drm_i915_private *dev_priv = dev->dev_private;
2235 	enum intel_display_power_domain power_domain;
2236 	u32 tmp;
2237 
2238 	power_domain = intel_display_port_power_domain(encoder);
2239 	if (!intel_display_power_is_enabled(dev_priv, power_domain))
2240 		return false;
2241 
2242 	tmp = I915_READ(intel_dp->output_reg);
2243 
2244 	if (!(tmp & DP_PORT_EN))
2245 		return false;
2246 
2247 	if (IS_GEN7(dev) && port == PORT_A) {
2248 		*pipe = PORT_TO_PIPE_CPT(tmp);
2249 	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2250 		enum pipe p;
2251 
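		/*
		 * On CPT the pipe<->port routing lives in TRANS_DP_CTL, so
		 * scan the transcoders for the one driving this port.
		 */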
2252 		for_each_pipe(dev_priv, p) {
2253 			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2254 			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2255 				*pipe = p;
2256 				return true;
2257 			}
2258 		}
2259 
2260 		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2261 			      intel_dp->output_reg);
2262 	} else if (IS_CHERRYVIEW(dev)) {
2263 		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
2264 	} else {
2265 		*pipe = PORT_TO_PIPE(tmp);
2266 	}
2267 
2268 	return true;
2269 }
2270 
2271 static void intel_dp_get_config(struct intel_encoder *encoder,
2272 				struct intel_crtc_state *pipe_config)
2273 {
2274 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2275 	u32 tmp, flags = 0;
2276 	struct drm_device *dev = encoder->base.dev;
2277 	struct drm_i915_private *dev_priv = dev->dev_private;
2278 	enum port port = dp_to_dig_port(intel_dp)->port;
2279 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2280 	int dotclock;
2281 
2282 	tmp = I915_READ(intel_dp->output_reg);
2283 
2284 	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2285 
2286 	if (HAS_PCH_CPT(dev) && port != PORT_A) {
2287 		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2288 
2289 		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2290 			flags |= DRM_MODE_FLAG_PHSYNC;
2291 		else
2292 			flags |= DRM_MODE_FLAG_NHSYNC;
2293 
2294 		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2295 			flags |= DRM_MODE_FLAG_PVSYNC;
2296 		else
2297 			flags |= DRM_MODE_FLAG_NVSYNC;
2298 	} else {
2299 		if (tmp & DP_SYNC_HS_HIGH)
2300 			flags |= DRM_MODE_FLAG_PHSYNC;
2301 		else
2302 			flags |= DRM_MODE_FLAG_NHSYNC;
2303 
2304 		if (tmp & DP_SYNC_VS_HIGH)
2305 			flags |= DRM_MODE_FLAG_PVSYNC;
2306 		else
2307 			flags |= DRM_MODE_FLAG_NVSYNC;
2308 	}
2309 
2310 	pipe_config->base.adjusted_mode.flags |= flags;
2311 
2312 	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2313 	    tmp & DP_COLOR_RANGE_16_235)
2314 		pipe_config->limited_color_range = true;
2315 
2316 	pipe_config->has_dp_encoder = true;
2317 
2318 	pipe_config->lane_count =
2319 		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2320 
2321 	intel_dp_get_m_n(crtc, pipe_config);
2322 
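	/* Port A: recover the link clock from the PLL frequency select in DP_A. */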
2323 	if (port == PORT_A) {
2324 		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2325 			pipe_config->port_clock = 162000;
2326 		else
2327 			pipe_config->port_clock = 270000;
2328 	}
2329 
2330 	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2331 					    &pipe_config->dp_m_n);
2332 
2333 	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2334 		ironlake_check_encoder_dotclock(pipe_config, dotclock);
2335 
2336 	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2337 
2338 	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2339 	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2340 		/*
2341 		 * This is a big fat ugly hack.
2342 		 *
2343 		 * Some machines in UEFI boot mode provide us a VBT that has 18
2344 		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2345 		 * unknown we fail to light up. Yet the same BIOS boots up with
2346 		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2347 		 * max, not what it tells us to use.
2348 		 *
2349 		 * Note: This will still be broken if the eDP panel is not lit
2350 		 * up by the BIOS, and thus we can't get the mode at module
2351 		 * load.
2352 		 */
2353 		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2354 			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2355 		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2356 	}
2357 }
2358 
2359 static void intel_disable_dp(struct intel_encoder *encoder)
2360 {
2361 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2362 	struct drm_device *dev = encoder->base.dev;
2363 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2364 
2365 	if (crtc->config->has_audio)
2366 		intel_audio_codec_disable(encoder);
2367 
2368 	if (HAS_PSR(dev) && !HAS_DDI(dev))
2369 		intel_psr_disable(intel_dp);
2370 
2371 	/* Make sure the panel is off before trying to change the mode. But also
2372 	 * ensure that we have vdd while we switch off the panel. */
2373 	intel_edp_panel_vdd_on(intel_dp);
2374 	intel_edp_backlight_off(intel_dp);
2375 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2376 	intel_edp_panel_off(intel_dp);
2377 
2378 	/* disable the port before the pipe on g4x */
2379 	if (INTEL_INFO(dev)->gen < 5)
2380 		intel_dp_link_down(intel_dp);
2381 }
2382 
2383 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2384 {
2385 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2386 	enum port port = dp_to_dig_port(intel_dp)->port;
2387 
2388 	intel_dp_link_down(intel_dp);
2389 	if (port == PORT_A)
2390 		ironlake_edp_pll_off(intel_dp);
2391 }
2392 
2393 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2394 {
2395 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2396 
2397 	intel_dp_link_down(intel_dp);
2398 }
2399 
2400 static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2401 				     bool reset)
2402 {
2403 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2404 	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2405 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2406 	enum pipe pipe = crtc->pipe;
2407 	uint32_t val;
2408 
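	/*
	 * Update the TX lane reset bits; mirror the programming to the
	 * second PCS channel when more than two lanes are in use.
	 */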
2409 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2410 	if (reset)
2411 		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2412 	else
2413 		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2414 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2415 
2416 	if (crtc->config->lane_count > 2) {
2417 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2418 		if (reset)
2419 			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2420 		else
2421 			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2422 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2423 	}
2424 
2425 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2426 	val |= CHV_PCS_REQ_SOFTRESET_EN;
2427 	if (reset)
2428 		val &= ~DPIO_PCS_CLK_SOFT_RESET;
2429 	else
2430 		val |= DPIO_PCS_CLK_SOFT_RESET;
2431 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2432 
2433 	if (crtc->config->lane_count > 2) {
2434 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2435 		val |= CHV_PCS_REQ_SOFTRESET_EN;
2436 		if (reset)
2437 			val &= ~DPIO_PCS_CLK_SOFT_RESET;
2438 		else
2439 			val |= DPIO_PCS_CLK_SOFT_RESET;
2440 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2441 	}
2442 }
2443 
2444 static void chv_post_disable_dp(struct intel_encoder *encoder)
2445 {
2446 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2447 	struct drm_device *dev = encoder->base.dev;
2448 	struct drm_i915_private *dev_priv = dev->dev_private;
2449 
2450 	intel_dp_link_down(intel_dp);
2451 
2452 	mutex_lock(&dev_priv->sb_lock);
2453 
2454 	/* Assert data lane reset */
2455 	chv_data_lane_soft_reset(encoder, true);
2456 
2457 	mutex_unlock(&dev_priv->sb_lock);
2458 }
2459 
2460 static void
2461 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2462 			 uint32_t *DP,
2463 			 uint8_t dp_train_pat)
2464 {
2465 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2466 	struct drm_device *dev = intel_dig_port->base.base.dev;
2467 	struct drm_i915_private *dev_priv = dev->dev_private;
2468 	enum port port = intel_dig_port->port;
2469 
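	/*
	 * Three register layouts: DDI platforms train via DP_TP_CTL,
	 * IVB port A and CPT PCH ports use the CPT-style bits in the
	 * port register, and everything else uses the original bits.
	 */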
2470 	if (HAS_DDI(dev)) {
2471 		uint32_t temp = I915_READ(DP_TP_CTL(port));
2472 
2473 		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2474 			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2475 		else
2476 			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2477 
2478 		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2479 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2480 		case DP_TRAINING_PATTERN_DISABLE:
2481 			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2482 
2483 			break;
2484 		case DP_TRAINING_PATTERN_1:
2485 			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2486 			break;
2487 		case DP_TRAINING_PATTERN_2:
2488 			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2489 			break;
2490 		case DP_TRAINING_PATTERN_3:
2491 			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2492 			break;
2493 		}
2494 		I915_WRITE(DP_TP_CTL(port), temp);
2495 
2496 	} else if ((IS_GEN7(dev) && port == PORT_A) ||
2497 		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
2498 		*DP &= ~DP_LINK_TRAIN_MASK_CPT;
2499 
2500 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2501 		case DP_TRAINING_PATTERN_DISABLE:
2502 			*DP |= DP_LINK_TRAIN_OFF_CPT;
2503 			break;
2504 		case DP_TRAINING_PATTERN_1:
2505 			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
2506 			break;
2507 		case DP_TRAINING_PATTERN_2:
2508 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
2509 			break;
2510 		case DP_TRAINING_PATTERN_3:
2511 			DRM_ERROR("DP training pattern 3 not supported\n");
2512 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
2513 			break;
2514 		}
2515 
2516 	} else {
2517 		if (IS_CHERRYVIEW(dev))
2518 			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
2519 		else
2520 			*DP &= ~DP_LINK_TRAIN_MASK;
2521 
2522 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2523 		case DP_TRAINING_PATTERN_DISABLE:
2524 			*DP |= DP_LINK_TRAIN_OFF;
2525 			break;
2526 		case DP_TRAINING_PATTERN_1:
2527 			*DP |= DP_LINK_TRAIN_PAT_1;
2528 			break;
2529 		case DP_TRAINING_PATTERN_2:
2530 			*DP |= DP_LINK_TRAIN_PAT_2;
2531 			break;
2532 		case DP_TRAINING_PATTERN_3:
2533 			if (IS_CHERRYVIEW(dev)) {
2534 				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
2535 			} else {
2536 				DRM_ERROR("DP training pattern 3 not supported\n");
2537 				*DP |= DP_LINK_TRAIN_PAT_2;
2538 			}
2539 			break;
2540 		}
2541 	}
2542 }
2543 
2544 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2545 {
2546 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2547 	struct drm_i915_private *dev_priv = dev->dev_private;
2548 
2549 	/* enable with pattern 1 (as per spec) */
2550 	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2551 				 DP_TRAINING_PATTERN_1);
2552 
2553 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2554 	POSTING_READ(intel_dp->output_reg);
2555 
2556 	/*
2557 	 * Magic for VLV/CHV. We _must_ first set up the register
2558 	 * without actually enabling the port, and then do another
2559 	 * write to enable the port. Otherwise link training will
2560 	 * fail when the power sequencer is freshly used for this port.
2561 	 */
2562 	intel_dp->DP |= DP_PORT_EN;
2563 
2564 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2565 	POSTING_READ(intel_dp->output_reg);
2566 }
2567 
2568 static void intel_enable_dp(struct intel_encoder *encoder)
2569 {
2570 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2571 	struct drm_device *dev = encoder->base.dev;
2572 	struct drm_i915_private *dev_priv = dev->dev_private;
2573 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2574 	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2575 
2576 	if (WARN_ON(dp_reg & DP_PORT_EN))
2577 		return;
2578 
2579 	pps_lock(intel_dp);
2580 
2581 	if (IS_VALLEYVIEW(dev))
2582 		vlv_init_panel_power_sequencer(intel_dp);
2583 
2584 	intel_dp_enable_port(intel_dp);
2585 
2586 	edp_panel_vdd_on(intel_dp);
2587 	edp_panel_on(intel_dp);
2588 	edp_panel_vdd_off(intel_dp, true);
2589 
2590 	pps_unlock(intel_dp);
2591 
2592 	if (IS_VALLEYVIEW(dev)) {
2593 		unsigned int lane_mask = 0x0;
2594 
2595 		if (IS_CHERRYVIEW(dev))
2596 			lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2597 
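		/* Wait for the PHY lanes to become ready; on CHV, unused lanes are masked out of the check. */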
2598 		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2599 				    lane_mask);
2600 	}
2601 
2602 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2603 	intel_dp_start_link_train(intel_dp);
2604 	intel_dp_stop_link_train(intel_dp);
2605 
2606 	if (crtc->config->has_audio) {
2607 		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2608 				 pipe_name(crtc->pipe));
2609 		intel_audio_codec_enable(encoder);
2610 	}
2611 }
2612 
2613 static void g4x_enable_dp(struct intel_encoder *encoder)
2614 {
2615 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2616 
2617 	intel_enable_dp(encoder);
2618 	intel_edp_backlight_on(intel_dp);
2619 }
2620 
2621 static void vlv_enable_dp(struct intel_encoder *encoder)
2622 {
2623 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2624 
2625 	intel_edp_backlight_on(intel_dp);
2626 	intel_psr_enable(intel_dp);
2627 }
2628 
2629 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2630 {
2631 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2632 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2633 
2634 	intel_dp_prepare(encoder);
2635 
2636 	/* Only ilk+ has port A */
2637 	if (dport->port == PORT_A) {
2638 		ironlake_set_pll_cpu_edp(intel_dp);
2639 		ironlake_edp_pll_on(intel_dp);
2640 	}
2641 }
2642 
2643 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2644 {
2645 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2646 	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2647 	enum pipe pipe = intel_dp->pps_pipe;
2648 	int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2649 
2650 	edp_panel_vdd_off_sync(intel_dp);
2651 
2652 	/*
2653 	 * VLV seems to get confused when multiple power sequencers
2654 	 * have the same port selected (even if only one has power/vdd
2655 	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2656 	 * CHV on the other hand doesn't seem to mind having the same port
2657 	 * selected in multiple power sequencers, but let's clear the
2658 	 * port select always when logically disconnecting a power sequencer
2659 	 * from a port.
2660 	 */
2661 	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2662 		      pipe_name(pipe), port_name(intel_dig_port->port));
2663 	I915_WRITE(pp_on_reg, 0);
2664 	POSTING_READ(pp_on_reg);
2665 
2666 	intel_dp->pps_pipe = INVALID_PIPE;
2667 }
2668 
2669 static void vlv_steal_power_sequencer(struct drm_device *dev,
2670 				      enum pipe pipe)
2671 {
2672 	struct drm_i915_private *dev_priv = dev->dev_private;
2673 	struct intel_encoder *encoder;
2674 
2675 	lockdep_assert_held(&dev_priv->pps_mutex);
2676 
2677 	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2678 		return;
2679 
2680 	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2681 			    base.head) {
2682 		struct intel_dp *intel_dp;
2683 		enum port port;
2684 
2685 		if (encoder->type != INTEL_OUTPUT_EDP)
2686 			continue;
2687 
2688 		intel_dp = enc_to_intel_dp(&encoder->base);
2689 		port = dp_to_dig_port(intel_dp)->port;
2690 
2691 		if (intel_dp->pps_pipe != pipe)
2692 			continue;
2693 
2694 		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2695 			      pipe_name(pipe), port_name(port));
2696 
2697 		WARN(encoder->base.crtc,
2698 		     "stealing pipe %c power sequencer from active eDP port %c\n",
2699 		     pipe_name(pipe), port_name(port));
2700 
2701 		/* make sure vdd is off before we steal it */
2702 		vlv_detach_power_sequencer(intel_dp);
2703 	}
2704 }
2705 
2706 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2707 {
2708 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2709 	struct intel_encoder *encoder = &intel_dig_port->base;
2710 	struct drm_device *dev = encoder->base.dev;
2711 	struct drm_i915_private *dev_priv = dev->dev_private;
2712 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2713 
2714 	lockdep_assert_held(&dev_priv->pps_mutex);
2715 
2716 	if (!is_edp(intel_dp))
2717 		return;
2718 
2719 	if (intel_dp->pps_pipe == crtc->pipe)
2720 		return;
2721 
2722 	/*
2723 	 * If another power sequencer was being used on this
2724 	 * port previously make sure to turn off vdd there while
2725 	 * we still have control of it.
2726 	 */
2727 	if (intel_dp->pps_pipe != INVALID_PIPE)
2728 		vlv_detach_power_sequencer(intel_dp);
2729 
2730 	/*
2731 	 * We may be stealing the power
2732 	 * sequencer from another port.
2733 	 */
2734 	vlv_steal_power_sequencer(dev, crtc->pipe);
2735 
2736 	/* now it's all ours */
2737 	intel_dp->pps_pipe = crtc->pipe;
2738 
2739 	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2740 		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2741 
2742 	/* init power sequencer on this pipe and port */
2743 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
2744 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2745 }
2746 
2747 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2748 {
2749 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2750 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2751 	struct drm_device *dev = encoder->base.dev;
2752 	struct drm_i915_private *dev_priv = dev->dev_private;
2753 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2754 	enum dpio_channel port = vlv_dport_to_channel(dport);
2755 	int pipe = intel_crtc->pipe;
2756 	u32 val;
2757 
2758 	mutex_lock(&dev_priv->sb_lock);
2759 
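	/* Enable clock channels for this port (the read value is discarded and rebuilt from scratch) */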
2760 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2761 	val = 0;
2762 	if (pipe)
2763 		val |= (1<<21);
2764 	else
2765 		val &= ~(1<<21);
2766 	val |= 0x001000c4;
2767 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
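	/* Program lane clock */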
2768 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2769 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2770 
2771 	mutex_unlock(&dev_priv->sb_lock);
2772 
2773 	intel_enable_dp(encoder);
2774 }
2775 
2776 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2777 {
2778 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2779 	struct drm_device *dev = encoder->base.dev;
2780 	struct drm_i915_private *dev_priv = dev->dev_private;
2781 	struct intel_crtc *intel_crtc =
2782 		to_intel_crtc(encoder->base.crtc);
2783 	enum dpio_channel port = vlv_dport_to_channel(dport);
2784 	int pipe = intel_crtc->pipe;
2785 
2786 	intel_dp_prepare(encoder);
2787 
2788 	/* Program Tx lane resets to default */
2789 	mutex_lock(&dev_priv->sb_lock);
2790 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2791 			 DPIO_PCS_TX_LANE2_RESET |
2792 			 DPIO_PCS_TX_LANE1_RESET);
2793 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2794 			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2795 			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2796 			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2797 				 DPIO_PCS_CLK_SOFT_RESET);
2798 
2799 	/* Fix up inter-pair skew failure */
2800 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2801 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2802 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2803 	mutex_unlock(&dev_priv->sb_lock);
2804 }
2805 
2806 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2807 {
2808 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2809 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2810 	struct drm_device *dev = encoder->base.dev;
2811 	struct drm_i915_private *dev_priv = dev->dev_private;
2812 	struct intel_crtc *intel_crtc =
2813 		to_intel_crtc(encoder->base.crtc);
2814 	enum dpio_channel ch = vlv_dport_to_channel(dport);
2815 	int pipe = intel_crtc->pipe;
2816 	int data, i, stagger;
2817 	u32 val;
2818 
2819 	mutex_lock(&dev_priv->sb_lock);
2820 
2821 	/* allow hardware to manage TX FIFO reset source */
2822 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2823 	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2824 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2825 
2826 	if (intel_crtc->config->lane_count > 2) {
2827 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2828 		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2829 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2830 	}
2831 
2832 	/* Program Tx lane latency optimal setting */
2833 	for (i = 0; i < intel_crtc->config->lane_count; i++) {
2834 		/* Set the upar bit */
2835 		if (intel_crtc->config->lane_count == 1)
2836 			data = 0x0;
2837 		else
2838 			data = (i == 1) ? 0x0 : 0x1;
2839 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2840 				data << DPIO_UPAR_SHIFT);
2841 	}
2842 
2843 	/* Data lane stagger programming */
2844 	if (intel_crtc->config->port_clock > 270000)
2845 		stagger = 0x18;
2846 	else if (intel_crtc->config->port_clock > 135000)
2847 		stagger = 0xd;
2848 	else if (intel_crtc->config->port_clock > 67500)
2849 		stagger = 0x7;
2850 	else if (intel_crtc->config->port_clock > 33750)
2851 		stagger = 0x4;
2852 	else
2853 		stagger = 0x2;
2854 
2855 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2856 	val |= DPIO_TX2_STAGGER_MASK(0x1f);
2857 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2858 
2859 	if (intel_crtc->config->lane_count > 2) {
2860 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2861 		val |= DPIO_TX2_STAGGER_MASK(0x1f);
2862 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2863 	}
2864 
2865 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2866 		       DPIO_LANESTAGGER_STRAP(stagger) |
2867 		       DPIO_LANESTAGGER_STRAP_OVRD |
2868 		       DPIO_TX1_STAGGER_MASK(0x1f) |
2869 		       DPIO_TX1_STAGGER_MULT(6) |
2870 		       DPIO_TX2_STAGGER_MULT(0));
2871 
2872 	if (intel_crtc->config->lane_count > 2) {
2873 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2874 			       DPIO_LANESTAGGER_STRAP(stagger) |
2875 			       DPIO_LANESTAGGER_STRAP_OVRD |
2876 			       DPIO_TX1_STAGGER_MASK(0x1f) |
2877 			       DPIO_TX1_STAGGER_MULT(7) |
2878 			       DPIO_TX2_STAGGER_MULT(5));
2879 	}
2880 
2881 	/* Deassert data lane reset */
2882 	chv_data_lane_soft_reset(encoder, false);
2883 
2884 	mutex_unlock(&dev_priv->sb_lock);
2885 
2886 	intel_enable_dp(encoder);
2887 
2888 	/* Second common lane will stay alive on its own now */
2889 	if (dport->release_cl2_override) {
2890 		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2891 		dport->release_cl2_override = false;
2892 	}
2893 }
2894 
2895 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2896 {
2897 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2898 	struct drm_device *dev = encoder->base.dev;
2899 	struct drm_i915_private *dev_priv = dev->dev_private;
2900 	struct intel_crtc *intel_crtc =
2901 		to_intel_crtc(encoder->base.crtc);
2902 	enum dpio_channel ch = vlv_dport_to_channel(dport);
2903 	enum pipe pipe = intel_crtc->pipe;
2904 	unsigned int lane_mask =
2905 		intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
2906 	u32 val;
2907 
2908 	intel_dp_prepare(encoder);
2909 
2910 	/*
2911 	 * Must trick the second common lane into life.
2912 	 * Otherwise we can't even access the PLL.
2913 	 */
2914 	if (ch == DPIO_CH0 && pipe == PIPE_B)
2915 		dport->release_cl2_override =
2916 			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
2917 
2918 	chv_phy_powergate_lanes(encoder, true, lane_mask);
2919 
2920 	mutex_lock(&dev_priv->sb_lock);
2921 
2922 	/* Assert data lane reset */
2923 	chv_data_lane_soft_reset(encoder, true);
2924 
2925 	/* program left/right clock distribution */
2926 	if (pipe != PIPE_B) {
2927 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2928 		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2929 		if (ch == DPIO_CH0)
2930 			val |= CHV_BUFLEFTENA1_FORCE;
2931 		if (ch == DPIO_CH1)
2932 			val |= CHV_BUFRIGHTENA1_FORCE;
2933 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2934 	} else {
2935 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2936 		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2937 		if (ch == DPIO_CH0)
2938 			val |= CHV_BUFLEFTENA2_FORCE;
2939 		if (ch == DPIO_CH1)
2940 			val |= CHV_BUFRIGHTENA2_FORCE;
2941 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2942 	}
2943 
2944 	/* program clock channel usage */
2945 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2946 	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2947 	if (pipe != PIPE_B)
2948 		val &= ~CHV_PCS_USEDCLKCHANNEL;
2949 	else
2950 		val |= CHV_PCS_USEDCLKCHANNEL;
2951 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2952 
2953 	if (intel_crtc->config->lane_count > 2) {
2954 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2955 		val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2956 		if (pipe != PIPE_B)
2957 			val &= ~CHV_PCS_USEDCLKCHANNEL;
2958 		else
2959 			val |= CHV_PCS_USEDCLKCHANNEL;
2960 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2961 	}
2962 
2963 	/*
2964 	 * This is a bit weird since generally CL
2965 	 * matches the pipe, but here we need to
2966 	 * pick the CL based on the port.
2967 	 */
2968 	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2969 	if (pipe != PIPE_B)
2970 		val &= ~CHV_CMN_USEDCLKCHANNEL;
2971 	else
2972 		val |= CHV_CMN_USEDCLKCHANNEL;
2973 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2974 
2975 	mutex_unlock(&dev_priv->sb_lock);
2976 }
2977 
2978 static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
2979 {
2980 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2981 	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
2982 	u32 val;
2983 
2984 	mutex_lock(&dev_priv->sb_lock);
2985 
2986 	/* disable left/right clock distribution */
2987 	if (pipe != PIPE_B) {
2988 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2989 		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2990 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2991 	} else {
2992 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2993 		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2994 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2995 	}
2996 
2997 	mutex_unlock(&dev_priv->sb_lock);
2998 
2999 	/*
3000 	 * Leave the power down bit cleared for at least one
3001 	 * lane so that chv_phy_powergate_ch() will power
3002 	 * on something when the channel is otherwise unused.
3003 	 * When the port is off and the override is removed
3004 	 * the lanes power down anyway, so otherwise it doesn't
3005 	 * really matter what the state of power down bits is
3006 	 * after this.
3007 	 */
3008 	chv_phy_powergate_lanes(encoder, false, 0x0);
3009 }
3010 
3011 /*
3012  * Native read with retry for link status and receiver capability reads for
3013  * cases where the sink may still be asleep.
3014  *
3015  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3016  * supposed to retry 3 times per the spec.
3017  */
3018 static ssize_t
3019 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3020 			void *buffer, size_t size)
3021 {
3022 	ssize_t ret;
3023 	int i;
3024 
3025 	/*
3026 	 * Sometimes we just get the same incorrect byte repeated
3027 	 * over the entire buffer. Doing just one throw-away read
3028 	 * initially seems to "solve" it.
3029 	 */
3030 	drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3031 
3032 	for (i = 0; i < 3; i++) {
3033 		ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3034 		if (ret == size)
3035 			return ret;
3036 		msleep(1);
3037 	}
3038 
3039 	return ret;
3040 }
3041 
3042 /*
3043  * Fetch AUX CH registers 0x202 - 0x207 which contain
3044  * link status information
3045  */
3046 static bool
3047 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3048 {
3049 	return intel_dp_dpcd_read_wake(&intel_dp->aux,
3050 				       DP_LANE0_1_STATUS,
3051 				       link_status,
3052 				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3053 }
3054 
3055 /* These are source-specific values. */
3056 static uint8_t
3057 intel_dp_voltage_max(struct intel_dp *intel_dp)
3058 {
3059 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3060 	struct drm_i915_private *dev_priv = dev->dev_private;
3061 	enum port port = dp_to_dig_port(intel_dp)->port;
3062 
3063 	if (IS_BROXTON(dev))
3064 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3065 	else if (INTEL_INFO(dev)->gen >= 9) {
3066 		if (dev_priv->edp_low_vswing && port == PORT_A)
3067 			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3068 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3069 	} else if (IS_VALLEYVIEW(dev))
3070 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3071 	else if (IS_GEN7(dev) && port == PORT_A)
3072 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3073 	else if (HAS_PCH_CPT(dev) && port != PORT_A)
3074 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3075 	else
3076 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3077 }
3078 
3079 static uint8_t
3080 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3081 {
3082 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3083 	enum port port = dp_to_dig_port(intel_dp)->port;
3084 
3085 	if (INTEL_INFO(dev)->gen >= 9) {
3086 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3087 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3088 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3089 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3090 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3091 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3092 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3093 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3094 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3095 		default:
3096 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3097 		}
3098 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3099 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3100 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3101 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3102 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3103 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3104 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3105 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3106 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3107 		default:
3108 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3109 		}
3110 	} else if (IS_VALLEYVIEW(dev)) {
3111 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3112 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3113 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3114 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3115 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3116 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3117 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3118 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3119 		default:
3120 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3121 		}
3122 	} else if (IS_GEN7(dev) && port == PORT_A) {
3123 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3124 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3125 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3126 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3127 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3128 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3129 		default:
3130 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3131 		}
3132 	} else {
3133 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3134 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3135 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3136 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3137 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3138 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3139 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3140 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3141 		default:
3142 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3143 		}
3144 	}
3145 }
3146 
3147 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3148 {
3149 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3150 	struct drm_i915_private *dev_priv = dev->dev_private;
3151 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3152 	struct intel_crtc *intel_crtc =
3153 		to_intel_crtc(dport->base.base.crtc);
3154 	unsigned long demph_reg_value, preemph_reg_value,
3155 		uniqtranscale_reg_value;
3156 	uint8_t train_set = intel_dp->train_set[0];
3157 	enum dpio_channel port = vlv_dport_to_channel(dport);
3158 	int pipe = intel_crtc->pipe;
3159 
3160 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3161 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3162 		preemph_reg_value = 0x0004000;
3163 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3164 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3165 			demph_reg_value = 0x2B405555;
3166 			uniqtranscale_reg_value = 0x552AB83A;
3167 			break;
3168 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3169 			demph_reg_value = 0x2B404040;
3170 			uniqtranscale_reg_value = 0x5548B83A;
3171 			break;
3172 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3173 			demph_reg_value = 0x2B245555;
3174 			uniqtranscale_reg_value = 0x5560B83A;
3175 			break;
3176 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3177 			demph_reg_value = 0x2B405555;
3178 			uniqtranscale_reg_value = 0x5598DA3A;
3179 			break;
3180 		default:
3181 			return 0;
3182 		}
3183 		break;
3184 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3185 		preemph_reg_value = 0x0002000;
3186 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3187 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3188 			demph_reg_value = 0x2B404040;
3189 			uniqtranscale_reg_value = 0x5552B83A;
3190 			break;
3191 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3192 			demph_reg_value = 0x2B404848;
3193 			uniqtranscale_reg_value = 0x5580B83A;
3194 			break;
3195 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3196 			demph_reg_value = 0x2B404040;
3197 			uniqtranscale_reg_value = 0x55ADDA3A;
3198 			break;
3199 		default:
3200 			return 0;
3201 		}
3202 		break;
3203 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3204 		preemph_reg_value = 0x0000000;
3205 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3206 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3207 			demph_reg_value = 0x2B305555;
3208 			uniqtranscale_reg_value = 0x5570B83A;
3209 			break;
3210 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3211 			demph_reg_value = 0x2B2B4040;
3212 			uniqtranscale_reg_value = 0x55ADDA3A;
3213 			break;
3214 		default:
3215 			return 0;
3216 		}
3217 		break;
3218 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3219 		preemph_reg_value = 0x0006000;
3220 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3221 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3222 			demph_reg_value = 0x1B405555;
3223 			uniqtranscale_reg_value = 0x55ADDA3A;
3224 			break;
3225 		default:
3226 			return 0;
3227 		}
3228 		break;
3229 	default:
3230 		return 0;
3231 	}
3232 
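	/* Program the selected swing/pre-emphasis values into the DPIO TX/PCS registers. */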
3233 	mutex_lock(&dev_priv->sb_lock);
3234 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3235 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3236 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3237 			 uniqtranscale_reg_value);
3238 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3239 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3240 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3241 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3242 	mutex_unlock(&dev_priv->sb_lock);
3243 
3244 	return 0;
3245 }
3246 
3247 static bool chv_need_uniq_trans_scale(uint8_t train_set)
3248 {
3249 	return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3250 		(train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3251 }
3252 
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	u32 deemph_reg_value, margin_reg_value, val;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	int i;

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* FIXME extra to set for 1200 */
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

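	/*
	 * PHY programming sequence below: clear the swing-calc init bits,
	 * select the 9.5 dB PCS de-emphasis code and zero margin defaults,
	 * program per-lane de-emphasis (TX_DW4) and swing margin (TX_DW2),
	 * enable the unique transition scale only where needed (TX_DW3),
	 * then set the swing-calc init bits again to latch the new values.
	 */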
	mutex_lock(&dev_priv->sb_lock);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
	}

	/* Program swing deemph */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));

		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;

		/*
		 * Supposedly this value shouldn't matter when unique transition
		 * scale is disabled, but in fact it does matter. Let's just
		 * always program the same value and hope it's OK.
		 */
		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;

		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/*
	 * The document says to set bit 27 for ch0 and bit 26 for ch1, which
	 * may be a typo in the doc. For now, set bit 27 for both ch0 and
	 * ch1 when selecting the unique transition scale.
	 */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		if (chv_need_uniq_trans_scale(train_set))
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
		else
			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}

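/*
 * Pick the highest voltage swing and pre-emphasis requested by the sink
 * across all active lanes, clamp both to what the source can drive, and
 * apply that single value to every lane. TRAINING_LANEx_SET packs voltage
 * swing in bits 1:0 and pre-emphasis in bits 4:3, so e.g. swing level 2
 * at the source maximum (0x06) combined with pre-emphasis level 1 (0x08)
 * yields 0x0e.
 */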
static void
intel_get_adjust_train(struct intel_dp *intel_dp,
		       const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	uint8_t v = 0;
	uint8_t p = 0;
	int lane;
	uint8_t voltage_max;
	uint8_t preemph_max;

	for (lane = 0; lane < intel_dp->lane_count; lane++) {
		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);

		if (this_v > v)
			v = this_v;
		if (this_p > p)
			p = this_p;
	}

	voltage_max = intel_dp_voltage_max(intel_dp);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	for (lane = 0; lane < 4; lane++)
		intel_dp->train_set[lane] = v | p;
}

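/* Gen4's DP voltage swing and pre-emphasis control */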
static uint32_t
gen4_signal_levels(uint8_t train_set)
{
	uint32_t signal_levels = 0;

	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
	default:
		signal_levels |= DP_VOLTAGE_0_4;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		signal_levels |= DP_VOLTAGE_0_6;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
		signal_levels |= DP_VOLTAGE_0_8;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		signal_levels |= DP_VOLTAGE_1_2;
		break;
	}
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
	default:
		signal_levels |= DP_PRE_EMPHASIS_0;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		signal_levels |= DP_PRE_EMPHASIS_3_5;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		signal_levels |= DP_PRE_EMPHASIS_6;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		signal_levels |= DP_PRE_EMPHASIS_9_5;
		break;
	}
	return signal_levels;
}

/* Gen6's DP voltage swing and pre-emphasis control */
static uint32_t
gen6_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);

	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:0x%x\n",
			      signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}

/* Gen7's DP voltage swing and pre-emphasis control */
static uint32_t
gen7_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);

	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:0x%x\n",
			      signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}

/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask = 0;
	uint8_t train_set = intel_dp->train_set[0];

	if (HAS_DDI(dev)) {
		signal_levels = ddi_signal_levels(intel_dp);

		if (IS_BROXTON(dev))
			signal_levels = 0;
		else
			mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		signal_levels = chv_signal_levels(intel_dp);
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = vlv_signal_levels(intel_dp);
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	DRM_DEBUG_KMS("Using vswing level %d\n",
		      train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		      (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
		      DP_TRAIN_PRE_EMPHASIS_SHIFT);

	*DP = (*DP & ~mask) | signal_levels;
}

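/*
 * Program the selected training pattern into the port register and mirror
 * it to the sink: DP_TRAINING_PATTERN_SET and the per-lane
 * DP_TRAINING_LANEx_SET values are contiguous in the DPCD, so they can be
 * written in a single AUX transfer.
 */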
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t *DP,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);
	uint8_t buf[sizeof(intel_dp->train_set) + 1];
	int ret, len;

	_intel_dp_set_link_train(intel_dp, DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	buf[0] = dp_train_pat;
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
	    DP_TRAINING_PATTERN_DISABLE) {
		/* don't write DP_TRAINING_LANEx_SET on disable */
		len = 1;
	} else {
		/* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
		len = intel_dp->lane_count + 1;
	}

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
				buf, len);

	return ret == len;
}

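/*
 * Start from a clean training set (all-zero swing/pre-emphasis) unless a
 * previously trained set is being reused, then program the levels and the
 * requested pattern.
 */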
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			  uint8_t dp_train_pat)
{
	if (!intel_dp->train_set_valid)
		memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, DP);
	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}

static bool
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);
	int ret;

	intel_get_adjust_train(intel_dp, link_status);
	intel_dp_set_signal_levels(intel_dp, DP);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
				intel_dp->train_set, intel_dp->lane_count);

	return ret == intel_dp->lane_count;
}

static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case we need to wait for a minimum number of idle patterns
	 * to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}

/* Enable corresponding port and start training pattern 1 */
static void
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;
	uint8_t link_config[2];
	uint8_t link_bw, rate_select;

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
			      &link_bw, &rate_select);

	/* Write the link configuration data */
	link_config[0] = link_bw;
	link_config[1] = intel_dp->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
	if (intel_dp->num_sink_rates)
		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
				  &rate_select, 1);

	link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

	DP |= DP_PORT_EN;

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, &DP,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to enable link training\n");
		return;
	}

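	/*
	 * Clock recovery loop: per the DP training rules, give up after 5
	 * full restarts at maximum voltage swing, or after 5 consecutive
	 * attempts at the same voltage without achieving clock recovery.
	 */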
	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}

		/*
		 * If we used previously trained voltage and pre-emphasis
		 * values and we don't get clock recovery, reset the link
		 * training values.
		 */
		if (intel_dp->train_set_valid) {
			DRM_DEBUG_KMS("clock recovery not ok, reset\n");
			/* clear the flag as we are not reusing train set */
			intel_dp->train_set_valid = false;
			if (!intel_dp_reset_link_train(intel_dp, &DP,
						       DP_TRAINING_PATTERN_1 |
						       DP_LINK_SCRAMBLING_DISABLE)) {
				DRM_ERROR("failed to enable link training\n");
				return;
			}
			continue;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			++loop_tries;
			if (loop_tries == 5) {
				DRM_ERROR("too many full retries, give up\n");
				break;
			}
			intel_dp_reset_link_train(intel_dp, &DP,
						  DP_TRAINING_PATTERN_1 |
						  DP_LINK_SCRAMBLING_DISABLE);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_ERROR("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
	}

	intel_dp->DP = DP;
}

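/*
 * Continue with training pattern 2 (or 3 where supported) until channel
 * equalization succeeds or the retry budget is exhausted.
 */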
static void
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/*
	 * Use Training Pattern 3 for HBR2 or DP 1.2 devices that support it.
	 *
	 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
	 * also mandatory for downstream devices that support HBR2.
	 *
	 * Due to WaDisableHBR2, SKL < B0 is the only exception where TPS3 is
	 * supported but still not enabled.
	 */
	if (intel_dp_source_supports_hbr2(dev) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		training_pattern = DP_TRAINING_PATTERN_3;
	else if (intel_dp->link_rate == 540000)
		DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status,
					      intel_dp->lane_count)) {
			intel_dp->train_set_valid = false;
			intel_dp_link_training_clock_recovery(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status,
					 intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp->train_set_valid = false;
			intel_dp_link_training_clock_recovery(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq) {
		intel_dp->train_set_valid = true;
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
	}
}

void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}

void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	intel_dp_link_training_clock_recovery(intel_dp);
	intel_dp_link_training_channel_equalization(intel_dp);
}

static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	if ((IS_GEN7(dev) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX: we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);
	}

	msleep(intel_dp->panel_power_down_delay);
}

static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t rev;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}

		if (INTEL_INFO(dev)->gen >= 9 &&
		    (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
			uint8_t frame_sync_cap;

			dev_priv->psr.sink_support = true;
			intel_dp_dpcd_read_wake(&intel_dp->aux,
					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
					&frame_sync_cap, 1);
			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
			/* PSR2 needs frame sync as well */
			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
			DRM_DEBUG_KMS("PSR2 %s on sink\n",
				dev_priv->psr.psr2_support ? "supported" : "not supported");
		}
	}

	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
		      yesno(intel_dp_source_supports_hbr2(dev)),
		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));

	/* Intermediate frequency support */
	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDP v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		intel_dp_dpcd_read_wake(&intel_dp->aux,
				DP_SUPPORTED_LINK_RATES,
				sink_rates,
				sizeof(sink_rates));

		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/*
			 * The DPCD value is in units of 200 kHz, while the
			 * drm clock is saved in deca-kHz: multiply by 200
			 * to get kHz, then divide by 10.
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	intel_dp_print_rates(intel_dp);

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}

static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
	u8 buf[3];

	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);
}

static bool
intel_dp_probe_mst(struct intel_dp *intel_dp)
{
	u8 buf[1];

	if (!intel_dp->can_mst)
		return false;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
		return false;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
		if (buf[0] & DP_MST_CAP) {
			DRM_DEBUG_KMS("Sink is MST capable\n");
			intel_dp->is_mst = true;
		} else {
			DRM_DEBUG_KMS("Sink is not MST capable\n");
			intel_dp->is_mst = false;
		}
	}

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	return intel_dp->is_mst;
}

static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	intel_dp->sink_crc.started = false;
 out:
	hsw_enable_ips(intel_crtc);
	return ret;
}

static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret;

	if (intel_dp->sink_crc.started) {
		ret = intel_dp_sink_crc_stop(intel_dp);
		if (ret)
			return ret;
	}

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	hsw_disable_ips(intel_crtc);

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf | DP_TEST_SINK_START) < 0) {
		hsw_enable_ips(intel_crtc);
		return -EIO;
	}

	intel_dp->sink_crc.started = true;
	return 0;
}

int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;
	bool old_equal_new;

	ret = intel_dp_sink_crc_start(intel_dp);
	if (ret)
		return ret;

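	/*
	 * Poll for a fresh CRC: wait a vblank, then re-read the test count
	 * and CRC bytes. Retry (up to 6 vblanks) while the count is zero or
	 * the sink keeps returning the same count and CRC as last time.
	 */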
	do {
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

		/*
		 * Count might be reset during the loop. In this case
		 * last known count needs to be reset as well.
		 */
		if (count == 0)
			intel_dp->sink_crc.last_count = 0;

		if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
			ret = -EIO;
			goto stop;
		}

		old_equal_new = (count == intel_dp->sink_crc.last_count &&
				 !memcmp(intel_dp->sink_crc.last_crc, crc,
					 6 * sizeof(u8)));

	} while (--attempts && (count == 0 || old_equal_new));

	intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
	memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));

	if (attempts == 0) {
		if (old_equal_new) {
			DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
		} else {
			DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
			ret = -ETIMEDOUT;
			goto stop;
		}
	}

stop:
	intel_dp_sink_crc_stop(intel_dp);
	return ret;
}

static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	return intel_dp_dpcd_read_wake(&intel_dp->aux,
				       DP_DEVICE_SERVICE_IRQ_VECTOR,
				       sink_irq_vector, 1) == 1;
}

static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	int ret;

	ret = intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT_ESI,
				      sink_irq_vector, 14);
	if (ret != 14)
		return false;

	return true;
}

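/*
 * Compliance autotest handlers (DP CTS). Only the EDID read test is
 * implemented here; link training is simply ACKed and the pattern tests
 * are NAKed as unsupported.
 */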
static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	uint8_t test_result = DP_TEST_ACK;
	return test_result;
}

static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	uint8_t test_result = DP_TEST_NAK;
	return test_result;
}

static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	uint8_t test_result = DP_TEST_NAK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/*
		 * Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases.
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
			intel_dp->aux.i2c_defer_count > 0)
			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
				      intel_dp->aux.i2c_nack_count,
				      intel_dp->aux.i2c_defer_count);
		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum of the last block read. */
		block += intel_connector->detect_edid->extensions;

		if (!drm_dp_dpcd_write(&intel_dp->aux,
					DP_TEST_EDID_CHECKSUM,
					&block->checksum,
					1))
			DRM_DEBUG_KMS("Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance_test_active = 1;

	return test_result;
}

static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
{
	uint8_t test_result = DP_TEST_NAK;
	return test_result;
}

static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	uint8_t response = DP_TEST_NAK;
	uint8_t rxdata = 0;
	int status = 0;

	intel_dp->compliance_test_active = 0;
	intel_dp->compliance_test_type = 0;
	intel_dp->compliance_test_data = 0;

	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
	if (status <= 0) {
		DRM_DEBUG_KMS("Could not read test request from sink\n");
		goto update_status;
	}

	switch (rxdata) {
	case DP_TEST_LINK_TRAINING:
		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		DRM_DEBUG_KMS("EDID test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
		break;
	}

update_status:
	status = drm_dp_dpcd_write(&intel_dp->aux,
				   DP_TEST_RESPONSE,
				   &response, 1);
	if (status <= 0)
		DRM_DEBUG_KMS("Could not write test response to sink\n");
}

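/*
 * Service an MST short pulse: read the ESI block, retrain if channel EQ
 * was lost, hand the event to the topology manager, ack the serviced
 * bits back to the sink, and repeat while new events keep arriving.
 */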
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;

		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret) {
			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				for (retry = 0; retry < 3; retry++) {
					int wret;

					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3)
						break;
				}

				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}

/*
 * According to DP spec, section 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	if (!intel_encoder->base.crtc)
		return;

	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status))
		return;

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp))
		return;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);
		intel_dp_start_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}

/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		uint8_t reg;

		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
					    &reg, 1) < 0)
			return connector_status_unknown;

		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}

static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum drm_connector_status status;

	status = intel_panel_detect(dev);
	if (status == connector_status_unknown)
		status = connector_status_connected;

	return status;
}

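/*
 * Live-status helpers: each platform variant checks the hot-plug bit for
 * the given port in the relevant ISR register. Ports with no live-status
 * bit (eDP on PORT_A) are reported as always connected.
 */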
static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
				       struct intel_digital_port *port)
{
	u32 bit;

	switch (port->port) {
	case PORT_A:
		return true;
	case PORT_B:
		bit = SDE_PORTB_HOTPLUG;
		break;
	case PORT_C:
		bit = SDE_PORTC_HOTPLUG;
		break;
	case PORT_D:
		bit = SDE_PORTD_HOTPLUG;
		break;
	default:
		MISSING_CASE(port->port);
		return false;
	}

	return I915_READ(SDEISR) & bit;
}

static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
				       struct intel_digital_port *port)
{
	u32 bit;

	switch (port->port) {
	case PORT_A:
		return true;
	case PORT_B:
		bit = SDE_PORTB_HOTPLUG_CPT;
		break;
	case PORT_C:
		bit = SDE_PORTC_HOTPLUG_CPT;
		break;
	case PORT_D:
		bit = SDE_PORTD_HOTPLUG_CPT;
		break;
	case PORT_E:
		bit = SDE_PORTE_HOTPLUG_SPT;
		break;
	default:
		MISSING_CASE(port->port);
		return false;
	}

	return I915_READ(SDEISR) & bit;
}

static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
				       struct intel_digital_port *port)
{
	u32 bit;

	switch (port->port) {
	case PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
		break;
	case PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
		break;
	case PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
		break;
	default:
		MISSING_CASE(port->port);
		return false;
	}

	return I915_READ(PORT_HOTPLUG_STAT) & bit;
}

static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
				       struct intel_digital_port *port)
{
	u32 bit;

	switch (port->port) {
	case PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
		break;
	case PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
		break;
	case PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
		break;
	default:
		MISSING_CASE(port->port);
		return false;
	}

	return I915_READ(PORT_HOTPLUG_STAT) & bit;
}

static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
				       struct intel_digital_port *intel_dig_port)
{
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum port port;
	u32 bit;

	intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
	switch (port) {
	case PORT_A:
		bit = BXT_DE_PORT_HP_DDIA;
		break;
	case PORT_B:
		bit = BXT_DE_PORT_HP_DDIB;
		break;
	case PORT_C:
		bit = BXT_DE_PORT_HP_DDIC;
		break;
	default:
		MISSING_CASE(port);
		return false;
	}

	return I915_READ(GEN8_DE_PORT_ISR) & bit;
}

/**
 * intel_digital_port_connected - is the specified port connected?
 * @dev_priv: i915 private structure
 * @port: the port to test
 *
 * Return %true if @port is connected, %false otherwise.
 */
bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
				  struct intel_digital_port *port)
{
	if (HAS_PCH_IBX(dev_priv))
		return ibx_digital_port_connected(dev_priv, port);
	if (HAS_PCH_SPLIT(dev_priv))
		return cpt_digital_port_connected(dev_priv, port);
	if (IS_BROXTON(dev_priv))
		return bxt_digital_port_connected(dev_priv, port);
	if (IS_VALLEYVIEW(dev_priv))
		return vlv_digital_port_connected(dev_priv, port);

	return g4x_digital_port_connected(dev_priv, port);
}

static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	if (!intel_digital_port_connected(dev_priv, intel_dig_port))
		return connector_status_disconnected;

	return intel_dp_detect_dpcd(intel_dp);
}

static enum drm_connector_status
g4x_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp)) {
		enum drm_connector_status status;

		status = intel_panel_detect(dev);
		if (status == connector_status_unknown)
			status = connector_status_connected;
		return status;
	}

	if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
		return connector_status_disconnected;

	return intel_dp_detect_dpcd(intel_dp);
}

static struct edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return NULL;

		return drm_edid_duplicate(intel_connector->edid);
	} else
		return drm_get_edid(&intel_connector->base,
				    &intel_dp->aux.ddc);
}

static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct edid *edid;

	edid = intel_dp_get_edid(intel_dp);
	intel_connector->detect_edid = edid;

	if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
		intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
	else
		intel_dp->has_audio = drm_detect_monitor_audio(edid);
}

static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	kfree(intel_connector->detect_edid);
	intel_connector->detect_edid = NULL;

	intel_dp->has_audio = false;
}

static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;
	u8 sink_irq_vector;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(to_i915(dev), power_domain);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);
	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything with EDID on it.
		 */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	intel_display_power_put(to_i915(dev), power_domain);
	return status;
}

4838 static void
intel_dp_force(struct drm_connector * connector)4839 intel_dp_force(struct drm_connector *connector)
4840 {
4841 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4842 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4843 	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4844 	enum intel_display_power_domain power_domain;
4845 
4846 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4847 		      connector->base.id, connector->name);
4848 	intel_dp_unset_edid(intel_dp);
4849 
4850 	if (connector->status != connector_status_connected)
4851 		return;
4852 
4853 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
4854 	intel_display_power_get(dev_priv, power_domain);
4855 
4856 	intel_dp_set_edid(intel_dp);
4857 
4858 	intel_display_power_put(dev_priv, power_domain);
4859 
4860 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
4861 		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4862 }
4863 
intel_dp_get_modes(struct drm_connector * connector)4864 static int intel_dp_get_modes(struct drm_connector *connector)
4865 {
4866 	struct intel_connector *intel_connector = to_intel_connector(connector);
4867 	struct edid *edid;
4868 
4869 	edid = intel_connector->detect_edid;
4870 	if (edid) {
4871 		int ret = intel_connector_update_modes(connector, edid);
4872 		if (ret)
4873 			return ret;
4874 	}
4875 
4876 	/* if eDP has no EDID, fall back to fixed mode */
4877 	if (is_edp(intel_attached_dp(connector)) &&
4878 	    intel_connector->panel.fixed_mode) {
4879 		struct drm_display_mode *mode;
4880 
4881 		mode = drm_mode_duplicate(connector->dev,
4882 					  intel_connector->panel.fixed_mode);
4883 		if (mode) {
4884 			drm_mode_probed_add(connector, mode);
4885 			return 1;
4886 		}
4887 	}
4888 
4889 	return 0;
4890 }
4891 
4892 static bool
4893 intel_dp_detect_audio(struct drm_connector *connector)
4894 {
4895 	bool has_audio = false;
4896 	struct edid *edid;
4897 
4898 	edid = to_intel_connector(connector)->detect_edid;
4899 	if (edid)
4900 		has_audio = drm_detect_monitor_audio(edid);
4901 
4902 	return has_audio;
4903 }
4904 
4905 static int
4906 intel_dp_set_property(struct drm_connector *connector,
4907 		      struct drm_property *property,
4908 		      uint64_t val)
4909 {
4910 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
4911 	struct intel_connector *intel_connector = to_intel_connector(connector);
4912 	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4913 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4914 	int ret;
4915 
4916 	ret = drm_object_property_set_value(&connector->base, property, val);
4917 	if (ret)
4918 		return ret;
4919 
4920 	if (property == dev_priv->force_audio_property) {
4921 		int i = val;
4922 		bool has_audio;
4923 
4924 		if (i == intel_dp->force_audio)
4925 			return 0;
4926 
4927 		intel_dp->force_audio = i;
4928 
4929 		if (i == HDMI_AUDIO_AUTO)
4930 			has_audio = intel_dp_detect_audio(connector);
4931 		else
4932 			has_audio = (i == HDMI_AUDIO_ON);
4933 
4934 		if (has_audio == intel_dp->has_audio)
4935 			return 0;
4936 
4937 		intel_dp->has_audio = has_audio;
4938 		goto done;
4939 	}
4940 
4941 	if (property == dev_priv->broadcast_rgb_property) {
4942 		bool old_auto = intel_dp->color_range_auto;
4943 		bool old_range = intel_dp->limited_color_range;
4944 
4945 		switch (val) {
4946 		case INTEL_BROADCAST_RGB_AUTO:
4947 			intel_dp->color_range_auto = true;
4948 			break;
4949 		case INTEL_BROADCAST_RGB_FULL:
4950 			intel_dp->color_range_auto = false;
4951 			intel_dp->limited_color_range = false;
4952 			break;
4953 		case INTEL_BROADCAST_RGB_LIMITED:
4954 			intel_dp->color_range_auto = false;
4955 			intel_dp->limited_color_range = true;
4956 			break;
4957 		default:
4958 			return -EINVAL;
4959 		}
4960 
4961 		if (old_auto == intel_dp->color_range_auto &&
4962 		    old_range == intel_dp->limited_color_range)
4963 			return 0;
4964 
4965 		goto done;
4966 	}
4967 
4968 	if (is_edp(intel_dp) &&
4969 	    property == connector->dev->mode_config.scaling_mode_property) {
4970 		if (val == DRM_MODE_SCALE_NONE) {
4971 			DRM_DEBUG_KMS("no-scaling mode not supported\n");
4972 			return -EINVAL;
4973 		}
4974 
4975 		if (intel_connector->panel.fitting_mode == val) {
4976 			/* the eDP scaling property is not changed */
4977 			return 0;
4978 		}
4979 		intel_connector->panel.fitting_mode = val;
4980 
4981 		goto done;
4982 	}
4983 
4984 	return -EINVAL;
4985 
4986 done:
4987 	if (intel_encoder->base.crtc)
4988 		intel_crtc_restore_mode(intel_encoder->base.crtc);
4989 
4990 	return 0;
4991 }
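/*
 * Userspace reaches intel_dp_set_property() through the generic DRM property
 * interface. A hedged sketch of typical usage (property and value names
 * assumed to match what intel_attach_force_audio_property() and
 * intel_attach_broadcast_rgb_property() register), e.g. via xrandr:
 *
 *	xrandr --output DP1 --set "Broadcast RGB" "Limited 16:235"
 *	xrandr --output DP1 --set "audio" "on"
 *
 * Every accepted change funnels through the done: label above, which
 * restores the mode on the attached crtc so the change takes effect
 * immediately.
 */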
4992 
4993 static void
4994 intel_dp_connector_destroy(struct drm_connector *connector)
4995 {
4996 	struct intel_connector *intel_connector = to_intel_connector(connector);
4997 
4998 	kfree(intel_connector->detect_edid);
4999 
5000 	if (!IS_ERR_OR_NULL(intel_connector->edid))
5001 		kfree(intel_connector->edid);
5002 
5003 	/* Can't call is_edp() since the encoder may have been destroyed
5004 	 * already. */
5005 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5006 		intel_panel_fini(&intel_connector->panel);
5007 
5008 	drm_connector_cleanup(connector);
5009 	kfree(connector);
5010 }
5011 
5012 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5013 {
5014 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
5015 	struct intel_dp *intel_dp = &intel_dig_port->dp;
5016 
5017 	drm_dp_aux_unregister(&intel_dp->aux);
5018 	intel_dp_mst_encoder_cleanup(intel_dig_port);
5019 	if (is_edp(intel_dp)) {
5020 		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5021 		/*
5022 		 * vdd might still be enabled due to the delayed vdd off.
5023 		 * Make sure vdd is actually turned off here.
5024 		 */
5025 		pps_lock(intel_dp);
5026 		edp_panel_vdd_off_sync(intel_dp);
5027 		pps_unlock(intel_dp);
5028 
5029 		if (intel_dp->edp_notifier.notifier_call) {
5030 			unregister_reboot_notifier(&intel_dp->edp_notifier);
5031 			intel_dp->edp_notifier.notifier_call = NULL;
5032 		}
5033 	}
5034 	drm_encoder_cleanup(encoder);
5035 	kfree(intel_dig_port);
5036 }
5037 
5038 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5039 {
5040 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5041 
5042 	if (!is_edp(intel_dp))
5043 		return;
5044 
5045 	/*
5046 	 * vdd might still be enabled due to the delayed vdd off.
5047 	 * Make sure vdd is actually turned off here.
5048 	 */
5049 	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5050 	pps_lock(intel_dp);
5051 	edp_panel_vdd_off_sync(intel_dp);
5052 	pps_unlock(intel_dp);
5053 }
5054 
5055 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
5056 {
5057 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5058 	struct drm_device *dev = intel_dig_port->base.base.dev;
5059 	struct drm_i915_private *dev_priv = dev->dev_private;
5060 	enum intel_display_power_domain power_domain;
5061 
5062 	lockdep_assert_held(&dev_priv->pps_mutex);
5063 
5064 	if (!edp_have_panel_vdd(intel_dp))
5065 		return;
5066 
5067 	/*
5068 	 * The VDD bit needs a power domain reference, so if the bit is
5069 	 * already enabled when we boot or resume, grab this reference and
5070 	 * schedule a vdd off, so we don't hold on to the reference
5071 	 * indefinitely.
5072 	 */
5073 	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
5074 	power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
5075 	intel_display_power_get(dev_priv, power_domain);
5076 
5077 	edp_panel_vdd_schedule_off(intel_dp);
5078 }
5079 
5080 void intel_dp_encoder_reset(struct drm_encoder *encoder)
5081 {
5082 	struct intel_dp *intel_dp;
5083 
5084 	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
5085 		return;
5086 
5087 	intel_dp = enc_to_intel_dp(encoder);
5088 
5089 	pps_lock(intel_dp);
5090 
5091 	/*
5092 	 * Read out the current power sequencer assignment,
5093 	 * in case the BIOS did something with it.
5094 	 */
5095 	if (IS_VALLEYVIEW(encoder->dev))
5096 		vlv_initial_power_sequencer_setup(intel_dp);
5097 
5098 	intel_edp_panel_vdd_sanitize(intel_dp);
5099 
5100 	pps_unlock(intel_dp);
5101 }
5102 
5103 static const struct drm_connector_funcs intel_dp_connector_funcs = {
5104 	.dpms = drm_atomic_helper_connector_dpms,
5105 	.detect = intel_dp_detect,
5106 	.force = intel_dp_force,
5107 	.fill_modes = drm_helper_probe_single_connector_modes,
5108 	.set_property = intel_dp_set_property,
5109 	.atomic_get_property = intel_connector_atomic_get_property,
5110 	.destroy = intel_dp_connector_destroy,
5111 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5112 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
5113 };
5114 
5115 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
5116 	.get_modes = intel_dp_get_modes,
5117 	.mode_valid = intel_dp_mode_valid,
5118 	.best_encoder = intel_best_encoder,
5119 };
5120 
5121 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
5122 	.reset = intel_dp_encoder_reset,
5123 	.destroy = intel_dp_encoder_destroy,
5124 };
5125 
5126 enum irqreturn
5127 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5128 {
5129 	struct intel_dp *intel_dp = &intel_dig_port->dp;
5130 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5131 	struct drm_device *dev = intel_dig_port->base.base.dev;
5132 	struct drm_i915_private *dev_priv = dev->dev_private;
5133 	enum intel_display_power_domain power_domain;
5134 	enum irqreturn ret = IRQ_NONE;
5135 
5136 	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
5137 	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
5138 		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
5139 
5140 	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5141 		/*
5142 		 * vdd off can generate a long pulse on eDP which
5143 		 * would require vdd on to handle it, and thus we
5144 		 * would end up in an endless cycle of
5145 		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5146 		 */
5147 		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5148 			      port_name(intel_dig_port->port));
5149 		return IRQ_HANDLED;
5150 	}
5151 
5152 	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5153 		      port_name(intel_dig_port->port),
5154 		      long_hpd ? "long" : "short");
5155 
5156 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
5157 	intel_display_power_get(dev_priv, power_domain);
5158 
5159 	if (long_hpd) {
5160 		/* indicate that we need to restart link training */
5161 		intel_dp->train_set_valid = false;
5162 
5163 		if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5164 			goto mst_fail;
5165 
5166 		if (!intel_dp_get_dpcd(intel_dp))
5167 			goto mst_fail;
5169 
5170 		intel_dp_probe_oui(intel_dp);
5171 
5172 		if (!intel_dp_probe_mst(intel_dp)) {
5173 			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5174 			intel_dp_check_link_status(intel_dp);
5175 			drm_modeset_unlock(&dev->mode_config.connection_mutex);
5176 			goto mst_fail;
5177 		}
5178 	} else {
5179 		if (intel_dp->is_mst) {
5180 			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
5181 				goto mst_fail;
5182 		}
5183 
5184 		if (!intel_dp->is_mst) {
5185 			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5186 			intel_dp_check_link_status(intel_dp);
5187 			drm_modeset_unlock(&dev->mode_config.connection_mutex);
5188 		}
5189 	}
5190 
5191 	ret = IRQ_HANDLED;
5192 
5193 	goto put_power;
5194 mst_fail:
5195 	/* if we were in MST mode, and the device is not there, get out of MST mode */
5196 	if (intel_dp->is_mst) {
5197 		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5198 		intel_dp->is_mst = false;
5199 		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5200 	}
5201 put_power:
5202 	intel_display_power_put(dev_priv, power_domain);
5203 
5204 	return ret;
5205 }
5206 
5207 /* Return which DP Port should be selected for Transcoder DP control */
5208 int
5209 intel_trans_dp_port_sel(struct drm_crtc *crtc)
5210 {
5211 	struct drm_device *dev = crtc->dev;
5212 	struct intel_encoder *intel_encoder;
5213 	struct intel_dp *intel_dp;
5214 
5215 	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5216 		intel_dp = enc_to_intel_dp(&intel_encoder->base);
5217 
5218 		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5219 		    intel_encoder->type == INTEL_OUTPUT_EDP)
5220 			return intel_dp->output_reg;
5221 	}
5222 
5223 	return -1;
5224 }
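/*
 * Sketch of a hypothetical caller during PCH transcoder setup (register and
 * bit names assumed): map the returned output register to the TRANS_DP_CTL
 * port select field, treating -1 as "no DP port on this crtc":
 *
 *	switch (intel_trans_dp_port_sel(crtc)) {
 *	case PCH_DP_B:
 *		temp |= TRANS_DP_PORT_SEL_B;
 *		break;
 *	default:
 *		break;
 *	}
 */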
5225 
5226 /* check the VBT to see whether the eDP is on another port */
5227 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5228 {
5229 	struct drm_i915_private *dev_priv = dev->dev_private;
5230 	union child_device_config *p_child;
5231 	int i;
5232 	static const short port_mapping[] = {
5233 		[PORT_B] = DVO_PORT_DPB,
5234 		[PORT_C] = DVO_PORT_DPC,
5235 		[PORT_D] = DVO_PORT_DPD,
5236 		[PORT_E] = DVO_PORT_DPE,
5237 	};
5238 
5239 	/*
5240 	 * eDP is not supported on g4x, so bail out early just
5241 	 * for a bit of extra safety in case the VBT is bonkers.
5242 	 */
5243 	if (INTEL_INFO(dev)->gen < 5)
5244 		return false;
5245 
5246 	if (port == PORT_A)
5247 		return true;
5248 
5249 	if (!dev_priv->vbt.child_dev_num)
5250 		return false;
5251 
5252 	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5253 		p_child = dev_priv->vbt.child_dev + i;
5254 
5255 		if (p_child->common.dvo_port == port_mapping[port] &&
5256 		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5257 		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5258 			return true;
5259 	}
5260 	return false;
5261 }
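/*
 * Example of the check above: a VBT child device whose common.dvo_port is
 * DVO_PORT_DPC and whose device_type eDP bits match DEVICE_TYPE_eDP makes
 * intel_dp_is_edp(dev, PORT_C) return true; PORT_A is always assumed eDP.
 */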
5262 
5263 void
5264 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5265 {
5266 	struct intel_connector *intel_connector = to_intel_connector(connector);
5267 
5268 	intel_attach_force_audio_property(connector);
5269 	intel_attach_broadcast_rgb_property(connector);
5270 	intel_dp->color_range_auto = true;
5271 
5272 	if (is_edp(intel_dp)) {
5273 		drm_mode_create_scaling_mode_property(connector->dev);
5274 		drm_object_attach_property(
5275 			&connector->base,
5276 			connector->dev->mode_config.scaling_mode_property,
5277 			DRM_MODE_SCALE_ASPECT);
5278 		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5279 	}
5280 }
5281 
5282 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5283 {
5284 	intel_dp->last_power_cycle = jiffies;
5285 	intel_dp->last_power_on = jiffies;
5286 	intel_dp->last_backlight_off = jiffies;
5287 }
5288 
5289 static void
5290 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5291 				    struct intel_dp *intel_dp)
5292 {
5293 	struct drm_i915_private *dev_priv = dev->dev_private;
5294 	struct edp_power_seq cur, vbt, spec,
5295 		*final = &intel_dp->pps_delays;
5296 	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5297 	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
5298 
5299 	lockdep_assert_held(&dev_priv->pps_mutex);
5300 
5301 	/* already initialized? */
5302 	if (final->t11_t12 != 0)
5303 		return;
5304 
5305 	if (IS_BROXTON(dev)) {
5306 		/*
5307 		 * TODO: BXT has 2 sets of PPS registers.
5308 		 * The correct register for Broxton needs to be identified
5309 		 * using the VBT; hardcoded for now.
5310 		 */
5311 		pp_ctrl_reg = BXT_PP_CONTROL(0);
5312 		pp_on_reg = BXT_PP_ON_DELAYS(0);
5313 		pp_off_reg = BXT_PP_OFF_DELAYS(0);
5314 	} else if (HAS_PCH_SPLIT(dev)) {
5315 		pp_ctrl_reg = PCH_PP_CONTROL;
5316 		pp_on_reg = PCH_PP_ON_DELAYS;
5317 		pp_off_reg = PCH_PP_OFF_DELAYS;
5318 		pp_div_reg = PCH_PP_DIVISOR;
5319 	} else {
5320 		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5321 
5322 		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5323 		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5324 		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5325 		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5326 	}
5327 
5328 	/* Workaround: Need to write PP_CONTROL with the unlock key as
5329 	 * the very first thing. */
5330 	pp_ctl = ironlake_get_pp_control(intel_dp);
5331 
5332 	pp_on = I915_READ(pp_on_reg);
5333 	pp_off = I915_READ(pp_off_reg);
5334 	if (!IS_BROXTON(dev)) {
5335 		I915_WRITE(pp_ctrl_reg, pp_ctl);
5336 		pp_div = I915_READ(pp_div_reg);
5337 	}
5338 
5339 	/* Pull timing values out of registers */
5340 	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5341 		PANEL_POWER_UP_DELAY_SHIFT;
5342 
5343 	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5344 		PANEL_LIGHT_ON_DELAY_SHIFT;
5345 
5346 	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5347 		PANEL_LIGHT_OFF_DELAY_SHIFT;
5348 
5349 	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5350 		PANEL_POWER_DOWN_DELAY_SHIFT;
5351 
5352 	if (IS_BROXTON(dev)) {
5353 		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5354 			BXT_POWER_CYCLE_DELAY_SHIFT;
5355 		if (tmp > 0)
5356 			cur.t11_t12 = (tmp - 1) * 1000;
5357 		else
5358 			cur.t11_t12 = 0;
5359 	} else {
5360 		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5361 		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5362 	}
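	/*
	 * Worked example for the BXT decode above: tmp == 6 gives
	 * cur.t11_t12 = 5 * 1000 = 5000, i.e. 500 ms expressed in the
	 * 100 us units used for the other delays.
	 */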
5363 
5364 	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5365 		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5366 
5367 	vbt = dev_priv->vbt.edp_pps;
5368 
5369 	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5370 	 * our hw here, which are all in 100usec. */
5371 	spec.t1_t3 = 210 * 10;
5372 	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5373 	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5374 	spec.t10 = 500 * 10;
5375 	/* This one is special and actually in units of 100ms, but zero
5376 	 * based in the hw (so we need to add 100 ms). But the sw vbt
5377 	 * table multiplies it by 1000 to make it in units of 100usec,
5378 	 * too. */
5379 	spec.t11_t12 = (510 + 100) * 10;
5380 
5381 	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5382 		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5383 
5384 	/* Use the max of the register settings and vbt. If both are
5385 	 * unset, fall back to the spec limits. */
5386 #define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
5387 				       spec.field : \
5388 				       max(cur.field, vbt.field))
5389 	assign_final(t1_t3);
5390 	assign_final(t8);
5391 	assign_final(t9);
5392 	assign_final(t10);
5393 	assign_final(t11_t12);
5394 #undef assign_final
5395 
5396 #define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
5397 	intel_dp->panel_power_up_delay = get_delay(t1_t3);
5398 	intel_dp->backlight_on_delay = get_delay(t8);
5399 	intel_dp->backlight_off_delay = get_delay(t9);
5400 	intel_dp->panel_power_down_delay = get_delay(t10);
5401 	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5402 #undef get_delay
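	/*
	 * Worked example with illustrative numbers: with cur.t1_t3 == 0 and
	 * vbt.t1_t3 == 2100 (210 ms in 100 us units), assign_final() picks
	 * 2100, and get_delay() yields DIV_ROUND_UP(2100, 10) = 210 ms for
	 * panel_power_up_delay.
	 */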
5403 
5404 	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5405 		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5406 		      intel_dp->panel_power_cycle_delay);
5407 
5408 	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5409 		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
5410 }
5411 
5412 static void
5413 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5414 					      struct intel_dp *intel_dp)
5415 {
5416 	struct drm_i915_private *dev_priv = dev->dev_private;
5417 	u32 pp_on, pp_off, pp_div, port_sel = 0;
5418 	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5419 	int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
5420 	enum port port = dp_to_dig_port(intel_dp)->port;
5421 	const struct edp_power_seq *seq = &intel_dp->pps_delays;
5422 
5423 	lockdep_assert_held(&dev_priv->pps_mutex);
5424 
5425 	if (IS_BROXTON(dev)) {
5426 		/*
5427 		 * TODO: BXT has 2 sets of PPS registers.
5428 		 * The correct register for Broxton needs to be identified
5429 		 * using the VBT; hardcoded for now.
5430 		 */
5431 		pp_ctrl_reg = BXT_PP_CONTROL(0);
5432 		pp_on_reg = BXT_PP_ON_DELAYS(0);
5433 		pp_off_reg = BXT_PP_OFF_DELAYS(0);
5434 
5435 	} else if (HAS_PCH_SPLIT(dev)) {
5436 		pp_on_reg = PCH_PP_ON_DELAYS;
5437 		pp_off_reg = PCH_PP_OFF_DELAYS;
5438 		pp_div_reg = PCH_PP_DIVISOR;
5439 	} else {
5440 		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5441 
5442 		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5443 		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5444 		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5445 	}
5446 
5447 	/*
5448 	 * And finally store the new values in the power sequencer. The
5449 	 * backlight delays are set to 1 because we do manual waits on them. For
5450 	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5451 	 * we'll end up waiting for the backlight off delay twice: once when we
5452 	 * do the manual sleep, and once when we disable the panel and wait for
5453 	 * the PP_STATUS bit to become zero.
5454 	 */
5455 	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5456 		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5457 	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5458 		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5459 	/* Compute the divisor for the pp clock, simply match the Bspec
5460 	 * formula. */
5461 	if (IS_BROXTON(dev)) {
5462 		pp_div = I915_READ(pp_ctrl_reg);
5463 		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5464 		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5465 				<< BXT_POWER_CYCLE_DELAY_SHIFT);
5466 	} else {
5467 		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5468 		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5469 				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
5470 	}
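	/*
	 * Worked example for the non-BXT branch, assuming a typical 125 MHz
	 * rawclk (div == 125): the reference divider becomes
	 * (100 * 125) / 2 - 1 = 6249, and t11_t12 == 5000 (500 ms in 100 us
	 * units) programs DIV_ROUND_UP(5000, 1000) = 5 power cycle units.
	 */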
5471 
5472 	/* Haswell doesn't have any port selection bits for the panel
5473 	 * power sequencer any more. */
5474 	if (IS_VALLEYVIEW(dev)) {
5475 		port_sel = PANEL_PORT_SELECT_VLV(port);
5476 	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5477 		if (port == PORT_A)
5478 			port_sel = PANEL_PORT_SELECT_DPA;
5479 		else
5480 			port_sel = PANEL_PORT_SELECT_DPD;
5481 	}
5482 
5483 	pp_on |= port_sel;
5484 
5485 	I915_WRITE(pp_on_reg, pp_on);
5486 	I915_WRITE(pp_off_reg, pp_off);
5487 	if (IS_BROXTON(dev))
5488 		I915_WRITE(pp_ctrl_reg, pp_div);
5489 	else
5490 		I915_WRITE(pp_div_reg, pp_div);
5491 
5492 	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5493 		      I915_READ(pp_on_reg),
5494 		      I915_READ(pp_off_reg),
5495 		      IS_BROXTON(dev) ?
5496 		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
5497 		      I915_READ(pp_div_reg));
5498 }
5499 
5500 /**
5501  * intel_dp_set_drrs_state - program registers for RR switch to take effect
5502  * @dev: DRM device
5503  * @refresh_rate: RR to be programmed
5504  *
5505  * This function gets called when refresh rate (RR) has to be changed from
5506  * one frequency to another. Switches can be between high and low RR
5507  * supported by the panel or to any other RR based on media playback (in
5508  * this case, RR value needs to be passed from user space).
5509  *
5510  * The caller of this function needs to take a lock on dev_priv->drrs.
5511  */
5512 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5513 {
5514 	struct drm_i915_private *dev_priv = dev->dev_private;
5515 	struct intel_encoder *encoder;
5516 	struct intel_digital_port *dig_port = NULL;
5517 	struct intel_dp *intel_dp = dev_priv->drrs.dp;
5518 	struct intel_crtc_state *config = NULL;
5519 	struct intel_crtc *intel_crtc = NULL;
5520 	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5521 
5522 	if (refresh_rate <= 0) {
5523 		DRM_DEBUG_KMS("Refresh rate should be a positive non-zero value.\n");
5524 		return;
5525 	}
5526 
5527 	if (intel_dp == NULL) {
5528 		DRM_DEBUG_KMS("DRRS not supported.\n");
5529 		return;
5530 	}
5531 
5532 	/*
5533 	 * FIXME: This needs proper synchronization with psr state for some
5534 	 * platforms that cannot have PSR and DRRS enabled at the same time.
5535 	 */
5536 
5537 	dig_port = dp_to_dig_port(intel_dp);
5538 	encoder = &dig_port->base;
5539 	intel_crtc = to_intel_crtc(encoder->base.crtc);
5540 
5541 	if (!intel_crtc) {
5542 		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5543 		return;
5544 	}
5545 
5546 	config = intel_crtc->config;
5547 
5548 	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5549 		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5550 		return;
5551 	}
5552 
5553 	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5554 			refresh_rate)
5555 		index = DRRS_LOW_RR;
5556 
5557 	if (index == dev_priv->drrs.refresh_rate_type) {
5558 		DRM_DEBUG_KMS(
5559 			"DRRS requested for previously set RR...ignoring\n");
5560 		return;
5561 	}
5562 
5563 	if (!intel_crtc->active) {
5564 		DRM_DEBUG_KMS("eDP encoder disabled, CRTC not active\n");
5565 		return;
5566 	}
5567 
5568 	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5569 		switch (index) {
5570 		case DRRS_HIGH_RR:
5571 			intel_dp_set_m_n(intel_crtc, M1_N1);
5572 			break;
5573 		case DRRS_LOW_RR:
5574 			intel_dp_set_m_n(intel_crtc, M2_N2);
5575 			break;
5576 		case DRRS_MAX_RR:
5577 		default:
5578 			DRM_ERROR("Unsupported refresh rate type\n");
5579 		}
5580 	} else if (INTEL_INFO(dev)->gen > 6) {
5581 		u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5582 		u32 val;
5583 
5584 		val = I915_READ(reg);
5585 		if (index > DRRS_HIGH_RR) {
5586 			if (IS_VALLEYVIEW(dev))
5587 				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5588 			else
5589 				val |= PIPECONF_EDP_RR_MODE_SWITCH;
5590 		} else {
5591 			if (IS_VALLEYVIEW(dev))
5592 				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5593 			else
5594 				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5595 		}
5596 		I915_WRITE(reg, val);
5597 	}
5598 
5599 	dev_priv->drrs.refresh_rate_type = index;
5600 
5601 	DRM_DEBUG_KMS("eDP refresh rate set to %d Hz\n", refresh_rate);
5602 }
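/*
 * Usage sketch honouring the locking rule from the kerneldoc above (this is
 * essentially what intel_edp_drrs_downclock_work() does):
 *
 *	mutex_lock(&dev_priv->drrs.mutex);
 *	intel_dp_set_drrs_state(dev_priv->dev,
 *				intel_dp->attached_connector->panel.
 *				downclock_mode->vrefresh);
 *	mutex_unlock(&dev_priv->drrs.mutex);
 */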
5603 
5604 /**
5605  * intel_edp_drrs_enable - init drrs struct if supported
5606  * @intel_dp: DP struct
5607  *
5608  * Initializes frontbuffer_bits and drrs.dp
5609  */
5610 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5611 {
5612 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5613 	struct drm_i915_private *dev_priv = dev->dev_private;
5614 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5615 	struct drm_crtc *crtc = dig_port->base.base.crtc;
5616 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5617 
5618 	if (!intel_crtc->config->has_drrs) {
5619 		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5620 		return;
5621 	}
5622 
5623 	mutex_lock(&dev_priv->drrs.mutex);
5624 	if (WARN_ON(dev_priv->drrs.dp)) {
5625 		DRM_ERROR("DRRS already enabled\n");
5626 		goto unlock;
5627 	}
5628 
5629 	dev_priv->drrs.busy_frontbuffer_bits = 0;
5630 
5631 	dev_priv->drrs.dp = intel_dp;
5632 
5633 unlock:
5634 	mutex_unlock(&dev_priv->drrs.mutex);
5635 }
5636 
5637 /**
5638  * intel_edp_drrs_disable - Disable DRRS
5639  * @intel_dp: DP struct
5640  *
5641  */
5642 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5643 {
5644 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5645 	struct drm_i915_private *dev_priv = dev->dev_private;
5646 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5647 	struct drm_crtc *crtc = dig_port->base.base.crtc;
5648 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5649 
5650 	if (!intel_crtc->config->has_drrs)
5651 		return;
5652 
5653 	mutex_lock(&dev_priv->drrs.mutex);
5654 	if (!dev_priv->drrs.dp) {
5655 		mutex_unlock(&dev_priv->drrs.mutex);
5656 		return;
5657 	}
5658 
5659 	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5660 		intel_dp_set_drrs_state(dev_priv->dev,
5661 			intel_dp->attached_connector->panel.
5662 			fixed_mode->vrefresh);
5663 
5664 	dev_priv->drrs.dp = NULL;
5665 	mutex_unlock(&dev_priv->drrs.mutex);
5666 
5667 	cancel_delayed_work_sync(&dev_priv->drrs.work);
5668 }
5669 
5670 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5671 {
5672 	struct drm_i915_private *dev_priv =
5673 		container_of(work, typeof(*dev_priv), drrs.work.work);
5674 	struct intel_dp *intel_dp;
5675 
5676 	mutex_lock(&dev_priv->drrs.mutex);
5677 
5678 	intel_dp = dev_priv->drrs.dp;
5679 
5680 	if (!intel_dp)
5681 		goto unlock;
5682 
5683 	/*
5684 	 * The delayed work can race with an invalidate, hence we need to
5685 	 * recheck.
5686 	 */
5687 
5688 	if (dev_priv->drrs.busy_frontbuffer_bits)
5689 		goto unlock;
5690 
5691 	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5692 		intel_dp_set_drrs_state(dev_priv->dev,
5693 			intel_dp->attached_connector->panel.
5694 			downclock_mode->vrefresh);
5695 
5696 unlock:
5697 	mutex_unlock(&dev_priv->drrs.mutex);
5698 }
5699 
5700 /**
5701  * intel_edp_drrs_invalidate - Disable Idleness DRRS
5702  * @dev: DRM device
5703  * @frontbuffer_bits: frontbuffer plane tracking bits
5704  *
5705  * This function gets called every time rendering on the given planes starts.
5706  * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
5707  *
5708  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5709  */
5710 void intel_edp_drrs_invalidate(struct drm_device *dev,
5711 		unsigned frontbuffer_bits)
5712 {
5713 	struct drm_i915_private *dev_priv = dev->dev_private;
5714 	struct drm_crtc *crtc;
5715 	enum pipe pipe;
5716 
5717 	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5718 		return;
5719 
5720 	cancel_delayed_work(&dev_priv->drrs.work);
5721 
5722 	mutex_lock(&dev_priv->drrs.mutex);
5723 	if (!dev_priv->drrs.dp) {
5724 		mutex_unlock(&dev_priv->drrs.mutex);
5725 		return;
5726 	}
5727 
5728 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5729 	pipe = to_intel_crtc(crtc)->pipe;
5730 
5731 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5732 	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5733 
5734 	/* invalidate means busy screen hence upclock */
5735 	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5736 		intel_dp_set_drrs_state(dev_priv->dev,
5737 				dev_priv->drrs.dp->attached_connector->panel.
5738 				fixed_mode->vrefresh);
5739 
5740 	mutex_unlock(&dev_priv->drrs.mutex);
5741 }
5742 
5743 /**
5744  * intel_edp_drrs_flush - Restart Idleness DRRS
5745  * @dev: DRM device
5746  * @frontbuffer_bits: frontbuffer plane tracking bits
5747  *
5748  * This function gets called every time rendering on the given planes has
5749  * completed or a flip on a crtc is completed. So DRRS should be upclocked
5750  * (LOW_RR -> HIGH_RR). Idleness detection should also be started again
5751  * if no other planes are dirty.
5752  *
5753  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5754  */
5755 void intel_edp_drrs_flush(struct drm_device *dev,
5756 		unsigned frontbuffer_bits)
5757 {
5758 	struct drm_i915_private *dev_priv = dev->dev_private;
5759 	struct drm_crtc *crtc;
5760 	enum pipe pipe;
5761 
5762 	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5763 		return;
5764 
5765 	cancel_delayed_work(&dev_priv->drrs.work);
5766 
5767 	mutex_lock(&dev_priv->drrs.mutex);
5768 	if (!dev_priv->drrs.dp) {
5769 		mutex_unlock(&dev_priv->drrs.mutex);
5770 		return;
5771 	}
5772 
5773 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5774 	pipe = to_intel_crtc(crtc)->pipe;
5775 
5776 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5777 	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5778 
5779 	/* flush means busy screen hence upclock */
5780 	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5781 		intel_dp_set_drrs_state(dev_priv->dev,
5782 				dev_priv->drrs.dp->attached_connector->panel.
5783 				fixed_mode->vrefresh);
5784 
5785 	/*
5786 	 * flush also means no more activity hence schedule downclock, if all
5787 	 * other fbs are quiescent too
5788 	 */
5789 	if (!dev_priv->drrs.busy_frontbuffer_bits)
5790 		schedule_delayed_work(&dev_priv->drrs.work,
5791 				msecs_to_jiffies(1000));
5792 	mutex_unlock(&dev_priv->drrs.mutex);
5793 }
5794 
5795 /**
5796  * DOC: Display Refresh Rate Switching (DRRS)
5797  *
5798  * Display Refresh Rate Switching (DRRS) is a power conservation feature
5799  * which enables switching between low and high refresh rates,
5800  * dynamically, based on the usage scenario. This feature is applicable
5801  * to internal panels.
5802  *
5803  * Indication that the panel supports DRRS is given by the panel EDID, which
5804  * would list multiple refresh rates for one resolution.
5805  *
5806  * DRRS is of 2 types - static and seamless.
5807  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5808  * (may appear as a blink on screen) and is used in dock-undock scenario.
5809  * Seamless DRRS involves changing RR without any visual effect to the user
5810  * and can be used during normal system usage. This is done by programming
5811  * certain registers.
5812  *
5813  * Support for static/seamless DRRS may be indicated in the VBT based on
5814  * inputs from the panel spec.
5815  *
5816  * DRRS saves power by switching to low RR based on usage scenarios.
5817  *
5818  * eDP DRRS:-
5819  *        The implementation is based on frontbuffer tracking implementation.
5820  * When there is a disturbance on the screen triggered by user activity or a
5821  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5822  * When there is no movement on screen, after a timeout of 1 second, a switch
5823  * to low RR is made.
5824  *        For integration with frontbuffer tracking code,
5825  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5826  *
5827  * DRRS can be further extended to support other internal panels and also
5828  * the scenario of video playback wherein RR is set based on the rate
5829  * requested by userspace.
5830  */
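/*
 * Illustrative sketch (not part of the driver): a frontbuffer tracking
 * caller is expected to bracket screen updates roughly like this, with
 * frontbuffer_bits derived from the pipe, e.g. via
 * INTEL_FRONTBUFFER_ALL_MASK():
 *
 *	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
 *	... render to the frontbuffer ...
 *	intel_edp_drrs_flush(dev, frontbuffer_bits);
 *
 * invalidate() upclocks immediately; flush() re-arms the 1 second idleness
 * work that downclocks again via intel_edp_drrs_downclock_work().
 */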
5831 
5832 /**
5833  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5834  * @intel_connector: eDP connector
5835  * @fixed_mode: preferred mode of panel
5836  *
5837  * This function is called only once at driver load to initialize basic
5838  * DRRS stuff.
5839  *
5840  * Returns:
5841  * Downclock mode if panel supports it, else return NULL.
5842  * DRRS support is determined by the presence of downclock mode (apart
5843  * from VBT setting).
5844  */
5845 static struct drm_display_mode *
5846 intel_dp_drrs_init(struct intel_connector *intel_connector,
5847 		struct drm_display_mode *fixed_mode)
5848 {
5849 	struct drm_connector *connector = &intel_connector->base;
5850 	struct drm_device *dev = connector->dev;
5851 	struct drm_i915_private *dev_priv = dev->dev_private;
5852 	struct drm_display_mode *downclock_mode = NULL;
5853 
5854 	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5855 	mutex_init(&dev_priv->drrs.mutex);
5856 
5857 	if (INTEL_INFO(dev)->gen <= 6) {
5858 		DRM_DEBUG_KMS("DRRS is only supported for Gen7 and above\n");
5859 		return NULL;
5860 	}
5861 
5862 	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5863 		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5864 		return NULL;
5865 	}
5866 
5867 	downclock_mode = intel_find_panel_downclock(dev, fixed_mode,
5868 						    connector);
5869 
5870 	if (!downclock_mode) {
5871 		DRM_DEBUG_KMS("Downclock mode not found; DRRS not supported\n");
5872 		return NULL;
5873 	}
5874 
5875 	dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5876 
5877 	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5878 	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5879 	return downclock_mode;
5880 }
5881 
5882 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5883 				     struct intel_connector *intel_connector)
5884 {
5885 	struct drm_connector *connector = &intel_connector->base;
5886 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5887 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5888 	struct drm_device *dev = intel_encoder->base.dev;
5889 	struct drm_i915_private *dev_priv = dev->dev_private;
5890 	struct drm_display_mode *fixed_mode = NULL;
5891 	struct drm_display_mode *downclock_mode = NULL;
5892 	bool has_dpcd;
5893 	struct drm_display_mode *scan;
5894 	struct edid *edid;
5895 	enum pipe pipe = INVALID_PIPE;
5896 
5897 	if (!is_edp(intel_dp))
5898 		return true;
5899 
5900 	pps_lock(intel_dp);
5901 	intel_edp_panel_vdd_sanitize(intel_dp);
5902 	pps_unlock(intel_dp);
5903 
5904 	/* Cache DPCD and EDID for eDP. */
5905 	has_dpcd = intel_dp_get_dpcd(intel_dp);
5906 
5907 	if (has_dpcd) {
5908 		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5909 			dev_priv->no_aux_handshake =
5910 				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5911 				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5912 	} else {
5913 		/* if this fails, presume the device is a ghost */
5914 		DRM_INFO("failed to retrieve link info, disabling eDP\n");
5915 		return false;
5916 	}
5917 
5918 	/* We now know it's not a ghost, init power sequence regs. */
5919 	pps_lock(intel_dp);
5920 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5921 	pps_unlock(intel_dp);
5922 
5923 	mutex_lock(&dev->mode_config.mutex);
5924 	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5925 	if (edid) {
5926 		if (drm_add_edid_modes(connector, edid)) {
5927 			drm_mode_connector_update_edid_property(connector,
5928 								edid);
5929 			drm_edid_to_eld(connector, edid);
5930 		} else {
5931 			kfree(edid);
5932 			edid = ERR_PTR(-EINVAL);
5933 		}
5934 	} else {
5935 		edid = ERR_PTR(-ENOENT);
5936 	}
5937 	intel_connector->edid = edid;
5938 
5939 	/* prefer fixed mode from EDID if available */
5940 	list_for_each_entry(scan, &connector->probed_modes, head) {
5941 		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5942 			fixed_mode = drm_mode_duplicate(dev, scan);
5943 			downclock_mode = intel_dp_drrs_init(
5944 						intel_connector, fixed_mode);
5945 			break;
5946 		}
5947 	}
5948 
5949 	/* fallback to VBT if available for eDP */
5950 	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5951 		fixed_mode = drm_mode_duplicate(dev,
5952 					dev_priv->vbt.lfp_lvds_vbt_mode);
5953 		if (fixed_mode)
5954 			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5955 	}
5956 	mutex_unlock(&dev->mode_config.mutex);
5957 
5958 	if (IS_VALLEYVIEW(dev)) {
5959 		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5960 		register_reboot_notifier(&intel_dp->edp_notifier);
5961 
5962 		/*
5963 		 * Figure out the current pipe for the initial backlight setup.
5964 		 * If the current pipe isn't valid, try the PPS pipe, and if that
5965 		 * fails just assume pipe A.
5966 		 */
5967 		if (IS_CHERRYVIEW(dev))
5968 			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5969 		else
5970 			pipe = PORT_TO_PIPE(intel_dp->DP);
5971 
5972 		if (pipe != PIPE_A && pipe != PIPE_B)
5973 			pipe = intel_dp->pps_pipe;
5974 
5975 		if (pipe != PIPE_A && pipe != PIPE_B)
5976 			pipe = PIPE_A;
5977 
5978 		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5979 			      pipe_name(pipe));
5980 	}
5981 
5982 	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5983 	intel_connector->panel.backlight.power = intel_edp_backlight_power;
5984 	intel_panel_setup_backlight(connector, pipe);
5985 
5986 	return true;
5987 }
5988 
5989 bool
5990 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5991 			struct intel_connector *intel_connector)
5992 {
5993 	struct drm_connector *connector = &intel_connector->base;
5994 	struct intel_dp *intel_dp = &intel_dig_port->dp;
5995 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5996 	struct drm_device *dev = intel_encoder->base.dev;
5997 	struct drm_i915_private *dev_priv = dev->dev_private;
5998 	enum port port = intel_dig_port->port;
5999 	int type;
6000 
6001 	intel_dp->pps_pipe = INVALID_PIPE;
6002 
6003 	/* intel_dp vfuncs */
6004 	if (INTEL_INFO(dev)->gen >= 9)
6005 		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
6006 	else if (IS_VALLEYVIEW(dev))
6007 		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
6008 	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
6009 		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
6010 	else if (HAS_PCH_SPLIT(dev))
6011 		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
6012 	else
6013 		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
6014 
6015 	if (INTEL_INFO(dev)->gen >= 9)
6016 		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
6017 	else
6018 		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
6019 
6020 	/* Preserve the current hw state. */
6021 	intel_dp->DP = I915_READ(intel_dp->output_reg);
6022 	intel_dp->attached_connector = intel_connector;
6023 
6024 	if (intel_dp_is_edp(dev, port))
6025 		type = DRM_MODE_CONNECTOR_eDP;
6026 	else
6027 		type = DRM_MODE_CONNECTOR_DisplayPort;
6028 
6029 	/*
6030 	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
6031 	 * for DP the encoder type can be set by the caller to
6032 	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
6033 	 */
6034 	if (type == DRM_MODE_CONNECTOR_eDP)
6035 		intel_encoder->type = INTEL_OUTPUT_EDP;
6036 
6037 	/* eDP only on port B and/or C on vlv/chv */
6038 	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
6039 		    port != PORT_B && port != PORT_C))
6040 		return false;
6041 
6042 	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
6043 			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
6044 			port_name(port));
6045 
6046 	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
6047 	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
6048 
6049 	connector->interlace_allowed = true;
6050 	connector->doublescan_allowed = 0;
6051 
6052 	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
6053 			  edp_panel_vdd_work);
6054 
6055 	intel_connector_attach_encoder(intel_connector, intel_encoder);
6056 	drm_connector_register(connector);
6057 
6058 	if (HAS_DDI(dev))
6059 		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
6060 	else
6061 		intel_connector->get_hw_state = intel_connector_get_hw_state;
6062 	intel_connector->unregister = intel_dp_connector_unregister;
6063 
6064 	/* Set up the hotplug pin. */
6065 	switch (port) {
6066 	case PORT_A:
6067 		intel_encoder->hpd_pin = HPD_PORT_A;
6068 		break;
6069 	case PORT_B:
6070 		intel_encoder->hpd_pin = HPD_PORT_B;
6071 		if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
6072 			intel_encoder->hpd_pin = HPD_PORT_A;
6073 		break;
6074 	case PORT_C:
6075 		intel_encoder->hpd_pin = HPD_PORT_C;
6076 		break;
6077 	case PORT_D:
6078 		intel_encoder->hpd_pin = HPD_PORT_D;
6079 		break;
6080 	case PORT_E:
6081 		intel_encoder->hpd_pin = HPD_PORT_E;
6082 		break;
6083 	default:
6084 		BUG();
6085 	}
6086 
6087 	if (is_edp(intel_dp)) {
6088 		pps_lock(intel_dp);
6089 		intel_dp_init_panel_power_timestamps(intel_dp);
6090 		if (IS_VALLEYVIEW(dev))
6091 			vlv_initial_power_sequencer_setup(intel_dp);
6092 		else
6093 			intel_dp_init_panel_power_sequencer(dev, intel_dp);
6094 		pps_unlock(intel_dp);
6095 	}
6096 
6097 	intel_dp_aux_init(intel_dp, intel_connector);
6098 
6099 	/* init MST on ports that can support it */
6100 	if (HAS_DP_MST(dev) &&
6101 	    (port == PORT_B || port == PORT_C || port == PORT_D))
6102 		intel_dp_mst_encoder_init(intel_dig_port,
6103 					  intel_connector->base.base.id);
6104 
6105 	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
6106 		drm_dp_aux_unregister(&intel_dp->aux);
6107 		if (is_edp(intel_dp)) {
6108 			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
6109 			/*
6110 			 * vdd might still be enabled due to the delayed vdd off.
6111 			 * Make sure vdd is actually turned off here.
6112 			 */
6113 			pps_lock(intel_dp);
6114 			edp_panel_vdd_off_sync(intel_dp);
6115 			pps_unlock(intel_dp);
6116 		}
6117 		drm_connector_unregister(connector);
6118 		drm_connector_cleanup(connector);
6119 		return false;
6120 	}
6121 
6122 	intel_dp_add_properties(intel_dp, connector);
6123 
6124 	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
6125 	 * 0xd.  Failure to do so will result in spurious interrupts being
6126 	 * generated on the port when a cable is not attached.
6127 	 */
6128 	if (IS_G4X(dev) && !IS_GM45(dev)) {
6129 		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
6130 		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
6131 	}
6132 
6133 	i915_debugfs_connector_add(connector);
6134 
6135 	return true;
6136 }
6137 
6138 void
6139 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
6140 {
6141 	struct drm_i915_private *dev_priv = dev->dev_private;
6142 	struct intel_digital_port *intel_dig_port;
6143 	struct intel_encoder *intel_encoder;
6144 	struct drm_encoder *encoder;
6145 	struct intel_connector *intel_connector;
6146 
6147 	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
6148 	if (!intel_dig_port)
6149 		return;
6150 
6151 	intel_connector = intel_connector_alloc();
6152 	if (!intel_connector)
6153 		goto err_connector_alloc;
6154 
6155 	intel_encoder = &intel_dig_port->base;
6156 	encoder = &intel_encoder->base;
6157 
6158 	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6159 			 DRM_MODE_ENCODER_TMDS);
6160 
6161 	intel_encoder->compute_config = intel_dp_compute_config;
6162 	intel_encoder->disable = intel_disable_dp;
6163 	intel_encoder->get_hw_state = intel_dp_get_hw_state;
6164 	intel_encoder->get_config = intel_dp_get_config;
6165 	intel_encoder->suspend = intel_dp_encoder_suspend;
6166 	if (IS_CHERRYVIEW(dev)) {
6167 		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
6168 		intel_encoder->pre_enable = chv_pre_enable_dp;
6169 		intel_encoder->enable = vlv_enable_dp;
6170 		intel_encoder->post_disable = chv_post_disable_dp;
6171 		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
6172 	} else if (IS_VALLEYVIEW(dev)) {
6173 		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
6174 		intel_encoder->pre_enable = vlv_pre_enable_dp;
6175 		intel_encoder->enable = vlv_enable_dp;
6176 		intel_encoder->post_disable = vlv_post_disable_dp;
6177 	} else {
6178 		intel_encoder->pre_enable = g4x_pre_enable_dp;
6179 		intel_encoder->enable = g4x_enable_dp;
6180 		if (INTEL_INFO(dev)->gen >= 5)
6181 			intel_encoder->post_disable = ilk_post_disable_dp;
6182 	}
6183 
6184 	intel_dig_port->port = port;
6185 	intel_dig_port->dp.output_reg = output_reg;
6186 
6187 	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
6188 	if (IS_CHERRYVIEW(dev)) {
6189 		if (port == PORT_D)
6190 			intel_encoder->crtc_mask = 1 << 2;
6191 		else
6192 			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6193 	} else {
6194 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6195 	}
6196 	intel_encoder->cloneable = 0;
6197 
6198 	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
6199 	dev_priv->hotplug.irq_port[port] = intel_dig_port;
6200 
6201 	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6202 		goto err_init_connector;
6203 
6204 	return;
6205 
6206 err_init_connector:
6207 	drm_encoder_cleanup(encoder);
6208 	kfree(intel_connector);
6209 err_connector_alloc:
6210 	kfree(intel_dig_port);
6211 
6212 	return;
6213 }
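/*
 * Illustrative caller (a g4x-style display init path, names assumed): only
 * register the port if the hardware straps report it present:
 *
 *	if (I915_READ(DP_B) & DP_DETECTED)
 *		intel_dp_init(dev, DP_B, PORT_B);
 */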
6214 
6215 void intel_dp_mst_suspend(struct drm_device *dev)
6216 {
6217 	struct drm_i915_private *dev_priv = dev->dev_private;
6218 	int i;
6219 
6220 	/* disable MST */
6221 	for (i = 0; i < I915_MAX_PORTS; i++) {
6222 		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6223 		if (!intel_dig_port)
6224 			continue;
6225 
6226 		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6227 			if (!intel_dig_port->dp.can_mst)
6228 				continue;
6229 			if (intel_dig_port->dp.is_mst)
6230 				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6231 		}
6232 	}
6233 }
6234 
6235 void intel_dp_mst_resume(struct drm_device *dev)
6236 {
6237 	struct drm_i915_private *dev_priv = dev->dev_private;
6238 	int i;
6239 
6240 	for (i = 0; i < I915_MAX_PORTS; i++) {
6241 		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6242 		if (!intel_dig_port)
6243 			continue;
6244 		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6245 			int ret;
6246 
6247 			if (!intel_dig_port->dp.can_mst)
6248 				continue;
6249 
6250 			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6251 			if (ret != 0) {
6252 				intel_dp_check_mst_status(&intel_dig_port->dp);
6253 			}
6254 		}
6255 	}
6256 }
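/*
 * Note: intel_dp_mst_suspend()/intel_dp_mst_resume() are intended to be
 * called from the driver-wide suspend/resume paths, so that any active MST
 * topology is quiesced before the hardware loses power and is re-probed
 * afterwards (falling back to intel_dp_check_mst_status() when the
 * topology manager fails to resume).
 */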
6257