This source file includes the following definitions.
- intel_dp_is_edp
- intel_attached_dp
- intel_dp_set_sink_rates
- intel_dp_rate_limit_len
- intel_dp_common_len_rate_limit
- intel_dp_max_common_rate
- intel_dp_max_common_lane_count
- intel_dp_max_lane_count
- intel_dp_link_required
- intel_dp_max_data_rate
- intel_dp_downstream_max_dotclock
- cnl_max_source_rate
- icl_max_source_rate
- intel_dp_set_source_rates
- intersect_rates
- intel_dp_rate_index
- intel_dp_set_common_rates
- intel_dp_link_params_valid
- intel_dp_can_link_train_fallback_for_edp
- intel_dp_get_link_train_fallback_values
- intel_dp_mode_to_fec_clock
- intel_dp_dsc_get_output_bpp
- intel_dp_dsc_get_slice_count
- intel_dp_mode_valid
- intel_dp_pack_aux
- intel_dp_unpack_aux
- pps_lock
- pps_unlock
- vlv_power_sequencer_kick
- vlv_find_free_pps
- vlv_power_sequencer_pipe
- bxt_power_sequencer_idx
- vlv_pipe_has_pp_on
- vlv_pipe_has_vdd_on
- vlv_pipe_any
- vlv_initial_pps_pipe
- vlv_initial_power_sequencer_setup
- intel_power_sequencer_reset
- intel_pps_get_registers
- _pp_ctrl_reg
- _pp_stat_reg
- edp_notify_handler
- edp_have_panel_power
- edp_have_panel_vdd
- intel_dp_check_edp
- intel_dp_aux_wait_done
- g4x_get_aux_clock_divider
- ilk_get_aux_clock_divider
- hsw_get_aux_clock_divider
- skl_get_aux_clock_divider
- g4x_get_aux_send_ctl
- skl_get_aux_send_ctl
- intel_dp_aux_xfer
- intel_dp_aux_header
- intel_dp_aux_transfer
- g4x_aux_ctl_reg
- g4x_aux_data_reg
- ilk_aux_ctl_reg
- ilk_aux_data_reg
- skl_aux_ctl_reg
- skl_aux_data_reg
- intel_dp_aux_fini
- intel_dp_aux_init
- intel_dp_source_supports_hbr2
- intel_dp_source_supports_hbr3
- intel_dp_set_clock
- snprintf_int_array
- intel_dp_print_rates
- intel_dp_max_link_rate
- intel_dp_rate_select
- intel_dp_compute_rate
- intel_dp_source_supports_fec
- intel_dp_supports_fec
- intel_dp_source_supports_dsc
- intel_dp_supports_dsc
- intel_dp_compute_bpp
- intel_dp_adjust_compliance_config
- intel_dp_output_bpp
- intel_dp_compute_link_config_wide
- intel_dp_dsc_compute_bpp
- intel_dp_dsc_compute_config
- intel_dp_min_bpp
- intel_dp_compute_link_config
- intel_dp_ycbcr420_config
- intel_dp_limited_color_range
- intel_dp_compute_config
- intel_dp_set_link_params
- intel_dp_prepare
- wait_panel_status
- wait_panel_on
- wait_panel_off
- wait_panel_power_cycle
- wait_backlight_on
- edp_wait_backlight_off
- ironlake_get_pp_control
- edp_panel_vdd_on
- intel_edp_panel_vdd_on
- edp_panel_vdd_off_sync
- edp_panel_vdd_work
- edp_panel_vdd_schedule_off
- edp_panel_vdd_off
- edp_panel_on
- intel_edp_panel_on
- edp_panel_off
- intel_edp_panel_off
- _intel_edp_backlight_on
- intel_edp_backlight_on
- _intel_edp_backlight_off
- intel_edp_backlight_off
- intel_edp_backlight_power
- assert_dp_port
- assert_edp_pll
- ironlake_edp_pll_on
- ironlake_edp_pll_off
- downstream_hpd_needs_d0
- intel_dp_sink_set_decompression_state
- intel_dp_sink_dpms
- cpt_dp_port_selected
- intel_dp_port_enabled
- intel_dp_get_hw_state
- intel_dp_get_config
- intel_disable_dp
- g4x_disable_dp
- vlv_disable_dp
- g4x_post_disable_dp
- vlv_post_disable_dp
- chv_post_disable_dp
- _intel_dp_set_link_train
- intel_dp_enable_port
- intel_enable_dp
- g4x_enable_dp
- vlv_enable_dp
- g4x_pre_enable_dp
- vlv_detach_power_sequencer
- vlv_steal_power_sequencer
- vlv_init_panel_power_sequencer
- vlv_pre_enable_dp
- vlv_dp_pre_pll_enable
- chv_pre_enable_dp
- chv_dp_pre_pll_enable
- chv_dp_post_pll_disable
- intel_dp_get_link_status
- intel_dp_voltage_max
- intel_dp_pre_emphasis_max
- vlv_signal_levels
- chv_signal_levels
- g4x_signal_levels
- snb_cpu_edp_signal_levels
- ivb_cpu_edp_signal_levels
- intel_dp_set_signal_levels
- intel_dp_program_link_training_pattern
- intel_dp_set_idle_link_train
- intel_dp_link_down
- intel_dp_extended_receiver_capabilities
- intel_dp_read_dpcd
- intel_dp_get_colorimetry_status
- intel_dp_get_dsc_sink_cap
- intel_edp_init_dpcd
- intel_dp_get_dpcd
- intel_dp_sink_can_mst
- intel_dp_can_mst
- intel_dp_configure_mst
- intel_dp_get_sink_irq_esi
- intel_pixel_encoding_setup_vsc
- intel_dp_ycbcr_420_enable
- intel_dp_autotest_link_training
- intel_dp_autotest_video_pattern
- intel_dp_autotest_edid
- intel_dp_autotest_phy_pattern
- intel_dp_handle_test_request
- intel_dp_check_mst_status
- intel_dp_needs_link_retrain
- intel_dp_retrain_link
- intel_dp_hotplug
- intel_dp_check_service_irq
- intel_dp_short_pulse
- intel_dp_detect_dpcd
- edp_detect
- ibx_digital_port_connected
- cpt_digital_port_connected
- spt_digital_port_connected
- g4x_digital_port_connected
- gm45_digital_port_connected
- ilk_digital_port_connected
- snb_digital_port_connected
- ivb_digital_port_connected
- bdw_digital_port_connected
- bxt_digital_port_connected
- icl_combo_port_connected
- icl_digital_port_connected
- __intel_digital_port_connected
- intel_digital_port_connected
- intel_dp_get_edid
- intel_dp_set_edid
- intel_dp_unset_edid
- intel_dp_detect
- intel_dp_force
- intel_dp_get_modes
- intel_dp_connector_register
- intel_dp_connector_unregister
- intel_dp_encoder_flush_work
- intel_dp_encoder_destroy
- intel_dp_encoder_suspend
- intel_dp_hdcp_wait_for_cp_irq
- intel_dp_hdcp_write_an_aksv
- intel_dp_hdcp_read_bksv
- intel_dp_hdcp_read_bstatus
- intel_dp_hdcp_read_bcaps
- intel_dp_hdcp_repeater_present
- intel_dp_hdcp_read_ri_prime
- intel_dp_hdcp_read_ksv_ready
- intel_dp_hdcp_read_ksv_fifo
- intel_dp_hdcp_read_v_prime_part
- intel_dp_hdcp_toggle_signalling
- intel_dp_hdcp_check_link
- intel_dp_hdcp_capable
- intel_dp_hdcp2_read_rx_status
- hdcp2_detect_msg_availability
- intel_dp_hdcp2_wait_for_msg
- get_hdcp2_dp_msg_data
- intel_dp_hdcp2_write_msg
- get_receiver_id_list_size
- intel_dp_hdcp2_read_msg
- intel_dp_hdcp2_config_stream_type
- intel_dp_hdcp2_check_link
- intel_dp_hdcp2_capable
- intel_edp_panel_vdd_sanitize
- vlv_active_pipe
- intel_dp_encoder_reset
- intel_dp_hpd_pulse
- intel_dp_is_port_edp
- intel_dp_add_properties
- intel_dp_init_panel_power_timestamps
- intel_pps_readout_hw_state
- intel_pps_dump_state
- intel_pps_verify_state
- intel_dp_init_panel_power_sequencer
- intel_dp_init_panel_power_sequencer_registers
- intel_dp_pps_init
- intel_dp_set_drrs_state
- intel_edp_drrs_enable
- intel_edp_drrs_disable
- intel_edp_drrs_downclock_work
- intel_edp_drrs_invalidate
- intel_edp_drrs_flush
- intel_dp_drrs_init
- intel_edp_init_connector
- intel_dp_modeset_retry_work_fn
- intel_dp_init_connector
- intel_dp_init
- intel_dp_mst_suspend
- intel_dp_mst_resume
/*
 * Copyright © 2008 Intel Corporation. (MIT license header; authors
 * include Keith Packard <keithp@keithp.com>.)
 */
28 #include <linux/export.h>
29 #include <linux/i2c.h>
30 #include <linux/notifier.h>
31 #include <linux/reboot.h>
32 #include <linux/slab.h>
33 #include <linux/types.h>
34
35 #include <asm/byteorder.h>
36
37 #include <drm/drm_atomic_helper.h>
38 #include <drm/drm_crtc.h>
39 #include <drm/drm_dp_helper.h>
40 #include <drm/drm_edid.h>
41 #include <drm/drm_hdcp.h>
42 #include <drm/drm_probe_helper.h>
43 #include <drm/i915_drm.h>
44
45 #include "i915_debugfs.h"
46 #include "i915_drv.h"
47 #include "i915_trace.h"
48 #include "intel_atomic.h"
49 #include "intel_audio.h"
50 #include "intel_connector.h"
51 #include "intel_ddi.h"
52 #include "intel_display_types.h"
53 #include "intel_dp.h"
54 #include "intel_dp_link_training.h"
55 #include "intel_dp_mst.h"
56 #include "intel_dpio_phy.h"
57 #include "intel_fifo_underrun.h"
58 #include "intel_hdcp.h"
59 #include "intel_hdmi.h"
60 #include "intel_hotplug.h"
61 #include "intel_lspcon.h"
62 #include "intel_lvds.h"
63 #include "intel_panel.h"
64 #include "intel_psr.h"
65 #include "intel_sideband.h"
66 #include "intel_tc.h"
67 #include "intel_vdsc.h"
68
69 #define DP_DPRX_ESI_LEN 14
70
71 /* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
72 #define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER 61440
73 #define DP_DSC_MIN_SUPPORTED_BPC 8
74 #define DP_DSC_MAX_SUPPORTED_BPC 10
75
76 /* DP DSC throughput values used for slice count calculations, in kpixels/s */
77 #define DP_DSC_PEAK_PIXEL_RATE 2720000
78 #define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
79 #define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
80
81 /* DP DSC FEC overhead factor = 1/0.972261 */
82 #define DP_DSC_FEC_OVERHEAD_FACTOR 972261
83
84 /* Compliance test status bits */
85 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0
86 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
87 #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
88 #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
89
90 struct dp_link_dpll {
91 int clock;
92 struct dpll dpll;
93 };
94
95 static const struct dp_link_dpll g4x_dpll[] = {
96 { 162000,
97 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
98 { 270000,
99 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
100 };
101
102 static const struct dp_link_dpll pch_dpll[] = {
103 { 162000,
104 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
105 { 270000,
106 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
107 };
108
109 static const struct dp_link_dpll vlv_dpll[] = {
110 { 162000,
111 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
112 { 270000,
113 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
114 };
115
116 /*
117  * CHV supports eDP 1.4 with additional link rates; only the fixed
118  * rates are provided below.
119  */
120 static const struct dp_link_dpll chv_dpll[] = {
121 /*
122  * CHV requires programming a fractional division for m2.
123  * m2 is stored in fixed point format using the formula below:
124  * (m2_int << 22) | m2_fraction
125  */
126 { 162000, /* m2_int = 32, m2_fraction = 1677722 */
127 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
128 { 270000, /* m2_int = 27, m2_fraction = 0 */
129 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
130 };
131
132 /* Constants for DP DSC configurations */
133 static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
134
135 /* With single pipe configuration, HW is capable of supporting maximum
136  * of 4 slices per line.
137  */
138 static const u8 valid_dsc_slicecount[] = {1, 2, 4};
139
140 /**
141  * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
142  * @intel_dp: DP struct
143  *
144  * If a CPU or PCH DP output is attached to an eDP panel, this function
145  * will return true, and false otherwise.
146  */
147 bool intel_dp_is_edp(struct intel_dp *intel_dp)
148 {
149 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
150
151 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
152 }
153
154 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
155 {
156 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
157 }
158
159 static void intel_dp_link_down(struct intel_encoder *encoder,
160 const struct intel_crtc_state *old_crtc_state);
161 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
162 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
163 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
164 const struct intel_crtc_state *crtc_state);
165 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
166 enum pipe pipe);
167 static void intel_dp_unset_edid(struct intel_dp *intel_dp);
168
169 /* update sink rates from dpcd */
170 static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
171 {
172 static const int dp_rates[] = {
173 162000, 270000, 540000, 810000
174 };
175 int i, max_rate;
176
177 max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
178
179 for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
180 if (dp_rates[i] > max_rate)
181 break;
182 intel_dp->sink_rates[i] = dp_rates[i];
183 }
184
185 intel_dp->num_sink_rates = i;
186 }
187
188 /* Get length of rates array potentially limited by max_rate */
189 static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
190 {
191 int i;
192
193 /* Limit results by potentially reduced max rate */
194 for (i = 0; i < len; i++) {
195 if (rates[len - i - 1] <= max_rate)
196 return len - i;
197 }
198
199 return 0;
200 }
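/*
 * Example (illustrative numbers): with rates = { 162000, 270000, 540000 }
 * and max_rate = 270000, the scan from the top skips 540000, matches
 * rates[1] == 270000, and returns 2: the first two entries remain usable.
 */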
201
202 /* Get length of common rates array potentially limited by max_rate */
203 static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
204 int max_rate)
205 {
206 return intel_dp_rate_limit_len(intel_dp->common_rates,
207 intel_dp->num_common_rates, max_rate);
208 }
209
210 /* Theoretical max between source and sink */
211 static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
212 {
213 return intel_dp->common_rates[intel_dp->num_common_rates - 1];
214 }
215
216 /* Theoretical max between source and sink */
217 static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
218 {
219 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
220 int source_max = intel_dig_port->max_lanes;
221 int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
222 int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port);
223
224 return min3(source_max, sink_max, fia_max);
225 }
226
227 int intel_dp_max_lane_count(struct intel_dp *intel_dp)
228 {
229 return intel_dp->max_link_lane_count;
230 }
231
232 int
233 intel_dp_link_required(int pixel_clock, int bpp)
234 {
235 /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
236 return DIV_ROUND_UP(pixel_clock * bpp, 8);
237 }
238
239 int
240 intel_dp_max_data_rate(int max_link_clock, int max_lanes)
241 {
242 /*
243  * max_link_clock is the link symbol clock in kHz; with 8b/10b channel
244  * coding each lane carries one data byte per link symbol, so the
245  * total data rate in kBps is simply the product of the symbol clock
246  * and the lane count.
247  */
248 return max_link_clock * max_lanes;
249 }
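/*
 * Units sketch (illustrative numbers): a 270000 kHz symbol clock over 4
 * lanes yields 270000 * 4 = 1080000 kBps of payload. A 1920x1080@60 mode
 * (~148500 kHz pixel clock) at 24 bpp needs
 * intel_dp_link_required(148500, 24) = 148500 * 24 / 8 = 445500 kBps,
 * so it fits easily.
 */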
250
251 static int
252 intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
253 {
254 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
255 struct intel_encoder *encoder = &intel_dig_port->base;
256 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
257 int max_dotclk = dev_priv->max_dotclk_freq;
258 int ds_max_dotclk;
259
260 int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
261
262 if (type != DP_DS_PORT_TYPE_VGA)
263 return max_dotclk;
264
265 ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
266 intel_dp->downstream_ports);
267
268 if (ds_max_dotclk != 0)
269 max_dotclk = min(max_dotclk, ds_max_dotclk);
270
271 return max_dotclk;
272 }
273
274 static int cnl_max_source_rate(struct intel_dp *intel_dp)
275 {
276 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
277 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
278 enum port port = dig_port->base.port;
279
280 u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
281
282 /* Low voltage SKUs are limited to max of 5.4G */
283 if (voltage == VOLTAGE_INFO_0_85V)
284 return 540000;
285
286 /* For this SKU 8.1G is supported in all ports */
287 if (IS_CNL_WITH_PORT_F(dev_priv))
288 return 810000;
289
290 /* For other SKUs, max rate on ports A and D is 5.4G */
291 if (port == PORT_A || port == PORT_D)
292 return 540000;
293
294 return 810000;
295 }
296
297 static int icl_max_source_rate(struct intel_dp *intel_dp)
298 {
299 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
300 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
301 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
302
303 if (intel_phy_is_combo(dev_priv, phy) &&
304 !IS_ELKHARTLAKE(dev_priv) &&
305 !intel_dp_is_edp(intel_dp))
306 return 540000;
307
308 return 810000;
309 }
310
311 static void
312 intel_dp_set_source_rates(struct intel_dp *intel_dp)
313 {
314 /* The values must be in increasing order */
315 static const int cnl_rates[] = {
316 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
317 };
318 static const int bxt_rates[] = {
319 162000, 216000, 243000, 270000, 324000, 432000, 540000
320 };
321 static const int skl_rates[] = {
322 162000, 216000, 270000, 324000, 432000, 540000
323 };
324 static const int hsw_rates[] = {
325 162000, 270000, 540000
326 };
327 static const int g4x_rates[] = {
328 162000, 270000
329 };
330 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
331 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
332 const struct ddi_vbt_port_info *info =
333 &dev_priv->vbt.ddi_port_info[dig_port->base.port];
334 const int *source_rates;
335 int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;
336
337 /* This should only be done once */
338 WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);
339
340 if (INTEL_GEN(dev_priv) >= 10) {
341 source_rates = cnl_rates;
342 size = ARRAY_SIZE(cnl_rates);
343 if (IS_GEN(dev_priv, 10))
344 max_rate = cnl_max_source_rate(intel_dp);
345 else
346 max_rate = icl_max_source_rate(intel_dp);
347 } else if (IS_GEN9_LP(dev_priv)) {
348 source_rates = bxt_rates;
349 size = ARRAY_SIZE(bxt_rates);
350 } else if (IS_GEN9_BC(dev_priv)) {
351 source_rates = skl_rates;
352 size = ARRAY_SIZE(skl_rates);
353 } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
354 IS_BROADWELL(dev_priv)) {
355 source_rates = hsw_rates;
356 size = ARRAY_SIZE(hsw_rates);
357 } else {
358 source_rates = g4x_rates;
359 size = ARRAY_SIZE(g4x_rates);
360 }
361
362 if (max_rate && vbt_max_rate)
363 max_rate = min(max_rate, vbt_max_rate);
364 else if (vbt_max_rate)
365 max_rate = vbt_max_rate;
366
367 if (max_rate)
368 size = intel_dp_rate_limit_len(source_rates, size, max_rate);
369
370 intel_dp->source_rates = source_rates;
371 intel_dp->num_source_rates = size;
372 }
373
374 static int intersect_rates(const int *source_rates, int source_len,
375 const int *sink_rates, int sink_len,
376 int *common_rates)
377 {
378 int i = 0, j = 0, k = 0;
379
380 while (i < source_len && j < sink_len) {
381 if (source_rates[i] == sink_rates[j]) {
382 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
383 return k;
384 common_rates[k] = source_rates[i];
385 ++k;
386 ++i;
387 ++j;
388 } else if (source_rates[i] < sink_rates[j]) {
389 ++i;
390 } else {
391 ++j;
392 }
393 }
394 return k;
395 }
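/*
 * Both inputs are sorted ascending, so this is a two-pointer merge
 * intersection: advance whichever side holds the smaller rate and record
 * a common rate on equality. Runs in O(source_len + sink_len).
 */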
396
397 /* return index of rate in rates array, or -1 if not found */
398 static int intel_dp_rate_index(const int *rates, int len, int rate)
399 {
400 int i;
401
402 for (i = 0; i < len; i++)
403 if (rate == rates[i])
404 return i;
405
406 return -1;
407 }
408
409 static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
410 {
411 WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);
412
413 intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
414 intel_dp->num_source_rates,
415 intel_dp->sink_rates,
416 intel_dp->num_sink_rates,
417 intel_dp->common_rates);
418
419 /* Paranoia, there should always be something in common. */
420 if (WARN_ON(intel_dp->num_common_rates == 0)) {
421 intel_dp->common_rates[0] = 162000;
422 intel_dp->num_common_rates = 1;
423 }
424 }
425
426 static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
427 u8 lane_count)
428 {
429 /*
430  * FIXME: we need to synchronize the current link parameters with
431  * hardware readout. Currently fast link training doesn't work on
432  * boot-up.
433  */
434 if (link_rate == 0 ||
435 link_rate > intel_dp->max_link_rate)
436 return false;
437
438 if (lane_count == 0 ||
439 lane_count > intel_dp_max_lane_count(intel_dp))
440 return false;
441
442 return true;
443 }
444
445 static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
446 int link_rate,
447 u8 lane_count)
448 {
449 const struct drm_display_mode *fixed_mode =
450 intel_dp->attached_connector->panel.fixed_mode;
451 int mode_rate, max_rate;
452
453 mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
454 max_rate = intel_dp_max_data_rate(link_rate, lane_count);
455 if (mode_rate > max_rate)
456 return false;
457
458 return true;
459 }
460
461 int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
462 int link_rate, u8 lane_count)
463 {
464 int index;
465
466 index = intel_dp_rate_index(intel_dp->common_rates,
467 intel_dp->num_common_rates,
468 link_rate);
469 if (index > 0) {
470 if (intel_dp_is_edp(intel_dp) &&
471 !intel_dp_can_link_train_fallback_for_edp(intel_dp,
472 intel_dp->common_rates[index - 1],
473 lane_count)) {
474 DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
475 return 0;
476 }
477 intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
478 intel_dp->max_link_lane_count = lane_count;
479 } else if (lane_count > 1) {
480 if (intel_dp_is_edp(intel_dp) &&
481 !intel_dp_can_link_train_fallback_for_edp(intel_dp,
482 intel_dp_max_common_rate(intel_dp),
483 lane_count >> 1)) {
484 DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
485 return 0;
486 }
487 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
488 intel_dp->max_link_lane_count = lane_count >> 1;
489 } else {
490 DRM_ERROR("Link Training Unsuccessful\n");
491 return -1;
492 }
493
494 return 0;
495 }
496
497 u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
498 {
499 return div_u64(mul_u32_u32(mode_clock, 1000000U),
500 DP_DSC_FEC_OVERHEAD_FACTOR);
501 }
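/*
 * FEC consumes link bandwidth, so the effective clock the link must
 * service is mode_clock / 0.972261 (about 2.85% overhead). Example:
 * 148500 kHz becomes 148500 * 1000000 / 972261 = 152736 (truncated).
 */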
502
503 static u16 intel_dp_dsc_get_output_bpp(u32 link_clock, u32 lane_count,
504 u32 mode_clock, u32 mode_hdisplay)
505 {
506 u32 bits_per_pixel, max_bpp_small_joiner_ram;
507 int i;
508
509 /*
510  * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
511  * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
512  * for SST -> TimeSlotsPerMTP is 1,
513  * for MST -> TimeSlotsPerMTP has to be calculated
514  */
515 bits_per_pixel = (link_clock * lane_count * 8) /
516 intel_dp_mode_to_fec_clock(mode_clock);
517 DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel);
518
519 /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
520 max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER / mode_hdisplay;
521 DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram);
522
523 /*
524  * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
525  * & bpp from the small joiner RAM check above)
526  */
527 bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
528
529 /* Error out if the max bpp is less than smallest allowed valid bpp */
530 if (bits_per_pixel < valid_dsc_bpp[0]) {
531 DRM_DEBUG_KMS("Unsupported BPP %u, min %u\n",
532 bits_per_pixel, valid_dsc_bpp[0]);
533 return 0;
534 }
535
536 /* Find the nearest match in the array of known BPPs from VESA */
537 for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
538 if (bits_per_pixel < valid_dsc_bpp[i + 1])
539 break;
540 }
541 bits_per_pixel = valid_dsc_bpp[i];
542
543 /*
544  * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
545  * fractional part is 0
546  */
547 return bits_per_pixel << 4;
548 }
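/*
 * Worked example (illustrative numbers): HBR2 link (540000 kHz), 4 lanes,
 * 594000 kHz mode clock, 3840 pixels wide. The link budget gives
 * 540000 * 4 * 8 / 610947 = 28 bpp, the small joiner RAM limits it to
 * 61440 / 3840 = 16 bpp, and snapping down to the valid VESA bpps
 * {6, 8, 10, 12, 15} picks 15, returned as 15 << 4 = 240 in U6.4 format.
 */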
549
550 static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
551 int mode_clock, int mode_hdisplay)
552 {
553 u8 min_slice_count, i;
554 int max_slice_width;
555
556 if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
557 min_slice_count = DIV_ROUND_UP(mode_clock,
558 DP_DSC_MAX_ENC_THROUGHPUT_0);
559 else
560 min_slice_count = DIV_ROUND_UP(mode_clock,
561 DP_DSC_MAX_ENC_THROUGHPUT_1);
562
563 max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
564 if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
565 DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
566 max_slice_width);
567 return 0;
568 }
569
570 min_slice_count = min_t(u8, min_slice_count,
571 DIV_ROUND_UP(mode_hdisplay,
572 max_slice_width));
573
574 /* Find the closest match to the valid slice count values */
575 for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
576 if (valid_dsc_slicecount[i] >
577 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
578 false))
579 break;
580 if (min_slice_count <= valid_dsc_slicecount[i])
581 return valid_dsc_slicecount[i];
582 }
583
584 DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
585 return 0;
586 }
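/*
 * Example (illustrative numbers): a 594000 kHz mode clock is below
 * DP_DSC_PEAK_PIXEL_RATE, so min_slice_count = DIV_ROUND_UP(594000,
 * 340000) = 2; if the sink's maximum slice width permits, the first
 * valid slice count >= 2, i.e. 2, is returned.
 */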
587
588 static enum drm_mode_status
589 intel_dp_mode_valid(struct drm_connector *connector,
590 struct drm_display_mode *mode)
591 {
592 struct intel_dp *intel_dp = intel_attached_dp(connector);
593 struct intel_connector *intel_connector = to_intel_connector(connector);
594 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
595 struct drm_i915_private *dev_priv = to_i915(connector->dev);
596 int target_clock = mode->clock;
597 int max_rate, mode_rate, max_lanes, max_link_clock;
598 int max_dotclk;
599 u16 dsc_max_output_bpp = 0;
600 u8 dsc_slice_count = 0;
601
602 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
603 return MODE_NO_DBLESCAN;
604
605 max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
606
607 if (intel_dp_is_edp(intel_dp) && fixed_mode) {
608 if (mode->hdisplay > fixed_mode->hdisplay)
609 return MODE_PANEL;
610
611 if (mode->vdisplay > fixed_mode->vdisplay)
612 return MODE_PANEL;
613
614 target_clock = fixed_mode->clock;
615 }
616
617 max_link_clock = intel_dp_max_link_rate(intel_dp);
618 max_lanes = intel_dp_max_lane_count(intel_dp);
619
620 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
621 mode_rate = intel_dp_link_required(target_clock, 18);
622
623 /*
624  * Output bpp is stored in 6.4 format so right shift by 4 to get the
625  * integer value since we support only integer values of bpp.
626  */
627 if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
628 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
629 if (intel_dp_is_edp(intel_dp)) {
630 dsc_max_output_bpp =
631 drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
632 dsc_slice_count =
633 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
634 true);
635 } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
636 dsc_max_output_bpp =
637 intel_dp_dsc_get_output_bpp(max_link_clock,
638 max_lanes,
639 target_clock,
640 mode->hdisplay) >> 4;
641 dsc_slice_count =
642 intel_dp_dsc_get_slice_count(intel_dp,
643 target_clock,
644 mode->hdisplay);
645 }
646 }
647
648 if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
649 target_clock > max_dotclk)
650 return MODE_CLOCK_HIGH;
651
652 if (mode->clock < 10000)
653 return MODE_CLOCK_LOW;
654
655 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
656 return MODE_H_ILLEGAL;
657
658 return MODE_OK;
659 }
660
661 u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
662 {
663 int i;
664 u32 v = 0;
665
666 if (src_bytes > 4)
667 src_bytes = 4;
668 for (i = 0; i < src_bytes; i++)
669 v |= ((u32)src[i]) << ((3 - i) * 8);
670 return v;
671 }
672
673 static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
674 {
675 int i;
676 if (dst_bytes > 4)
677 dst_bytes = 4;
678 for (i = 0; i < dst_bytes; i++)
679 dst[i] = src >> ((3-i) * 8);
680 }
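/*
 * The AUX data registers hold up to 4 bytes each, MSB first. Example:
 * intel_dp_pack_aux((const u8 []){ 0x11, 0x22, 0x33, 0x44 }, 4) returns
 * 0x11223344, and intel_dp_unpack_aux(0x11223344, dst, 4) reverses it.
 */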
681
682 static void
683 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
684 static void
685 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
686 bool force_disable_vdd);
687 static void
688 intel_dp_pps_init(struct intel_dp *intel_dp);
689
690 static intel_wakeref_t
691 pps_lock(struct intel_dp *intel_dp)
692 {
693 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
694 intel_wakeref_t wakeref;
695
696 /*
697  * See intel_power_sequencer_reset() why we need
698  * a power domain reference here.
699  */
700 wakeref = intel_display_power_get(dev_priv,
701 intel_aux_power_domain(dp_to_dig_port(intel_dp)));
702
703 mutex_lock(&dev_priv->pps_mutex);
704
705 return wakeref;
706 }
707
708 static intel_wakeref_t
709 pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
710 {
711 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
712
713 mutex_unlock(&dev_priv->pps_mutex);
714 intel_display_power_put(dev_priv,
715 intel_aux_power_domain(dp_to_dig_port(intel_dp)),
716 wakeref);
717 return 0;
718 }
719
720 #define with_pps_lock(dp, wf) \
721 for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
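/*
 * with_pps_lock() scopes the lock with a for loop: pps_lock() returns a
 * non-zero wakeref so the body runs, and the "increment" expression
 * pps_unlock() returns 0, ending the loop after exactly one iteration.
 */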
722
723 static void
724 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
725 {
726 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
727 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
728 enum pipe pipe = intel_dp->pps_pipe;
729 bool pll_enabled, release_cl_override = false;
730 enum dpio_phy phy = DPIO_PHY(pipe);
731 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
732 u32 DP;
733
734 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
735 "skipping pipe %c power sequencer kick due to port %c being active\n",
736 pipe_name(pipe), port_name(intel_dig_port->base.port)))
737 return;
738
739 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
740 pipe_name(pipe), port_name(intel_dig_port->base.port));
741
742 /*
743  * Keep only the DP_DETECTED bit; everything else is reprogrammed below.
744  */
745 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
746 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
747 DP |= DP_PORT_WIDTH(1);
748 DP |= DP_LINK_TRAIN_PAT_1;
749
750 if (IS_CHERRYVIEW(dev_priv))
751 DP |= DP_PIPE_SEL_CHV(pipe);
752 else
753 DP |= DP_PIPE_SEL(pipe);
754
755 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
756
757 /*
758  * The DPLL for the pipe must be enabled for this to work.
759  * So enable it temporarily if it's not already enabled.
760  */
761 if (!pll_enabled) {
762 release_cl_override = IS_CHERRYVIEW(dev_priv) &&
763 !chv_phy_powergate_ch(dev_priv, phy, ch, true);
764
765 if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
766 &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
767 DRM_ERROR("Failed to force on pll for pipe %c!\n",
768 pipe_name(pipe));
769 return;
770 }
771 }
772
773 /*
774  * Similar magic as in intel_dp_enable_port().
775  * We _must_ do this port enable + disable trick
776  * to make this power sequencer lock onto the port.
777  * Otherwise even VDD force bit won't work.
778  */
779 I915_WRITE(intel_dp->output_reg, DP);
780 POSTING_READ(intel_dp->output_reg);
781
782 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
783 POSTING_READ(intel_dp->output_reg);
784
785 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
786 POSTING_READ(intel_dp->output_reg);
787
788 if (!pll_enabled) {
789 vlv_force_pll_off(dev_priv, pipe);
790
791 if (release_cl_override)
792 chv_phy_powergate_ch(dev_priv, phy, ch, false);
793 }
794 }
795
796 static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
797 {
798 struct intel_encoder *encoder;
799 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
800
801 /*
802  * We don't have power sequencer currently.
803  * Pick one that's not used by other ports.
804  */
805 for_each_intel_dp(&dev_priv->drm, encoder) {
806 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
807
808 if (encoder->type == INTEL_OUTPUT_EDP) {
809 WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
810 intel_dp->active_pipe != intel_dp->pps_pipe);
811
812 if (intel_dp->pps_pipe != INVALID_PIPE)
813 pipes &= ~(1 << intel_dp->pps_pipe);
814 } else {
815 WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);
816
817 if (intel_dp->active_pipe != INVALID_PIPE)
818 pipes &= ~(1 << intel_dp->active_pipe);
819 }
820 }
821
822 if (pipes == 0)
823 return INVALID_PIPE;
824
825 return ffs(pipes) - 1;
826 }
827
828 static enum pipe
829 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
830 {
831 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
832 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
833 enum pipe pipe;
834
835 lockdep_assert_held(&dev_priv->pps_mutex);
836
837 /* We should never land here with regular DP ports */
838 WARN_ON(!intel_dp_is_edp(intel_dp));
839
840 WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
841 intel_dp->active_pipe != intel_dp->pps_pipe);
842
843 if (intel_dp->pps_pipe != INVALID_PIPE)
844 return intel_dp->pps_pipe;
845
846 pipe = vlv_find_free_pps(dev_priv);
847
848 /*
849  * Didn't find one. This should not happen since there
850  * are two power sequencers and up to two eDP ports.
851  */
852 if (WARN_ON(pipe == INVALID_PIPE))
853 pipe = PIPE_A;
854
855 vlv_steal_power_sequencer(dev_priv, pipe);
856 intel_dp->pps_pipe = pipe;
857
858 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
859 pipe_name(intel_dp->pps_pipe),
860 port_name(intel_dig_port->base.port));
861
862 /* init power sequencer on this pipe and port */
863 intel_dp_init_panel_power_sequencer(intel_dp);
864 intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
865
866 /*
867  * Even vdd force doesn't work until we've made
868  * the power sequencer lock in on the port.
869  */
870 vlv_power_sequencer_kick(intel_dp);
871
872 return intel_dp->pps_pipe;
873 }
874
875 static int
876 bxt_power_sequencer_idx(struct intel_dp *intel_dp)
877 {
878 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
879 int backlight_controller = dev_priv->vbt.backlight.controller;
880
881 lockdep_assert_held(&dev_priv->pps_mutex);
882
883 /* We should never land here with regular DP ports */
884 WARN_ON(!intel_dp_is_edp(intel_dp));
885
886 if (!intel_dp->pps_reset)
887 return backlight_controller;
888
889 intel_dp->pps_reset = false;
890
891 /*
892  * Only the HW needs to be reprogrammed, the SW state is fixed and
893  * has been setup during connector init.
894  */
895 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
896
897 return backlight_controller;
898 }
899
900 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
901 enum pipe pipe);
902
903 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
904 enum pipe pipe)
905 {
906 return I915_READ(PP_STATUS(pipe)) & PP_ON;
907 }
908
909 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
910 enum pipe pipe)
911 {
912 return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
913 }
914
915 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
916 enum pipe pipe)
917 {
918 return true;
919 }
920
921 static enum pipe
922 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
923 enum port port,
924 vlv_pipe_check pipe_check)
925 {
926 enum pipe pipe;
927
928 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
929 u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
930 PANEL_PORT_SELECT_MASK;
931
932 if (port_sel != PANEL_PORT_SELECT_VLV(port))
933 continue;
934
935 if (!pipe_check(dev_priv, pipe))
936 continue;
937
938 return pipe;
939 }
940
941 return INVALID_PIPE;
942 }
943
944 static void
945 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
946 {
947 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
948 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
949 enum port port = intel_dig_port->base.port;
950
951 lockdep_assert_held(&dev_priv->pps_mutex);
952
953 /* try to find a pipe with this port selected */
954 /* first pick one where the panel is on */
955 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
956 vlv_pipe_has_pp_on);
957
958 if (intel_dp->pps_pipe == INVALID_PIPE)
959 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
960 vlv_pipe_has_vdd_on);
961
962 if (intel_dp->pps_pipe == INVALID_PIPE)
963 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
964 vlv_pipe_any);
965
966 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
967 if (intel_dp->pps_pipe == INVALID_PIPE) {
968 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
969 port_name(port));
970 return;
971 }
972
973 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
974 port_name(port), pipe_name(intel_dp->pps_pipe));
975
976 intel_dp_init_panel_power_sequencer(intel_dp);
977 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
978 }
979
980 void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
981 {
982 struct intel_encoder *encoder;
983
984 if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
985 !IS_GEN9_LP(dev_priv)))
986 return;
987
988 /*
989  * We can't grab pps_mutex here due to deadlock with power_domain
990  * mutex when power_domain functions are called while holding pps_mutex.
991  * That also means that in order to use pps_pipe the code needs to
992  * hold both a power domain reference and pps_mutex, and the power
993  * domain reference get/put must be done while _not_ holding pps_mutex.
994  *
995  * pps_{lock,unlock}() do these steps in the correct order, so one
996  * should use them always.
997  */
998 for_each_intel_dp(&dev_priv->drm, encoder) {
999 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1000
1001 WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
1002
1003 if (encoder->type != INTEL_OUTPUT_EDP)
1004 continue;
1005
1006 if (IS_GEN9_LP(dev_priv))
1007 intel_dp->pps_reset = true;
1008 else
1009 intel_dp->pps_pipe = INVALID_PIPE;
1010 }
1011 }
1012
1013 struct pps_registers {
1014 i915_reg_t pp_ctrl;
1015 i915_reg_t pp_stat;
1016 i915_reg_t pp_on;
1017 i915_reg_t pp_off;
1018 i915_reg_t pp_div;
1019 };
1020
1021 static void intel_pps_get_registers(struct intel_dp *intel_dp,
1022 struct pps_registers *regs)
1023 {
1024 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1025 int pps_idx = 0;
1026
1027 memset(regs, 0, sizeof(*regs));
1028
1029 if (IS_GEN9_LP(dev_priv))
1030 pps_idx = bxt_power_sequencer_idx(intel_dp);
1031 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1032 pps_idx = vlv_power_sequencer_pipe(intel_dp);
1033
1034 regs->pp_ctrl = PP_CONTROL(pps_idx);
1035 regs->pp_stat = PP_STATUS(pps_idx);
1036 regs->pp_on = PP_ON_DELAYS(pps_idx);
1037 regs->pp_off = PP_OFF_DELAYS(pps_idx);
1038
1039 /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
1040 if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
1041 regs->pp_div = INVALID_MMIO_REG;
1042 else
1043 regs->pp_div = PP_DIVISOR(pps_idx);
1044 }
1045
1046 static i915_reg_t
1047 _pp_ctrl_reg(struct intel_dp *intel_dp)
1048 {
1049 struct pps_registers regs;
1050
1051 intel_pps_get_registers(intel_dp, &regs);
1052
1053 return regs.pp_ctrl;
1054 }
1055
1056 static i915_reg_t
1057 _pp_stat_reg(struct intel_dp *intel_dp)
1058 {
1059 struct pps_registers regs;
1060
1061 intel_pps_get_registers(intel_dp, &regs);
1062
1063 return regs.pp_stat;
1064 }
1065
1066 /* Reboot notifier handler: forces the panel power-cycle (T12) delay on
1067  * restart so the panel power sequencing requirements are respected. */
1068 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
1069 void *unused)
1070 {
1071 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
1072 edp_notifier);
1073 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1074 intel_wakeref_t wakeref;
1075
1076 if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
1077 return 0;
1078
1079 with_pps_lock(intel_dp, wakeref) {
1080 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1081 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
1082 i915_reg_t pp_ctrl_reg, pp_div_reg;
1083 u32 pp_div;
1084
1085 pp_ctrl_reg = PP_CONTROL(pipe);
1086 pp_div_reg = PP_DIVISOR(pipe);
1087 pp_div = I915_READ(pp_div_reg);
1088 pp_div &= PP_REFERENCE_DIVIDER_MASK;
1089
1090 /* 0x1F write to PP_DIV_REG sets max cycle delay */
1091 I915_WRITE(pp_div_reg, pp_div | 0x1F);
1092 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS);
1093 msleep(intel_dp->panel_power_cycle_delay);
1094 }
1095 }
1096
1097 return 0;
1098 }
1099
1100 static bool edp_have_panel_power(struct intel_dp *intel_dp)
1101 {
1102 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1103
1104 lockdep_assert_held(&dev_priv->pps_mutex);
1105
1106 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1107 intel_dp->pps_pipe == INVALID_PIPE)
1108 return false;
1109
1110 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
1111 }
1112
1113 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
1114 {
1115 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1116
1117 lockdep_assert_held(&dev_priv->pps_mutex);
1118
1119 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1120 intel_dp->pps_pipe == INVALID_PIPE)
1121 return false;
1122
1123 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
1124 }
1125
1126 static void
1127 intel_dp_check_edp(struct intel_dp *intel_dp)
1128 {
1129 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1130
1131 if (!intel_dp_is_edp(intel_dp))
1132 return;
1133
1134 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
1135 WARN(1, "eDP powered off while attempting aux channel communication.\n");
1136 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
1137 I915_READ(_pp_stat_reg(intel_dp)),
1138 I915_READ(_pp_ctrl_reg(intel_dp)));
1139 }
1140 }
1141
1142 static u32
1143 intel_dp_aux_wait_done(struct intel_dp *intel_dp)
1144 {
1145 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1146 i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
1147 u32 status;
1148 bool done;
1149
1150 #define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
1151 done = wait_event_timeout(i915->gmbus_wait_queue, C,
1152 msecs_to_jiffies_timeout(10));
1153
1154
1155 trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
1156
1157 if (!done)
1158 DRM_ERROR("dp aux hw did not signal timeout!\n");
1159 #undef C
1160
1161 return status;
1162 }
1163
1164 static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1165 {
1166 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1167
1168 if (index)
1169 return 0;
1170
1171 /*
1172  * The clock divider is based off the hrawclk, and would like to run at
1173  * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
1174  */
1175 return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
1176 }
1177
1178 static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1179 {
1180 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1181 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1182
1183 if (index)
1184 return 0;
1185
1186 /*
1187  * The clock divider is based off the cdclk or PCH rawclk, and would
1188  * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
1189  * divide by 2000 and use that
1190  */
1191 if (dig_port->aux_ch == AUX_CH_A)
1192 return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
1193 else
1194 return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
1195 }
1196
1197 static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1198 {
1199 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1200 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1201
1202 if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
1203 /* Workaround for non-ULT HSW */
1204 switch (index) {
1205 case 0: return 63;
1206 case 1: return 72;
1207 default: return 0;
1208 }
1209 }
1210
1211 return ilk_get_aux_clock_divider(intel_dp, index);
1212 }
1213
1214 static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1215 {
1216 /*
1217  * SKL doesn't need us to program the AUX clock divider (Hardware will
1218  * derive the 1MHz from the cdclk automatically). We still implement the
1219  * get_aux_clock_divider vfunc to plug in into the existing code.
1220  */
1221 return index ? 0 : 1;
1222 }
1223
1224 static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
1225 int send_bytes,
1226 u32 aux_clock_divider)
1227 {
1228 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1229 struct drm_i915_private *dev_priv =
1230 to_i915(intel_dig_port->base.base.dev);
1231 u32 precharge, timeout;
1232
1233 if (IS_GEN(dev_priv, 6))
1234 precharge = 3;
1235 else
1236 precharge = 5;
1237
1238 if (IS_BROADWELL(dev_priv))
1239 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
1240 else
1241 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
1242
1243 return DP_AUX_CH_CTL_SEND_BUSY |
1244 DP_AUX_CH_CTL_DONE |
1245 DP_AUX_CH_CTL_INTERRUPT |
1246 DP_AUX_CH_CTL_TIME_OUT_ERROR |
1247 timeout |
1248 DP_AUX_CH_CTL_RECEIVE_ERROR |
1249 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1250 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
1251 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
1252 }
1253
1254 static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
1255 int send_bytes,
1256 u32 unused)
1257 {
1258 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1259 struct drm_i915_private *i915 =
1260 to_i915(intel_dig_port->base.base.dev);
1261 enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
1262 u32 ret;
1263
1264 ret = DP_AUX_CH_CTL_SEND_BUSY |
1265 DP_AUX_CH_CTL_DONE |
1266 DP_AUX_CH_CTL_INTERRUPT |
1267 DP_AUX_CH_CTL_TIME_OUT_ERROR |
1268 DP_AUX_CH_CTL_TIME_OUT_MAX |
1269 DP_AUX_CH_CTL_RECEIVE_ERROR |
1270 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1271 DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
1272 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
1273
1274 if (intel_phy_is_tc(i915, phy) &&
1275 intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
1276 ret |= DP_AUX_CH_CTL_TBT_IO;
1277
1278 return ret;
1279 }
1280
1281 static int
1282 intel_dp_aux_xfer(struct intel_dp *intel_dp,
1283 const u8 *send, int send_bytes,
1284 u8 *recv, int recv_size,
1285 u32 aux_send_ctl_flags)
1286 {
1287 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1288 struct drm_i915_private *i915 =
1289 to_i915(intel_dig_port->base.base.dev);
1290 struct intel_uncore *uncore = &i915->uncore;
1291 enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
1292 bool is_tc_port = intel_phy_is_tc(i915, phy);
1293 i915_reg_t ch_ctl, ch_data[5];
1294 u32 aux_clock_divider;
1295 enum intel_display_power_domain aux_domain =
1296 intel_aux_power_domain(intel_dig_port);
1297 intel_wakeref_t aux_wakeref;
1298 intel_wakeref_t pps_wakeref;
1299 int i, ret, recv_bytes;
1300 int try, clock = 0;
1301 u32 status;
1302 bool vdd;
1303
1304 ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
1305 for (i = 0; i < ARRAY_SIZE(ch_data); i++)
1306 ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
1307
1308 if (is_tc_port)
1309 intel_tc_port_lock(intel_dig_port);
1310
1311 aux_wakeref = intel_display_power_get(i915, aux_domain);
1312 pps_wakeref = pps_lock(intel_dp);
1313
1314 /*
1315  * We will be called with VDD already enabled for dpcd/edid/oui reads.
1316  * In such cases we want to leave VDD enabled and it's up to upper layers
1317  * to turn it off. But for eg. i2c-dev access we need to turn it on/off
1318  * ourselves.
1319  */
1320 vdd = edp_panel_vdd_on(intel_dp);
1321
1322 /*
1323  * dp aux is extremely sensitive to irq latency, hence request the
1324  * lowest possible wakeup latency and so prevent the cpu from going into
1325  * deep sleep states.
1326  */
1326 pm_qos_update_request(&i915->pm_qos, 0);
1327
1328 intel_dp_check_edp(intel_dp);
1329
1330 /* Try to wait for any previous AUX channel activity */
1331 for (try = 0; try < 3; try++) {
1332 status = intel_uncore_read_notrace(uncore, ch_ctl);
1333 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
1334 break;
1335 msleep(1);
1336 }
1337
1338 trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
1339
1340 if (try == 3) {
1341 static u32 last_status = -1;
1342 const u32 status = intel_uncore_read(uncore, ch_ctl);
1343
1344 if (status != last_status) {
1345 WARN(1, "dp_aux_ch not started status 0x%08x\n",
1346 status);
1347 last_status = status;
1348 }
1349
1350 ret = -EBUSY;
1351 goto out;
1352 }
1353
1354 /* Only 5 data registers! */
1355 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
1356 ret = -E2BIG;
1357 goto out;
1358 }
1359
1360 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
1361 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
1362 send_bytes,
1363 aux_clock_divider);
1364
1365 send_ctl |= aux_send_ctl_flags;
1366
1367 /* Must try at least 3 times according to DP spec */
1368 for (try = 0; try < 5; try++) {
1369 /* Load the send data into the aux channel data registers */
1370 for (i = 0; i < send_bytes; i += 4)
1371 intel_uncore_write(uncore,
1372 ch_data[i >> 2],
1373 intel_dp_pack_aux(send + i,
1374 send_bytes - i));
1375
1376 /* Send the command and wait for it to complete */
1377 intel_uncore_write(uncore, ch_ctl, send_ctl);
1378
1379 status = intel_dp_aux_wait_done(intel_dp);
1380
1381 /* Clear done status and any errors */
1382 intel_uncore_write(uncore,
1383 ch_ctl,
1384 status |
1385 DP_AUX_CH_CTL_DONE |
1386 DP_AUX_CH_CTL_TIME_OUT_ERROR |
1387 DP_AUX_CH_CTL_RECEIVE_ERROR);
1388
1389 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
1390  *   400us delay required for errors and timeouts
1391  *   Timeout errors from the HW already meet this
1392  *   requirement so skip to next iteration
1393  */
1394 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
1395 continue;
1396
1397 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1398 usleep_range(400, 500);
1399 continue;
1400 }
1401 if (status & DP_AUX_CH_CTL_DONE)
1402 goto done;
1403 }
1404 }
1405
1406 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1407 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
1408 ret = -EBUSY;
1409 goto out;
1410 }
1411
1412 done:
1413 /* Check for timeout or receive error.
1414  * Timeouts occur when the sink is not connected
1415  */
1416 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1417 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
1418 ret = -EIO;
1419 goto out;
1420 }
1421
1422 /* Timeouts occur when the device isn't connected, so they're
1423  * "normal" -- don't fill the kernel log with these */
1424 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
1425 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
1426 ret = -ETIMEDOUT;
1427 goto out;
1428 }
1429
1430 /* Unload any bytes sent back from the other side */
1431 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
1432 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
1433
1434 /*
1435  * By BSpec: "Message sizes of 0 or >20 are not allowed."
1436  * To avoid acting on a corrupt transfer, treat anything outside
1437  * that range as a failed (busy) transaction.
1438  */
1439 if (recv_bytes == 0 || recv_bytes > 20) {
1440 DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
1441 recv_bytes);
1442 ret = -EBUSY;
1443 goto out;
1444 }
1445
1446 if (recv_bytes > recv_size)
1447 recv_bytes = recv_size;
1448
1449 for (i = 0; i < recv_bytes; i += 4)
1450 intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
1451 recv + i, recv_bytes - i);
1452
1453 ret = recv_bytes;
1454 out:
1455 pm_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);
1456
1457 if (vdd)
1458 edp_panel_vdd_off(intel_dp, false);
1459
1460 pps_unlock(intel_dp, pps_wakeref);
1461 intel_display_power_put_async(i915, aux_domain, aux_wakeref);
1462
1463 if (is_tc_port)
1464 intel_tc_port_unlock(intel_dig_port);
1465
1466 return ret;
1467 }
1468
1469 #define BARE_ADDRESS_SIZE 3
1470 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
1471
1472 static void
1473 intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
1474 const struct drm_dp_aux_msg *msg)
1475 {
1476 txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
1477 txbuf[1] = (msg->address >> 8) & 0xff;
1478 txbuf[2] = msg->address & 0xff;
1479 txbuf[3] = msg->size - 1;
1480 }
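/*
 * AUX header layout: 4-bit command in the high nibble of byte 0, a
 * 20-bit address across bytes 0-2, then length minus one. Example: a
 * native read (request 0x9) of one byte from DPCD address 0x00000
 * produces txbuf = { 0x90, 0x00, 0x00, 0x00 }.
 */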
1481
1482 static ssize_t
1483 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
1484 {
1485 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
1486 u8 txbuf[20], rxbuf[20];
1487 size_t txsize, rxsize;
1488 int ret;
1489
1490 intel_dp_aux_header(txbuf, msg);
1491
1492 switch (msg->request & ~DP_AUX_I2C_MOT) {
1493 case DP_AUX_NATIVE_WRITE:
1494 case DP_AUX_I2C_WRITE:
1495 case DP_AUX_I2C_WRITE_STATUS_UPDATE:
1496 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
1497 rxsize = 2;
1498
1499 if (WARN_ON(txsize > 20))
1500 return -E2BIG;
1501
1502 WARN_ON(!msg->buffer != !msg->size);
1503
1504 if (msg->buffer)
1505 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
1506
1507 ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
1508 rxbuf, rxsize, 0);
1509 if (ret > 0) {
1510 msg->reply = rxbuf[0] >> 4;
1511
1512 if (ret > 1) {
1513 /* Number of bytes written in a short write. */
1514 ret = clamp_t(int, rxbuf[1], 0, msg->size);
1515 } else {
1516 /* Return payload size. */
1517 ret = msg->size;
1518 }
1519 }
1520 break;
1521
1522 case DP_AUX_NATIVE_READ:
1523 case DP_AUX_I2C_READ:
1524 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
1525 rxsize = msg->size + 1;
1526
1527 if (WARN_ON(rxsize > 20))
1528 return -E2BIG;
1529
1530 ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
1531 rxbuf, rxsize, 0);
1532 if (ret > 0) {
1533 msg->reply = rxbuf[0] >> 4;
1534
1535 /*
1536  * Assume happy day, and copy the data. The caller is
1537  * expected to check msg->reply before touching it.
1538  *
1539  * Return payload size.
1540  */
1540 ret--;
1541 memcpy(msg->buffer, rxbuf + 1, ret);
1542 }
1543 break;
1544
1545 default:
1546 ret = -EINVAL;
1547 break;
1548 }
1549
1550 return ret;
1551 }
1552
1553
1554 static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
1555 {
1556 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1557 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1558 enum aux_ch aux_ch = dig_port->aux_ch;
1559
1560 switch (aux_ch) {
1561 case AUX_CH_B:
1562 case AUX_CH_C:
1563 case AUX_CH_D:
1564 return DP_AUX_CH_CTL(aux_ch);
1565 default:
1566 MISSING_CASE(aux_ch);
1567 return DP_AUX_CH_CTL(AUX_CH_B);
1568 }
1569 }
1570
1571 static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
1572 {
1573 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1574 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1575 enum aux_ch aux_ch = dig_port->aux_ch;
1576
1577 switch (aux_ch) {
1578 case AUX_CH_B:
1579 case AUX_CH_C:
1580 case AUX_CH_D:
1581 return DP_AUX_CH_DATA(aux_ch, index);
1582 default:
1583 MISSING_CASE(aux_ch);
1584 return DP_AUX_CH_DATA(AUX_CH_B, index);
1585 }
1586 }
1587
1588 static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
1589 {
1590 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1591 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1592 enum aux_ch aux_ch = dig_port->aux_ch;
1593
1594 switch (aux_ch) {
1595 case AUX_CH_A:
1596 return DP_AUX_CH_CTL(aux_ch);
1597 case AUX_CH_B:
1598 case AUX_CH_C:
1599 case AUX_CH_D:
1600 return PCH_DP_AUX_CH_CTL(aux_ch);
1601 default:
1602 MISSING_CASE(aux_ch);
1603 return DP_AUX_CH_CTL(AUX_CH_A);
1604 }
1605 }
1606
1607 static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
1608 {
1609 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1610 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1611 enum aux_ch aux_ch = dig_port->aux_ch;
1612
1613 switch (aux_ch) {
1614 case AUX_CH_A:
1615 return DP_AUX_CH_DATA(aux_ch, index);
1616 case AUX_CH_B:
1617 case AUX_CH_C:
1618 case AUX_CH_D:
1619 return PCH_DP_AUX_CH_DATA(aux_ch, index);
1620 default:
1621 MISSING_CASE(aux_ch);
1622 return DP_AUX_CH_DATA(AUX_CH_A, index);
1623 }
1624 }
1625
1626 static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
1627 {
1628 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1629 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1630 enum aux_ch aux_ch = dig_port->aux_ch;
1631
1632 switch (aux_ch) {
1633 case AUX_CH_A:
1634 case AUX_CH_B:
1635 case AUX_CH_C:
1636 case AUX_CH_D:
1637 case AUX_CH_E:
1638 case AUX_CH_F:
1639 return DP_AUX_CH_CTL(aux_ch);
1640 default:
1641 MISSING_CASE(aux_ch);
1642 return DP_AUX_CH_CTL(AUX_CH_A);
1643 }
1644 }
1645
1646 static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
1647 {
1648 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1649 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1650 enum aux_ch aux_ch = dig_port->aux_ch;
1651
1652 switch (aux_ch) {
1653 case AUX_CH_A:
1654 case AUX_CH_B:
1655 case AUX_CH_C:
1656 case AUX_CH_D:
1657 case AUX_CH_E:
1658 case AUX_CH_F:
1659 return DP_AUX_CH_DATA(aux_ch, index);
1660 default:
1661 MISSING_CASE(aux_ch);
1662 return DP_AUX_CH_DATA(AUX_CH_A, index);
1663 }
1664 }
1665
1666 static void
1667 intel_dp_aux_fini(struct intel_dp *intel_dp)
1668 {
1669 kfree(intel_dp->aux.name);
1670 }
1671
1672 static void
1673 intel_dp_aux_init(struct intel_dp *intel_dp)
1674 {
1675 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1676 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1677 struct intel_encoder *encoder = &dig_port->base;
1678
1679 if (INTEL_GEN(dev_priv) >= 9) {
1680 intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
1681 intel_dp->aux_ch_data_reg = skl_aux_data_reg;
1682 } else if (HAS_PCH_SPLIT(dev_priv)) {
1683 intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
1684 intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
1685 } else {
1686 intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
1687 intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
1688 }
1689
1690 if (INTEL_GEN(dev_priv) >= 9)
1691 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
1692 else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
1693 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
1694 else if (HAS_PCH_SPLIT(dev_priv))
1695 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
1696 else
1697 intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
1698
1699 if (INTEL_GEN(dev_priv) >= 9)
1700 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
1701 else
1702 intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
1703
1704 drm_dp_aux_init(&intel_dp->aux);
1705
1706 /* Failure to allocate our preferred name is not critical */
1707 intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
1708 port_name(encoder->port));
1709 intel_dp->aux.transfer = intel_dp_aux_transfer;
1710 }
1711
1712 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1713 {
1714 int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1715
1716 return max_rate >= 540000;
1717 }
1718
1719 bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
1720 {
1721 int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1722
1723 return max_rate >= 810000;
1724 }
1725
1726 static void
1727 intel_dp_set_clock(struct intel_encoder *encoder,
1728 struct intel_crtc_state *pipe_config)
1729 {
1730 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1731 const struct dp_link_dpll *divisor = NULL;
1732 int i, count = 0;
1733
1734 if (IS_G4X(dev_priv)) {
1735 divisor = g4x_dpll;
1736 count = ARRAY_SIZE(g4x_dpll);
1737 } else if (HAS_PCH_SPLIT(dev_priv)) {
1738 divisor = pch_dpll;
1739 count = ARRAY_SIZE(pch_dpll);
1740 } else if (IS_CHERRYVIEW(dev_priv)) {
1741 divisor = chv_dpll;
1742 count = ARRAY_SIZE(chv_dpll);
1743 } else if (IS_VALLEYVIEW(dev_priv)) {
1744 divisor = vlv_dpll;
1745 count = ARRAY_SIZE(vlv_dpll);
1746 }
1747
1748 if (divisor && count) {
1749 for (i = 0; i < count; i++) {
1750 if (pipe_config->port_clock == divisor[i].clock) {
1751 pipe_config->dpll = divisor[i].dpll;
1752 pipe_config->clock_set = true;
1753 break;
1754 }
1755 }
1756 }
1757 }
1758
1759 static void snprintf_int_array(char *str, size_t len,
1760 const int *array, int nelem)
1761 {
1762 int i;
1763
1764 str[0] = '\0';
1765
1766 for (i = 0; i < nelem; i++) {
1767 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1768 if (r >= len)
1769 return;
1770 str += r;
1771 len -= r;
1772 }
1773 }
1774
1775 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1776 {
1777 char str[128];
1778
1779 if ((drm_debug & DRM_UT_KMS) == 0)
1780 return;
1781
1782 snprintf_int_array(str, sizeof(str),
1783 intel_dp->source_rates, intel_dp->num_source_rates);
1784 DRM_DEBUG_KMS("source rates: %s\n", str);
1785
1786 snprintf_int_array(str, sizeof(str),
1787 intel_dp->sink_rates, intel_dp->num_sink_rates);
1788 DRM_DEBUG_KMS("sink rates: %s\n", str);
1789
1790 snprintf_int_array(str, sizeof(str),
1791 intel_dp->common_rates, intel_dp->num_common_rates);
1792 DRM_DEBUG_KMS("common rates: %s\n", str);
1793 }
1794
1795 int
1796 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1797 {
1798 int len;
1799
1800 len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
1801 if (WARN_ON(len <= 0))
1802 return 162000;
1803
1804 return intel_dp->common_rates[len - 1];
1805 }
1806
1807 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1808 {
1809 int i = intel_dp_rate_index(intel_dp->sink_rates,
1810 intel_dp->num_sink_rates, rate);
1811
1812 if (WARN_ON(i < 0))
1813 i = 0;
1814
1815 return i;
1816 }
1817
1818 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1819 u8 *link_bw, u8 *rate_select)
1820 {
1821 /* eDP 1.4 rate select method. */
1822 if (intel_dp->use_rate_select) {
1823 *link_bw = 0;
1824 *rate_select =
1825 intel_dp_rate_select(intel_dp, port_clock);
1826 } else {
1827 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1828 *rate_select = 0;
1829 }
1830 }
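/*
 * Example: for a 270000 kHz port clock without eDP 1.4 rate select,
 * *link_bw becomes DP_LINK_BW_2_7 (0x0a) and *rate_select is 0; with
 * rate select in use, *link_bw is 0 and *rate_select carries the index
 * of 270000 in the sink's rate table instead.
 */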
1831
1832 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
1833 const struct intel_crtc_state *pipe_config)
1834 {
1835 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1836
1837 return INTEL_GEN(dev_priv) >= 11 &&
1838 pipe_config->cpu_transcoder != TRANSCODER_A;
1839 }
1840
1841 static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
1842 const struct intel_crtc_state *pipe_config)
1843 {
1844 return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
1845 drm_dp_sink_supports_fec(intel_dp->fec_capable);
1846 }
1847
1848 static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
1849 const struct intel_crtc_state *pipe_config)
1850 {
1851 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1852
1853 return INTEL_GEN(dev_priv) >= 10 &&
1854 pipe_config->cpu_transcoder != TRANSCODER_A;
1855 }
1856
1857 static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
1858 const struct intel_crtc_state *pipe_config)
1859 {
1860 if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
1861 return false;
1862
1863 return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
1864 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
1865 }
1866
1867 static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
1868 struct intel_crtc_state *pipe_config)
1869 {
1870 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1871 struct intel_connector *intel_connector = intel_dp->attached_connector;
1872 int bpp, bpc;
1873
1874 bpp = pipe_config->pipe_bpp;
1875 bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);
1876
1877 if (bpc > 0)
1878 bpp = min(bpp, 3*bpc);
1879
1880 if (intel_dp_is_edp(intel_dp)) {
1881 /* Get bpp from vbt only for panels that don't have bpp in edid */
1882 if (intel_connector->base.display_info.bpc == 0 &&
1883 dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
1884 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1885 dev_priv->vbt.edp.bpp);
1886 bpp = dev_priv->vbt.edp.bpp;
1887 }
1888 }
1889
1890 return bpp;
1891 }
1892
1893
1894 void
1895 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
1896 struct intel_crtc_state *pipe_config,
1897 struct link_config_limits *limits)
1898 {
1899 /* For DP Compliance we override the computed bpp for the pipe */
1900 if (intel_dp->compliance.test_data.bpc != 0) {
1901 int bpp = 3 * intel_dp->compliance.test_data.bpc;
1902
1903 limits->min_bpp = limits->max_bpp = bpp;
1904 pipe_config->dither_force_disable = bpp == 6 * 3;
1905
1906 DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp);
1907 }
1908
1909 /* Use values requested by Compliance Test Request */
1910 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
1911 int index;
1912
1913 /* Validate the compliance test data since max values
1914  * might have changed due to link train fallback.
1915  */
1916 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
1917 intel_dp->compliance.test_lane_count)) {
1918 index = intel_dp_rate_index(intel_dp->common_rates,
1919 intel_dp->num_common_rates,
1920 intel_dp->compliance.test_link_rate);
1921 if (index >= 0)
1922 limits->min_clock = limits->max_clock = index;
1923 limits->min_lane_count = limits->max_lane_count =
1924 intel_dp->compliance.test_lane_count;
1925 }
1926 }
1927 }
1928
1929 static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp)
1930 {
1931 /*
1932  * The bpp computed so far assumes full RGB / YCbCr 4:4:4 pixels.
1933  * YCbCr 4:2:0 subsampling carries half as many bits per pixel
1934  * across the link, so halve the value for that output format.
1935  */
1936 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
1937 bpp /= 2;
1938
1939 return bpp;
1940 }
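
/*
* Worked example (illustrative): pipe_bpp = 24 (8 bpc RGB) becomes an
* output bpp of 12 for YCbCr 4:2:0, since on average each 4:2:0 pixel
* carries half the bytes of its RGB equivalent.
*/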
1941
1942 /* Optimize link config in order: max bpp, min clock, min lanes */
1943 static int
1944 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
1945 struct intel_crtc_state *pipe_config,
1946 const struct link_config_limits *limits)
1947 {
1948 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1949 int bpp, clock, lane_count;
1950 int mode_rate, link_clock, link_avail;
1951
1952 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
1953 int output_bpp = intel_dp_output_bpp(pipe_config, bpp);
1954
1955 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1956 output_bpp);
1957
1958 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
1959 for (lane_count = limits->min_lane_count;
1960 lane_count <= limits->max_lane_count;
1961 lane_count <<= 1) {
1962 link_clock = intel_dp->common_rates[clock];
1963 link_avail = intel_dp_max_data_rate(link_clock,
1964 lane_count);
1965
1966 if (mode_rate <= link_avail) {
1967 pipe_config->lane_count = lane_count;
1968 pipe_config->pipe_bpp = bpp;
1969 pipe_config->port_clock = link_clock;
1970
1971 return 0;
1972 }
1973 }
1974 }
1975 }
1976
1977 return -EINVAL;
1978 }
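
/*
* Worked example (illustrative) of the search above, for 1920x1080@60
* (crtc_clock = 148500 kHz) at bpp = 24:
*
*	mode_rate  = intel_dp_link_required(148500, 24) = 445500 kB/s
*	link_avail = intel_dp_max_data_rate(270000, 2)  = 540000 kB/s
*
* so HBR (270000 kHz symbol clock) x2 lanes already fits, and since bpp
* is the outermost loop the first hit is the lowest clock and fewest
* lanes that carry the mode at maximum bpp.
*/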
1979
1980 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
1981 {
1982 int i, num_bpc;
1983 u8 dsc_bpc[3] = {0};
1984
1985 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
1986 dsc_bpc);
1987 for (i = 0; i < num_bpc; i++) {
1988 if (dsc_max_bpc >= dsc_bpc[i])
1989 return dsc_bpc[i] * 3;
1990 }
1991
1992 return 0;
1993 }
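
/*
* Example (illustrative): drm_dp_dsc_sink_supported_input_bpcs() fills
* dsc_bpc[] in decreasing order, so for a sink supporting {12, 10, 8}
* and dsc_max_bpc = 10 the first entry within the limit wins and the
* function returns 10 * 3 = 30.
*/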
1994
1995 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
1996 struct intel_crtc_state *pipe_config,
1997 struct drm_connector_state *conn_state,
1998 struct link_config_limits *limits)
1999 {
2000 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2001 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2002 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2003 u8 dsc_max_bpc;
2004 int pipe_bpp;
2005 int ret;
2006
2007 pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
2008 intel_dp_supports_fec(intel_dp, pipe_config);
2009
2010 if (!intel_dp_supports_dsc(intel_dp, pipe_config))
2011 return -EINVAL;
2012
2013 dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
2014 conn_state->max_requested_bpc);
2015
2016 pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
2017 if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
2018 DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
2019 return -EINVAL;
2020 }
2021
2022 /*
2023 * For now enable DSC for max bpp, max link rate, max lane count.
2024 * Optimize this later for the minimum possible link rate/lane count
2025 * with DSC enabled for the requested mode.
2026 */
2027 pipe_config->pipe_bpp = pipe_bpp;
2028 pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
2029 pipe_config->lane_count = limits->max_lane_count;
2030
2031 if (intel_dp_is_edp(intel_dp)) {
2032 pipe_config->dsc_params.compressed_bpp =
2033 min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
2034 pipe_config->pipe_bpp);
2035 pipe_config->dsc_params.slice_count =
2036 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
2037 true);
2038 } else {
2039 u16 dsc_max_output_bpp;
2040 u8 dsc_dp_slice_count;
2041
2042 dsc_max_output_bpp =
2043 intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
2044 pipe_config->lane_count,
2045 adjusted_mode->crtc_clock,
2046 adjusted_mode->crtc_hdisplay);
2047 dsc_dp_slice_count =
2048 intel_dp_dsc_get_slice_count(intel_dp,
2049 adjusted_mode->crtc_clock,
2050 adjusted_mode->crtc_hdisplay);
2051 if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
2052 DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
2053 return -EINVAL;
2054 }
2055 pipe_config->dsc_params.compressed_bpp = min_t(u16,
2056 dsc_max_output_bpp >> 4,
2057 pipe_config->pipe_bpp);
2058 pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
2059 }
2060
2061 /*
2062 * VDSC engine operates at 1 pixel per clock, so if the peak pixel rate
2063 * is greater than the maximum cdclk we need to split the stream
2064 * across two VDSC instances.
2065 */
2065 if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
2066 if (pipe_config->dsc_params.slice_count > 1) {
2067 pipe_config->dsc_params.dsc_split = true;
2068 } else {
2069 DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
2070 return -EINVAL;
2071 }
2072 }
2073
2074 ret = intel_dp_compute_dsc_params(intel_dp, pipe_config);
2075 if (ret < 0) {
2076 DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
2077 "Compressed BPP = %d\n",
2078 pipe_config->pipe_bpp,
2079 pipe_config->dsc_params.compressed_bpp);
2080 return ret;
2081 }
2082
2083 pipe_config->dsc_params.compression_enable = true;
2084 DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
2085 "Compressed Bpp = %d Slice Count = %d\n",
2086 pipe_config->pipe_bpp,
2087 pipe_config->dsc_params.compressed_bpp,
2088 pipe_config->dsc_params.slice_count);
2089
2090 return 0;
2091 }
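
/*
* Unit note (illustrative): the sink reports its maximum compressed bpp
* in 1/16 bpp units, hence the ">> 4" above. E.g. a raw value of 192
* means 192 / 16 = 12 bpp of compressed output.
*/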
2092
2093 int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state)
2094 {
2095 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
2096 return 6 * 3;
2097 else
2098 return 8 * 3;
2099 }
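
/*
* Example (illustrative): RGB output may be dithered down to 6 bpc, so
* its floor is 18 bpp, while YCbCr output must keep at least 8 bpc,
* i.e. 24 bpp.
*/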
2100
2101 static int
2102 intel_dp_compute_link_config(struct intel_encoder *encoder,
2103 struct intel_crtc_state *pipe_config,
2104 struct drm_connector_state *conn_state)
2105 {
2106 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2107 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2108 struct link_config_limits limits;
2109 int common_len;
2110 int ret;
2111
2112 common_len = intel_dp_common_len_rate_limit(intel_dp,
2113 intel_dp->max_link_rate);
2114
2115 /* No common link rates between source and sink */
2116 WARN_ON(common_len <= 0);
2117
2118 limits.min_clock = 0;
2119 limits.max_clock = common_len - 1;
2120
2121 limits.min_lane_count = 1;
2122 limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
2123
2124 limits.min_bpp = intel_dp_min_bpp(pipe_config);
2125 limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
2126
2127 if (intel_dp_is_edp(intel_dp)) {
2128 /*
2129 * Use the maximum clock and number of lanes the eDP panel
2130 * advertises being capable of. The panels are generally
2131 * designed to support only a single clock and lane
2132 * configuration, and typically these values correspond to the
2133 * native resolution of the panel.
2134 */
2135 limits.min_lane_count = limits.max_lane_count;
2136 limits.min_clock = limits.max_clock;
2137 }
2138
2139 intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
2140
2141 DRM_DEBUG_KMS("DP link computation with max lane count %i "
2142 "max rate %d max bpp %d pixel clock %iKHz\n",
2143 limits.max_lane_count,
2144 intel_dp->common_rates[limits.max_clock],
2145 limits.max_bpp, adjusted_mode->crtc_clock);
2146
2147 /*
2148 * Optimize for slow and wide. This is the place to add alternative
2149 * optimization policy.
2150 */
2151 ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
2152
2153 /* enable compression if the mode doesn't fit available BW */
2154 DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
2155 if (ret || intel_dp->force_dsc_en) {
2156 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
2157 conn_state, &limits);
2158 if (ret < 0)
2159 return ret;
2160 }
2161
2162 if (pipe_config->dsc_params.compression_enable) {
2163 DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
2164 pipe_config->lane_count, pipe_config->port_clock,
2165 pipe_config->pipe_bpp,
2166 pipe_config->dsc_params.compressed_bpp);
2167
2168 DRM_DEBUG_KMS("DP link rate required %i available %i\n",
2169 intel_dp_link_required(adjusted_mode->crtc_clock,
2170 pipe_config->dsc_params.compressed_bpp),
2171 intel_dp_max_data_rate(pipe_config->port_clock,
2172 pipe_config->lane_count));
2173 } else {
2174 DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
2175 pipe_config->lane_count, pipe_config->port_clock,
2176 pipe_config->pipe_bpp);
2177
2178 DRM_DEBUG_KMS("DP link rate required %i available %i\n",
2179 intel_dp_link_required(adjusted_mode->crtc_clock,
2180 pipe_config->pipe_bpp),
2181 intel_dp_max_data_rate(pipe_config->port_clock,
2182 pipe_config->lane_count));
2183 }
2184 return 0;
2185 }
2186
2187 static int
2188 intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
2189 struct drm_connector *connector,
2190 struct intel_crtc_state *crtc_state)
2191 {
2192 const struct drm_display_info *info = &connector->display_info;
2193 const struct drm_display_mode *adjusted_mode =
2194 &crtc_state->base.adjusted_mode;
2195 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
2196 int ret;
2197
2198 if (!drm_mode_is_420_only(info, adjusted_mode) ||
2199 !intel_dp_get_colorimetry_status(intel_dp) ||
2200 !connector->ycbcr_420_allowed)
2201 return 0;
2202
2203 crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
2204
2205 /* YCBCR 420 output conversion needs a scaler */
2206 ret = skl_update_scaler_crtc(crtc_state);
2207 if (ret) {
2208 DRM_DEBUG_KMS("Scaler allocation for output failed\n");
2209 return ret;
2210 }
2211
2212 intel_pch_panel_fitting(crtc, crtc_state, DRM_MODE_SCALE_FULLSCREEN);
2213
2214 return 0;
2215 }
2216
2217 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
2218 const struct drm_connector_state *conn_state)
2219 {
2220 const struct intel_digital_connector_state *intel_conn_state =
2221 to_intel_digital_connector_state(conn_state);
2222 const struct drm_display_mode *adjusted_mode =
2223 &crtc_state->base.adjusted_mode;
2224
2225 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
2226 /*
2227 * See:
2228 * CEA-861-E - 5.1 Default Encoding Parameters
2229 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
2230 */
2231 return crtc_state->pipe_bpp != 18 &&
2232 drm_default_rgb_quant_range(adjusted_mode) ==
2233 HDMI_QUANTIZATION_RANGE_LIMITED;
2234 } else {
2235 return intel_conn_state->broadcast_rgb ==
2236 INTEL_BROADCAST_RGB_LIMITED;
2237 }
2238 }
2239
2240 int
2241 intel_dp_compute_config(struct intel_encoder *encoder,
2242 struct intel_crtc_state *pipe_config,
2243 struct drm_connector_state *conn_state)
2244 {
2245 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2246 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2247 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2248 struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
2249 enum port port = encoder->port;
2250 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
2251 struct intel_connector *intel_connector = intel_dp->attached_connector;
2252 struct intel_digital_connector_state *intel_conn_state =
2253 to_intel_digital_connector_state(conn_state);
2254 bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
2255 DP_DPCD_QUIRK_CONSTANT_N);
2256 int ret = 0, output_bpp;
2257
2258 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
2259 pipe_config->has_pch_encoder = true;
2260
2261 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
2262 if (lspcon->active)
2263 lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
2264 else
2265 ret = intel_dp_ycbcr420_config(intel_dp, &intel_connector->base,
2266 pipe_config);
2267
2268 if (ret)
2269 return ret;
2270
2271 pipe_config->has_drrs = false;
2272 if (IS_G4X(dev_priv) || port == PORT_A)
2273 pipe_config->has_audio = false;
2274 else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
2275 pipe_config->has_audio = intel_dp->has_audio;
2276 else
2277 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
2278
2279 if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
2280 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
2281 adjusted_mode);
2282
2283 if (INTEL_GEN(dev_priv) >= 9) {
2284 ret = skl_update_scaler_crtc(pipe_config);
2285 if (ret)
2286 return ret;
2287 }
2288
2289 if (HAS_GMCH(dev_priv))
2290 intel_gmch_panel_fitting(intel_crtc, pipe_config,
2291 conn_state->scaling_mode);
2292 else
2293 intel_pch_panel_fitting(intel_crtc, pipe_config,
2294 conn_state->scaling_mode);
2295 }
2296
2297 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
2298 return -EINVAL;
2299
2300 if (HAS_GMCH(dev_priv) &&
2301 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
2302 return -EINVAL;
2303
2304 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
2305 return -EINVAL;
2306
2307 ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
2308 if (ret < 0)
2309 return ret;
2310
2311 pipe_config->limited_color_range =
2312 intel_dp_limited_color_range(pipe_config, conn_state);
2313
2314 if (pipe_config->dsc_params.compression_enable)
2315 output_bpp = pipe_config->dsc_params.compressed_bpp;
2316 else
2317 output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp);
2318
2319 intel_link_compute_m_n(output_bpp,
2320 pipe_config->lane_count,
2321 adjusted_mode->crtc_clock,
2322 pipe_config->port_clock,
2323 &pipe_config->dp_m_n,
2324 constant_n, pipe_config->fec_enable);
2325
2326 if (intel_connector->panel.downclock_mode != NULL &&
2327 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
2328 pipe_config->has_drrs = true;
2329 intel_link_compute_m_n(output_bpp,
2330 pipe_config->lane_count,
2331 intel_connector->panel.downclock_mode->clock,
2332 pipe_config->port_clock,
2333 &pipe_config->dp_m2_n2,
2334 constant_n, pipe_config->fec_enable);
2335 }
2336
2337 if (!HAS_DDI(dev_priv))
2338 intel_dp_set_clock(encoder, pipe_config);
2339
2340 intel_psr_compute_config(intel_dp, pipe_config);
2341
2342 return 0;
2343 }
2344
2345 void intel_dp_set_link_params(struct intel_dp *intel_dp,
2346 int link_rate, u8 lane_count,
2347 bool link_mst)
2348 {
2349 intel_dp->link_trained = false;
2350 intel_dp->link_rate = link_rate;
2351 intel_dp->lane_count = lane_count;
2352 intel_dp->link_mst = link_mst;
2353 }
2354
2355 static void intel_dp_prepare(struct intel_encoder *encoder,
2356 const struct intel_crtc_state *pipe_config)
2357 {
2358 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2359 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2360 enum port port = encoder->port;
2361 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
2362 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2363
2364 intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
2365 pipe_config->lane_count,
2366 intel_crtc_has_type(pipe_config,
2367 INTEL_OUTPUT_DP_MST));
2368
2369 /*
2370 * There are four kinds of DP registers:
2371 *
2372 *	IBX PCH
2373 *	SNB CPU
2374 *	IVB CPU
2375 *	CPT PCH
2376 *
2377 * IBX PCH and CPU are the same for almost everything,
2378 * except that the CPU DP PLL is configured in this
2379 * register.
2380 *
2381 * CPT PCH is quite different, having many bits moved
2382 * to the TRANS_DP_CTL register instead. That
2383 * configuration happens (oddly) in ironlake_pch_enable().
2384 */
2385 
2386 /* Preserve the BIOS-computed detected bit. This is
2387 * supposed to be read-only.
2388 */
2389 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
2390
2391 /* Handle DP bits in common between all three register formats */
2392 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
2393 intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);
2394
2395 /* Split out the IBX/CPU vs CPT settings */
2396 
2397 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
2398 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2399 intel_dp->DP |= DP_SYNC_HS_HIGH;
2400 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2401 intel_dp->DP |= DP_SYNC_VS_HIGH;
2402 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2403
2404 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2405 intel_dp->DP |= DP_ENHANCED_FRAMING;
2406
2407 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
2408 } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
2409 u32 trans_dp;
2410
2411 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2412
2413 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2414 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2415 trans_dp |= TRANS_DP_ENH_FRAMING;
2416 else
2417 trans_dp &= ~TRANS_DP_ENH_FRAMING;
2418 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
2419 } else {
2420 if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
2421 intel_dp->DP |= DP_COLOR_RANGE_16_235;
2422
2423 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2424 intel_dp->DP |= DP_SYNC_HS_HIGH;
2425 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2426 intel_dp->DP |= DP_SYNC_VS_HIGH;
2427 intel_dp->DP |= DP_LINK_TRAIN_OFF;
2428
2429 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2430 intel_dp->DP |= DP_ENHANCED_FRAMING;
2431
2432 if (IS_CHERRYVIEW(dev_priv))
2433 intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
2434 else
2435 intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
2436 }
2437 }
2438
2439 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
2440 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
2441
2442 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
2443 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
2444
2445 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
2446 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
2447
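/*
* How the mask/value pairs above are used (illustrative): PP_STATUS is
* polled until (status & mask) == value. E.g. panel-on completion means
* PP_ON set, no power sequence in progress (PP_SEQUENCE_NONE) and the
* sequencer state machine idle in the ON state.
*/
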
2448 static void intel_pps_verify_state(struct intel_dp *intel_dp);
2449
2450 static void wait_panel_status(struct intel_dp *intel_dp,
2451 u32 mask,
2452 u32 value)
2453 {
2454 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2455 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2456
2457 lockdep_assert_held(&dev_priv->pps_mutex);
2458
2459 intel_pps_verify_state(intel_dp);
2460
2461 pp_stat_reg = _pp_stat_reg(intel_dp);
2462 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2463
2464 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
2465 mask, value,
2466 I915_READ(pp_stat_reg),
2467 I915_READ(pp_ctrl_reg));
2468
2469 if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
2470 mask, value, 5000))
2471 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
2472 I915_READ(pp_stat_reg),
2473 I915_READ(pp_ctrl_reg));
2474
2475 DRM_DEBUG_KMS("Wait complete\n");
2476 }
2477
2478 static void wait_panel_on(struct intel_dp *intel_dp)
2479 {
2480 DRM_DEBUG_KMS("Wait for panel power on\n");
2481 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
2482 }
2483
2484 static void wait_panel_off(struct intel_dp *intel_dp)
2485 {
2486 DRM_DEBUG_KMS("Wait for panel power off time\n");
2487 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
2488 }
2489
2490 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
2491 {
2492 ktime_t panel_power_on_time;
2493 s64 panel_power_off_duration;
2494
2495 DRM_DEBUG_KMS("Wait for panel power cycle\n");
2496
2497 /* take the difference of current time and panel power off time
2498 * and then make panel wait for t11_t12 if needed. */
2499 panel_power_on_time = ktime_get_boottime();
2500 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
2501
2502 /* When we disable the VDD override bit last we have to do the manual
2503 * wait. */
2504 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
2505 wait_remaining_ms_from_jiffies(jiffies,
2506 intel_dp->panel_power_cycle_delay - panel_power_off_duration);
2507
2508 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
2509 }
2510
2511 static void wait_backlight_on(struct intel_dp *intel_dp)
2512 {
2513 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
2514 intel_dp->backlight_on_delay);
2515 }
2516
2517 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
2518 {
2519 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
2520 intel_dp->backlight_off_delay);
2521 }
2522
2523 /*
2524 * Read the current pp_control value, unlocking the register if it
2525 * is locked
2526 */
2527 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
2528 {
2529 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2530 u32 control;
2531
2532 lockdep_assert_held(&dev_priv->pps_mutex);
2533
2534 control = I915_READ(_pp_ctrl_reg(intel_dp));
2535 if (WARN_ON(!HAS_DDI(dev_priv) &&
2536 (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
2537 control &= ~PANEL_UNLOCK_MASK;
2538 control |= PANEL_UNLOCK_REGS;
2539 }
2540 return control;
2541 }
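
/*
* Background note (illustrative): on non-DDI platforms PP_CONTROL is
* protected by a write-protect key, so writing back a value read
* without the key would silently lock out our updates; the helper above
* therefore re-inserts the unlock key into the software copy before it
* is modified and written back.
*/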
2542
2543 /*
2544 * Must be paired with edp_panel_vdd_off().
2545 * Must hold pps_mutex around the whole on/off sequence.
2546 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2547 */
2548 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
2549 {
2550 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2551 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2552 u32 pp;
2553 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2554 bool need_to_disable = !intel_dp->want_panel_vdd;
2555
2556 lockdep_assert_held(&dev_priv->pps_mutex);
2557
2558 if (!intel_dp_is_edp(intel_dp))
2559 return false;
2560
2561 cancel_delayed_work(&intel_dp->panel_vdd_work);
2562 intel_dp->want_panel_vdd = true;
2563
2564 if (edp_have_panel_vdd(intel_dp))
2565 return need_to_disable;
2566
2567 intel_display_power_get(dev_priv,
2568 intel_aux_power_domain(intel_dig_port));
2569
2570 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
2571 port_name(intel_dig_port->base.port));
2572
2573 if (!edp_have_panel_power(intel_dp))
2574 wait_panel_power_cycle(intel_dp);
2575
2576 pp = ironlake_get_pp_control(intel_dp);
2577 pp |= EDP_FORCE_VDD;
2578
2579 pp_stat_reg = _pp_stat_reg(intel_dp);
2580 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2581
2582 I915_WRITE(pp_ctrl_reg, pp);
2583 POSTING_READ(pp_ctrl_reg);
2584 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2585 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2586
2587 /* If the panel wasn't on, delay before accessing the AUX channel so
2588 * VDD has time to stabilize. */
2589 if (!edp_have_panel_power(intel_dp)) {
2590 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
2591 port_name(intel_dig_port->base.port));
2592 msleep(intel_dp->panel_power_up_delay);
2593 }
2594
2595 return need_to_disable;
2596 }
2597
2598 /*
2599 * Must be paired with intel_edp_panel_vdd_off() or
2600 * intel_edp_panel_off().
2601 * Nested calls to these functions are not allowed since
2602 * we drop the lock. Caller must use some higher level
2603 * locking to prevent nested calls from other threads.
2604 */
2605 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
2606 {
2607 intel_wakeref_t wakeref;
2608 bool vdd;
2609
2610 if (!intel_dp_is_edp(intel_dp))
2611 return;
2612
2613 vdd = false;
2614 with_pps_lock(intel_dp, wakeref)
2615 vdd = edp_panel_vdd_on(intel_dp);
2616 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
2617 port_name(dp_to_dig_port(intel_dp)->base.port));
2618 }
2619
2620 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
2621 {
2622 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2623 struct intel_digital_port *intel_dig_port =
2624 dp_to_dig_port(intel_dp);
2625 u32 pp;
2626 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2627
2628 lockdep_assert_held(&dev_priv->pps_mutex);
2629
2630 WARN_ON(intel_dp->want_panel_vdd);
2631
2632 if (!edp_have_panel_vdd(intel_dp))
2633 return;
2634
2635 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
2636 port_name(intel_dig_port->base.port));
2637
2638 pp = ironlake_get_pp_control(intel_dp);
2639 pp &= ~EDP_FORCE_VDD;
2640
2641 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2642 pp_stat_reg = _pp_stat_reg(intel_dp);
2643
2644 I915_WRITE(pp_ctrl_reg, pp);
2645 POSTING_READ(pp_ctrl_reg);
2646
2647 /* Make sure sequencer is idle before allowing subsequent activity */
2648 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2649 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2650
2651 if ((pp & PANEL_POWER_ON) == 0)
2652 intel_dp->panel_power_off_time = ktime_get_boottime();
2653
2654 intel_display_power_put_unchecked(dev_priv,
2655 intel_aux_power_domain(intel_dig_port));
2656 }
2657
2658 static void edp_panel_vdd_work(struct work_struct *__work)
2659 {
2660 struct intel_dp *intel_dp =
2661 container_of(to_delayed_work(__work),
2662 struct intel_dp, panel_vdd_work);
2663 intel_wakeref_t wakeref;
2664
2665 with_pps_lock(intel_dp, wakeref) {
2666 if (!intel_dp->want_panel_vdd)
2667 edp_panel_vdd_off_sync(intel_dp);
2668 }
2669 }
2670
2671 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2672 {
2673 unsigned long delay;
2674
2675 /*
2676 * Queue the timer to fire a long time from now (relative to the power
2677 * down delay) to keep the panel power up across a sequence of
2678 * operations.
2679 */
2680 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2681 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2682 }
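
/*
* Worked example (illustrative): with panel_power_cycle_delay = 500 ms
* the VDD off work is queued 5 * 500 = 2500 ms out, so a burst of AUX
* transfers keeps VDD up instead of bouncing the panel through a full
* power cycle each time.
*/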
2683
2684 /*
2685 * Must be paired with edp_panel_vdd_on().
2686 * Must hold pps_mutex around the whole on/off sequence.
2687 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2688 */
2689 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2690 {
2691 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2692
2693 lockdep_assert_held(&dev_priv->pps_mutex);
2694
2695 if (!intel_dp_is_edp(intel_dp))
2696 return;
2697
2698 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2699 port_name(dp_to_dig_port(intel_dp)->base.port));
2700
2701 intel_dp->want_panel_vdd = false;
2702
2703 if (sync)
2704 edp_panel_vdd_off_sync(intel_dp);
2705 else
2706 edp_panel_vdd_schedule_off(intel_dp);
2707 }
2708
2709 static void edp_panel_on(struct intel_dp *intel_dp)
2710 {
2711 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2712 u32 pp;
2713 i915_reg_t pp_ctrl_reg;
2714
2715 lockdep_assert_held(&dev_priv->pps_mutex);
2716
2717 if (!intel_dp_is_edp(intel_dp))
2718 return;
2719
2720 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2721 port_name(dp_to_dig_port(intel_dp)->base.port));
2722
2723 if (WARN(edp_have_panel_power(intel_dp),
2724 "eDP port %c panel power already on\n",
2725 port_name(dp_to_dig_port(intel_dp)->base.port)))
2726 return;
2727
2728 wait_panel_power_cycle(intel_dp);
2729
2730 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2731 pp = ironlake_get_pp_control(intel_dp);
2732 if (IS_GEN(dev_priv, 5)) {
2733 /* ILK workaround: disable reset around power sequence */
2734 pp &= ~PANEL_POWER_RESET;
2735 I915_WRITE(pp_ctrl_reg, pp);
2736 POSTING_READ(pp_ctrl_reg);
2737 }
2738
2739 pp |= PANEL_POWER_ON;
2740 if (!IS_GEN(dev_priv, 5))
2741 pp |= PANEL_POWER_RESET;
2742
2743 I915_WRITE(pp_ctrl_reg, pp);
2744 POSTING_READ(pp_ctrl_reg);
2745
2746 wait_panel_on(intel_dp);
2747 intel_dp->last_power_on = jiffies;
2748
2749 if (IS_GEN(dev_priv, 5)) {
2750 pp |= PANEL_POWER_RESET;
2751 I915_WRITE(pp_ctrl_reg, pp);
2752 POSTING_READ(pp_ctrl_reg);
2753 }
2754 }
2755
2756 void intel_edp_panel_on(struct intel_dp *intel_dp)
2757 {
2758 intel_wakeref_t wakeref;
2759
2760 if (!intel_dp_is_edp(intel_dp))
2761 return;
2762
2763 with_pps_lock(intel_dp, wakeref)
2764 edp_panel_on(intel_dp);
2765 }
2766
2767
2768 static void edp_panel_off(struct intel_dp *intel_dp)
2769 {
2770 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2771 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2772 u32 pp;
2773 i915_reg_t pp_ctrl_reg;
2774
2775 lockdep_assert_held(&dev_priv->pps_mutex);
2776
2777 if (!intel_dp_is_edp(intel_dp))
2778 return;
2779
2780 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2781 port_name(dig_port->base.port));
2782
2783 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2784 port_name(dig_port->base.port));
2785
2786 pp = ironlake_get_pp_control(intel_dp);
2787 /* We need to switch off panel power _and_ force vdd, for otherwise some
2788 * panels get very unhappy and cease to work. */
2789 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2790 EDP_BLC_ENABLE);
2791
2792 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2793
2794 intel_dp->want_panel_vdd = false;
2795
2796 I915_WRITE(pp_ctrl_reg, pp);
2797 POSTING_READ(pp_ctrl_reg);
2798
2799 wait_panel_off(intel_dp);
2800 intel_dp->panel_power_off_time = ktime_get_boottime();
2801
2802 /* We got a reference when we enabled the VDD. */
2803 intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
2804 }
2805
2806 void intel_edp_panel_off(struct intel_dp *intel_dp)
2807 {
2808 intel_wakeref_t wakeref;
2809
2810 if (!intel_dp_is_edp(intel_dp))
2811 return;
2812
2813 with_pps_lock(intel_dp, wakeref)
2814 edp_panel_off(intel_dp);
2815 }
2816
2817 /* Enable backlight in the panel power control. */
2818 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2819 {
2820 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2821 intel_wakeref_t wakeref;
2822
2823 /*
2824 * If we enable the backlight right away following a panel power
2825 * on, we may see slight flicker as the panel syncs with the eDP
2826 * link. So delay a bit to make sure the image is solid before
2827 * enabling it normally.
2828 */
2829 wait_backlight_on(intel_dp);
2830
2831 with_pps_lock(intel_dp, wakeref) {
2832 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2833 u32 pp;
2834
2835 pp = ironlake_get_pp_control(intel_dp);
2836 pp |= EDP_BLC_ENABLE;
2837
2838 I915_WRITE(pp_ctrl_reg, pp);
2839 POSTING_READ(pp_ctrl_reg);
2840 }
2841 }
2842
2843 /* Enable backlight PWM and backlight PP control. */
2844 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
2845 const struct drm_connector_state *conn_state)
2846 {
2847 struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);
2848
2849 if (!intel_dp_is_edp(intel_dp))
2850 return;
2851
2852 DRM_DEBUG_KMS("\n");
2853
2854 intel_panel_enable_backlight(crtc_state, conn_state);
2855 _intel_edp_backlight_on(intel_dp);
2856 }
2857
2858 /* Disable backlight in the panel power control. */
2859 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2860 {
2861 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2862 intel_wakeref_t wakeref;
2863
2864 if (!intel_dp_is_edp(intel_dp))
2865 return;
2866
2867 with_pps_lock(intel_dp, wakeref) {
2868 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2869 u32 pp;
2870
2871 pp = ironlake_get_pp_control(intel_dp);
2872 pp &= ~EDP_BLC_ENABLE;
2873
2874 I915_WRITE(pp_ctrl_reg, pp);
2875 POSTING_READ(pp_ctrl_reg);
2876 }
2877
2878 intel_dp->last_backlight_off = jiffies;
2879 edp_wait_backlight_off(intel_dp);
2880 }
2881
2882 /* Disable backlight PP control and backlight PWM. */
2883 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
2884 {
2885 struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);
2886
2887 if (!intel_dp_is_edp(intel_dp))
2888 return;
2889
2890 DRM_DEBUG_KMS("\n");
2891
2892 _intel_edp_backlight_off(intel_dp);
2893 intel_panel_disable_backlight(old_conn_state);
2894 }
2895
2896 /*
2897 * Hook for controlling the panel power control backlight through the
2898 * bl_power sysfs attribute. Take care to handle multiple calls.
2899 */
2900 static void intel_edp_backlight_power(struct intel_connector *connector,
2901 bool enable)
2902 {
2903 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2904 intel_wakeref_t wakeref;
2905 bool is_enabled;
2906
2907 is_enabled = false;
2908 with_pps_lock(intel_dp, wakeref)
2909 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2910 if (is_enabled == enable)
2911 return;
2912
2913 DRM_DEBUG_KMS("panel power control backlight %s\n",
2914 enable ? "enable" : "disable");
2915
2916 if (enable)
2917 _intel_edp_backlight_on(intel_dp);
2918 else
2919 _intel_edp_backlight_off(intel_dp);
2920 }
2921
2922 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2923 {
2924 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2925 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2926 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2927
2928 I915_STATE_WARN(cur_state != state,
2929 "DP port %c state assertion failure (expected %s, current %s)\n",
2930 port_name(dig_port->base.port),
2931 onoff(state), onoff(cur_state));
2932 }
2933 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2934
2935 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2936 {
2937 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2938
2939 I915_STATE_WARN(cur_state != state,
2940 "eDP PLL state assertion failure (expected %s, current %s)\n",
2941 onoff(state), onoff(cur_state));
2942 }
2943 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2944 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2945
2946 static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
2947 const struct intel_crtc_state *pipe_config)
2948 {
2949 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
2950 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2951
2952 assert_pipe_disabled(dev_priv, crtc->pipe);
2953 assert_dp_port_disabled(intel_dp);
2954 assert_edp_pll_disabled(dev_priv);
2955
2956 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2957 pipe_config->port_clock);
2958
2959 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2960
2961 if (pipe_config->port_clock == 162000)
2962 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2963 else
2964 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2965
2966 I915_WRITE(DP_A, intel_dp->DP);
2967 POSTING_READ(DP_A);
2968 udelay(500);
2969 /*
2970 * [DevILK] Work around required when enabling DP PLL
2971 * while a pipe is enabled going to FDI:
2972 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
2973 * 2. Program DP PLL enable
2974 */
2975
2976 if (IS_GEN(dev_priv, 5))
2977 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
2978
2979 intel_dp->DP |= DP_PLL_ENABLE;
2980
2981 I915_WRITE(DP_A, intel_dp->DP);
2982 POSTING_READ(DP_A);
2983 udelay(200);
2984 }
2985
2986 static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
2987 const struct intel_crtc_state *old_crtc_state)
2988 {
2989 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
2990 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2991
2992 assert_pipe_disabled(dev_priv, crtc->pipe);
2993 assert_dp_port_disabled(intel_dp);
2994 assert_edp_pll_enabled(dev_priv);
2995
2996 DRM_DEBUG_KMS("disabling eDP PLL\n");
2997
2998 intel_dp->DP &= ~DP_PLL_ENABLE;
2999
3000 I915_WRITE(DP_A, intel_dp->DP);
3001 POSTING_READ(DP_A);
3002 udelay(200);
3003 }
3004
3005 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
3006 {
3007 /*
3008 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
3009 * be capable of signalling downstream hpd with a long pulse.
3010 * Whether or not that means D3 is safe to use is not clear,
3011 * but let's assume so until proven otherwise.
3012 *
3013 * FIXME should really check all downstream ports...
3014 */
3015 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
3016 intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
3017 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
3018 }
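
/*
* Example DPCD (illustrative) matching the check above: DP_DPCD_REV ==
* 0x11, DP_DOWNSTREAMPORT_PRESENT with DP_DWN_STRM_PORT_PRESENT set,
* and a first downstream port advertising DP_DS_PORT_HPD. Such a branch
* device may lose HPD signalling in D3, so the sink is left in D0.
*/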
3019
3020 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
3021 const struct intel_crtc_state *crtc_state,
3022 bool enable)
3023 {
3024 int ret;
3025
3026 if (!crtc_state->dsc_params.compression_enable)
3027 return;
3028
3029 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
3030 enable ? DP_DECOMPRESSION_EN : 0);
3031 if (ret < 0)
3032 DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
3033 enable ? "enable" : "disable");
3034 }
3035
3036 /* If the sink supports it, try to set the power state appropriately */
3037 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
3038 {
3039 int ret, i;
3040
3041 /* Should have a valid DPCD by this point */
3042 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
3043 return;
3044
3045 if (mode != DRM_MODE_DPMS_ON) {
3046 if (downstream_hpd_needs_d0(intel_dp))
3047 return;
3048
3049 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
3050 DP_SET_POWER_D3);
3051 } else {
3052 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
3053 
3054 /*
3055 * When turning on, we need to retry for 1ms to give the sink
3056 * time to wake up.
3057 */
3058 for (i = 0; i < 3; i++) {
3059 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
3060 DP_SET_POWER_D0);
3061 if (ret == 1)
3062 break;
3063 msleep(1);
3064 }
3065
3066 if (ret == 1 && lspcon->active)
3067 lspcon_wait_pcon_mode(lspcon);
3068 }
3069
3070 if (ret != 1)
3071 DRM_DEBUG_KMS("failed to %s sink power state\n",
3072 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
3073 }
3074
3075 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
3076 enum port port, enum pipe *pipe)
3077 {
3078 enum pipe p;
3079
3080 for_each_pipe(dev_priv, p) {
3081 u32 val = I915_READ(TRANS_DP_CTL(p));
3082
3083 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
3084 *pipe = p;
3085 return true;
3086 }
3087 }
3088
3089 DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));
3090 
3091 /* must initialize pipe to something for the asserts */
3092 *pipe = PIPE_A;
3093
3094 return false;
3095 }
3096
3097 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
3098 i915_reg_t dp_reg, enum port port,
3099 enum pipe *pipe)
3100 {
3101 bool ret;
3102 u32 val;
3103
3104 val = I915_READ(dp_reg);
3105
3106 ret = val & DP_PORT_EN;
3107
3108 /* asserts want to know the pipe even if the port is disabled */
3109 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3110 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
3111 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3112 ret &= cpt_dp_port_selected(dev_priv, port, pipe);
3113 else if (IS_CHERRYVIEW(dev_priv))
3114 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
3115 else
3116 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
3117
3118 return ret;
3119 }
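
/*
* Usage sketch (illustrative): callers get the enable state and the
* driving pipe in one call, e.g.
*
*	enum pipe pipe;
*
*	if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
*				  encoder->port, &pipe))
*		...port is enabled and currently drives 'pipe'...
*
* Note that *pipe is always written, so asserts can use it even when
* the port is disabled.
*/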
3120
3121 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
3122 enum pipe *pipe)
3123 {
3124 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3125 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3126 intel_wakeref_t wakeref;
3127 bool ret;
3128
3129 wakeref = intel_display_power_get_if_enabled(dev_priv,
3130 encoder->power_domain);
3131 if (!wakeref)
3132 return false;
3133
3134 ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
3135 encoder->port, pipe);
3136
3137 intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
3138
3139 return ret;
3140 }
3141
3142 static void intel_dp_get_config(struct intel_encoder *encoder,
3143 struct intel_crtc_state *pipe_config)
3144 {
3145 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3146 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3147 u32 tmp, flags = 0;
3148 enum port port = encoder->port;
3149 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
3150
3151 if (encoder->type == INTEL_OUTPUT_EDP)
3152 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
3153 else
3154 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
3155
3156 tmp = I915_READ(intel_dp->output_reg);
3157
3158 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
3159
3160 if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
3161 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
3162
3163 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
3164 flags |= DRM_MODE_FLAG_PHSYNC;
3165 else
3166 flags |= DRM_MODE_FLAG_NHSYNC;
3167
3168 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
3169 flags |= DRM_MODE_FLAG_PVSYNC;
3170 else
3171 flags |= DRM_MODE_FLAG_NVSYNC;
3172 } else {
3173 if (tmp & DP_SYNC_HS_HIGH)
3174 flags |= DRM_MODE_FLAG_PHSYNC;
3175 else
3176 flags |= DRM_MODE_FLAG_NHSYNC;
3177
3178 if (tmp & DP_SYNC_VS_HIGH)
3179 flags |= DRM_MODE_FLAG_PVSYNC;
3180 else
3181 flags |= DRM_MODE_FLAG_NVSYNC;
3182 }
3183
3184 pipe_config->base.adjusted_mode.flags |= flags;
3185
3186 if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
3187 pipe_config->limited_color_range = true;
3188
3189 pipe_config->lane_count =
3190 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
3191
3192 intel_dp_get_m_n(crtc, pipe_config);
3193
3194 if (port == PORT_A) {
3195 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
3196 pipe_config->port_clock = 162000;
3197 else
3198 pipe_config->port_clock = 270000;
3199 }
3200
3201 pipe_config->base.adjusted_mode.crtc_clock =
3202 intel_dotclock_calculate(pipe_config->port_clock,
3203 &pipe_config->dp_m_n);
3204
3205 if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
3206 pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
3207 /*
3208 * This is a big fat ugly hack.
3209 *
3210 * Some machines in UEFI boot mode provide us a VBT that has 18
3211 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
3212 * unknown we fail to light up. Yet the same BIOS boots up with
3213 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
3214 * max, not what it tells us to use.
3215 *
3216 * Note: This will still be broken if the eDP panel is not lit
3217 * up by the BIOS, and thus we can't get the mode at module
3218 * load.
3219 */
3220 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
3221 pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
3222 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
3223 }
3224 }
3225
3226 static void intel_disable_dp(struct intel_encoder *encoder,
3227 const struct intel_crtc_state *old_crtc_state,
3228 const struct drm_connector_state *old_conn_state)
3229 {
3230 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3231
3232 intel_dp->link_trained = false;
3233
3234 if (old_crtc_state->has_audio)
3235 intel_audio_codec_disable(encoder,
3236 old_crtc_state, old_conn_state);
3237
3238 /* Make sure the panel is off before trying to change the mode. But also
3239 * ensure that we have vdd while we switch off the panel. */
3240 intel_edp_panel_vdd_on(intel_dp);
3241 intel_edp_backlight_off(old_conn_state);
3242 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
3243 intel_edp_panel_off(intel_dp);
3244 }
3245
3246 static void g4x_disable_dp(struct intel_encoder *encoder,
3247 const struct intel_crtc_state *old_crtc_state,
3248 const struct drm_connector_state *old_conn_state)
3249 {
3250 intel_disable_dp(encoder, old_crtc_state, old_conn_state);
3251 }
3252
3253 static void vlv_disable_dp(struct intel_encoder *encoder,
3254 const struct intel_crtc_state *old_crtc_state,
3255 const struct drm_connector_state *old_conn_state)
3256 {
3257 intel_disable_dp(encoder, old_crtc_state, old_conn_state);
3258 }
3259
3260 static void g4x_post_disable_dp(struct intel_encoder *encoder,
3261 const struct intel_crtc_state *old_crtc_state,
3262 const struct drm_connector_state *old_conn_state)
3263 {
3264 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3265 enum port port = encoder->port;
3266
3267 /*
3268 * Bspec does not list a specific disable sequence for g4x DP.
3269 * Follow the ilk+ sequence (disable pipe before the port) for
3270 * g4x DP as it does not suffer from the underruns like the normal
3271 * g4x modeset sequence (disable the port before the pipe).
3272 */
3273 intel_dp_link_down(encoder, old_crtc_state);
3274
3275 /* Only ilk+ has port A */
3276 if (port == PORT_A)
3277 ironlake_edp_pll_off(intel_dp, old_crtc_state);
3278 }
3279
3280 static void vlv_post_disable_dp(struct intel_encoder *encoder,
3281 const struct intel_crtc_state *old_crtc_state,
3282 const struct drm_connector_state *old_conn_state)
3283 {
3284 intel_dp_link_down(encoder, old_crtc_state);
3285 }
3286
3287 static void chv_post_disable_dp(struct intel_encoder *encoder,
3288 const struct intel_crtc_state *old_crtc_state,
3289 const struct drm_connector_state *old_conn_state)
3290 {
3291 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3292
3293 intel_dp_link_down(encoder, old_crtc_state);
3294
3295 vlv_dpio_get(dev_priv);
3296
3297 /* Assert data lane reset */
3298 chv_data_lane_soft_reset(encoder, old_crtc_state, true);
3299
3300 vlv_dpio_put(dev_priv);
3301 }
3302
3303 static void
3304 _intel_dp_set_link_train(struct intel_dp *intel_dp,
3305 u32 *DP,
3306 u8 dp_train_pat)
3307 {
3308 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3309 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3310 enum port port = intel_dig_port->base.port;
3311 u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
3312
3313 if (dp_train_pat & train_pat_mask)
3314 DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
3315 dp_train_pat & train_pat_mask);
3316
3317 if (HAS_DDI(dev_priv)) {
3318 u32 temp = I915_READ(DP_TP_CTL(port));
3319
3320 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
3321 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
3322 else
3323 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
3324
3325 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3326 switch (dp_train_pat & train_pat_mask) {
3327 case DP_TRAINING_PATTERN_DISABLE:
3328 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
3329
3330 break;
3331 case DP_TRAINING_PATTERN_1:
3332 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
3333 break;
3334 case DP_TRAINING_PATTERN_2:
3335 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
3336 break;
3337 case DP_TRAINING_PATTERN_3:
3338 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
3339 break;
3340 case DP_TRAINING_PATTERN_4:
3341 temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
3342 break;
3343 }
3344 I915_WRITE(DP_TP_CTL(port), temp);
3345
3346 } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
3347 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
3348 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
3349
3350 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3351 case DP_TRAINING_PATTERN_DISABLE:
3352 *DP |= DP_LINK_TRAIN_OFF_CPT;
3353 break;
3354 case DP_TRAINING_PATTERN_1:
3355 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
3356 break;
3357 case DP_TRAINING_PATTERN_2:
3358 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3359 break;
3360 case DP_TRAINING_PATTERN_3:
3361 DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
3362 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3363 break;
3364 }
3365
3366 } else {
3367 *DP &= ~DP_LINK_TRAIN_MASK;
3368
3369 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3370 case DP_TRAINING_PATTERN_DISABLE:
3371 *DP |= DP_LINK_TRAIN_OFF;
3372 break;
3373 case DP_TRAINING_PATTERN_1:
3374 *DP |= DP_LINK_TRAIN_PAT_1;
3375 break;
3376 case DP_TRAINING_PATTERN_2:
3377 *DP |= DP_LINK_TRAIN_PAT_2;
3378 break;
3379 case DP_TRAINING_PATTERN_3:
3380 DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
3381 *DP |= DP_LINK_TRAIN_PAT_2;
3382 break;
3383 }
3384 }
3385 }
3386
3387 static void intel_dp_enable_port(struct intel_dp *intel_dp,
3388 const struct intel_crtc_state *old_crtc_state)
3389 {
3390 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3391 
3392 /* enable with pattern 1 (as per spec) */
3393 
3394 intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);
3395
3396 /*
3397 * Magic for VLV/CHV: we _must_ first set up the register without
3398 * actually enabling the port, and then do another write to enable
3399 * the port. Otherwise we can't even read out the state properly.
3400 * FIXME: TODO also enable DP for non-DDI ports on VLV/CHV.
3401 */
3402 intel_dp->DP |= DP_PORT_EN;
3403 if (old_crtc_state->has_audio)
3404 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
3405
3406 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3407 POSTING_READ(intel_dp->output_reg);
3408 }
3409
3410 static void intel_enable_dp(struct intel_encoder *encoder,
3411 const struct intel_crtc_state *pipe_config,
3412 const struct drm_connector_state *conn_state)
3413 {
3414 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3415 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3416 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
3417 u32 dp_reg = I915_READ(intel_dp->output_reg);
3418 enum pipe pipe = crtc->pipe;
3419 intel_wakeref_t wakeref;
3420
3421 if (WARN_ON(dp_reg & DP_PORT_EN))
3422 return;
3423
3424 with_pps_lock(intel_dp, wakeref) {
3425 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3426 vlv_init_panel_power_sequencer(encoder, pipe_config);
3427
3428 intel_dp_enable_port(intel_dp, pipe_config);
3429
3430 edp_panel_vdd_on(intel_dp);
3431 edp_panel_on(intel_dp);
3432 edp_panel_vdd_off(intel_dp, true);
3433 }
3434
3435 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3436 unsigned int lane_mask = 0x0;
3437
3438 if (IS_CHERRYVIEW(dev_priv))
3439 lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
3440
3441 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
3442 lane_mask);
3443 }
3444
3445 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
3446 intel_dp_start_link_train(intel_dp);
3447 intel_dp_stop_link_train(intel_dp);
3448
3449 if (pipe_config->has_audio) {
3450 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
3451 pipe_name(pipe));
3452 intel_audio_codec_enable(encoder, pipe_config, conn_state);
3453 }
3454 }
3455
3456 static void g4x_enable_dp(struct intel_encoder *encoder,
3457 const struct intel_crtc_state *pipe_config,
3458 const struct drm_connector_state *conn_state)
3459 {
3460 intel_enable_dp(encoder, pipe_config, conn_state);
3461 intel_edp_backlight_on(pipe_config, conn_state);
3462 }
3463
3464 static void vlv_enable_dp(struct intel_encoder *encoder,
3465 const struct intel_crtc_state *pipe_config,
3466 const struct drm_connector_state *conn_state)
3467 {
3468 intel_edp_backlight_on(pipe_config, conn_state);
3469 }
3470
3471 static void g4x_pre_enable_dp(struct intel_encoder *encoder,
3472 const struct intel_crtc_state *pipe_config,
3473 const struct drm_connector_state *conn_state)
3474 {
3475 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3476 enum port port = encoder->port;
3477
3478 intel_dp_prepare(encoder, pipe_config);
3479
3480 /* Only ilk+ has port A */
3481 if (port == PORT_A)
3482 ironlake_edp_pll_on(intel_dp, pipe_config);
3483 }
3484
3485 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
3486 {
3487 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3488 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
3489 enum pipe pipe = intel_dp->pps_pipe;
3490 i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
3491
3492 WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
3493
3494 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
3495 return;
3496
3497 edp_panel_vdd_off_sync(intel_dp);
3498
3499 /*
3500 * VLV seems to get confused when multiple power sequencers
3501 * have the same port selected (even if only one has power/vdd
3502 * enabled). The failure manifests as vlv_wait_port_ready() failing
3503 * for about a second on the first power sequencer that gets
3504 * selected after a previous use, aka. PPS idle. Avoid that by
3505 * clearing the port select in the sequencer being detached: on
3506 * VLV/CHV the port select lives in PP_ON_DELAYS, hence the write below.
3507 */
3508 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
3509 pipe_name(pipe), port_name(intel_dig_port->base.port));
3510 I915_WRITE(pp_on_reg, 0);
3511 POSTING_READ(pp_on_reg);
3512
3513 intel_dp->pps_pipe = INVALID_PIPE;
3514 }
3515
3516 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
3517 enum pipe pipe)
3518 {
3519 struct intel_encoder *encoder;
3520
3521 lockdep_assert_held(&dev_priv->pps_mutex);
3522
3523 for_each_intel_dp(&dev_priv->drm, encoder) {
3524 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3525 enum port port = encoder->port;
3526
3527 WARN(intel_dp->active_pipe == pipe,
3528 "stealing pipe %c power sequencer from active (e)DP port %c\n",
3529 pipe_name(pipe), port_name(port));
3530
3531 if (intel_dp->pps_pipe != pipe)
3532 continue;
3533
3534 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
3535 pipe_name(pipe), port_name(port));
3536
3537 /* make sure vdd is off before we steal it */
3538 vlv_detach_power_sequencer(intel_dp);
3539 }
3540 }
3541
3542 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
3543 const struct intel_crtc_state *crtc_state)
3544 {
3545 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3546 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3547 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3548
3549 lockdep_assert_held(&dev_priv->pps_mutex);
3550
3551 WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
3552
3553 if (intel_dp->pps_pipe != INVALID_PIPE &&
3554 intel_dp->pps_pipe != crtc->pipe) {
3555 /*
3556 * If another power sequencer was being used on this
3557 * port previously make sure to turn off vdd there while
3558 * we still have control of it.
3559 */
3560 vlv_detach_power_sequencer(intel_dp);
3561 }
3562
3563 /*
3564 * We may be stealing the power
3565 * sequencer from another port.
3566 */
3567 vlv_steal_power_sequencer(dev_priv, crtc->pipe);
3568
3569 intel_dp->active_pipe = crtc->pipe;
3570
3571 if (!intel_dp_is_edp(intel_dp))
3572 return;
3573
3574 /* now it's all ours */
3575 intel_dp->pps_pipe = crtc->pipe;
3576
3577 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
3578 pipe_name(intel_dp->pps_pipe), port_name(encoder->port));
3579
3580 /* init power sequencer on this pipe and port */
3581 intel_dp_init_panel_power_sequencer(intel_dp);
3582 intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
3583 }
3584
3585 static void vlv_pre_enable_dp(struct intel_encoder *encoder,
3586 const struct intel_crtc_state *pipe_config,
3587 const struct drm_connector_state *conn_state)
3588 {
3589 vlv_phy_pre_encoder_enable(encoder, pipe_config);
3590
3591 intel_enable_dp(encoder, pipe_config, conn_state);
3592 }
3593
3594 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
3595 const struct intel_crtc_state *pipe_config,
3596 const struct drm_connector_state *conn_state)
3597 {
3598 intel_dp_prepare(encoder, pipe_config);
3599
3600 vlv_phy_pre_pll_enable(encoder, pipe_config);
3601 }
3602
3603 static void chv_pre_enable_dp(struct intel_encoder *encoder,
3604 const struct intel_crtc_state *pipe_config,
3605 const struct drm_connector_state *conn_state)
3606 {
3607 chv_phy_pre_encoder_enable(encoder, pipe_config);
3608
3609 intel_enable_dp(encoder, pipe_config, conn_state);
3610
3611 /* Second common lane will stay alive on its own now */
3612 chv_phy_release_cl2_override(encoder);
3613 }
3614
3615 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
3616 const struct intel_crtc_state *pipe_config,
3617 const struct drm_connector_state *conn_state)
3618 {
3619 intel_dp_prepare(encoder, pipe_config);
3620
3621 chv_phy_pre_pll_enable(encoder, pipe_config);
3622 }
3623
3624 static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
3625 const struct intel_crtc_state *old_crtc_state,
3626 const struct drm_connector_state *old_conn_state)
3627 {
3628 chv_phy_post_pll_disable(encoder, old_crtc_state);
3629 }
3630
3631 /*
3632 * Fetch AUX CH registers 0x202 - 0x207 which contain
3633 * link status information
3634 */
3635 bool
3636 intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
3637 {
3638 return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
3639 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3640 }
3641
3642 /* These are source-specific values. */
3643 u8
3644 intel_dp_voltage_max(struct intel_dp *intel_dp)
3645 {
3646 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3647 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3648 enum port port = encoder->port;
3649
3650 if (HAS_DDI(dev_priv))
3651 return intel_ddi_dp_voltage_max(encoder);
3652 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3653 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3654 else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3655 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3656 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3657 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3658 else
3659 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3660 }
3661
3662 u8
3663 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
3664 {
3665 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3666 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3667 enum port port = encoder->port;
3668
3669 if (HAS_DDI(dev_priv)) {
3670 return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
3671 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3672 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3673 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3674 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3675 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3676 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3677 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3678 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3679 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3680 default:
3681 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3682 }
3683 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
3684 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3685 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3686 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3687 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3688 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3689 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3690 default:
3691 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3692 }
3693 } else {
3694 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3695 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3696 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3697 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3698 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3699 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3700 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3701 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3702 default:
3703 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3704 }
3705 }
3706 }
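
/*
* Example (illustrative) of the tables above: on VLV/CHV a train_set
* asking for voltage swing level 0 may be paired with pre-emphasis up
* to level 3, while swing level 3 allows no pre-emphasis at all; higher
* swing leaves less headroom for emphasis on the same supply.
*/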
3707
3708 static u32 vlv_signal_levels(struct intel_dp *intel_dp)
3709 {
3710 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3711 unsigned long demph_reg_value, preemph_reg_value,
3712 uniqtranscale_reg_value;
3713 u8 train_set = intel_dp->train_set[0];
3714
3715 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3716 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3717 preemph_reg_value = 0x0004000;
3718 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3719 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3720 demph_reg_value = 0x2B405555;
3721 uniqtranscale_reg_value = 0x552AB83A;
3722 break;
3723 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3724 demph_reg_value = 0x2B404040;
3725 uniqtranscale_reg_value = 0x5548B83A;
3726 break;
3727 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3728 demph_reg_value = 0x2B245555;
3729 uniqtranscale_reg_value = 0x5560B83A;
3730 break;
3731 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3732 demph_reg_value = 0x2B405555;
3733 uniqtranscale_reg_value = 0x5598DA3A;
3734 break;
3735 default:
3736 return 0;
3737 }
3738 break;
3739 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3740 preemph_reg_value = 0x0002000;
3741 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3742 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3743 demph_reg_value = 0x2B404040;
3744 uniqtranscale_reg_value = 0x5552B83A;
3745 break;
3746 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3747 demph_reg_value = 0x2B404848;
3748 uniqtranscale_reg_value = 0x5580B83A;
3749 break;
3750 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3751 demph_reg_value = 0x2B404040;
3752 uniqtranscale_reg_value = 0x55ADDA3A;
3753 break;
3754 default:
3755 return 0;
3756 }
3757 break;
3758 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3759 preemph_reg_value = 0x0000000;
3760 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3761 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3762 demph_reg_value = 0x2B305555;
3763 uniqtranscale_reg_value = 0x5570B83A;
3764 break;
3765 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3766 demph_reg_value = 0x2B2B4040;
3767 uniqtranscale_reg_value = 0x55ADDA3A;
3768 break;
3769 default:
3770 return 0;
3771 }
3772 break;
3773 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3774 preemph_reg_value = 0x0006000;
3775 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3776 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3777 demph_reg_value = 0x1B405555;
3778 uniqtranscale_reg_value = 0x55ADDA3A;
3779 break;
3780 default:
3781 return 0;
3782 }
3783 break;
3784 default:
3785 return 0;
3786 }
3787
3788 vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
3789 uniqtranscale_reg_value, 0);
3790
3791 return 0;
3792 }
3793
3794 static u32 chv_signal_levels(struct intel_dp *intel_dp)
3795 {
3796 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3797 u32 deemph_reg_value, margin_reg_value;
3798 bool uniq_trans_scale = false;
3799 u8 train_set = intel_dp->train_set[0];
3800
3801 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3802 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3803 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3804 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3805 deemph_reg_value = 128;
3806 margin_reg_value = 52;
3807 break;
3808 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3809 deemph_reg_value = 128;
3810 margin_reg_value = 77;
3811 break;
3812 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3813 deemph_reg_value = 128;
3814 margin_reg_value = 102;
3815 break;
3816 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3817 deemph_reg_value = 128;
3818 margin_reg_value = 154;
3819 uniq_trans_scale = true;
3820 break;
3821 default:
3822 return 0;
3823 }
3824 break;
3825 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3826 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3827 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3828 deemph_reg_value = 85;
3829 margin_reg_value = 78;
3830 break;
3831 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3832 deemph_reg_value = 85;
3833 margin_reg_value = 116;
3834 break;
3835 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3836 deemph_reg_value = 85;
3837 margin_reg_value = 154;
3838 break;
3839 default:
3840 return 0;
3841 }
3842 break;
3843 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3844 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3845 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3846 deemph_reg_value = 64;
3847 margin_reg_value = 104;
3848 break;
3849 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3850 deemph_reg_value = 64;
3851 margin_reg_value = 154;
3852 break;
3853 default:
3854 return 0;
3855 }
3856 break;
3857 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3858 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3859 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3860 deemph_reg_value = 43;
3861 margin_reg_value = 154;
3862 break;
3863 default:
3864 return 0;
3865 }
3866 break;
3867 default:
3868 return 0;
3869 }
3870
3871 chv_set_phy_signal_level(encoder, deemph_reg_value,
3872 margin_reg_value, uniq_trans_scale);
3873
3874 return 0;
3875 }
3876
3877 static u32
3878 g4x_signal_levels(u8 train_set)
3879 {
3880 u32 signal_levels = 0;
3881
3882 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3883 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3884 default:
3885 signal_levels |= DP_VOLTAGE_0_4;
3886 break;
3887 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3888 signal_levels |= DP_VOLTAGE_0_6;
3889 break;
3890 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3891 signal_levels |= DP_VOLTAGE_0_8;
3892 break;
3893 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3894 signal_levels |= DP_VOLTAGE_1_2;
3895 break;
3896 }
3897 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3898 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3899 default:
3900 signal_levels |= DP_PRE_EMPHASIS_0;
3901 break;
3902 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3903 signal_levels |= DP_PRE_EMPHASIS_3_5;
3904 break;
3905 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3906 signal_levels |= DP_PRE_EMPHASIS_6;
3907 break;
3908 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3909 signal_levels |= DP_PRE_EMPHASIS_9_5;
3910 break;
3911 }
3912 return signal_levels;
3913 }
3914
3915 /* SNB CPU eDP voltage swing and pre-emphasis control */
3916 static u32
3917 snb_cpu_edp_signal_levels(u8 train_set)
3918 {
3919 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3920 DP_TRAIN_PRE_EMPHASIS_MASK);
3921 switch (signal_levels) {
3922 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3923 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3924 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3925 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3926 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3927 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3928 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3929 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3930 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3931 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3932 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3933 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3934 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3935 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3936 default:
3937 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3938 "0x%x\n", signal_levels);
3939 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3940 }
3941 }
3942
3943 /* IVB CPU eDP voltage swing and pre-emphasis control */
3944 static u32
3945 ivb_cpu_edp_signal_levels(u8 train_set)
3946 {
3947 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3948 DP_TRAIN_PRE_EMPHASIS_MASK);
3949 switch (signal_levels) {
3950 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3951 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3952 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3953 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3954 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3955 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3956
3957 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3958 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3959 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3960 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3961
3962 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3963 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3964 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3965 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3966
3967 default:
3968 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3969 "0x%x\n", signal_levels);
3970 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3971 }
3972 }
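/*
 * Note on the two helpers above: train_set is the lane 0 value in the DPCD
 * TRAINING_LANEx_SET layout, with voltage swing in bits 1:0
 * (DP_TRAIN_VOLTAGE_SWING_MASK) and pre-emphasis in bits 4:3
 * (DP_TRAIN_PRE_EMPHASIS_MASK). For example, train_set = 0x09
 * (DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1) selects
 * EDP_LINK_TRAIN_600MV_3_5DB_IVB in ivb_cpu_edp_signal_levels().
 */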
3973
3974 void
3975 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3976 {
3977 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3978 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3979 enum port port = intel_dig_port->base.port;
3980 u32 signal_levels, mask = 0;
3981 u8 train_set = intel_dp->train_set[0];
3982
3983 if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
3984 signal_levels = bxt_signal_levels(intel_dp);
3985 } else if (HAS_DDI(dev_priv)) {
3986 signal_levels = ddi_signal_levels(intel_dp);
3987 mask = DDI_BUF_EMP_MASK;
3988 } else if (IS_CHERRYVIEW(dev_priv)) {
3989 signal_levels = chv_signal_levels(intel_dp);
3990 } else if (IS_VALLEYVIEW(dev_priv)) {
3991 signal_levels = vlv_signal_levels(intel_dp);
3992 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
3993 signal_levels = ivb_cpu_edp_signal_levels(train_set);
3994 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3995 } else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
3996 signal_levels = snb_cpu_edp_signal_levels(train_set);
3997 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3998 } else {
3999 signal_levels = g4x_signal_levels(train_set);
4000 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
4001 }
4002
4003 if (mask)
4004 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
4005
4006 DRM_DEBUG_KMS("Using vswing level %d\n",
4007 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
4008 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
4009 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
4010 DP_TRAIN_PRE_EMPHASIS_SHIFT);
4011
4012 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
4013
4014 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
4015 POSTING_READ(intel_dp->output_reg);
4016 }
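/*
 * Note: on VLV/CHV and BXT+ the levels are programmed entirely through the
 * PHY helpers, so signal_levels and mask both stay 0 and the
 * read-modify-write of intel_dp->DP above leaves the port register value
 * unchanged; only the DDI, IVB/SNB eDP and g4x paths carry the
 * vswing/pre-emphasis selection in the port register itself.
 */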
4017
4018 void
4019 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
4020 u8 dp_train_pat)
4021 {
4022 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4023 struct drm_i915_private *dev_priv =
4024 to_i915(intel_dig_port->base.base.dev);
4025
4026 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
4027
4028 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
4029 POSTING_READ(intel_dp->output_reg);
4030 }
4031
4032 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
4033 {
4034 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4035 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4036 enum port port = intel_dig_port->base.port;
4037 u32 val;
4038
4039 if (!HAS_DDI(dev_priv))
4040 return;
4041
4042 val = I915_READ(DP_TP_CTL(port));
4043 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
4044 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
4045 I915_WRITE(DP_TP_CTL(port), val);
4046
4047 /*
4048  * On PORT_A we can have only eDP in SST mode. There the only reason
4049  * we need to set idle transmission mode is to work around a HW issue
4050  * where we enable the pipe while not in idle link-training mode.
4051  * In this case there is requirement to wait for a minimum number of
4052  * idle patterns to be sent.
4053  */
4054 if (port == PORT_A)
4055 return;
4056
4057 if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
4058 DP_TP_STATUS_IDLE_DONE, 1))
4059 DRM_ERROR("Timed out waiting for DP idle patterns\n");
4060 }
4061
4062 static void
4063 intel_dp_link_down(struct intel_encoder *encoder,
4064 const struct intel_crtc_state *old_crtc_state)
4065 {
4066 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4067 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4068 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
4069 enum port port = encoder->port;
4070 u32 DP = intel_dp->DP;
4071
4072 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
4073 return;
4074
4075 DRM_DEBUG_KMS("\n");
4076
4077 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
4078 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
4079 DP &= ~DP_LINK_TRAIN_MASK_CPT;
4080 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
4081 } else {
4082 DP &= ~DP_LINK_TRAIN_MASK;
4083 DP |= DP_LINK_TRAIN_PAT_IDLE;
4084 }
4085 I915_WRITE(intel_dp->output_reg, DP);
4086 POSTING_READ(intel_dp->output_reg);
4087
4088 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
4089 I915_WRITE(intel_dp->output_reg, DP);
4090 POSTING_READ(intel_dp->output_reg);
4091
4092 /*
4093  * HW workaround for IBX, we need to move the port
4094  * to transcoder A after disabling it to allow the
4095  * matching HDMI port to be enabled on transcoder A.
4096  */
4097 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
4098 /*
4099  * We get CPU/PCH FIFO underruns on the other pipe when
4100  * doing the workaround. Sweep them under the carpet.
4101  */
4102 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4103 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4104
4105 /* always enable with pattern 1 (as per spec) */
4106 DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
4107 DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
4108 DP_LINK_TRAIN_PAT_1;
4109 I915_WRITE(intel_dp->output_reg, DP);
4110 POSTING_READ(intel_dp->output_reg);
4111
4112 DP &= ~DP_PORT_EN;
4113 I915_WRITE(intel_dp->output_reg, DP);
4114 POSTING_READ(intel_dp->output_reg);
4115
4116 intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
4117 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4118 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4119 }
4120
4121 msleep(intel_dp->panel_power_down_delay);
4122
4123 intel_dp->DP = DP;
4124
4125 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
4126 intel_wakeref_t wakeref;
4127
4128 with_pps_lock(intel_dp, wakeref)
4129 intel_dp->active_pipe = INVALID_PIPE;
4130 }
4131 }
4132
4133 static void
4134 intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
4135 {
4136 u8 dpcd_ext[6];
4137
4138 /*
4139  * Prior to DP1.3 the bit represented by
4140  * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
4141  * If it is set DP_DPCD_REV at 0000h could be at a value less than
4142  * the true capability of the panel. The only way to check is to
4143  * then compare 0000h and 2200h.
4144  */
4145 if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
4146 DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
4147 return;
4148
4149 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
4150 &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
4151 DRM_ERROR("DPCD failed read at extended capabilities\n");
4152 return;
4153 }
4154
4155 if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
4156 DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n");
4157 return;
4158 }
4159
4160 if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
4161 return;
4162
4163 DRM_DEBUG_KMS("Base DPCD: %*ph\n",
4164 (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
4165
4166 memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
4167 }
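/*
 * Note: DP_DP13_DPCD_REV (0x2200) is the extended receiver capability
 * block introduced with DP 1.3. When the sink sets
 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT, the true capabilities live
 * there and may exceed what the base block at 0x0000 reports, which is
 * why the cached dpcd[] is overwritten with the extended copy above.
 */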
4168
4169 bool
4170 intel_dp_read_dpcd(struct intel_dp *intel_dp)
4171 {
4172 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
4173 sizeof(intel_dp->dpcd)) < 0)
4174 return false;
4175
4176 intel_dp_extended_receiver_capabilities(intel_dp);
4177
4178 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
4179
4180 return intel_dp->dpcd[DP_DPCD_REV] != 0;
4181 }
4182
4183 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
4184 {
4185 u8 dprx = 0;
4186
4187 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
4188 &dprx) != 1)
4189 return false;
4190 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
4191 }
4192
4193 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
4194 {
4195 /*
4196  * Clear the cached register set to avoid using stale values
4197  * for the sinks that do not support DSC.
4198  */
4199 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
4200
4201 /* Clear fec_capable to avoid using stale values */
4202 intel_dp->fec_capable = 0;
4203
4204 /* Cache the DSC DPCD if eDP or if the DPCD rev is at least 0x14 */
4205 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
4206 intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4207 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
4208 intel_dp->dsc_dpcd,
4209 sizeof(intel_dp->dsc_dpcd)) < 0)
4210 DRM_ERROR("Failed to read DPCD register 0x%x\n",
4211 DP_DSC_SUPPORT);
4212
4213 DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
4214 (int)sizeof(intel_dp->dsc_dpcd),
4215 intel_dp->dsc_dpcd);
4216
4217 /* FEC is supported only on DP 1.4 */
4218 if (!intel_dp_is_edp(intel_dp) &&
4219 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
4220 &intel_dp->fec_capable) < 0)
4221 DRM_ERROR("Failed to read FEC DPCD register\n");
4222
4223 DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
4224 }
4225 }
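/*
 * Note: FEC capability is read only for external DP sinks because DP 1.4
 * mandates FEC whenever DSC is active on a regular DP link, whereas eDP
 * may run DSC without FEC; fec_capable therefore stays 0 for eDP.
 */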
4226
4227 static bool
4228 intel_edp_init_dpcd(struct intel_dp *intel_dp)
4229 {
4230 struct drm_i915_private *dev_priv =
4231 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
4232
4233 /* this function is meant to be called only once */
4234 WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);
4235
4236 if (!intel_dp_read_dpcd(intel_dp))
4237 return false;
4238
4239 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
4240 drm_dp_is_branch(intel_dp->dpcd));
4241
4242 /*
4243  * Read the eDP display control registers.
4244  *
4245  * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
4246  * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
4247  * set, but require eDP 1.4+ detection (e.g. for supported link rates
4248  * method). The display control registers should read zero if they're
4249  * not supported anyway.
4250  */
4251 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
4252 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
4253 sizeof(intel_dp->edp_dpcd))
4254 DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
4255 intel_dp->edp_dpcd);
4256
4257 /*
4258  * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
4259  * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
4260  */
4261 intel_psr_init_dpcd(intel_dp);
4262
4263 /* Read the eDP 1.4+ supported link rates. */
4264 if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4265 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
4266 int i;
4267
4268 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
4269 sink_rates, sizeof(sink_rates));
4270
4271 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4272 int val = le16_to_cpu(sink_rates[i]);
4273
4274 if (val == 0)
4275 break;
4276
4277 /* Value read multiplied by 200kHz gives the per-lane
4278  * link rate in kHz. The source rates are, however,
4279  * stored in terms of LS_Clk kHz. The full conversion
4280  * back to symbols is
4281  * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
4282  */
4283 intel_dp->sink_rates[i] = (val * 200) / 10;
4284 }
4285 intel_dp->num_sink_rates = i;
4286 }
4287
4288 /*
4289  * Use DP_LINK_RATE_SET if the sink advertised link rates,
4290  * otherwise fall back to rates derived from DP_MAX_LINK_RATE.
4291  */
4292 if (intel_dp->num_sink_rates)
4293 intel_dp->use_rate_select = true;
4294 else
4295 intel_dp_set_sink_rates(intel_dp);
4296
4297 intel_dp_set_common_rates(intel_dp);
4298
4299 /* Read the eDP DSC DPCD registers */
4300 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4301 intel_dp_get_dsc_sink_cap(intel_dp);
4302
4303 return true;
4304 }
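/*
 * Worked example of the sink rate conversion above: an eDP 1.4 panel
 * advertising HBR2 stores 27000 (5.4 Gbps / 200 kHz) in
 * DP_SUPPORTED_LINK_RATES. (27000 * 200) / 10 = 540000, i.e. the
 * 540000 kHz LS_Clk the driver tracks: * 200 turns the table entry back
 * into a per-lane bit rate in kHz, / 10 accounts for the 10 bits that
 * make up one 8b/10b symbol.
 */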
4305
4306
4307 static bool
4308 intel_dp_get_dpcd(struct intel_dp *intel_dp)
4309 {
4310 if (!intel_dp_read_dpcd(intel_dp))
4311 return false;
4312
4313 /*
4314  * Don't clobber cached eDP rates, and skip re-reading the
4315  * branch descriptor on eDP; neither changes behind our back.
4316  */
4317 if (!intel_dp_is_edp(intel_dp)) {
4318 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
4319 drm_dp_is_branch(intel_dp->dpcd));
4320
4321 intel_dp_set_sink_rates(intel_dp);
4322 intel_dp_set_common_rates(intel_dp);
4323 }
4324
4325 /*
4326  * Some eDP panels report a bogus sink count, so SINK_COUNT
4327  * is read and trusted only for non-eDP connectors here.
4328  */
4329 if (!intel_dp_is_edp(intel_dp) &&
4330 !drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_SINK_COUNT)) {
4331 u8 count;
4332 ssize_t r;
4333
4334 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
4335 if (r < 1)
4336 return false;
4337
4338 /*
4339  * Sink count can change between short HPD pulses,
4340  * hence handle it here rather than relying on the
4341  * value cached at the last full detect.
4342  */
4343 intel_dp->sink_count = DP_GET_SINK_COUNT(count);
4344
4345 /*
4346  * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
4347  * a dongle is present but no display. Unless we require to know
4348  * if a dongle is present or not, we don't need to update
4349  * downstream port information. So, an early return here saves
4350  * time from performing other operations which are not required.
4351  */
4352 if (!intel_dp->sink_count)
4353 return false;
4354 }
4355
4356 if (!drm_dp_is_branch(intel_dp->dpcd))
4357 return true;
4358
4359 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4360 return true;
4361
4362 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4363 intel_dp->downstream_ports,
4364 DP_MAX_DOWNSTREAM_PORTS) < 0)
4365 return false;
4366
4367 return true;
4368 }
4369
4370 static bool
4371 intel_dp_sink_can_mst(struct intel_dp *intel_dp)
4372 {
4373 u8 mstm_cap;
4374
4375 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4376 return false;
4377
4378 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
4379 return false;
4380
4381 return mstm_cap & DP_MST_CAP;
4382 }
4383
4384 static bool
4385 intel_dp_can_mst(struct intel_dp *intel_dp)
4386 {
4387 return i915_modparams.enable_dp_mst &&
4388 intel_dp->can_mst &&
4389 intel_dp_sink_can_mst(intel_dp);
4390 }
4391
4392 static void
4393 intel_dp_configure_mst(struct intel_dp *intel_dp)
4394 {
4395 struct intel_encoder *encoder =
4396 &dp_to_dig_port(intel_dp)->base;
4397 bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
4398
4399 DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
4400 port_name(encoder->port), yesno(intel_dp->can_mst),
4401 yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));
4402
4403 if (!intel_dp->can_mst)
4404 return;
4405
4406 intel_dp->is_mst = sink_can_mst &&
4407 i915_modparams.enable_dp_mst;
4408
4409 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4410 intel_dp->is_mst);
4411 }
4412
4413 static bool
4414 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4415 {
4416 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
4417 sink_irq_vector, DP_DPRX_ESI_LEN) ==
4418 DP_DPRX_ESI_LEN;
4419 }
4420
4421 static void
4422 intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
4423 const struct intel_crtc_state *crtc_state)
4424 {
4425 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4426 struct dp_sdp vsc_sdp = {};
4427
4428 /* Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 */
4429 vsc_sdp.sdp_header.HB0 = 0;
4430 vsc_sdp.sdp_header.HB1 = 0x7;
4431
4432 /*
4433  * VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/
4434  * Colorimetry Format indication.
4435  */
4436 vsc_sdp.sdp_header.HB2 = 0x5;
4437
4438 /*
4439  * HB3: 0x13 valid data bytes for HB2 = 05h, i.e. with
4440  * Pixel Encoding/Colorimetry Format indication.
4441  */
4442 vsc_sdp.sdp_header.HB3 = 0x13;
4443
4444 /*
4445  * YCbCr 420 = 3h DB16[7:4], ITU-R BT.601 = 0h, ITU-R BT.709 = 1h
4446  * DB16[3:0], per DP 1.4a spec, Table 2-120.
4447  */
4448 vsc_sdp.db[16] = 0x3 << 4;
4449 /* RGB->YCbCr color conversion uses the BT.709 color space */
4450 vsc_sdp.db[16] |= 0x1;
4451
4452 /*
4453  * For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only,
4454  * the following Component Bit Depth values are defined:
4455  * 001b = 8bpc.
4456  * 010b = 10bpc.
4457  * 011b = 12bpc.
4458  * 100b = 16bpc.
4459  */
4460 switch (crtc_state->pipe_bpp) {
4461 case 24:
4462 vsc_sdp.db[17] = 0x1;
4463 break;
4464 case 30:
4465 vsc_sdp.db[17] = 0x2;
4466 break;
4467 case 36:
4468 vsc_sdp.db[17] = 0x3;
4469 break;
4470 case 48:
4471 vsc_sdp.db[17] = 0x4;
4472 break;
4473 default:
4474 MISSING_CASE(crtc_state->pipe_bpp);
4475 break;
4476 }
4477
4478 /*
4479  * Dynamic Range (Bit 7):
4480  * 0 = VESA range, 1 = CTA range.
4481  * All YCbCr formats are always limited range.
4482  */
4483 vsc_sdp.db[17] |= 0x80;
4484
4485 /* Content Type (Bits 2:0)
4486  * 000b = Not defined.
4487  * 001b = Graphics.
4488  * 010b = Photo.
4489  * 011b = Video.
4490  * 100b = Game.
4491  * All other values are RESERVED.
4492  * Note: See CTA-861-G for the definition and expected
4493  * processing by a stream sink for the above content types.
4494  */
4495 
4496 vsc_sdp.db[18] = 0;
4497
4498 intel_dig_port->write_infoframe(&intel_dig_port->base,
4499 crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp));
4500 }
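/*
 * Worked example: for a 30 bpp YCbCr 4:2:0 pipe the function above emits
 * DB16 = 0x31 (YCbCr 4:2:0, ITU-R BT.709) and DB17 = 0x82 (10 bpc | CTA
 * dynamic range), with DB18 left at 0 (content type not defined), before
 * handing the SDP to ->write_infoframe().
 */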
4501
4502 void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
4503 const struct intel_crtc_state *crtc_state)
4504 {
4505 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
4506 return;
4507
4508 intel_pixel_encoding_setup_vsc(intel_dp, crtc_state);
4509 }
4510
4511 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4512 {
4513 int status = 0;
4514 int test_link_rate;
4515 u8 test_lane_count, test_link_bw;
4516
4517 /*
4518  * Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4)
4519  */
4520 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
4521 &test_lane_count);
4522
4523 if (status <= 0) {
4524 DRM_DEBUG_KMS("Lane count read failed\n");
4525 return DP_TEST_NAK;
4526 }
4527 test_lane_count &= DP_MAX_LANE_COUNT_MASK;
4528
4529 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
4530 &test_link_bw);
4531 if (status <= 0) {
4532 DRM_DEBUG_KMS("Link Rate read failed\n");
4533 return DP_TEST_NAK;
4534 }
4535 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
4536
4537 /* Validate the requested link rate and lane count */
4538 if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
4539 test_lane_count))
4540 return DP_TEST_NAK;
4541
4542 intel_dp->compliance.test_lane_count = test_lane_count;
4543 intel_dp->compliance.test_link_rate = test_link_rate;
4544
4545 return DP_TEST_ACK;
4546 }
4547
4548 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4549 {
4550 u8 test_pattern;
4551 u8 test_misc;
4552 __be16 h_width, v_height;
4553 int status = 0;
4554
4555 /* Read the TEST_PATTERN (DP CTS 3.1.5) */
4556 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
4557 &test_pattern);
4558 if (status <= 0) {
4559 DRM_DEBUG_KMS("Test pattern read failed\n");
4560 return DP_TEST_NAK;
4561 }
4562 if (test_pattern != DP_COLOR_RAMP)
4563 return DP_TEST_NAK;
4564
4565 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
4566 &h_width, 2);
4567 if (status <= 0) {
4568 DRM_DEBUG_KMS("H Width read failed\n");
4569 return DP_TEST_NAK;
4570 }
4571
4572 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
4573 &v_height, 2);
4574 if (status <= 0) {
4575 DRM_DEBUG_KMS("V Height read failed\n");
4576 return DP_TEST_NAK;
4577 }
4578
4579 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
4580 &test_misc);
4581 if (status <= 0) {
4582 DRM_DEBUG_KMS("TEST MISC read failed\n");
4583 return DP_TEST_NAK;
4584 }
4585 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
4586 return DP_TEST_NAK;
4587 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
4588 return DP_TEST_NAK;
4589 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
4590 case DP_TEST_BIT_DEPTH_6:
4591 intel_dp->compliance.test_data.bpc = 6;
4592 break;
4593 case DP_TEST_BIT_DEPTH_8:
4594 intel_dp->compliance.test_data.bpc = 8;
4595 break;
4596 default:
4597 return DP_TEST_NAK;
4598 }
4599
4600 intel_dp->compliance.test_data.video_pattern = test_pattern;
4601 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
4602 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
4603
4604 intel_dp->compliance.test_active = 1;
4605
4606 return DP_TEST_ACK;
4607 }
4608
4609 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
4610 {
4611 u8 test_result = DP_TEST_ACK;
4612 struct intel_connector *intel_connector = intel_dp->attached_connector;
4613 struct drm_connector *connector = &intel_connector->base;
4614
4615 if (intel_connector->detect_edid == NULL ||
4616 connector->edid_corrupt ||
4617 intel_dp->aux.i2c_defer_count > 6) {
4618 /* Check EDID read for NACKs, DEFERs and corruption
4619  * (DP CTS 1.2 Core r1.1)
4620  *    4.2.2.4 : Failed EDID read, I2C_NAK
4621  *    4.2.2.5 : Failed EDID read, I2C_DEFER
4622  *    4.2.2.6 : EDID corruption detected
4623  * Use failsafe mode for all cases
4624  */
4625 if (intel_dp->aux.i2c_nack_count > 0 ||
4626 intel_dp->aux.i2c_defer_count > 0)
4627 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4628 intel_dp->aux.i2c_nack_count,
4629 intel_dp->aux.i2c_defer_count);
4630 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
4631 } else {
4632 struct edid *block = intel_connector->detect_edid;
4633
4634 /* We have to write the checksum
4635  * of the last block read
4636  */
4637 block += intel_connector->detect_edid->extensions;
4638
4639 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
4640 block->checksum) <= 0)
4641 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4642
4643 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4644 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
4645 }
4646
4647 /* Set test active flag here so userspace doesn't interrupt things */
4648 intel_dp->compliance.test_active = 1;
4649
4650 return test_result;
4651 }
4652
4653 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4654 {
4655 u8 test_result = DP_TEST_NAK;
4656 return test_result;
4657 }
4658
4659 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4660 {
4661 u8 response = DP_TEST_NAK;
4662 u8 request = 0;
4663 int status;
4664
4665 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
4666 if (status <= 0) {
4667 DRM_DEBUG_KMS("Could not read test request from sink\n");
4668 goto update_status;
4669 }
4670
4671 switch (request) {
4672 case DP_TEST_LINK_TRAINING:
4673 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4674 response = intel_dp_autotest_link_training(intel_dp);
4675 break;
4676 case DP_TEST_LINK_VIDEO_PATTERN:
4677 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4678 response = intel_dp_autotest_video_pattern(intel_dp);
4679 break;
4680 case DP_TEST_LINK_EDID_READ:
4681 DRM_DEBUG_KMS("EDID test requested\n");
4682 response = intel_dp_autotest_edid(intel_dp);
4683 break;
4684 case DP_TEST_LINK_PHY_TEST_PATTERN:
4685 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4686 response = intel_dp_autotest_phy_pattern(intel_dp);
4687 break;
4688 default:
4689 DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
4690 break;
4691 }
4692
4693 if (response & DP_TEST_ACK)
4694 intel_dp->compliance.test_type = request;
4695
4696 update_status:
4697 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
4698 if (status <= 0)
4699 DRM_DEBUG_KMS("Could not write test response to sink\n");
4700 }
4701
4702 static int
4703 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4704 {
4705 bool bret;
4706
4707 if (intel_dp->is_mst) {
4708 u8 esi[DP_DPRX_ESI_LEN] = { 0 };
4709 int ret = 0;
4710 int retry;
4711 bool handled;
4712
4713 WARN_ON_ONCE(intel_dp->active_mst_links < 0);
4714 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4715 go_again:
4716 if (bret == true) {
4717
4718 /* check link status - esi[10] = 0x200c */
4719 if (intel_dp->active_mst_links > 0 &&
4720 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4721 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4722 intel_dp_start_link_train(intel_dp);
4723 intel_dp_stop_link_train(intel_dp);
4724 }
4725
4726 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4727 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4728
4729 if (handled) {
4730 for (retry = 0; retry < 3; retry++) {
4731 int wret;
4732 wret = drm_dp_dpcd_write(&intel_dp->aux,
4733 DP_SINK_COUNT_ESI+1,
4734 &esi[1], 3);
4735 if (wret == 3) {
4736 break;
4737 }
4738 }
4739
4740 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4741 if (bret == true) {
4742 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4743 goto go_again;
4744 }
4745 } else
4746 ret = 0;
4747
4748 return ret;
4749 } else {
4750 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4751 intel_dp->is_mst = false;
4752 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4753 intel_dp->is_mst);
4754 }
4755 }
4756 return -EINVAL;
4757 }
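/*
 * The loop above follows the MST IRQ handling contract: read the ESI
 * block at DP_SINK_COUNT_ESI, retrain first if channel EQ reports bad,
 * let the topology manager service the event, ack the handled bits by
 * writing them back at DP_SINK_COUNT_ESI + 1, then re-read in case more
 * events arrived meanwhile. A failed ESI read is treated as the device
 * having vanished and MST mode is torn down.
 */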
4758
4759 static bool
4760 intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
4761 {
4762 u8 link_status[DP_LINK_STATUS_SIZE];
4763
4764 if (!intel_dp->link_trained)
4765 return false;
4766
4767 /*
4768  * While the PSR source HW is enabled it controls the main link and
4769  * enables/disables frame transmission itself, so a retrain attempted
4770  * now could fail outright or end up mixing training patterns with
4771  * frame data. When exiting PSR the HW retrains the link anyway,
4772  * fixing up any link status error.
4773  */
4774 
4775 if (intel_psr_enabled(intel_dp))
4776 return false;
4777
4778 if (!intel_dp_get_link_status(intel_dp, link_status))
4779 return false;
4780
4781 /*
4782  * Validate the cached values of intel_dp->link_rate and
4783  * intel_dp->lane_count before attempting to retrain.
4784  */
4785 if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
4786 intel_dp->lane_count))
4787 return false;
4788
4789 /* Retrain if Channel EQ or CR not ok */
4790 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
4791 }
4792
4793 int intel_dp_retrain_link(struct intel_encoder *encoder,
4794 struct drm_modeset_acquire_ctx *ctx)
4795 {
4796 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4797 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4798 struct intel_connector *connector = intel_dp->attached_connector;
4799 struct drm_connector_state *conn_state;
4800 struct intel_crtc_state *crtc_state;
4801 struct intel_crtc *crtc;
4802 int ret;
4803
4804 /* FIXME handle the MST connectors as well */
4805
4806 if (!connector || connector->base.status != connector_status_connected)
4807 return 0;
4808
4809 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
4810 ctx);
4811 if (ret)
4812 return ret;
4813
4814 conn_state = connector->base.state;
4815
4816 crtc = to_intel_crtc(conn_state->crtc);
4817 if (!crtc)
4818 return 0;
4819
4820 ret = drm_modeset_lock(&crtc->base.mutex, ctx);
4821 if (ret)
4822 return ret;
4823
4824 crtc_state = to_intel_crtc_state(crtc->base.state);
4825
4826 WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));
4827
4828 if (!crtc_state->base.active)
4829 return 0;
4830
4831 if (conn_state->commit &&
4832 !try_wait_for_completion(&conn_state->commit->hw_done))
4833 return 0;
4834
4835 if (!intel_dp_needs_link_retrain(intel_dp))
4836 return 0;
4837
4838 /* Suppress underruns caused by re-training */
4839 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
4840 if (crtc_state->has_pch_encoder)
4841 intel_set_pch_fifo_underrun_reporting(dev_priv,
4842 intel_crtc_pch_transcoder(crtc), false);
4843
4844 intel_dp_start_link_train(intel_dp);
4845 intel_dp_stop_link_train(intel_dp);
4846
4847 /* Keep underrun reporting disabled until things are stable */
4848 intel_wait_for_vblank(dev_priv, crtc->pipe);
4849
4850 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
4851 if (crtc_state->has_pch_encoder)
4852 intel_set_pch_fifo_underrun_reporting(dev_priv,
4853 intel_crtc_pch_transcoder(crtc), true);
4854
4855 return 0;
4856 }
4857
4858 /*
4859  * If display is now connected check links status,
4860  * there has been known issues of link loss triggering
4861  * long pulse.
4862  *
4863  * Some sinks (eg. ASUS PB287Q) seem to perform some
4864  * weird HPD ping pong during modesets. So we can apparently
4865  * end up with HPD going low during a modeset, and then
4866  * going back up soon after. And once that happens we must
4867  * retrain the link to get a picture. That's in case no
4868  * userspace component reacted to intermittent HPD dip.
4869  */
4870 static enum intel_hotplug_state
4871 intel_dp_hotplug(struct intel_encoder *encoder,
4872 struct intel_connector *connector,
4873 bool irq_received)
4874 {
4875 struct drm_modeset_acquire_ctx ctx;
4876 enum intel_hotplug_state state;
4877 int ret;
4878
4879 state = intel_encoder_hotplug(encoder, connector, irq_received);
4880
4881 drm_modeset_acquire_init(&ctx, 0);
4882
4883 for (;;) {
4884 ret = intel_dp_retrain_link(encoder, &ctx);
4885
4886 if (ret == -EDEADLK) {
4887 drm_modeset_backoff(&ctx);
4888 continue;
4889 }
4890
4891 break;
4892 }
4893
4894 drm_modeset_drop_locks(&ctx);
4895 drm_modeset_acquire_fini(&ctx);
4896 WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
4897
4898 /*
4899  * Keeping it consistent with intel_ddi_hotplug() and
4900  * intel_hdmi_hotplug().
4901  */
4902 if (state == INTEL_HOTPLUG_UNCHANGED && irq_received)
4903 state = INTEL_HOTPLUG_RETRY;
4904
4905 return state;
4906 }
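/*
 * The for (;;) loop above is the usual drm_modeset_acquire deadlock
 * avoidance pattern: intel_dp_retrain_link() takes locks through the
 * acquire context, and on -EDEADLK every lock taken so far is dropped
 * via drm_modeset_backoff() before retrying, so the hotplug work cannot
 * deadlock against a concurrent modeset.
 */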
4907
4908 static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
4909 {
4910 u8 val;
4911
4912 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
4913 return;
4914
4915 if (drm_dp_dpcd_readb(&intel_dp->aux,
4916 DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
4917 return;
4918
4919 drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
4920
4921 if (val & DP_AUTOMATED_TEST_REQUEST)
4922 intel_dp_handle_test_request(intel_dp);
4923
4924 if (val & DP_CP_IRQ)
4925 intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
4926
4927 if (val & DP_SINK_SPECIFIC_IRQ)
4928 DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
4929 }
4930
4931 /*
4932  * According to DP spec
4933  * 5.1.2:
4934  *  1. Read DPCD
4935  *  2. Configure link according to Receiver Capabilities
4936  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4937  *  4. Check link status on receipt of hot-plug interrupt
4938  *
4939  * intel_dp_short_pulse - handles short pulse interrupts
4940  * when full detection is not required.
4941  * Returns %true if short pulse is handled and full detection
4942  * is NOT required and %false otherwise.
4943  */
4944 static bool
4945 intel_dp_short_pulse(struct intel_dp *intel_dp)
4946 {
4947 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4948 u8 old_sink_count = intel_dp->sink_count;
4949 bool ret;
4950
4951 /*
4952  * Clearing compliance test variables to allow capturing
4953  * of values for next automated test request.
4954  */
4955 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
4956
4957 /*
4958  * Now read the DPCD to see if it's actually working.
4959  * If the current value of sink count doesn't match with
4960  * the value that was stored earlier or dpcd read failed
4961  * we need to do full detection.
4962  */
4963 ret = intel_dp_get_dpcd(intel_dp);
4964
4965 if ((old_sink_count != intel_dp->sink_count) || !ret) {
4966 /* No need to proceed if we are going to do full detect */
4967 return false;
4968 }
4969
4970 intel_dp_check_service_irq(intel_dp);
4971
4972 /* Handle CEC interrupts, if any */
4973 drm_dp_cec_irq(&intel_dp->aux);
4974
4975 /* defer to the hotplug work for link retraining if possible */
4976 if (intel_dp_needs_link_retrain(intel_dp))
4977 return false;
4978
4979 intel_psr_short_pulse(intel_dp);
4980
4981 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
4982 DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
4983 /* Send a Hotplug Uevent to userspace to start modeset */
4984 drm_kms_helper_hotplug_event(&dev_priv->drm);
4985 }
4986
4987 return true;
4988 }
4989
4990 /* XXX this is probably wrong for multiple downstream ports */
4991 static enum drm_connector_status
4992 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4993 {
4994 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
4995 u8 *dpcd = intel_dp->dpcd;
4996 u8 type;
4997
4998 if (WARN_ON(intel_dp_is_edp(intel_dp)))
4999 return connector_status_connected;
5000
5001 if (lspcon->active)
5002 lspcon_resume(lspcon);
5003
5004 if (!intel_dp_get_dpcd(intel_dp))
5005 return connector_status_disconnected;
5006
5007 /* if there's no downstream port, we're done */
5008 if (!drm_dp_is_branch(dpcd))
5009 return connector_status_connected;
5010
5011 /* If we're HPD-aware, SINK_COUNT changes dynamically */
5012 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
5013 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
5014
5015 return intel_dp->sink_count ?
5016 connector_status_connected : connector_status_disconnected;
5017 }
5018
5019 if (intel_dp_can_mst(intel_dp))
5020 return connector_status_connected;
5021
5022 /* If no HPD, poke DDC gently */
5023 if (drm_probe_ddc(&intel_dp->aux.ddc))
5024 return connector_status_connected;
5025
5026 /* Well we tried, say unknown for unreliable port types */
5027 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
5028 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
5029 if (type == DP_DS_PORT_TYPE_VGA ||
5030 type == DP_DS_PORT_TYPE_NON_EDID)
5031 return connector_status_unknown;
5032 } else {
5033 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
5034 DP_DWN_STRM_PORT_TYPE_MASK;
5035 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
5036 type == DP_DWN_STRM_PORT_TYPE_OTHER)
5037 return connector_status_unknown;
5038 }
5039
5040 /* Anything else is out of spec, warn and ignore */
5041 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
5042 return connector_status_disconnected;
5043 }
5044
5045 static enum drm_connector_status
5046 edp_detect(struct intel_dp *intel_dp)
5047 {
5048 return connector_status_connected;
5049 }
5050
5051 static bool ibx_digital_port_connected(struct intel_encoder *encoder)
5052 {
5053 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5054 u32 bit;
5055
5056 switch (encoder->hpd_pin) {
5057 case HPD_PORT_B:
5058 bit = SDE_PORTB_HOTPLUG;
5059 break;
5060 case HPD_PORT_C:
5061 bit = SDE_PORTC_HOTPLUG;
5062 break;
5063 case HPD_PORT_D:
5064 bit = SDE_PORTD_HOTPLUG;
5065 break;
5066 default:
5067 MISSING_CASE(encoder->hpd_pin);
5068 return false;
5069 }
5070
5071 return I915_READ(SDEISR) & bit;
5072 }
5073
5074 static bool cpt_digital_port_connected(struct intel_encoder *encoder)
5075 {
5076 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5077 u32 bit;
5078
5079 switch (encoder->hpd_pin) {
5080 case HPD_PORT_B:
5081 bit = SDE_PORTB_HOTPLUG_CPT;
5082 break;
5083 case HPD_PORT_C:
5084 bit = SDE_PORTC_HOTPLUG_CPT;
5085 break;
5086 case HPD_PORT_D:
5087 bit = SDE_PORTD_HOTPLUG_CPT;
5088 break;
5089 default:
5090 MISSING_CASE(encoder->hpd_pin);
5091 return false;
5092 }
5093
5094 return I915_READ(SDEISR) & bit;
5095 }
5096
5097 static bool spt_digital_port_connected(struct intel_encoder *encoder)
5098 {
5099 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5100 u32 bit;
5101
5102 switch (encoder->hpd_pin) {
5103 case HPD_PORT_A:
5104 bit = SDE_PORTA_HOTPLUG_SPT;
5105 break;
5106 case HPD_PORT_E:
5107 bit = SDE_PORTE_HOTPLUG_SPT;
5108 break;
5109 default:
5110 return cpt_digital_port_connected(encoder);
5111 }
5112
5113 return I915_READ(SDEISR) & bit;
5114 }
5115
5116 static bool g4x_digital_port_connected(struct intel_encoder *encoder)
5117 {
5118 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5119 u32 bit;
5120
5121 switch (encoder->hpd_pin) {
5122 case HPD_PORT_B:
5123 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
5124 break;
5125 case HPD_PORT_C:
5126 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
5127 break;
5128 case HPD_PORT_D:
5129 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
5130 break;
5131 default:
5132 MISSING_CASE(encoder->hpd_pin);
5133 return false;
5134 }
5135
5136 return I915_READ(PORT_HOTPLUG_STAT) & bit;
5137 }
5138
5139 static bool gm45_digital_port_connected(struct intel_encoder *encoder)
5140 {
5141 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5142 u32 bit;
5143
5144 switch (encoder->hpd_pin) {
5145 case HPD_PORT_B:
5146 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
5147 break;
5148 case HPD_PORT_C:
5149 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
5150 break;
5151 case HPD_PORT_D:
5152 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
5153 break;
5154 default:
5155 MISSING_CASE(encoder->hpd_pin);
5156 return false;
5157 }
5158
5159 return I915_READ(PORT_HOTPLUG_STAT) & bit;
5160 }
5161
5162 static bool ilk_digital_port_connected(struct intel_encoder *encoder)
5163 {
5164 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5165
5166 if (encoder->hpd_pin == HPD_PORT_A)
5167 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5168 else
5169 return ibx_digital_port_connected(encoder);
5170 }
5171
5172 static bool snb_digital_port_connected(struct intel_encoder *encoder)
5173 {
5174 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5175
5176 if (encoder->hpd_pin == HPD_PORT_A)
5177 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5178 else
5179 return cpt_digital_port_connected(encoder);
5180 }
5181
5182 static bool ivb_digital_port_connected(struct intel_encoder *encoder)
5183 {
5184 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5185
5186 if (encoder->hpd_pin == HPD_PORT_A)
5187 return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
5188 else
5189 return cpt_digital_port_connected(encoder);
5190 }
5191
5192 static bool bdw_digital_port_connected(struct intel_encoder *encoder)
5193 {
5194 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5195
5196 if (encoder->hpd_pin == HPD_PORT_A)
5197 return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
5198 else
5199 return cpt_digital_port_connected(encoder);
5200 }
5201
5202 static bool bxt_digital_port_connected(struct intel_encoder *encoder)
5203 {
5204 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5205 u32 bit;
5206
5207 switch (encoder->hpd_pin) {
5208 case HPD_PORT_A:
5209 bit = BXT_DE_PORT_HP_DDIA;
5210 break;
5211 case HPD_PORT_B:
5212 bit = BXT_DE_PORT_HP_DDIB;
5213 break;
5214 case HPD_PORT_C:
5215 bit = BXT_DE_PORT_HP_DDIC;
5216 break;
5217 default:
5218 MISSING_CASE(encoder->hpd_pin);
5219 return false;
5220 }
5221
5222 return I915_READ(GEN8_DE_PORT_ISR) & bit;
5223 }
5224
5225 static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
5226 struct intel_digital_port *intel_dig_port)
5227 {
5228 enum port port = intel_dig_port->base.port;
5229
5230 return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
5231 }
5232
5233 static bool icl_digital_port_connected(struct intel_encoder *encoder)
5234 {
5235 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5236 struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
5237 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
5238
5239 if (intel_phy_is_combo(dev_priv, phy))
5240 return icl_combo_port_connected(dev_priv, dig_port);
5241 else if (intel_phy_is_tc(dev_priv, phy))
5242 return intel_tc_port_connected(dig_port);
5243 else
5244 MISSING_CASE(encoder->hpd_pin);
5245
5246 return false;
5247 }
5248
5249 /*
5250  * intel_digital_port_connected - is the specified port connected?
5251  * @encoder: intel_encoder
5252  *
5253  * In cases where there's a connector physically connected but it can't be used
5254  * by our hardware we also return false, since the rest of the driver should
5255  * pretty much treat the port as disconnected. This is relevant for type-C
5256  * (starting on ICL) where there's ownership involved.
5257  *
5258  * Return %true if port is connected, %false otherwise.
5259  */
5260 static bool __intel_digital_port_connected(struct intel_encoder *encoder)
5261 {
5262 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5263
5264 if (HAS_GMCH(dev_priv)) {
5265 if (IS_GM45(dev_priv))
5266 return gm45_digital_port_connected(encoder);
5267 else
5268 return g4x_digital_port_connected(encoder);
5269 }
5270
5271 if (INTEL_GEN(dev_priv) >= 11)
5272 return icl_digital_port_connected(encoder);
5273 else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv))
5274 return spt_digital_port_connected(encoder);
5275 else if (IS_GEN9_LP(dev_priv))
5276 return bxt_digital_port_connected(encoder);
5277 else if (IS_GEN(dev_priv, 8))
5278 return bdw_digital_port_connected(encoder);
5279 else if (IS_GEN(dev_priv, 7))
5280 return ivb_digital_port_connected(encoder);
5281 else if (IS_GEN(dev_priv, 6))
5282 return snb_digital_port_connected(encoder);
5283 else if (IS_GEN(dev_priv, 5))
5284 return ilk_digital_port_connected(encoder);
5285
5286 MISSING_CASE(INTEL_GEN(dev_priv));
5287 return false;
5288 }
5289
5290 bool intel_digital_port_connected(struct intel_encoder *encoder)
5291 {
5292 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5293 bool is_connected = false;
5294 intel_wakeref_t wakeref;
5295
5296 with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
5297 is_connected = __intel_digital_port_connected(encoder);
5298
5299 return is_connected;
5300 }
5301
5302 static struct edid *
5303 intel_dp_get_edid(struct intel_dp *intel_dp)
5304 {
5305 struct intel_connector *intel_connector = intel_dp->attached_connector;
5306
5307 /* use cached edid if we have one */
5308 if (intel_connector->edid) {
5309 /* invalid edid */
5310 if (IS_ERR(intel_connector->edid))
5311 return NULL;
5312
5313 return drm_edid_duplicate(intel_connector->edid);
5314 } else
5315 return drm_get_edid(&intel_connector->base,
5316 &intel_dp->aux.ddc);
5317 }
5318
5319 static void
5320 intel_dp_set_edid(struct intel_dp *intel_dp)
5321 {
5322 struct intel_connector *intel_connector = intel_dp->attached_connector;
5323 struct edid *edid;
5324
5325 intel_dp_unset_edid(intel_dp);
5326 edid = intel_dp_get_edid(intel_dp);
5327 intel_connector->detect_edid = edid;
5328
5329 intel_dp->has_audio = drm_detect_monitor_audio(edid);
5330 drm_dp_cec_set_edid(&intel_dp->aux, edid);
5331 }
5332
5333 static void
5334 intel_dp_unset_edid(struct intel_dp *intel_dp)
5335 {
5336 struct intel_connector *intel_connector = intel_dp->attached_connector;
5337
5338 drm_dp_cec_unset_edid(&intel_dp->aux);
5339 kfree(intel_connector->detect_edid);
5340 intel_connector->detect_edid = NULL;
5341
5342 intel_dp->has_audio = false;
5343 }
5344
5345 static int
5346 intel_dp_detect(struct drm_connector *connector,
5347 struct drm_modeset_acquire_ctx *ctx,
5348 bool force)
5349 {
5350 struct drm_i915_private *dev_priv = to_i915(connector->dev);
5351 struct intel_dp *intel_dp = intel_attached_dp(connector);
5352 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5353 struct intel_encoder *encoder = &dig_port->base;
5354 enum drm_connector_status status;
5355
5356 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5357 connector->base.id, connector->name);
5358 WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
5359
5360 /* Can't disconnect eDP */
5361 if (intel_dp_is_edp(intel_dp))
5362 status = edp_detect(intel_dp);
5363 else if (intel_digital_port_connected(encoder))
5364 status = intel_dp_detect_dpcd(intel_dp);
5365 else
5366 status = connector_status_disconnected;
5367
5368 if (status == connector_status_disconnected) {
5369 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
5370 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
5371
5372 if (intel_dp->is_mst) {
5373 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
5374 intel_dp->is_mst,
5375 intel_dp->mst_mgr.mst_state);
5376 intel_dp->is_mst = false;
5377 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5378 intel_dp->is_mst);
5379 }
5380
5381 goto out;
5382 }
5383
5384 if (intel_dp->reset_link_params) {
5385 /* Initial max link lane count */
5386 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
5387
5388 /* Initial max link rate */
5389 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
5390
5391 intel_dp->reset_link_params = false;
5392 }
5393
5394 intel_dp_print_rates(intel_dp);
5395
5396 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
5397 if (INTEL_GEN(dev_priv) >= 11)
5398 intel_dp_get_dsc_sink_cap(intel_dp);
5399
5400 intel_dp_configure_mst(intel_dp);
5401
5402 if (intel_dp->is_mst) {
5403 /*
5404  * If we are in MST mode then this connector
5405  * won't appear connected or have anything
5406  * with EDID on it
5407  */
5408 status = connector_status_disconnected;
5409 goto out;
5410 }
5411
5412 /*
5413  * Some external monitors do not signal loss of link synchronization
5414  * with an IRQ_HPD, so force a link status check.
5415  */
5416 if (!intel_dp_is_edp(intel_dp)) {
5417 int ret;
5418
5419 ret = intel_dp_retrain_link(encoder, ctx);
5420 if (ret)
5421 return ret;
5422 }
5423
5424 /*
5425  * Clearing NACK and defer counts to get their exact values
5426  * while reading EDID which are required by Compliance tests
5427  * 4.2.2.4 and 4.2.2.5
5428  */
5429 intel_dp->aux.i2c_nack_count = 0;
5430 intel_dp->aux.i2c_defer_count = 0;
5431
5432 intel_dp_set_edid(intel_dp);
5433 if (intel_dp_is_edp(intel_dp) ||
5434 to_intel_connector(connector)->detect_edid)
5435 status = connector_status_connected;
5436
5437 intel_dp_check_service_irq(intel_dp);
5438
5439 out:
5440 if (status != connector_status_connected && !intel_dp->is_mst)
5441 intel_dp_unset_edid(intel_dp);
5442
5443 /*
5444  * Make sure the refs for power wells enabled during detect are
5445  * dropped, to avoid a new detect cycle triggering the same wells.
5446  */
5447 intel_display_power_flush_work(dev_priv);
5448
5449 return status;
5450 }
5451
5452 static void
5453 intel_dp_force(struct drm_connector *connector)
5454 {
5455 struct intel_dp *intel_dp = intel_attached_dp(connector);
5456 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5457 struct intel_encoder *intel_encoder = &dig_port->base;
5458 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
5459 enum intel_display_power_domain aux_domain =
5460 intel_aux_power_domain(dig_port);
5461 intel_wakeref_t wakeref;
5462
5463 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5464 connector->base.id, connector->name);
5465 intel_dp_unset_edid(intel_dp);
5466
5467 if (connector->status != connector_status_connected)
5468 return;
5469
5470 wakeref = intel_display_power_get(dev_priv, aux_domain);
5471
5472 intel_dp_set_edid(intel_dp);
5473
5474 intel_display_power_put(dev_priv, aux_domain, wakeref);
5475 }
5476
5477 static int intel_dp_get_modes(struct drm_connector *connector)
5478 {
5479 struct intel_connector *intel_connector = to_intel_connector(connector);
5480 struct edid *edid;
5481
5482 edid = intel_connector->detect_edid;
5483 if (edid) {
5484 int ret = intel_connector_update_modes(connector, edid);
5485 if (ret)
5486 return ret;
5487 }
5488
5489 /* if eDP has no EDID, fall back to fixed mode */
5490 if (intel_dp_is_edp(intel_attached_dp(connector)) &&
5491 intel_connector->panel.fixed_mode) {
5492 struct drm_display_mode *mode;
5493
5494 mode = drm_mode_duplicate(connector->dev,
5495 intel_connector->panel.fixed_mode);
5496 if (mode) {
5497 drm_mode_probed_add(connector, mode);
5498 return 1;
5499 }
5500 }
5501
5502 return 0;
5503 }
5504
5505 static int
5506 intel_dp_connector_register(struct drm_connector *connector)
5507 {
5508 struct intel_dp *intel_dp = intel_attached_dp(connector);
5509 struct drm_device *dev = connector->dev;
5510 int ret;
5511
5512 ret = intel_connector_register(connector);
5513 if (ret)
5514 return ret;
5515
5516 i915_debugfs_connector_add(connector);
5517
5518 DRM_DEBUG_KMS("registering %s bus for %s\n",
5519 intel_dp->aux.name, connector->kdev->kobj.name);
5520
5521 intel_dp->aux.dev = connector->kdev;
5522 ret = drm_dp_aux_register(&intel_dp->aux);
5523 if (!ret)
5524 drm_dp_cec_register_connector(&intel_dp->aux,
5525 connector->name, dev->dev);
5526 return ret;
5527 }
5528
5529 static void
5530 intel_dp_connector_unregister(struct drm_connector *connector)
5531 {
5532 struct intel_dp *intel_dp = intel_attached_dp(connector);
5533
5534 drm_dp_cec_unregister_connector(&intel_dp->aux);
5535 drm_dp_aux_unregister(&intel_dp->aux);
5536 intel_connector_unregister(connector);
5537 }
5538
5539 void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
5540 {
5541 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
5542 struct intel_dp *intel_dp = &intel_dig_port->dp;
5543
5544 intel_dp_mst_encoder_cleanup(intel_dig_port);
5545 if (intel_dp_is_edp(intel_dp)) {
5546 intel_wakeref_t wakeref;
5547
5548 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5549
5550 /* vdd might still be enabled due to the delayed vdd off.
5551  * Make sure vdd is actually turned off here.
5552  */
5553 with_pps_lock(intel_dp, wakeref)
5554 edp_panel_vdd_off_sync(intel_dp);
5555
5556 if (intel_dp->edp_notifier.notifier_call) {
5557 unregister_reboot_notifier(&intel_dp->edp_notifier);
5558 intel_dp->edp_notifier.notifier_call = NULL;
5559 }
5560 }
5561
5562 intel_dp_aux_fini(intel_dp);
5563 }
5564
5565 static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5566 {
5567 intel_dp_encoder_flush_work(encoder);
5568
5569 drm_encoder_cleanup(encoder);
5570 kfree(enc_to_dig_port(encoder));
5571 }
5572
5573 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5574 {
5575 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5576 intel_wakeref_t wakeref;
5577
5578 if (!intel_dp_is_edp(intel_dp))
5579 return;
5580
5581 /*
5582  * vdd might still be enabled due to the delayed vdd off.
5583  * Make sure vdd is actually turned off here.
5584  */
5585 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5586 with_pps_lock(intel_dp, wakeref)
5587 edp_panel_vdd_off_sync(intel_dp);
5588 }
5589
5590 static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
5591 {
5592 long ret;
5593
5594 #define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
5595 ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
5596 msecs_to_jiffies(timeout));
5597
5598 if (!ret)
5599 DRM_DEBUG_KMS("Timed out waiting for CP_IRQ\n");
5600 }
5601
5602 static
5603 int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
5604 u8 *an)
5605 {
5606 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
5607 static const struct drm_dp_aux_msg msg = {
5608 .request = DP_AUX_NATIVE_WRITE,
5609 .address = DP_AUX_HDCP_AKSV,
5610 .size = DRM_HDCP_KSV_LEN,
5611 };
5612 u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
5613 ssize_t dpcd_ret;
5614 int ret;
5615
5616 /* Output An first, that's easy */
5617 dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
5618 an, DRM_HDCP_AN_LEN);
5619 if (dpcd_ret != DRM_HDCP_AN_LEN) {
5620 DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
5621 dpcd_ret);
5622 return dpcd_ret >= 0 ? -EIO : dpcd_ret;
5623 }
5624
5625 /*
5626  * Since Aksv is Oh-So-Secret, we can't access it in software. So we
5627  * need to create the AUX header as if we were writing the data, and
5628  * then tickle the hardware to output the data once the header is
5629  * sent out.
5630  */
5631 intel_dp_aux_header(txbuf, &msg);
5632
5633 ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size,
5634 rxbuf, sizeof(rxbuf),
5635 DP_AUX_CH_CTL_AUX_AKSV_SELECT);
5636 if (ret < 0) {
5637 DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
5638 return ret;
5639 } else if (ret == 0) {
5640 DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
5641 return -EIO;
5642 }
5643
5644 reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
5645 if (reply != DP_AUX_NATIVE_REPLY_ACK) {
5646 DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
5647 reply);
5648 return -EIO;
5649 }
5650 return 0;
5651 }
5652
5653 static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
5654 u8 *bksv)
5655 {
5656 ssize_t ret;
5657 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
5658 DRM_HDCP_KSV_LEN);
5659 if (ret != DRM_HDCP_KSV_LEN) {
5660 DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
5661 return ret >= 0 ? -EIO : ret;
5662 }
5663 return 0;
5664 }
5665
5666 static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
5667 u8 *bstatus)
5668 {
5669 ssize_t ret;
5670 /*
5671  * For some reason the HDMI and DP HDCP specs call this register
5672  * definition by different names. In the HDMI spec, it's called
5673  * BSTATUS, but in DP it's called BINFO.
5674  */
5675 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
5676 bstatus, DRM_HDCP_BSTATUS_LEN);
5677 if (ret != DRM_HDCP_BSTATUS_LEN) {
5678 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5679 return ret >= 0 ? -EIO : ret;
5680 }
5681 return 0;
5682 }
5683
5684 static
5685 int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
5686 u8 *bcaps)
5687 {
5688 ssize_t ret;
5689
5690 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
5691 bcaps, 1);
5692 if (ret != 1) {
5693 DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
5694 return ret >= 0 ? -EIO : ret;
5695 }
5696
5697 return 0;
5698 }
5699
5700 static
5701 int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
5702 bool *repeater_present)
5703 {
5704 ssize_t ret;
5705 u8 bcaps;
5706
5707 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
5708 if (ret)
5709 return ret;
5710
5711 *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
5712 return 0;
5713 }
5714
5715 static
5716 int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
5717 u8 *ri_prime)
5718 {
5719 ssize_t ret;
5720 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
5721 ri_prime, DRM_HDCP_RI_LEN);
5722 if (ret != DRM_HDCP_RI_LEN) {
5723 DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
5724 return ret >= 0 ? -EIO : ret;
5725 }
5726 return 0;
5727 }
5728
5729 static
5730 int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
5731 bool *ksv_ready)
5732 {
5733 ssize_t ret;
5734 u8 bstatus;
5735 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5736 &bstatus, 1);
5737 if (ret != 1) {
5738 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5739 return ret >= 0 ? -EIO : ret;
5740 }
5741 *ksv_ready = bstatus & DP_BSTATUS_READY;
5742 return 0;
5743 }
5744
5745 static
5746 int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
5747 int num_downstream, u8 *ksv_fifo)
5748 {
5749 ssize_t ret;
5750 int i;
5751
5752 /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
5753 for (i = 0; i < num_downstream; i += 3) {
5754 size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
5755 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5756 DP_AUX_HDCP_KSV_FIFO,
5757 ksv_fifo + i * DRM_HDCP_KSV_LEN,
5758 len);
5759 if (ret != len) {
5760 DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
5761 i, ret);
5762 return ret >= 0 ? -EIO : ret;
5763 }
5764 }
5765 return 0;
5766 }
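/*
 * Worked example: KSVs are DRM_HDCP_KSV_LEN (5) bytes and the FIFO
 * exposes at most 3 per read, so for num_downstream = 5 the loop above
 * issues two reads of 15 and 10 bytes, each within the 16 byte AUX
 * payload limit.
 */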
5767
5768 static
5769 int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
5770 int i, u32 *part)
5771 {
5772 ssize_t ret;
5773
5774 if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
5775 return -EINVAL;
5776
5777 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5778 DP_AUX_HDCP_V_PRIME(i), part,
5779 DRM_HDCP_V_PRIME_PART_LEN);
5780 if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
5781 DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
5782 return ret >= 0 ? -EIO : ret;
5783 }
5784 return 0;
5785 }
5786
5787 static
5788 int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
5789 bool enable)
5790 {
5791 /* Not used for single stream DisplayPort setups */
5792 return 0;
5793 }
5794
5795 static
5796 bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
5797 {
5798 ssize_t ret;
5799 u8 bstatus;
5800
5801 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5802 &bstatus, 1);
5803 if (ret != 1) {
5804 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5805 return false;
5806 }
5807
5808 return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
5809 }
5810
5811 static
5812 int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
5813 bool *hdcp_capable)
5814 {
5815 ssize_t ret;
5816 u8 bcaps;
5817
5818 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
5819 if (ret)
5820 return ret;
5821
5822 *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
5823 return 0;
5824 }
5825
5826 struct hdcp2_dp_errata_stream_type {
5827 u8 msg_id;
5828 u8 stream_type;
5829 } __packed;
5830
5831 struct hdcp2_dp_msg_data {
5832 u8 msg_id;
5833 u32 offset;
5834 bool msg_detectable;
5835 u32 timeout;
5836 u32 timeout2;
5837 };
5838
5839 static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
5840 { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 },
5841 { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
5842 false, HDCP_2_2_CERT_TIMEOUT_MS, 0 },
5843 { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
5844 false, 0, 0 },
5845 { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
5846 false, 0, 0 },
5847 { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
5848 true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
5849 HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS },
5850 { HDCP_2_2_AKE_SEND_PAIRING_INFO,
5851 DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
5852 HDCP_2_2_PAIRING_TIMEOUT_MS, 0 },
5853 { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 },
5854 { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
5855 false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 },
5856 { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
5857 0, 0 },
5858 { HDCP_2_2_REP_SEND_RECVID_LIST,
5859 DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
5860 HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 },
5861 { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
5862 0, 0 },
5863 { HDCP_2_2_REP_STREAM_MANAGE,
5864 DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
5865 0, 0 },
5866 { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
5867 false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 },
5868
5869 #define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
5870 { HDCP_2_2_ERRATA_DP_STREAM_TYPE,
5871 DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
5872 0, 0 },
5873 };
5874
5875 static inline
5876 int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
5877 u8 *rx_status)
5878 {
5879 ssize_t ret;
5880
5881 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5882 DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
5883 HDCP_2_2_DP_RXSTATUS_LEN);
5884 if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
5885 DRM_DEBUG_KMS("Read rx_status from DP/AUX failed (%zd)\n", ret);
5886 return ret >= 0 ? -EIO : ret;
5887 }
5888
5889 return 0;
5890 }
5891
5892 static
5893 int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
5894 u8 msg_id, bool *msg_ready)
5895 {
5896 u8 rx_status;
5897 int ret;
5898
5899 *msg_ready = false;
5900 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
5901 if (ret < 0)
5902 return ret;
5903
5904 switch (msg_id) {
5905 case HDCP_2_2_AKE_SEND_HPRIME:
5906 if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
5907 *msg_ready = true;
5908 break;
5909 case HDCP_2_2_AKE_SEND_PAIRING_INFO:
5910 if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
5911 *msg_ready = true;
5912 break;
5913 case HDCP_2_2_REP_SEND_RECVID_LIST:
5914 if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
5915 *msg_ready = true;
5916 break;
5917 default:
5918 DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
5919 return -EINVAL;
5920 }
5921
5922 return 0;
5923 }
5924
5925 static ssize_t
5926 intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
5927 const struct hdcp2_dp_msg_data *hdcp2_msg_data)
5928 {
5929 struct intel_dp *dp = &intel_dig_port->dp;
5930 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
5931 u8 msg_id = hdcp2_msg_data->msg_id;
5932 int ret, timeout;
5933 bool msg_ready = false;
5934
5935 if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
5936 timeout = hdcp2_msg_data->timeout2;
5937 else
5938 timeout = hdcp2_msg_data->timeout;
5939
5940 /*
5941  * There is no way to detect the CERT, LPRIME and STREAM_READY
5942  * availability. So wait for the timeout and then read the msg.
5943  */
5944 if (!hdcp2_msg_data->msg_detectable) {
5945 mdelay(timeout);
5946 ret = 0;
5947 } else {
5948 /*
5949  * As we want to check the msg availability at timeout, ignore
5950  * the timeout of the wait for CP_IRQ.
5951  */
5952 intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
5953 ret = hdcp2_detect_msg_availability(intel_dig_port,
5954 msg_id, &msg_ready);
5955 if (!msg_ready)
5956 ret = -ETIMEDOUT;
5957 }
5958
5959 if (ret)
5960 DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n",
5961 hdcp2_msg_data->msg_id, ret, timeout);
5962
5963 return ret;
5964 }
5965
5966 static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
5967 {
5968 int i;
5969
5970 for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++)
5971 if (hdcp2_dp_msg_data[i].msg_id == msg_id)
5972 return &hdcp2_dp_msg_data[i];
5973
5974 return NULL;
5975 }
5976
5977 static
5978 int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
5979 void *buf, size_t size)
5980 {
5981 struct intel_dp *dp = &intel_dig_port->dp;
5982 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
5983 unsigned int offset;
5984 u8 *byte = buf;
5985 ssize_t ret, bytes_to_write, len;
5986 const struct hdcp2_dp_msg_data *hdcp2_msg_data;
5987
5988 hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
5989 if (!hdcp2_msg_data)
5990 return -EINVAL;
5991
5992 offset = hdcp2_msg_data->offset;
5993
5994 /* No msg_id in DP HDCP2.2 msgs */
5995 bytes_to_write = size - 1;
5996 byte++;
5997
5998 hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
5999
6000 while (bytes_to_write) {
6001 len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
6002 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
6003
6004 ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
6005 offset, (void *)byte, len);
6006 if (ret < 0)
6007 return ret;
6008
6009 bytes_to_write -= ret;
6010 byte += ret;
6011 offset += ret;
6012 }
6013
6014 return size;
6015 }
6016
6017 static
6018 ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
6019 {
6020 u8 rx_info[HDCP_2_2_RXINFO_LEN];
6021 u32 dev_cnt;
6022 ssize_t ret;
6023
6024 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6025 DP_HDCP_2_2_REG_RXINFO_OFFSET,
6026 (void *)rx_info, HDCP_2_2_RXINFO_LEN);
6027 if (ret != HDCP_2_2_RXINFO_LEN)
6028 return ret >= 0 ? -EIO : ret;
6029
6030 dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
6031 HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
6032
6033 if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
6034 dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
6035
6036 ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
6037 HDCP_2_2_RECEIVER_IDS_MAX_LEN +
6038 (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
6039
6040 return ret;
6041 }
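/*
 * Worked example: DEV_COUNT is split across RxInfo, bit 0 of byte 0
 * being the high bit and bits 7:4 of byte 1 the low nibble. For
 * dev_cnt = 2 the function above returns
 * sizeof(struct hdcp2_rep_send_receiverid_list) minus the unused
 * 31-entry receiver ID array (155 bytes) plus 2 * 5 bytes of valid IDs.
 */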
6042
6043 static
6044 int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
6045 u8 msg_id, void *buf, size_t size)
6046 {
6047 unsigned int offset;
6048 u8 *byte = buf;
6049 ssize_t ret, bytes_to_recv, len;
6050 const struct hdcp2_dp_msg_data *hdcp2_msg_data;
6051
6052 hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
6053 if (!hdcp2_msg_data)
6054 return -EINVAL;
6055 offset = hdcp2_msg_data->offset;
6056
6057 ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
6058 if (ret < 0)
6059 return ret;
6060
6061 if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
6062 ret = get_receiver_id_list_size(intel_dig_port);
6063 if (ret < 0)
6064 return ret;
6065
6066 size = ret;
6067 }
6068 bytes_to_recv = size - 1;
6069
6070 /* DP adaptation msgs have no msg_id */
6071 byte++;
6072
6073 while (bytes_to_recv) {
6074 len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
6075 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
6076
6077 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
6078 (void *)byte, len);
6079 if (ret < 0) {
6080 DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
6081 return ret;
6082 }
6083
6084 bytes_to_recv -= ret;
6085 byte += ret;
6086 offset += ret;
6087 }
6088 byte = buf;
6089 *byte = msg_id;
6090
6091 return size;
6092 }

static
int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
				      bool is_repeater, u8 content_type)
{
	struct hdcp2_dp_errata_stream_type stream_type_msg;

	if (is_repeater)
		return 0;

	/*
	 * Errata for DP: since the stream type is used for encryption,
	 * the receiver must be told the stream type so it can decrypt
	 * the content. A repeater is told the stream type as part of
	 * its authentication later in time, so nothing to do for it.
	 */
	stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
	stream_type_msg.stream_type = content_type;

	return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
					sizeof(stream_type_msg));
}
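
/*
 * Poll RxStatus and map it to the HDCP state machine events, in priority
 * order: a reauthentication request trumps a link integrity failure, which
 * in turn trumps a pending receiver ID list (topology change).
 */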
static
int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
{
	u8 rx_status;
	int ret;

	ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
	if (ret)
		return ret;

	if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
		ret = HDCP_REAUTH_REQUEST;
	else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
		ret = HDCP_LINK_INTEGRITY_FAILURE;
	else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
		ret = HDCP_TOPOLOGY_CHANGE;

	return ret;
}

static
int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
			   bool *capable)
{
	u8 rx_caps[3];
	int ret;

	*capable = false;
	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
			       DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
			       rx_caps, HDCP_2_2_RXCAPS_LEN);
	if (ret != HDCP_2_2_RXCAPS_LEN)
		return ret >= 0 ? -EIO : ret;

	if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
	    HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
		*capable = true;

	return 0;
}
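
/*
 * Ops table handed to the HDCP core: the HDCP 1.x hooks (An/Aksv, Bksv,
 * Bstatus, Ri', KSV FIFO, V') and the HDCP 2.2 hooks above, all implemented
 * in terms of DPCD reads/writes over this port's AUX channel.
 */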
static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
	.write_an_aksv = intel_dp_hdcp_write_an_aksv,
	.read_bksv = intel_dp_hdcp_read_bksv,
	.read_bstatus = intel_dp_hdcp_read_bstatus,
	.repeater_present = intel_dp_hdcp_repeater_present,
	.read_ri_prime = intel_dp_hdcp_read_ri_prime,
	.read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
	.read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
	.read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
	.toggle_signalling = intel_dp_hdcp_toggle_signalling,
	.check_link = intel_dp_hdcp_check_link,
	.hdcp_capable = intel_dp_hdcp_capable,
	.write_2_2_msg = intel_dp_hdcp2_write_msg,
	.read_2_2_msg = intel_dp_hdcp2_read_msg,
	.config_stream_type = intel_dp_hdcp2_config_stream_type,
	.check_2_2_link = intel_dp_hdcp2_check_link,
	.hdcp_2_2_capable = intel_dp_hdcp2_capable,
	.protocol = HDCP_PROTOCOL_DP,
};

static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));

	edp_panel_vdd_schedule_off(intel_dp);
}

static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum pipe pipe;

	if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
				  encoder->port, &pipe))
		return pipe;

	return INVALID_PIPE;
}

void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
	intel_wakeref_t wakeref;

	if (!HAS_DDI(dev_priv))
		intel_dp->DP = I915_READ(intel_dp->output_reg);

	if (lspcon->active)
		lspcon_resume(lspcon);

	intel_dp->reset_link_params = true;

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			intel_dp->active_pipe = vlv_active_pipe(intel_dp);

		if (intel_dp_is_edp(intel_dp)) {
			/*
			 * Reinit the power sequencer, in case BIOS did
			 * something nasty with it.
			 */
			intel_dp_pps_init(intel_dp);
			intel_edp_panel_vdd_sanitize(intel_dp);
		}
	}
}

static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};

static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_digital_connector_atomic_check,
};

static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};

enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->base.port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->base.port),
		      long_hpd ? "long" : "short");

	if (long_hpd) {
		intel_dp->reset_link_params = true;
		return IRQ_NONE;
	}

	if (intel_dp->is_mst) {
		if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
			/*
			 * If we were in MST mode, and device is not
			 * there, get out of MST mode
			 */
			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
				      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);

			return IRQ_NONE;
		}
	}

	if (!intel_dp->is_mst) {
		bool handled;

		handled = intel_dp_short_pulse(intel_dp);

		if (!handled)
			return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
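
/*
 * Return value convention above: IRQ_HANDLED means the pulse was fully
 * serviced here; IRQ_NONE tells the caller that full hotplug processing
 * (reprobe/detect) is still needed for this port.
 */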

/* check the VBT to see whether the eDP is on another port */
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
{
	/*
	 * eDP not supported on g4x. So bail out early just
	 * for a bit extra safety in case the VBT is bonkers.
	 */
	if (INTEL_GEN(dev_priv) < 5)
		return false;

	if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
		return true;

	return intel_bios_is_port_edp(dev_priv, port);
}

static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	if (!IS_G4X(dev_priv) && port != PORT_A)
		intel_attach_force_audio_property(connector);

	intel_attach_broadcast_rgb_property(connector);
	if (HAS_GMCH(dev_priv))
		drm_connector_attach_max_bpc_property(connector, 6, 10);
	else if (INTEL_GEN(dev_priv) >= 5)
		drm_connector_attach_max_bpc_property(connector, 6, 12);

	if (intel_dp_is_edp(intel_dp)) {
		u32 allowed_scalers;

		allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
		if (!HAS_GMCH(dev_priv))
			allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);

		drm_connector_attach_scaling_mode_property(connector, allowed_scalers);

		connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
	}
}

static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->panel_power_off_time = ktime_get_boottime();
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}

static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, pp_ctl;
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	pp_ctl = ironlake_get_pp_control(intel_dp);

	/* Ensure PPS is unlocked */
	if (!HAS_DDI(dev_priv))
		I915_WRITE(regs.pp_ctrl, pp_ctl);

	pp_on = I915_READ(regs.pp_on);
	pp_off = I915_READ(regs.pp_off);

	/* Pull timing values out of registers */
	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);

	if (i915_mmio_reg_valid(regs.pp_div)) {
		u32 pp_div;

		pp_div = I915_READ(regs.pp_div);

		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
	} else {
		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
	}
}
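
/*
 * Unit convention in struct edp_power_seq: all delays are kept in 100 us
 * steps, matching the hardware fields. The power-cycle delay is stored in
 * 100 ms steps in hardware, hence the * 1000 above to bring it into the
 * common 100 us unit (e.g. a raw value of 5 becomes 5000, i.e. 500 ms).
 */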

static void
intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
{
	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      state_name,
		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}

static void
intel_pps_verify_state(struct intel_dp *intel_dp)
{
	struct edp_power_seq hw;
	struct edp_power_seq *sw = &intel_dp->pps_delays;

	intel_pps_readout_hw_state(intel_dp, &hw);

	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
		DRM_ERROR("PPS state mismatch\n");
		intel_pps_dump_state("sw", sw);
		intel_pps_dump_state("hw", &hw);
	}
}

static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	intel_pps_readout_hw_state(intel_dp, &cur);

	intel_pps_dump_state("cur", &cur);

	vbt = dev_priv->vbt.edp.pps;

	/*
	 * On the Toshiba Satellite P50-C-18C the VBT T12 delay of 500ms
	 * appears to be too short: occasionally the panel fails to power
	 * back on. Increasing the delay avoids this problem.
	 */
	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
		DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
			      vbt.t11_t12);
	}

	/*
	 * T11_T12 is special: the hw counts it in 100ms units, zero based
	 * (so 100 ms must be added), while the VBT value is kept in the
	 * common 100us units like everything else.
	 */
	vbt.t11_t12 += 100 * 10;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units
	 * of our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10;
	spec.t9 = 50 * 10;
	spec.t10 = 500 * 10;
	/* This one is special, actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). */
	spec.t11_t12 = (510 + 100) * 10;

	intel_pps_dump_state("vbt", &vbt);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;

	/*
	 * HW has only a 100msec granularity for t11_t12 so round it up
	 * accordingly.
	 */
	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
}
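
/*
 * Worked example of the unit handling above: spec.t1_t3 = 210 * 10 = 2100
 * in 100 us units, i.e. 210 ms; get_delay() then divides by 10 to give the
 * millisecond value (panel_power_up_delay = 210) used by the wait helpers.
 */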

static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, port_sel = 0;
	int div = dev_priv->rawclk_freq / 1000;
	struct pps_registers regs;
	enum port port = dp_to_dig_port(intel_dp)->base.port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_get_registers(intel_dp, &regs);

	/*
	 * On some VLV machines the BIOS can leave the VDD
	 * enabled even on power sequencers which aren't
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * edp_panel_vdd_on() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
	 * soon as the new power sequencer gets initialized.
	 */
	if (force_disable_vdd) {
		u32 pp = ironlake_get_pp_control(intel_dp);

		WARN(pp & PANEL_POWER_ON, "Panel power already on\n");

		if (pp & EDP_FORCE_VDD)
			DRM_DEBUG_KMS("VDD already on, disabling first\n");

		pp &= ~EDP_FORCE_VDD;

		I915_WRITE(regs.pp_ctrl, pp);
	}

	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
		 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		switch (port) {
		case PORT_A:
			port_sel = PANEL_PORT_SELECT_DPA;
			break;
		case PORT_C:
			port_sel = PANEL_PORT_SELECT_DPC;
			break;
		case PORT_D:
			port_sel = PANEL_PORT_SELECT_DPD;
			break;
		default:
			MISSING_CASE(port);
			break;
		}
	}

	pp_on |= port_sel;

	I915_WRITE(regs.pp_on, pp_on);
	I915_WRITE(regs.pp_off, pp_off);

	/*
	 * Compute the divisor for the pp clock, simply match the Bspec formula.
	 */
	if (i915_mmio_reg_valid(regs.pp_div)) {
		I915_WRITE(regs.pp_div,
			   REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
			   REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
	} else {
		u32 pp_ctl;

		pp_ctl = I915_READ(regs.pp_ctrl);
		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
		I915_WRITE(regs.pp_ctrl, pp_ctl);
	}

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(regs.pp_on),
		      I915_READ(regs.pp_off),
		      i915_mmio_reg_valid(regs.pp_div) ?
		      I915_READ(regs.pp_div) :
		      (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}

static void intel_dp_pps_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_initial_power_sequencer_setup(intel_dp);
	} else {
		intel_dp_init_panel_power_sequencer(intel_dp);
		intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
	}
}

/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev_priv: i915 device
 * @crtc_state: a pointer to the active intel_crtc_state
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state,
				    int refresh_rate)
{
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be a positive non-zero value\n");
		return;
	}

	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
	    refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS("DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!crtc_state->base.active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(crtc_state, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(crtc_state, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refreshrate type\n");
		}
	} else if (INTEL_GEN(dev_priv) > 6) {
		i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
		u32 val;

		val = I915_READ(reg);
		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
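
/*
 * Mechanism note: on gen8+ (except CHV) the RR switch is done by selecting
 * the precomputed M1/N1 or M2/N2 link timing set, while gen7/VLV/CHV toggle
 * a PIPECONF refresh-rate-switch bit instead. Either way the mode timings
 * themselves stay untouched, which is what keeps the switch seamless.
 */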

/**
 * intel_edp_drrs_enable - init drrs struct and enable drrs
 * @intel_dp: DP struct
 * @crtc_state: A pointer to the active crtc state.
 *
 * Initializes frontbuffer_bits and drrs.dp
 */
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!crtc_state->has_drrs) {
		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
		return;
	}

	if (dev_priv->psr.enabled) {
		DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
		return;
	}

	mutex_lock(&dev_priv->drrs.mutex);
	if (dev_priv->drrs.dp) {
		DRM_DEBUG_KMS("DRRS already enabled\n");
		goto unlock;
	}

	dev_priv->drrs.busy_frontbuffer_bits = 0;

	dev_priv->drrs.dp = intel_dp;

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 * @old_crtc_state: Pointer to old crtc_state.
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, old_crtc_state,
			intel_dp->attached_connector->panel.fixed_mode->vrefresh);

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	cancel_delayed_work_sync(&dev_priv->drrs.work);
}

static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;

		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
			intel_dp->attached_connector->panel.downclock_mode->vrefresh);
	}

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes
 * starts, hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
			       unsigned int frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
			dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or a flip on a crtc is completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR), and idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
			dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				      msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}
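
/*
 * Typical idleness sequence, for reference: rendering dirties a plane ->
 * intel_edp_drrs_invalidate() upclocks to HIGH_RR; the flip/flush then calls
 * intel_edp_drrs_flush(), which clears the busy bits and arms the 1 second
 * downclock work; if nothing dirties the screen in that window, the work
 * switches the panel to LOW_RR.
 */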

/**
 * DOC: Display Refresh Rate Switching (DRRS)
 *
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates dynamically,
 * based on the usage scenario. This feature is applicable for internal
 * panels.
 *
 * Indication that the panel supports DRRS is given by the panel EDID, which
 * would list multiple refresh rates for one resolution.
 *
 * DRRS is of 2 types - static and seamless.
 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
 * (may appear as a blink on screen) and is used in dock-undock scenarios.
 * Seamless DRRS involves changing RR without any visual effect to the user
 * and can be used during normal system usage. This is done by programming
 * certain registers.
 *
 * Support for static/seamless DRRS may be indicated in the VBT based on
 * inputs from the panel spec.
 *
 * DRRS saves power by switching to a low RR based on usage scenarios.
 *
 * The implementation is based on frontbuffer tracking. When there is a
 * disturbance on the screen triggered by user activity or a periodic system
 * activity, DRRS is disabled (RR is changed to high RR). When there is no
 * movement on screen, after a timeout of 1 second, a switch to low RR is
 * made.
 *
 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
 * and intel_edp_drrs_flush() are called.
 *
 * DRRS can be further extended to support other internal panels and also
 * the scenario of video playback wherein RR is set based on the rate
 * requested by userspace.
 */

/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *connector,
		   struct drm_display_mode *fixed_mode)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct drm_display_mode *downclock_mode = NULL;

	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	if (INTEL_GEN(dev_priv) <= 6) {
		DRM_DEBUG_KMS("DRRS is only supported on Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
		return NULL;
	}

	downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
	if (!downclock_mode) {
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}

static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	enum pipe pipe = INVALID_PIPE;
	intel_wakeref_t wakeref;
	struct edid *edid;

	if (!intel_dp_is_edp(intel_dp))
		return true;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		DRM_INFO("LVDS was detected, not registering eDP\n");

		return false;
	}

	with_pps_lock(intel_dp, wakeref) {
		intel_dp_init_panel_power_timestamps(intel_dp);
		intel_dp_pps_init(intel_dp);
		intel_edp_panel_vdd_sanitize(intel_dp);
	}

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_connector_update_edid_property(connector, edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
	if (fixed_mode)
		downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);

	/* fallback to VBT if available for eDP */
	if (!fixed_mode)
		fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if
		 * that fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	if (fixed_mode)
		drm_connector_init_panel_orientation_property(
			connector, fixed_mode->hdisplay, fixed_mode->vdisplay);

	return true;

out_vdd_off:
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);

	return false;
}
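
/*
 * Link-failure fallback: once link training has exhausted its fallback
 * values, this work marks the connector's link-status property as BAD and
 * fires a hotplug uevent, prompting userspace to probe again and pick a
 * mode that fits the reduced link parameters.
 */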
static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
	struct intel_connector *intel_connector;
	struct drm_connector *connector;

	intel_connector = container_of(work, typeof(*intel_connector),
				       modeset_retry_work);
	connector = &intel_connector->base;
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
		      connector->name);

	/* Grab the locks before changing connector property */
	mutex_lock(&connector->dev->mode_config.mutex);

	/*
	 * Set connector link status to BAD and send a Uevent to notify
	 * userspace to do a modeset.
	 */
	drm_connector_set_link_status_property(connector,
					       DRM_MODE_LINK_STATUS_BAD);
	mutex_unlock(&connector->dev->mode_config.mutex);

	drm_kms_helper_hotplug_event(connector->dev);
}

bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	enum phy phy = intel_port_to_phy(dev_priv, port);
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	if (WARN(intel_dig_port->max_lanes < 1,
		 "Not enough lanes (%d) for DP on port %c\n",
		 intel_dig_port->max_lanes, port_name(port)))
		return false;

	intel_dp_set_source_rates(intel_dp);

	intel_dp->reset_link_params = true;
	intel_dp->pps_pipe = INVALID_PIPE;
	intel_dp->active_pipe = INVALID_PIPE;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_port_edp(dev_priv, port)) {
		/*
		 * Currently we don't support eDP on TypeC ports, although in
		 * theory it could work on TypeC legacy ports.
		 */
		WARN_ON(intel_phy_is_tc(dev_priv, phy));
		type = DRM_MODE_CONNECTOR_eDP;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->active_pipe = vlv_active_pipe(intel_dp);

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
		    intel_dp_is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
		      type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		      port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	if (!HAS_GMCH(dev_priv))
		connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	if (INTEL_GEN(dev_priv) >= 11)
		connector->ycbcr_420_allowed = true;

	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);

	intel_dp_aux_init(intel_dp);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* init MST on ports that can support it */
	intel_dp_mst_encoder_init(intel_dig_port,
				  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(intel_dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);

		if (ret)
			DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
	}

	/*
	 * For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so compromises the quality of the link
	 * and higher bandwidth configurations may not work.
	 */
	if (IS_G45(dev_priv)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);

		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}

bool intel_dp_init(struct drm_i915_private *dev_priv,
		   i915_reg_t output_reg,
		   enum port port)
{
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return false;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
			     &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
			     "DP %c", port_name(port)))
		goto err_encoder_init;

	intel_encoder->hotplug = intel_dp_hotplug;
	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->update_pipe = intel_panel_update_backlight;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	if (IS_CHERRYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		intel_encoder->disable = g4x_disable_dp;
		intel_encoder->post_disable = g4x_post_disable_dp;
	}

	intel_dig_port->dp.output_reg = output_reg;
	intel_dig_port->max_lanes = 4;

	intel_encoder->type = INTEL_OUTPUT_DP;
	intel_encoder->power_domain = intel_port_to_power_domain(port);
	if (IS_CHERRYVIEW(dev_priv)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;
	intel_encoder->port = port;

	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;

	if (port != PORT_A)
		intel_infoframe_init(intel_dig_port);

	intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

	return true;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);
	return false;
}
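
/*
 * Note the unwind order in intel_dp_init() above: connector init failure
 * tears down the encoder, encoder init failure frees the connector, and
 * only then is the digital port itself freed, mirroring allocation order.
 */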

void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);

		if (!intel_dp->can_mst)
			continue;

		if (intel_dp->is_mst)
			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
	}
}

void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;
		int ret;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);

		if (!intel_dp->can_mst)
			continue;

		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
		if (ret) {
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							false);
		}
	}
}