This source file includes the following definitions:
- drm_dp_mst_req_type_str
- drm_dp_mst_nak_reason_str
- drm_dp_msg_header_crc4
- drm_dp_msg_data_crc4
- drm_dp_calc_sb_hdr_size
- drm_dp_encode_sideband_msg_hdr
- drm_dp_decode_sideband_msg_hdr
- drm_dp_encode_sideband_req
- drm_dp_crc_sideband_chunk_req
- drm_dp_encode_sideband_reply
- drm_dp_sideband_msg_build
- drm_dp_sideband_parse_link_address
- drm_dp_sideband_parse_remote_dpcd_read
- drm_dp_sideband_parse_remote_dpcd_write
- drm_dp_sideband_parse_remote_i2c_read_ack
- drm_dp_sideband_parse_enum_path_resources_ack
- drm_dp_sideband_parse_allocate_payload_ack
- drm_dp_sideband_parse_query_payload_ack
- drm_dp_sideband_parse_power_updown_phy_ack
- drm_dp_sideband_parse_reply
- drm_dp_sideband_parse_connection_status_notify
- drm_dp_sideband_parse_resource_status_notify
- drm_dp_sideband_parse_req
- build_dpcd_write
- build_link_address
- build_enum_path_resources
- build_allocate_payload
- build_power_updown_phy
- drm_dp_mst_assign_payload_id
- drm_dp_mst_put_payload_id
- check_txmsg_state
- drm_dp_mst_wait_tx_reply
- drm_dp_add_mst_branch_device
- drm_dp_free_mst_branch_device
- drm_dp_mst_get_mstb_malloc
- drm_dp_mst_put_mstb_malloc
- drm_dp_free_mst_port
- drm_dp_mst_get_port_malloc
- drm_dp_mst_put_port_malloc
- drm_dp_destroy_mst_branch_device
- drm_dp_mst_topology_try_get_mstb
- drm_dp_mst_topology_get_mstb
- drm_dp_mst_topology_put_mstb
- drm_dp_port_teardown_pdt
- drm_dp_destroy_port
- drm_dp_mst_topology_try_get_port
- drm_dp_mst_topology_get_port
- drm_dp_mst_topology_put_port
- drm_dp_mst_topology_get_mstb_validated_locked
- drm_dp_mst_topology_get_mstb_validated
- drm_dp_mst_topology_get_port_validated_locked
- drm_dp_mst_topology_get_port_validated
- drm_dp_get_port
- drm_dp_calculate_rad
- drm_dp_port_setup_pdt
- drm_dp_mst_dpcd_read
- drm_dp_mst_dpcd_write
- drm_dp_check_mstb_guid
- build_mst_prop_path
- drm_dp_mst_connector_late_register
- drm_dp_mst_connector_early_unregister
- drm_dp_add_port
- drm_dp_update_port
- drm_dp_get_mst_branch_device
- get_mst_branch_device_by_guid_helper
- drm_dp_get_mst_branch_device_by_guid
- drm_dp_check_and_send_link_address
- drm_dp_mst_link_probe_work
- drm_dp_validate_guid
- build_dpcd_read
- drm_dp_send_sideband_msg
- set_hdr_from_dst_qlock
- process_single_tx_qlock
- process_single_down_tx_qlock
- process_single_up_tx_qlock
- drm_dp_queue_down_tx
- drm_dp_send_link_address
- drm_dp_send_enum_path_resources
- drm_dp_get_last_connected_port_to_mstb
- drm_dp_get_last_connected_port_and_mstb
- drm_dp_payload_send_msg
- drm_dp_send_power_updown_phy
- drm_dp_create_payload_step1
- drm_dp_create_payload_step2
- drm_dp_destroy_payload_step1
- drm_dp_destroy_payload_step2
- drm_dp_update_payload_part1
- drm_dp_update_payload_part2
- drm_dp_send_dpcd_read
- drm_dp_send_dpcd_write
- drm_dp_encode_up_ack_reply
- drm_dp_send_up_ack_reply
- drm_dp_get_vc_payload_bw
- drm_dp_mst_topology_mgr_set_mst
- drm_dp_mst_topology_mgr_suspend
- drm_dp_mst_topology_mgr_resume
- drm_dp_get_one_sb_msg
- drm_dp_mst_handle_down_rep
- drm_dp_mst_handle_up_req
- drm_dp_mst_hpd_irq
- drm_dp_mst_detect_port
- drm_dp_mst_port_has_audio
- drm_dp_mst_get_edid
- drm_dp_find_vcpi_slots
- drm_dp_init_vcpi
- drm_dp_atomic_find_vcpi_slots
- drm_dp_atomic_release_vcpi_slots
- drm_dp_mst_allocate_vcpi
- drm_dp_mst_get_vcpi_slots
- drm_dp_mst_reset_vcpi_slots
- drm_dp_mst_deallocate_vcpi
- drm_dp_dpcd_write_payload
- drm_dp_check_act_status
- drm_dp_calc_pbn_mode
- test_calc_pbn_mode
- drm_dp_mst_kick_tx
- drm_dp_mst_dump_mstb
- dump_dp_payload_table
- fetch_monitor_name
- drm_dp_mst_dump_topology
- drm_dp_tx_work
- drm_dp_destroy_connector_work
- drm_dp_mst_duplicate_state
- drm_dp_mst_destroy_state
- drm_dp_mst_atomic_check_topology_state
- drm_dp_mst_atomic_check
- drm_atomic_get_mst_topology_state
- drm_dp_mst_topology_mgr_init
- drm_dp_mst_topology_mgr_destroy
- remote_i2c_read_ok
- drm_dp_mst_i2c_xfer
- drm_dp_mst_i2c_functionality
- drm_dp_mst_register_i2c_bus
- drm_dp_mst_unregister_i2c_bus
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "drm_crtc_helper_internal.h"
/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);
static int test_calc_pbn_mode(void);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);

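/*
 * DP_STR() builds sparse, value-indexed string tables using designated array
 * initializers: DP_STR(LINK_ADDRESS) expands to
 * [DP_LINK_ADDRESS] = "LINK_ADDRESS". Gaps in the table are NULL, which the
 * lookup helpers below report as "unknown".
 */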
#define DP_STR(x) [DP_ ## x] = #x

static const char *drm_dp_mst_req_type_str(u8 req_type)
{
	static const char * const req_type_str[] = {
		DP_STR(GET_MSG_TRANSACTION_VERSION),
		DP_STR(LINK_ADDRESS),
		DP_STR(CONNECTION_STATUS_NOTIFY),
		DP_STR(ENUM_PATH_RESOURCES),
		DP_STR(ALLOCATE_PAYLOAD),
		DP_STR(QUERY_PAYLOAD),
		DP_STR(RESOURCE_STATUS_NOTIFY),
		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
		DP_STR(REMOTE_DPCD_READ),
		DP_STR(REMOTE_DPCD_WRITE),
		DP_STR(REMOTE_I2C_READ),
		DP_STR(REMOTE_I2C_WRITE),
		DP_STR(POWER_UP_PHY),
		DP_STR(POWER_DOWN_PHY),
		DP_STR(SINK_EVENT_NOTIFY),
		DP_STR(QUERY_STREAM_ENC_STATUS),
	};

	if (req_type >= ARRAY_SIZE(req_type_str) ||
	    !req_type_str[req_type])
		return "unknown";

	return req_type_str[req_type];
}

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
	static const char * const nak_reason_str[] = {
		DP_STR(WRITE_FAILURE),
		DP_STR(INVALID_READ),
		DP_STR(CRC_FAILURE),
		DP_STR(BAD_PARAM),
		DP_STR(DEFER),
		DP_STR(LINK_FAILURE),
		DP_STR(NO_RESOURCES),
		DP_STR(DPCD_FAIL),
		DP_STR(I2C_NAK),
		DP_STR(ALLOCATE_FAIL),
	};

	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
	    !nak_reason_str[nak_reason])
		return "unknown";

	return nak_reason_str[nak_reason];
}

#undef DP_STR

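/*
 * Bit-serial CRC-4 over the first num_nibbles nibbles of data, using the
 * generator polynomial x^4 + x + 1 (0x13 with the implicit x^4 term), as
 * used for the sideband message header CRC. Four zero bits are shifted in
 * at the end to flush the remainder.
 */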
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

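/*
 * Bit-serial CRC-8 over number_of_bytes bytes of data, generator polynomial
 * x^8 + x^7 + x^6 + x^4 + x^2 + 1 (0xd5 with the implicit x^8 term), as used
 * for the sideband message body CRC. Eight zero bits flush the remainder.
 */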
static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}

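/*
 * A sideband message header is 1 byte of LCT/LCR, lct/2 bytes of RAD (one
 * nibble per hop beyond the first, rounded down), 1 byte of
 * broadcast/path/length and 1 byte of SOMT/EOMT/seqno/CRC-4 -- i.e.
 * 3 + lct/2 bytes in total.
 */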
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;
	size += (hdr->lct / 2);
	return size;
}

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;
	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		     (hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}
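/*
 * Worked example for the encoder above (values chosen for illustration):
 * lct = 1, lcr = 0, msg_len = 2, somt = eomt = 1, seqno = 0 encodes to the
 * three bytes 0x10 0x02 0xcb, where the low nibble 0xb of the last byte is
 * the CRC-4 over the preceding five nibbles.
 */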

static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;
	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
				       struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;
	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				   (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;

	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	}
	raw->cur_len = idx;
}

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;
	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

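/*
 * Reassemble a sideband message from up to 48-byte chunks: the first chunk
 * of each transaction carries a header (hdr == true) and sets SOMT,
 * subsequent chunks are pure payload, and EOMT marks the final chunk.
 * Payload bytes accumulate in msg->chunk until curchunk_len is reached and
 * are then appended to msg->msg without the per-chunk CRC byte.
 */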
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
{
	int ret;
	u8 crc4;

	if (hdr) {
		u8 hdrlen;
		struct drm_dp_sideband_msg_hdr recv_hdr;
		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
		if (ret == false) {
			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
			return false;
		}

		/*
		 * ignore out-of-order messages - a chunk without SOMT must
		 * follow one that had it
		 */
		if (!recv_hdr.somt && !msg->have_somt)
			return false;

		/* get length contained in this portion */
		msg->curchunk_len = recv_hdr.msg_len;
		msg->curchunk_hdrlen = hdrlen;

		/* we have already gotten an somt - don't bother parsing */
		if (recv_hdr.somt && msg->have_somt)
			return false;

		if (recv_hdr.somt) {
			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
			msg->have_somt = true;
		}
		if (recv_hdr.eomt)
			msg->have_eomt = true;

		/* copy the bytes for the remainder of this header chunk */
		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	} else {
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
		msg->curchunk_idx += replybuflen;
	}

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}

static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;
	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;
		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;

	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen) {
		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
			      idx, raw->curlen);
		return false;
	}
	return true;
}

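/*
 * Byte 0 of every reply carries the ACK/NAK bit in bit 7 and echoes the
 * request type in bits 6:0; NAK replies additionally carry the device GUID,
 * a reason code and one byte of NAK data.
 */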
static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
	default:
		DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}

static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
				  u8 vcpi, uint16_t pbn,
				  u8 number_sdp_streams,
				  u8 *sdp_stream_sink)
{
	struct drm_dp_sideband_msg_req_body req;
	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
	       number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
				  int port_num, bool power_up)
{
	struct drm_dp_sideband_msg_req_body req;

	if (power_up)
		req.req_type = DP_POWER_UP_PHY;
	else
		req.req_type = DP_POWER_DOWN_PHY;

	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

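/*
 * Payload ids and VCPIs come from two independent bitmasks searched over
 * max_payloads + 1 bits (bit 0 of each mask is set aside when the topology
 * manager is initialised), so a successful allocation returns an id in
 * [1, max_payloads] and stores the proposed vcpi at index id - 1.
 */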
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;
	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i])
			if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
				mgr->proposed_vcpis[i] = NULL;
				clear_bit(i + 1, &mgr->payload_mask);
			}
	}
	mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the
	 * two cases we check here are terminal states. For those the
	 * barriers provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	int ret;

	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
				 (4 * HZ));
	mutex_lock(&mstb->mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);
		}

		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;
		}
	}
out:
	mutex_unlock(&mgr->qlock);

	return ret;
}

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->topology_kref);
	kref_init(&mstb->malloc_kref);
	return mstb;
}

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, malloc_kref);

	if (mstb->port_parent)
		drm_dp_mst_put_port_malloc(mstb->port_parent);

	kfree(mstb);
}

/**
 * DOC: Branch device and port refcounting
 *
 * MST branch devices and ports carry two kinds of refcounts: a topology
 * refcount (&drm_dp_mst_branch.topology_kref,
 * &drm_dp_mst_port.topology_kref) and a malloc refcount
 * (&drm_dp_mst_branch.malloc_kref, &drm_dp_mst_port.malloc_kref).
 *
 * The topology refcount keeps a branch device or port reachable as part of
 * the in-memory topology; when it drops to zero the device is torn down and
 * unlinked from the topology. New topology references may only be acquired
 * through the drm_dp_mst_topology_try_get_*() helpers, which fail once the
 * count has already hit zero.
 *
 * The malloc refcount only keeps the memory allocation itself alive, so
 * that pointers held by drivers stay valid even after the device has been
 * removed from the topology. A port holds a malloc reference on its parent
 * branch device, and a branch device holds one on its parent port, so the
 * chain of allocations stays valid from any leaf upwards.
 */

/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 */
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	kref_get(&mstb->malloc_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}

/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * The branch device is freed once the last malloc reference is dropped.
 */
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}

static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, malloc_kref);

	drm_dp_mst_put_mstb_malloc(port->parent);
	kfree(port);
}

/**
 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
 *
 * Keeps the memory allocation for @port (but not the port's place in the
 * topology) alive until the matching drm_dp_mst_put_port_malloc() call.
 */
void
drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
{
	kref_get(&port->malloc_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
}
EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);

/**
 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
 *
 * The port is freed once the last malloc reference is dropped.
 */
void
drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
{
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	struct drm_dp_mst_port *port, *tmp;
	bool wake_tx = false;

	mutex_lock(&mgr->lock);
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
		list_del(&port->next);
		drm_dp_mst_topology_put_port(port);
	}
	mutex_unlock(&mgr->lock);

	/* drop any tx slot msg */
	mutex_lock(&mstb->mgr->qlock);
	if (mstb->tx_slots[0]) {
		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[0] = NULL;
		wake_tx = true;
	}
	if (mstb->tx_slots[1]) {
		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[1] = NULL;
		wake_tx = true;
	}
	mutex_unlock(&mstb->mgr->qlock);

	if (wake_tx)
		wake_up_all(&mstb->mgr->tx_waitq);

	drm_dp_mst_put_mstb_malloc(mstb);
}

/**
 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
 * branch device unless it's zero
 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Returns: nonzero if the topology refcount was incremented, zero if @mstb
 * has already been removed from the topology.
 */
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
	int ret = kref_get_unless_zero(&mstb->topology_kref);

	if (ret)
		DRM_DEBUG("mstb %p (%d)\n", mstb,
			  kref_read(&mstb->topology_kref));

	return ret;
}

/**
 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
 * branch device
 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * May only be used when the caller already holds a topology reference, as
 * checked by the WARN_ON() below.
 */
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
	WARN_ON(kref_read(&mstb->topology_kref) == 0);
	kref_get(&mstb->topology_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
}

/**
 * drm_dp_mst_topology_put_mstb() - Release a topology reference to a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to release the topology reference of
 *
 * The branch device is destroyed once the last topology reference is
 * dropped.
 */
static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
	DRM_DEBUG("mstb %p (%d)\n",
		  mstb, kref_read(&mstb->topology_kref) - 1);
	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
}

static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
	struct drm_dp_mst_branch *mstb;

	switch (old_pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* remove i2c over sideband */
		drm_dp_mst_unregister_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		mstb = port->mstb;
		port->mstb = NULL;
		drm_dp_mst_topology_put_mstb(mstb);
		break;
	}
}

static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	if (!port->input) {
		kfree(port->cached_edid);

		/*
		 * The only time we don't have a connector
		 * on an output port is if the connector init
		 * fails.
		 */
		if (port->connector) {
			/* we can't destroy the connector here, as
			 * we might be holding the mode_config.mutex
			 * from an EDID retrieval */
			mutex_lock(&mgr->destroy_connector_lock);
			list_add(&port->next, &mgr->destroy_connector_list);
			mutex_unlock(&mgr->destroy_connector_lock);
			schedule_work(&mgr->destroy_connector_work);
			return;
		}

		/* no connector means no vcpi was ever set up */
		drm_dp_port_teardown_pdt(port, port->pdt);
		port->pdt = DP_PEER_DEVICE_NONE;
	}
	drm_dp_mst_put_port_malloc(port);
}

/**
 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
 * port unless it's zero
 * @port: &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Returns: nonzero if the topology refcount was incremented, zero if @port
 * has already been removed from the topology.
 */
static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
	int ret = kref_get_unless_zero(&port->topology_kref);

	if (ret)
		DRM_DEBUG("port %p (%d)\n", port,
			  kref_read(&port->topology_kref));

	return ret;
}

/**
 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
 *
 * May only be used when the caller already holds a topology reference, as
 * checked by the WARN_ON() below.
 */
static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
{
	WARN_ON(kref_read(&port->topology_kref) == 0);
	kref_get(&port->topology_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
}

/**
 * drm_dp_mst_topology_put_port() - Release a topology reference to a port
 * @port: The &struct drm_dp_mst_port to release the topology reference of
 *
 * The port is destroyed once the last topology reference is dropped.
 */
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{
	DRM_DEBUG("port %p (%d)\n",
		  port, kref_read(&port->topology_kref) - 1);
	kref_put(&port->topology_kref, drm_dp_destroy_port);
}

static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_branch *to_find)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;

	if (to_find == mstb)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->mstb) {
			rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
			    port->mstb, to_find);
			if (rmstb)
				return rmstb;
		}
	}
	return NULL;
}

static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_branch *rmstb = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
		    mgr->mst_primary, mstb);

		if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
			rmstb = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_port *to_find)
{
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find)
			return port;

		if (port->mstb) {
			mport = drm_dp_mst_topology_get_port_validated_locked(
			    port->mstb, to_find);
			if (mport)
				return mport;
		}
	}
	return NULL;
}

static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *rport = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rport = drm_dp_mst_topology_get_port_validated_locked(
		    mgr->mst_primary, port);

		if (rport && !drm_dp_mst_topology_try_get_port(rport))
			rport = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rport;
}

static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
	struct drm_dp_mst_port *port;
	int ret;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			ret = drm_dp_mst_topology_try_get_port(port);
			return ret ? port : NULL;
		}
	}

	return NULL;
}

/*
 * Calculate a new RAD for this MST branch device.
 * If the parent branch has an LCT of 2 it has one nibble of RAD,
 * if it has an LCT of 3 it has two nibbles of RAD, and so on.
 */
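/*
 * Worked example (values assumed for illustration): for a port numbered 5
 * on a parent branch with lct = 2 and rad[0] = 0x10, the result is lct = 3
 * with rad[0] = 0x15 -- hop 1 in the high nibble, hop 2 in the low nibble.
 */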
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
			       u8 *rad)
{
	int parent_lct = port->parent->lct;
	int shift = 4;
	int idx = (parent_lct - 1) / 2;
	if (parent_lct > 1) {
		memcpy(rad, port->parent->rad, idx + 1);
		shift = (parent_lct % 2) ? 4 : 0;
	} else
		rad[0] = 0;

	rad[idx] |= port->port_num << shift;
	return parent_lct + 1;
}

/*
 * Returns true if a link address request should be sent to the newly
 * attached branch device.
 */
static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
{
	int ret;
	u8 rad[6], lct;
	bool send_link = false;
	switch (port->pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* add i2c over sideband */
		ret = drm_dp_mst_register_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		lct = drm_dp_calculate_rad(port, rad);

		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
		if (port->mstb) {
			port->mstb->mgr = port->mgr;
			port->mstb->port_parent = port;
			/*
			 * Make sure this port's memory allocation stays
			 * around until its child MSTB releases it
			 */
			drm_dp_mst_get_port_malloc(port);

			send_link = true;
		}
		break;
	}
	return send_link;
}

/**
 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
 * @aux: Fake sideband AUX CH
 * @offset: address of the (first) register to read
 * @buffer: buffer to store the register values
 * @size: number of bytes in @buffer
 *
 * Performs the same functionality for remote devices via
 * sideband messaging as drm_dp_dpcd_read() does for local
 * devices via actual AUX CH.
 *
 * Return: Number of bytes read, or negative error code on failure.
 */
ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
			     unsigned int offset, void *buffer, size_t size)
{
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
						    aux);

	return drm_dp_send_dpcd_read(port->mgr, port,
				     offset, size, buffer);
}

/**
 * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
 * @aux: Fake sideband AUX CH
 * @offset: address of the (first) register to write
 * @buffer: buffer containing the values to write
 * @size: number of bytes in @buffer
 *
 * Performs the same functionality for remote devices via
 * sideband messaging as drm_dp_dpcd_write() does for local
 * devices via actual AUX CH.
 *
 * Return: Number of bytes written, or negative error code on failure.
 */
ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
			      unsigned int offset, void *buffer, size_t size)
{
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
						    aux);

	return drm_dp_send_dpcd_write(port->mgr, port,
				      offset, size, buffer);
}

static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
	int ret;

	memcpy(mstb->guid, guid, 16);

	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
		if (mstb->port_parent) {
			ret = drm_dp_send_dpcd_write(
					mstb->mgr,
					mstb->port_parent,
					DP_GUID,
					16,
					mstb->guid);
		} else {
			ret = drm_dp_dpcd_write(
					mstb->mgr->aux,
					DP_GUID,
					mstb->guid,
					16);
		}
	}
}

static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
				int pnum,
				char *proppath,
				size_t proppath_size)
{
	int i;
	char temp[8];
	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
	for (i = 0; i < (mstb->lct - 1); i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
		snprintf(temp, sizeof(temp), "-%d", port_num);
		strlcat(proppath, temp, proppath_size);
	}
	snprintf(temp, sizeof(temp), "-%d", pnum);
	strlcat(proppath, temp, proppath_size);
}
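/*
 * Example (values assumed for illustration): conn_base_id 5, a branch at
 * lct = 2 reached through port 1, and pnum = 8 produce the property path
 * "mst:5-1-8".
 */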

/**
 * drm_dp_mst_connector_late_register() - Late MST connector registration
 * @connector: The MST connector
 * @port: The MST port for this connector
 *
 * Helper to register the remote aux device for this MST port. Drivers should
 * call this from their mst connector's late_register hook to enable MST aux
 * devices.
 *
 * Return: 0 on success, negative error code on failure.
 */
int drm_dp_mst_connector_late_register(struct drm_connector *connector,
				       struct drm_dp_mst_port *port)
{
	DRM_DEBUG_KMS("registering %s remote bus for %s\n",
		      port->aux.name, connector->kdev->kobj.name);

	port->aux.dev = connector->kdev;
	return drm_dp_aux_register_devnode(&port->aux);
}
EXPORT_SYMBOL(drm_dp_mst_connector_late_register);

/**
 * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
 * @connector: The MST connector
 * @port: The MST port for this connector
 *
 * Helper to unregister the remote aux device for this MST port, registered by
 * drm_dp_mst_connector_late_register().
 */
void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
					   struct drm_dp_mst_port *port)
{
	DRM_DEBUG_KMS("unregistering %s remote bus for %s\n",
		      port->aux.name, connector->kdev->kobj.name);
	drm_dp_aux_unregister_devnode(&port->aux);
}
EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);

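/*
 * Look up (or create) the port named in a link address reply, update its
 * state from the reply, and create a connector plus cached EDID for newly
 * discovered output ports.
 */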
static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
			    struct drm_device *dev,
			    struct drm_dp_link_addr_reply_port *port_msg)
{
	struct drm_dp_mst_port *port;
	bool ret;
	bool created = false;
	int old_pdt = 0;
	int old_ddps = 0;

	port = drm_dp_get_port(mstb, port_msg->port_number);
	if (!port) {
		port = kzalloc(sizeof(*port), GFP_KERNEL);
		if (!port)
			return;
		kref_init(&port->topology_kref);
		kref_init(&port->malloc_kref);
		port->parent = mstb;
		port->port_num = port_msg->port_number;
		port->mgr = mstb->mgr;
		port->aux.name = "DPMST";
		port->aux.dev = dev->dev;
		port->aux.is_remote = true;

		/*
		 * Make sure the memory allocation for our parent branch stays
		 * around until our own memory allocation is released
		 */
		drm_dp_mst_get_mstb_malloc(mstb);

		created = true;
	} else {
		old_pdt = port->pdt;
		old_ddps = port->ddps;
	}

	port->pdt = port_msg->peer_device_type;
	port->input = port_msg->input_port;
	port->mcs = port_msg->mcs;
	port->ddps = port_msg->ddps;
	port->ldps = port_msg->legacy_device_plug_status;
	port->dpcd_rev = port_msg->dpcd_revision;
	port->num_sdp_streams = port_msg->num_sdp_streams;
	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;

	/* manage mstb port lists with mgr lock - take a reference
	   for this list */
	if (created) {
		mutex_lock(&mstb->mgr->lock);
		drm_dp_mst_topology_get_port(port);
		list_add(&port->next, &mstb->ports);
		mutex_unlock(&mstb->mgr->lock);
	}

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			if (!port->input) {
				drm_dp_send_enum_path_resources(mstb->mgr,
								mstb, port);
			}
		} else {
			port->available_pbn = 0;
		}
	}

	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		ret = drm_dp_port_setup_pdt(port);
		if (ret == true)
			drm_dp_send_link_address(mstb->mgr, port->mstb);
	}

	if (created && !port->input) {
		char proppath[255];

		build_mst_prop_path(mstb, port->port_num, proppath,
				    sizeof(proppath));
		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr,
								   port,
								   proppath);
		if (!port->connector) {
			/* remove it from the port list */
			mutex_lock(&mstb->mgr->lock);
			list_del(&port->next);
			mutex_unlock(&mstb->mgr->lock);
			/* drop port list reference */
			drm_dp_mst_topology_put_port(port);
			goto out;
		}
		if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
		     port->pdt == DP_PEER_DEVICE_SST_SINK) &&
		    port->port_num >= DP_MST_LOGICAL_PORT_0) {
			port->cached_edid = drm_get_edid(port->connector,
							 &port->aux.ddc);
			drm_connector_set_tile_property(port->connector);
		}
		(*mstb->mgr->cbs->register_connector)(port->connector);
	}

out:
	/* put reference to this port */
	drm_dp_mst_topology_put_port(port);
}

static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_connection_status_notify *conn_stat)
{
	struct drm_dp_mst_port *port;
	int old_pdt;
	int old_ddps;
	bool dowork = false;
	port = drm_dp_get_port(mstb, conn_stat->port_number);
	if (!port)
		return;

	old_ddps = port->ddps;
	old_pdt = port->pdt;
	port->pdt = conn_stat->peer_device_type;
	port->mcs = conn_stat->message_capability_status;
	port->ldps = conn_stat->legacy_device_plug_status;
	port->ddps = conn_stat->displayport_device_plug_status;

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			dowork = true;
		} else {
			port->available_pbn = 0;
		}
	}
	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		if (drm_dp_port_setup_pdt(port))
			dowork = true;
	}

	drm_dp_mst_topology_put_port(port);
	if (dowork)
		queue_work(system_long_wq, &mstb->mgr->work);
}

static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
							      u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_port *port;
	int i, ret;

	/* find the port by iterating down */
	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;

	if (!mstb)
		goto out;

	for (i = 0; i < lct - 1; i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (rad[i / 2] >> shift) & 0xf;

		list_for_each_entry(port, &mstb->ports, next) {
			if (port->port_num == port_num) {
				mstb = port->mstb;
				if (!mstb) {
					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
					goto out;
				}

				break;
			}
		}
	}
	ret = drm_dp_mst_topology_try_get_mstb(mstb);
	if (!ret)
		mstb = NULL;
out:
	mutex_unlock(&mgr->lock);
	return mstb;
}

static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
	struct drm_dp_mst_branch *mstb,
	uint8_t *guid)
{
	struct drm_dp_mst_branch *found_mstb;
	struct drm_dp_mst_port *port;

	if (memcmp(mstb->guid, guid, 16) == 0)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		if (!port->mstb)
			continue;

		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);

		if (found_mstb)
			return found_mstb;
	}

	return NULL;
}

static struct drm_dp_mst_branch *
drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
				     uint8_t *guid)
{
	struct drm_dp_mst_branch *mstb;
	int ret;

	/* find the branch device by iterating down from the primary */
	mutex_lock(&mgr->lock);

	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
	if (mstb) {
		ret = drm_dp_mst_topology_try_get_mstb(mstb);
		if (!ret)
			mstb = NULL;
	}

	mutex_unlock(&mgr->lock);
	return mstb;
}

static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *mstb_child;
	if (!mstb->link_address_sent)
		drm_dp_send_link_address(mgr, mstb);

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->input)
			continue;

		if (!port->ddps)
			continue;

		if (!port->available_pbn)
			drm_dp_send_enum_path_resources(mgr, mstb, port);

		if (port->mstb) {
			mstb_child = drm_dp_mst_topology_get_mstb_validated(
			    mgr, port->mstb);
			if (mstb_child) {
				drm_dp_check_and_send_link_address(mgr, mstb_child);
				drm_dp_mst_topology_put_mstb(mstb_child);
			}
		}
	}
}

static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
	struct drm_dp_mst_branch *mstb;
	int ret;

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;
	if (mstb) {
		ret = drm_dp_mst_topology_try_get_mstb(mstb);
		if (!ret)
			mstb = NULL;
	}
	mutex_unlock(&mgr->lock);
	if (mstb) {
		drm_dp_check_and_send_link_address(mgr, mstb);
		drm_dp_mst_topology_put_mstb(mstb);
	}
}

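/*
 * A GUID of all zeroes is invalid; in that case synthesise one from the
 * jiffies counter and return false so that drm_dp_check_mstb_guid() writes
 * the new GUID back to the device.
 */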
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid)
{
	u64 salt;

	if (memchr_inv(guid, 0, 16))
		return true;

	salt = get_jiffies_64();

	memcpy(&guid[0], &salt, sizeof(u64));
	memcpy(&guid[8], &salt, sizeof(u64));

	return false;
}

static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_READ;
	req.u.dpcd_read.port_number = port_num;
	req.u.dpcd_read.dpcd_address = offset;
	req.u.dpcd_read.num_bytes = num_bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}

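/*
 * Write one chunk of a sideband message into the DOWN_REQ (or, when
 * replying to a device-initiated up request, UP_REP) DPCD mailbox, at most
 * 16 bytes per AUX transaction, retrying the whole chunk up to five times
 * on -EIO.
 */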
static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
				    bool up, u8 *msg, int len)
{
	int ret;
	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
	int tosend, total, offset;
	int retries = 0;

retry:
	total = len;
	offset = 0;
	do {
		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);

		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
					&msg[offset],
					tosend);
		if (ret != tosend) {
			if (ret == -EIO && retries < 5) {
				retries++;
				goto retry;
			}
			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);

			return -EIO;
		}
		offset += tosend;
		total -= tosend;
	} while (total > 0);
	return 0;
}

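/*
 * Each branch device has two sideband transaction slots (seqno 0 and 1). A
 * queued txmsg with seqno == -1 claims a free slot here; elsewhere in this
 * file, replies are matched back to their slot by the seqno echoed in the
 * reply header.
 */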
static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
				  struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_branch *mstb = txmsg->dst;
	u8 req_type;

	/* both msg slots are full */
	if (txmsg->seqno == -1) {
		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
			return -EAGAIN;
		}
		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
			txmsg->seqno = mstb->last_seqno;
			mstb->last_seqno ^= 1;
		} else if (mstb->tx_slots[0] == NULL)
			txmsg->seqno = 0;
		else
			txmsg->seqno = 1;
		mstb->tx_slots[txmsg->seqno] = txmsg;
	}

	req_type = txmsg->msg[0] & 0x7f;
	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
	    req_type == DP_RESOURCE_STATUS_NOTIFY)
		hdr->broadcast = 1;
	else
		hdr->broadcast = 0;
	hdr->path_msg = txmsg->path_msg;
	hdr->lct = mstb->lct;
	hdr->lcr = mstb->lct - 1;
	if (mstb->lct > 1)
		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
	hdr->seqno = txmsg->seqno;
	return 0;
}

/*
 * process a single block of the next message in the queue
 */
static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_sideband_msg_tx *txmsg,
				   bool up)
{
	u8 chunk[48];
	struct drm_dp_sideband_msg_hdr hdr;
	int len, space, idx, tosend;
	int ret;

	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));

	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
		txmsg->seqno = -1;
		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
	}

	/* make hdr from dst mst - for replies use seqno
	   otherwise assign one */
	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
	if (ret < 0)
		return ret;

	/* amount left to send in this message */
	len = txmsg->cur_len - txmsg->cur_offset;

	/* room left in a 48-byte chunk after the header and data CRC byte */
	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);

	tosend = min(len, space);
	if (len == txmsg->cur_len)
		hdr.somt = 1;
	if (space >= len)
		hdr.eomt = 1;

	/* msg_len counts the payload plus the trailing data CRC byte */
	hdr.msg_len = tosend + 1;
	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
	/* add crc at end */
	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
	idx += tosend + 1;

	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
	if (ret) {
		DRM_DEBUG_KMS("sideband msg failed to send\n");
		return ret;
	}

	txmsg->cur_offset += tosend;
	if (txmsg->cur_offset == txmsg->cur_len) {
		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
		return 1;
	}
	return 0;
}

static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	WARN_ON(!mutex_is_locked(&mgr->qlock));

	/* construct a chunk from the first msg in the tx_msg queue */
	if (list_empty(&mgr->tx_msg_downq))
		return;

	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
	ret = process_single_tx_qlock(mgr, txmsg, false);
	if (ret == 1) {
		/* txmsg is sent - it should be in the slots now */
		list_del(&txmsg->next);
	} else if (ret) {
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
		list_del(&txmsg->next);
		if (txmsg->seqno != -1)
			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		wake_up_all(&mgr->tx_waitq);
	}
}

/* called holding qlock */
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_sideband_msg_tx *txmsg)
{
	int ret;

	/* construct a chunk from the first msg in the tx_msg queue */
	ret = process_single_tx_qlock(mgr, txmsg, true);

	if (ret != 1)
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);

	if (txmsg->seqno != -1) {
		WARN_ON((unsigned int)txmsg->seqno >
			ARRAY_SIZE(txmsg->dst->tx_slots));
		txmsg->dst->tx_slots[txmsg->seqno] = NULL;
	}
}

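/*
 * Queue a down request; if it is the only message in the queue, start
 * transmitting it immediately, otherwise it will be sent once the messages
 * ahead of it complete.
 */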
static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_sideband_msg_tx *txmsg)
{
	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
	if (list_is_singular(&mgr->tx_msg_downq))
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return;

	txmsg->dst = mstb;
	len = build_link_address(txmsg);

	mstb->link_address_sent = true;
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		int i;

		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			DRM_DEBUG_KMS("link address nak received\n");
		} else {
			DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
					      txmsg->reply.u.link_addr.ports[i].input_port,
					      txmsg->reply.u.link_addr.ports[i].peer_device_type,
					      txmsg->reply.u.link_addr.ports[i].port_number,
					      txmsg->reply.u.link_addr.ports[i].dpcd_revision,
					      txmsg->reply.u.link_addr.ports[i].mcs,
					      txmsg->reply.u.link_addr.ports[i].ddps,
					      txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
					      txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
					      txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
			}

			drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);

			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
			}
			drm_kms_helper_hotplug_event(mgr->dev);
		}
	} else {
		mstb->link_address_sent = false;
		DRM_DEBUG_KMS("link address failed %d\n", ret);
	}

	kfree(txmsg);
}

2159 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
2160 struct drm_dp_mst_branch *mstb,
2161 struct drm_dp_mst_port *port)
2162 {
2163 int len;
2164 struct drm_dp_sideband_msg_tx *txmsg;
2165 int ret;
2166
2167 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2168 if (!txmsg)
2169 return -ENOMEM;
2170
2171 txmsg->dst = mstb;
2172 len = build_enum_path_resources(txmsg, port->port_num);
2173
2174 drm_dp_queue_down_tx(mgr, txmsg);
2175
2176 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2177 if (ret > 0) {
2178 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2179 DRM_DEBUG_KMS("enum path resources nak received\n");
2180 } else {
2181 if (port->port_num != txmsg->reply.u.path_resources.port_number)
2182 DRM_ERROR("got incorrect port in response\n");
2183 DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
2184 txmsg->reply.u.path_resources.avail_payload_bw_number);
2185 port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
2186 }
2187 }
2188
2189 kfree(txmsg);
2190 return 0;
2191 }
2192
2193 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
2194 {
2195 if (!mstb->port_parent)
2196 return NULL;
2197
2198 if (mstb->port_parent->mstb != mstb)
2199 return mstb->port_parent;
2200
2201 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
2202 }
2203
2204 /*
2205 * Searches upwards in the topology starting from mstb to try to find the
2206 * closest available parent of mstb that's still connected to the rest of
2207 * the topology. This can be used in order to perform operations like
2208 * releasing payloads, where the branch device which owned the payload may
2209 * no longer be around and thus would require that the payload on the last
2210 * living relative be freed instead.
2211 */
2212 static struct drm_dp_mst_branch *
2213 drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
2214 struct drm_dp_mst_branch *mstb,
2215 int *port_num)
2216 {
2217 struct drm_dp_mst_branch *rmstb = NULL;
2218 struct drm_dp_mst_port *found_port;
2219
2220 mutex_lock(&mgr->lock);
2221 if (!mgr->mst_primary)
2222 goto out;
2223
2224 do {
2225 found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
2226 if (!found_port)
2227 break;
2228
2229 if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
2230 rmstb = found_port->parent;
2231 *port_num = found_port->port_num;
2232 } else {
2233 /* Search again, starting from this parent */
2234 mstb = found_port->parent;
2235 }
2236 } while (!rmstb);
2237 out:
2238 mutex_unlock(&mgr->lock);
2239 return rmstb;
2240 }
2241
2242 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
2243 struct drm_dp_mst_port *port,
2244 int id,
2245 int pbn)
2246 {
2247 struct drm_dp_sideband_msg_tx *txmsg;
2248 struct drm_dp_mst_branch *mstb;
2249 int len, ret, port_num;
2250 u8 sinks[DRM_DP_MAX_SDP_STREAMS];
2251 int i;
2252
2253 port_num = port->port_num;
2254 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
2255 if (!mstb) {
2256 mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
2257 port->parent,
2258 &port_num);
2259
2260 if (!mstb)
2261 return -EINVAL;
2262 }
2263
2264 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2265 if (!txmsg) {
2266 ret = -ENOMEM;
2267 goto fail_put;
2268 }
2269
2270 for (i = 0; i < port->num_sdp_streams; i++)
2271 sinks[i] = i;
2272
2273 txmsg->dst = mstb;
2274 len = build_allocate_payload(txmsg, port_num,
2275 id,
2276 pbn, port->num_sdp_streams, sinks);
2277
2278 drm_dp_queue_down_tx(mgr, txmsg);
2279
2280 /*
2281 * FIXME: there is a small chance that between getting the last
2282 * connected mstb and sending the payload message, the last connected
2283 * mstb could also be removed from the topology. In the future, this
2284 * needs to be fixed by restarting the
2285 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
2286 * failure.
2287 */
2288 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2289 if (ret > 0) {
2290 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2291 ret = -EINVAL;
2292 else
2293 ret = 0;
2294 }
2295 kfree(txmsg);
2296 fail_put:
2297 drm_dp_mst_topology_put_mstb(mstb);
2298 return ret;
2299 }
2300
2301 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
2302 struct drm_dp_mst_port *port, bool power_up)
2303 {
2304 struct drm_dp_sideband_msg_tx *txmsg;
2305 int len, ret;
2306
2307 port = drm_dp_mst_topology_get_port_validated(mgr, port);
2308 if (!port)
2309 return -EINVAL;
2310
2311 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2312 if (!txmsg) {
2313 drm_dp_mst_topology_put_port(port);
2314 return -ENOMEM;
2315 }
2316
2317 txmsg->dst = port->parent;
2318 len = build_power_updown_phy(txmsg, port->port_num, power_up);
2319 drm_dp_queue_down_tx(mgr, txmsg);
2320
2321 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
2322 if (ret > 0) {
2323 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2324 ret = -EINVAL;
2325 else
2326 ret = 0;
2327 }
2328 kfree(txmsg);
2329 drm_dp_mst_topology_put_port(port);
2330
2331 return ret;
2332 }
2333 EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
2334
2335 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
2336 int id,
2337 struct drm_dp_payload *payload)
2338 {
2339 int ret;
2340
2341 ret = drm_dp_dpcd_write_payload(mgr, id, payload);
2342 if (ret < 0) {
2343 payload->payload_state = 0;
2344 return ret;
2345 }
2346 payload->payload_state = DP_PAYLOAD_LOCAL;
2347 return 0;
2348 }
2349
2350 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
2351 struct drm_dp_mst_port *port,
2352 int id,
2353 struct drm_dp_payload *payload)
2354 {
2355 int ret;
2356 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
2357 if (ret < 0)
2358 return ret;
2359 payload->payload_state = DP_PAYLOAD_REMOTE;
2360 return ret;
2361 }
2362
2363 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
2364 struct drm_dp_mst_port *port,
2365 int id,
2366 struct drm_dp_payload *payload)
2367 {
2368 DRM_DEBUG_KMS("destroy payload %d\n", id);
2369
2370 if (port)
2371 drm_dp_payload_send_msg(mgr, port, id, 0);
2372
2373
2374 drm_dp_dpcd_write_payload(mgr, id, payload);
2375 payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
2376 return 0;
2377 }
2378
2379 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
2380 int id,
2381 struct drm_dp_payload *payload)
2382 {
2383 payload->payload_state = 0;
2384 return 0;
2385 }
2386
2387 /**
2388 * drm_dp_update_payload_part1() - Execute payload update part 1
2389 * @mgr: manager to use.
2390 *
2391 * This iterates over all proposed virtual channels, and tries to
2392 * allocate space in the link for them. For 0->slots transitions,
2393 * this step just writes the VCPI to the MST device. For slots->0
2394 * transitions, this writes the updated VCPIs and removes the
2395 * remote VC payloads.
2396 *
2397 * After calling this the driver should generate ACT and payload
2398 * packets.
2399 */
2400 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
2401 {
2402 struct drm_dp_payload req_payload;
2403 struct drm_dp_mst_port *port;
2404 int i, j;
2405 int cur_slots = 1;
2406
2407 mutex_lock(&mgr->payload_lock);
2408 for (i = 0; i < mgr->max_payloads; i++) {
2409 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
2410 struct drm_dp_payload *payload = &mgr->payloads[i];
2411 bool put_port = false;
2412
2413 /* solve the current payloads - compare to the hw ones
2414 * - update the hw view */
2415 req_payload.start_slot = cur_slots;
2416 if (vcpi) {
2417 port = container_of(vcpi, struct drm_dp_mst_port,
2418 vcpi);
2419
2420 /* Validated ports don't matter if we're releasing
2421 * VCPI
2422 */
2423 if (vcpi->num_slots) {
2424 port = drm_dp_mst_topology_get_port_validated(
2425 mgr, port);
2426 if (!port) {
2427 mutex_unlock(&mgr->payload_lock);
2428 return -EINVAL;
2429 }
2430 put_port = true;
2431 }
2432
2433 req_payload.num_slots = vcpi->num_slots;
2434 req_payload.vcpi = vcpi->vcpi;
2435 } else {
2436 port = NULL;
2437 req_payload.num_slots = 0;
2438 }
2439
2440 payload->start_slot = req_payload.start_slot;
2441
2442 if (payload->num_slots != req_payload.num_slots) {
2443
2444 /* need to push an update for this payload */
2445 if (req_payload.num_slots) {
2446 drm_dp_create_payload_step1(mgr, vcpi->vcpi,
2447 &req_payload);
2448 payload->num_slots = req_payload.num_slots;
2449 payload->vcpi = req_payload.vcpi;
2450
2451 } else if (payload->num_slots) {
2452 payload->num_slots = 0;
2453 drm_dp_destroy_payload_step1(mgr, port,
2454 payload->vcpi,
2455 payload);
2456 req_payload.payload_state =
2457 payload->payload_state;
2458 payload->start_slot = 0;
2459 }
2460 payload->payload_state = req_payload.payload_state;
2461 }
2462 cur_slots += req_payload.num_slots;
2463
2464 if (put_port)
2465 drm_dp_mst_topology_put_port(port);
2466 }
2467
2468 for (i = 0; i < mgr->max_payloads; ) {
2469 if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
2470 i++;
2471 continue;
2472 }
2473
2474 DRM_DEBUG_KMS("removing payload %d\n", i);
2475 for (j = i; j < mgr->max_payloads - 1; j++) {
2476 mgr->payloads[j] = mgr->payloads[j + 1];
2477 mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
2478
2479 if (mgr->proposed_vcpis[j] &&
2480 mgr->proposed_vcpis[j]->num_slots) {
2481 set_bit(j + 1, &mgr->payload_mask);
2482 } else {
2483 clear_bit(j + 1, &mgr->payload_mask);
2484 }
2485 }
2486
2487 memset(&mgr->payloads[mgr->max_payloads - 1], 0,
2488 sizeof(struct drm_dp_payload));
2489 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
2490 clear_bit(mgr->max_payloads, &mgr->payload_mask);
2491 }
2492 mutex_unlock(&mgr->payload_lock);
2493
2494 return 0;
2495 }
2496 EXPORT_SYMBOL(drm_dp_update_payload_part1);
2497
2498 /**
2499 * drm_dp_update_payload_part2() - Execute payload update part 2
2500 * @mgr: manager to use.
2501 *
2502 * This iterates over all proposed virtual channels, and tries to
2503 * allocate space in the link for them. For 0->slots transitions,
2504 * this step writes the remote VC payload commands. For slots->0
2505 * this just resets some internal state.
2506 */
2507 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
2508 {
2509 struct drm_dp_mst_port *port;
2510 int i;
2511 int ret = 0;
2512 mutex_lock(&mgr->payload_lock);
2513 for (i = 0; i < mgr->max_payloads; i++) {
2514
2515 if (!mgr->proposed_vcpis[i])
2516 continue;
2517
2518 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
2519
2520 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
2521 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
2522 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
2523 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
2524 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
2525 }
2526 if (ret) {
2527 mutex_unlock(&mgr->payload_lock);
2528 return ret;
2529 }
2530 }
2531 mutex_unlock(&mgr->payload_lock);
2532 return 0;
2533 }
2534 EXPORT_SYMBOL(drm_dp_update_payload_part2);
2535
2536 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
2537 struct drm_dp_mst_port *port,
2538 int offset, int size, u8 *bytes)
2539 {
2540 int len;
2541 int ret = 0;
2542 struct drm_dp_sideband_msg_tx *txmsg;
2543 struct drm_dp_mst_branch *mstb;
2544
2545 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
2546 if (!mstb)
2547 return -EINVAL;
2548
2549 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2550 if (!txmsg) {
2551 ret = -ENOMEM;
2552 goto fail_put;
2553 }
2554
2555 len = build_dpcd_read(txmsg, port->port_num, offset, size);
2556 txmsg->dst = mstb;
2557
2558 drm_dp_queue_down_tx(mgr, txmsg);
2559
2560 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2561 if (ret < 0)
2562 goto fail_free;
2563
2564 /* DPCD read should never be NAKed */
2565 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2566 DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
2567 mstb, port->port_num, offset, size);
2568 ret = -EIO;
2569 goto fail_free;
2570 }
2571
2572 if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
2573 ret = -EPROTO;
2574 goto fail_free;
2575 }
2576
2577 ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
2578 size);
2579 memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
2580
2581 fail_free:
2582 kfree(txmsg);
2583 fail_put:
2584 drm_dp_mst_topology_put_mstb(mstb);
2585
2586 return ret;
2587 }
2588
2589 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
2590 struct drm_dp_mst_port *port,
2591 int offset, int size, u8 *bytes)
2592 {
2593 int len;
2594 int ret;
2595 struct drm_dp_sideband_msg_tx *txmsg;
2596 struct drm_dp_mst_branch *mstb;
2597
2598 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
2599 if (!mstb)
2600 return -EINVAL;
2601
2602 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2603 if (!txmsg) {
2604 ret = -ENOMEM;
2605 goto fail_put;
2606 }
2607
2608 len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
2609 txmsg->dst = mstb;
2610
2611 drm_dp_queue_down_tx(mgr, txmsg);
2612
2613 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2614 if (ret > 0) {
2615 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2616 ret = -EIO;
2617 else
2618 ret = 0;
2619 }
2620 kfree(txmsg);
2621 fail_put:
2622 drm_dp_mst_topology_put_mstb(mstb);
2623 return ret;
2624 }
2625
2626 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
2627 {
2628 struct drm_dp_sideband_msg_reply_body reply;
2629
2630 reply.reply_type = DP_SIDEBAND_REPLY_ACK;
2631 reply.req_type = req_type;
2632 drm_dp_encode_sideband_reply(&reply, msg);
2633 return 0;
2634 }
2635
2636 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
2637 struct drm_dp_mst_branch *mstb,
2638 int req_type, int seqno, bool broadcast)
2639 {
2640 struct drm_dp_sideband_msg_tx *txmsg;
2641
2642 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2643 if (!txmsg)
2644 return -ENOMEM;
2645
2646 txmsg->dst = mstb;
2647 txmsg->seqno = seqno;
2648 drm_dp_encode_up_ack_reply(txmsg, req_type);
2649
2650 mutex_lock(&mgr->qlock);
2651
2652 process_single_up_tx_qlock(mgr, txmsg);
2653
2654 mutex_unlock(&mgr->qlock);
2655
2656 kfree(txmsg);
2657 return 0;
2658 }
2659
2660 static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
2661 int dp_link_count,
2662 int *out)
2663 {
2664 switch (dp_link_bw) {
2665 default:
2666 DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
2667 dp_link_bw, dp_link_count);
2668 return false;
2669
2670 case DP_LINK_BW_1_62:
2671 *out = 3 * dp_link_count;
2672 break;
2673 case DP_LINK_BW_2_7:
2674 *out = 5 * dp_link_count;
2675 break;
2676 case DP_LINK_BW_5_4:
2677 *out = 10 * dp_link_count;
2678 break;
2679 case DP_LINK_BW_8_1:
2680 *out = 15 * dp_link_count;
2681 break;
2682 }
2683 return true;
2684 }
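/*
 * Editorial worked example (not in the original source): the value stored in
 * *out is the PBN-per-timeslot figure later kept in mgr->pbn_div. An HBR2
 * (5.4 Gbps) link with 4 lanes yields 10 * 4 = 40 PBN per slot, so the 63
 * usable MTP slots can carry roughly 63 * 40 = 2520 PBN of payload.
 */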
2685
2686 /**
2687 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
2688 * @mgr: manager to set state for
2689 * @mst_state: true to enable MST on this connector - false to disable.
2690 *
2691 * This is called by the driver when it detects an MST capable device plugged
2692 * into a DP MST capable port, or when a DP MST capable device is unplugged.
2693 */
2694 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
2695 {
2696 int ret = 0;
2697 struct drm_dp_mst_branch *mstb = NULL;
2698
2699 mutex_lock(&mgr->payload_lock);
2700 mutex_lock(&mgr->lock);
2701 if (mst_state == mgr->mst_state)
2702 goto out_unlock;
2703
2704 mgr->mst_state = mst_state;
2705
2706 if (mst_state) {
2707 WARN_ON(mgr->mst_primary);
2708
2709 /* get dpcd info */
2710 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
2711 if (ret != DP_RECEIVER_CAP_SIZE) {
2712 DRM_DEBUG_KMS("failed to read DPCD\n");
2713 goto out_unlock;
2714 }
2715
2716 if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
2717 mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
2718 &mgr->pbn_div)) {
2719 ret = -EINVAL;
2720 goto out_unlock;
2721 }
2722
2723 /* add initial branch device at LCT 1 */
2724 mstb = drm_dp_add_mst_branch_device(1, NULL);
2725 if (mstb == NULL) {
2726 ret = -ENOMEM;
2727 goto out_unlock;
2728 }
2729 mstb->mgr = mgr;
2730
2731 /* give this the main reference */
2732 mgr->mst_primary = mstb;
2733 drm_dp_mst_topology_get_mstb(mgr->mst_primary);
2734
2735 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2736 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
2737 if (ret < 0)
2738 goto out_unlock;
2739
2740 /* Write reset payload */
2741 {
2742 struct drm_dp_payload reset_pay;
2743 reset_pay.start_slot = 0;
2744 reset_pay.num_slots = 0x3f;
2745 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
2746 }
2747
2748 queue_work(system_long_wq, &mgr->work);
2749
2750 ret = 0;
2751 } else {
2752 /* disable MST on the device */
2753 mstb = mgr->mst_primary;
2754 mgr->mst_primary = NULL;
2755 /* this can fail if the device is gone */
2756 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
2757 ret = 0;
2758 memset(mgr->payloads, 0,
2759 mgr->max_payloads * sizeof(mgr->payloads[0]));
2760 memset(mgr->proposed_vcpis, 0,
2761 mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
2762 mgr->payload_mask = 0;
2763 set_bit(0, &mgr->payload_mask);
2764 mgr->vcpi_mask = 0;
2765 }
2766
2767 out_unlock:
2768 mutex_unlock(&mgr->lock);
2769 mutex_unlock(&mgr->payload_lock);
2770 if (mstb)
2771 drm_dp_mst_topology_put_mstb(mstb);
2772 return ret;
2773
2774 }
2775 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
2776
2777 /**
2778 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
2779 * @mgr: manager to suspend
2780 *
2781 * This function tells the MST device that we can't handle UP messages
2782 * anymore. This should stop it from sending any since we are suspended.
2783 */
2784 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
2785 {
2786 mutex_lock(&mgr->lock);
2787 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2788 DP_MST_EN | DP_UPSTREAM_IS_SRC);
2789 mutex_unlock(&mgr->lock);
2790 flush_work(&mgr->work);
2791 flush_work(&mgr->destroy_connector_work);
2792 }
2793 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
2794
2795 /**
2796 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
2797 * @mgr: manager to resume
2798 *
2799 * This will fetch DPCD and see if the device is still there,
2800 * if it is, it will rewrite the MSTM control bits, and return.
2801 *
2802 * If the device fails this returns -1, and the driver should do
2803 * a full MST reprobe, in case we were undocked.
2804 */
2805 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
2806 {
2807 int ret = 0;
2808
2809 mutex_lock(&mgr->lock);
2810
2811 if (mgr->mst_primary) {
2812 int sret;
2813 u8 guid[16];
2814
2815 sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
2816 if (sret != DP_RECEIVER_CAP_SIZE) {
2817 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
2818 ret = -1;
2819 goto out_unlock;
2820 }
2821
2822 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2823 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
2824 if (ret < 0) {
2825 DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
2826 ret = -1;
2827 goto out_unlock;
2828 }
2829
2830 /* Some hubs forget their guids after they resume */
2831 sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
2832 if (sret != 16) {
2833 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
2834 ret = -1;
2835 goto out_unlock;
2836 }
2837 drm_dp_check_mstb_guid(mgr->mst_primary, guid);
2838
2839 ret = 0;
2840 } else
2841 ret = -1;
2842
2843 out_unlock:
2844 mutex_unlock(&mgr->lock);
2845 return ret;
2846 }
2847 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
2848
2849 static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
2850 {
2851 int len;
2852 u8 replyblock[32];
2853 int replylen, origlen, curreply;
2854 int ret;
2855 struct drm_dp_sideband_msg_rx *msg;
2856 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
2857 msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
2858
2859 len = min(mgr->max_dpcd_transaction_bytes, 16);
2860 ret = drm_dp_dpcd_read(mgr->aux, basereg,
2861 replyblock, len);
2862 if (ret != len) {
2863 DRM_DEBUG_KMS("failed to read DPCD %s msg %d %d\n", up ? "up req" : "down rep", len, ret);
2864 return false;
2865 }
2866 ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
2867 if (!ret) {
2868 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
2869 return false;
2870 }
2871 replylen = msg->curchunk_len + msg->curchunk_hdrlen;
2872
2873 origlen = replylen;
2874 replylen -= len;
2875 curreply = len;
2876 while (replylen > 0) {
2877 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
2878 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
2879 replyblock, len);
2880 if (ret != len) {
2881 DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
2882 len, ret);
2883 return false;
2884 }
2885
2886 ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
2887 if (!ret) {
2888 DRM_DEBUG_KMS("failed to build sideband msg\n");
2889 return false;
2890 }
2891
2892 curreply += len;
2893 replylen -= len;
2894 }
2895 return true;
2896 }
2897
2898 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
2899 {
2900 int ret = 0;
2901
2902 if (!drm_dp_get_one_sb_msg(mgr, false)) {
2903 memset(&mgr->down_rep_recv, 0,
2904 sizeof(struct drm_dp_sideband_msg_rx));
2905 return 0;
2906 }
2907
2908 if (mgr->down_rep_recv.have_eomt) {
2909 struct drm_dp_sideband_msg_tx *txmsg;
2910 struct drm_dp_mst_branch *mstb;
2911 int slot = -1;
2912 mstb = drm_dp_get_mst_branch_device(mgr,
2913 mgr->down_rep_recv.initial_hdr.lct,
2914 mgr->down_rep_recv.initial_hdr.rad);
2915
2916 if (!mstb) {
2917 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
2918 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2919 return 0;
2920 }
2921
2922 /* find the message */
2923 slot = mgr->down_rep_recv.initial_hdr.seqno;
2924 mutex_lock(&mgr->qlock);
2925 txmsg = mstb->tx_slots[slot];
2926
2927 mutex_unlock(&mgr->qlock);
2928
2929 if (!txmsg) {
2930 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
2931 mstb,
2932 mgr->down_rep_recv.initial_hdr.seqno,
2933 mgr->down_rep_recv.initial_hdr.lct,
2934 mgr->down_rep_recv.initial_hdr.rad[0],
2935 mgr->down_rep_recv.msg[0]);
2936 drm_dp_mst_topology_put_mstb(mstb);
2937 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2938 return 0;
2939 }
2940
2941 drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
2942
2943 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2944 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
2945 txmsg->reply.req_type,
2946 drm_dp_mst_req_type_str(txmsg->reply.req_type),
2947 txmsg->reply.u.nak.reason,
2948 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
2949 txmsg->reply.u.nak.nak_data);
2950
2951 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2952
2953 mutex_lock(&mgr->qlock);
2954 txmsg->state = DRM_DP_SIDEBAND_TX_RX;
2955 mstb->tx_slots[slot] = NULL;
2956 mutex_unlock(&mgr->qlock);
2957 /* drop our topology reference only once we stop touching mstb */
2958 drm_dp_mst_topology_put_mstb(mstb);
2959 wake_up_all(&mgr->tx_waitq);
2960 }
2961 return ret;
2962 }
2963
2964 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
2965 {
2966 int ret = 0;
2967
2968 if (!drm_dp_get_one_sb_msg(mgr, true)) {
2969 memset(&mgr->up_req_recv, 0,
2970 sizeof(struct drm_dp_sideband_msg_rx));
2971 return 0;
2972 }
2973
2974 if (mgr->up_req_recv.have_eomt) {
2975 struct drm_dp_sideband_msg_req_body msg;
2976 struct drm_dp_mst_branch *mstb = NULL;
2977 int seqno;
2978
2979 if (!mgr->up_req_recv.initial_hdr.broadcast) {
2980 mstb = drm_dp_get_mst_branch_device(mgr,
2981 mgr->up_req_recv.initial_hdr.lct,
2982 mgr->up_req_recv.initial_hdr.rad);
2983 if (!mstb) {
2984 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2985 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2986 return 0;
2987 }
2988 }
2989
2990 seqno = mgr->up_req_recv.initial_hdr.seqno;
2991 drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
2992
2993 if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
2994 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
2995
2996 if (!mstb)
2997 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
2998
2999 if (!mstb) {
3000 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
3001 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3002 return 0;
3003 }
3004
3005 drm_dp_update_port(mstb, &msg.u.conn_stat);
3006
3007 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
3008 drm_kms_helper_hotplug_event(mgr->dev);
3009
3010 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
3011 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
3012 if (!mstb)
3013 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
3014
3015 if (!mstb) {
3016 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
3017 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3018 return 0;
3019 }
3020
3021 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
3022 }
3023
3024 if (mstb)
3025 drm_dp_mst_topology_put_mstb(mstb);
3026
3027 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3028 }
3029 return ret;
3030 }
3031
3032 /**
3033 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
3034 * @mgr: manager to notify irq for.
3035 * @esi: 4 bytes from SINK_COUNT_ESI
3036 * @handled: whether the hpd interrupt was consumed or not
3037 *
3038 * This should be called from the driver when it detects a short IRQ,
3039 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0.
3040 * The topology manager will process the sideband messages received as a
3041 * result of this.
3042 */
3043 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
3044 {
3045 int ret = 0;
3046 int sc;
3047 *handled = false;
3048 sc = esi[0] & 0x3f;
3049
3050 if (sc != mgr->sink_count) {
3051 mgr->sink_count = sc;
3052 *handled = true;
3053 }
3054
3055 if (esi[1] & DP_DOWN_REP_MSG_RDY) {
3056 ret = drm_dp_mst_handle_down_rep(mgr);
3057 *handled = true;
3058 }
3059
3060 if (esi[1] & DP_UP_REQ_MSG_RDY) {
3061 ret |= drm_dp_mst_handle_up_req(mgr);
3062 *handled = true;
3063 }
3064
3065 drm_dp_mst_kick_tx(mgr);
3066 return ret;
3067 }
3068 EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
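/*
 * Editorial sketch (not part of the original file): roughly how a driver's
 * short-pulse handler might feed drm_dp_mst_hpd_irq(). The function name and
 * surrounding plumbing are hypothetical; only the DPCD offsets and drm_dp_*
 * calls come from the helpers themselves.
 */
static void example_mst_short_pulse(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 esi[4] = {};
	bool handled = false;

	/* sink count byte plus the three IRQ vector bytes that follow it */
	if (drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, 4) != 4)
		return;

	drm_dp_mst_hpd_irq(mgr, esi, &handled);
	if (handled)
		/* ack the serviced events back to the branch device */
		drm_dp_dpcd_write(mgr->aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
}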
3069
3070 /**
3071 * drm_dp_mst_detect_port() - get connection status for an MST port
3072 * @connector: DRM connector for this port
3073 * @mgr: manager for this port
3074 * @port: unverified pointer to a port
3075 *
3076 * This returns the current connection state for a port. It validates the
3077 * port pointer still exists so the caller doesn't require a reference.
3078 */
3079 enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
3080 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3081 {
3082 enum drm_connector_status status = connector_status_disconnected;
3083
3084 /* we need to search for the port in the mgr in case it's gone */
3085 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3086 if (!port)
3087 return connector_status_disconnected;
3088
3089 if (!port->ddps)
3090 goto out;
3091
3092 switch (port->pdt) {
3093 case DP_PEER_DEVICE_NONE:
3094 case DP_PEER_DEVICE_MST_BRANCHING:
3095 break;
3096
3097 case DP_PEER_DEVICE_SST_SINK:
3098 status = connector_status_connected;
3099 /* for logical ports - cache the EDID */
3100 if (port->port_num >= DP_MST_LOGICAL_PORT_0 && !port->cached_edid)
3101 port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
3102
3103 break;
3104 case DP_PEER_DEVICE_DP_LEGACY_CONV:
3105 if (port->ldps)
3106 status = connector_status_connected;
3107 break;
3108 }
3109 out:
3110 drm_dp_mst_topology_put_port(port);
3111 return status;
3112 }
3113 EXPORT_SYMBOL(drm_dp_mst_detect_port);
3114
3115 /**
3116 * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
3117 * @mgr: manager for this port
3118 * @port: unverified pointer to a port.
3119 *
3120 * This returns whether the port supports audio or not.
3121 */
3122 bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
3123 struct drm_dp_mst_port *port)
3124 {
3125 bool ret = false;
3126
3127 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3128 if (!port)
3129 return ret;
3130 ret = port->has_audio;
3131 drm_dp_mst_topology_put_port(port);
3132 return ret;
3133 }
3134 EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
3135
3136 /**
3137 * drm_dp_mst_get_edid() - get EDID for an MST port
3138 * @connector: toplevel connector to get EDID for
3139 * @mgr: manager for this port
3140 * @port: unverified pointer to a port.
3141 *
3142 * This returns an EDID for the port connected to a connector.
3143 * It validates the pointer still exists so the caller doesn't require a
3144 * reference.
3145 */
3146 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3147 {
3148 struct edid *edid = NULL;
3149
3150 /* we need to search for the port in the mgr in case it's gone */
3151 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3152 if (!port)
3153 return NULL;
3154
3155 if (port->cached_edid)
3156 edid = drm_edid_duplicate(port->cached_edid);
3157 else
3158 edid = drm_get_edid(connector, &port->aux.ddc);
3159
3160 port->has_audio = drm_detect_monitor_audio(edid);
3161 drm_dp_mst_topology_put_port(port);
3162 return edid;
3163 }
3164 EXPORT_SYMBOL(drm_dp_mst_get_edid);
3165
3166 /**
3167 * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
3168 * @mgr: manager to use
3169 * @pbn: payload bandwidth to convert into slots.
3170 *
3171 * Calculate the number of VCPI slots that will be required for the given PBN
3172 * value.
3173 *
3174 * RETURNS:
3175 * The total slots required for this port, or error.
3176 */
3177
3178 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
3179 int pbn)
3180 {
3181 int num_slots;
3182
3183 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
3184
3185 /* max. time slots - one slot for MTP header */
3186 if (num_slots > 63)
3187 return -ENOSPC;
3188 return num_slots;
3189 }
3190 EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
3191
3192 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
3193 struct drm_dp_vcpi *vcpi, int pbn, int slots)
3194 {
3195 int ret;
3196
3197 /* max. time slots - one slot for MTP header */
3198 if (slots > 63)
3199 return -ENOSPC;
3200
3201 vcpi->pbn = pbn;
3202 vcpi->aligned_pbn = slots * mgr->pbn_div;
3203 vcpi->num_slots = slots;
3204
3205 ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
3206 if (ret < 0)
3207 return ret;
3208 return 0;
3209 }
3210
3211 /**
3212 * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
3213 * @state: global atomic state
3214 * @mgr: MST topology manager for the port
3215 * @port: port to find vcpi slots for
3216 * @pbn: bandwidth required for the mode in PBN
3217 *
3218 * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
3219 * may have had. Any atomic drivers which support MST must call this function
3220 * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
3221 * current VCPI allocation for the new state, but only when
3222 * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
3223 * to ensure compatibility with userspace applications that still use the
3224 * legacy modesetting UAPI.
3225 *
3226 * Allocations set by this function are not checked against the bandwidth
3227 * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
3228 *
3229 * Additionally, it is OK to call this function multiple times on the same
3230 * @port as needed. It is not OK however, to call this function and
3231 * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
3232 *
3233 * See also:
3234 * drm_dp_atomic_release_vcpi_slots()
3235 * drm_dp_mst_atomic_check()
3236 *
3237 * Returns:
3238 * Total slots in the atomic state assigned for this port, or a negative error
3239 * code if the port no longer exists
3240 */
3241 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
3242 struct drm_dp_mst_topology_mgr *mgr,
3243 struct drm_dp_mst_port *port, int pbn)
3244 {
3245 struct drm_dp_mst_topology_state *topology_state;
3246 struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
3247 int prev_slots, req_slots, ret;
3248
3249 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
3250 if (IS_ERR(topology_state))
3251 return PTR_ERR(topology_state);
3252
3253 /* Find the current allocation for this port, if any */
3254 list_for_each_entry(pos, &topology_state->vcpis, next) {
3255 if (pos->port == port) {
3256 vcpi = pos;
3257 prev_slots = vcpi->vcpi;
3258
3259 /*
3260 * This should never happen, unless the driver tries
3261 * releasing and allocating the same VCPI allocation,
3262 * which is an error
3263 */
3264 if (WARN_ON(!prev_slots)) {
3265 DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
3266 port);
3267 return -EINVAL;
3268 }
3269
3270 break;
3271 }
3272 }
3273 if (!vcpi)
3274 prev_slots = 0;
3275
3276 req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
3277
3278 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
3279 port->connector->base.id, port->connector->name,
3280 port, prev_slots, req_slots);
3281
3282 /* Add the new allocation to the state */
3283 if (!vcpi) {
3284 vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
3285 if (!vcpi)
3286 return -ENOMEM;
3287
3288 drm_dp_mst_get_port_malloc(port);
3289 vcpi->port = port;
3290 list_add(&vcpi->next, &topology_state->vcpis);
3291 }
3292 vcpi->vcpi = req_slots;
3293
3294 ret = req_slots;
3295 return ret;
3296 }
3297 EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
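/*
 * Editorial sketch (not part of the original file): the intended
 * atomic_check-time flow for the helper above, written against a hypothetical
 * encoder callback; the 24 bpp figure and all plumbing are assumptions, only
 * the drm_dp_* calls are real.
 */
static int example_encoder_atomic_check(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_port *port,
					struct drm_crtc_state *crtc_state,
					struct drm_atomic_state *state)
{
	int pbn, slots;

	/* convert the mode's bandwidth requirement into PBN */
	pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, 24);

	/* stage the VCPI allocation in the atomic state */
	slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn);
	if (slots < 0)
		return slots;

	/* drm_dp_mst_atomic_check() validates the 63-slot budget later */
	return 0;
}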
3298
3299 /**
3300 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
3301 * @state: global atomic state
3302 * @mgr: MST topology manager for the port
3303 * @port: The port to release the VCPI slots from
3304 *
3305 * Releases any VCPI slots that have been allocated to a port in the atomic
3306 * state. Any atomic drivers which support MST must call this function in
3307 * their &drm_connector_helper_funcs.atomic_check() callback when the
3308 * connector will no longer have VCPI allocated (e.g. because its CRTC was
3309 * removed) when it had VCPI allocated in the previous atomic state.
3310 *
3311 * It is OK to call this even if @port has been removed from the system.
3312 * Additionally, it is OK to call this function multiple times on the same
3313 * @port as needed. It is not OK however, to call this function and
3314 * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
3315 * phase.
3316 *
3317 * See also:
3318 * drm_dp_atomic_find_vcpi_slots()
3319 * drm_dp_mst_atomic_check()
3320 *
3321 * Returns:
3322 * 0 if all slots for this port were released in the atomic state,
3323 * or a negative error code otherwise
3324 */
3325 int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
3326 struct drm_dp_mst_topology_mgr *mgr,
3327 struct drm_dp_mst_port *port)
3328 {
3329 struct drm_dp_mst_topology_state *topology_state;
3330 struct drm_dp_vcpi_allocation *pos;
3331 bool found = false;
3332
3333 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
3334 if (IS_ERR(topology_state))
3335 return PTR_ERR(topology_state);
3336
3337 list_for_each_entry(pos, &topology_state->vcpis, next) {
3338 if (pos->port == port) {
3339 found = true;
3340 break;
3341 }
3342 }
3343 if (WARN_ON(!found)) {
3344 DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
3345 port, &topology_state->base);
3346 return -EINVAL;
3347 }
3348
3349 DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
3350 if (pos->vcpi) {
3351 drm_dp_mst_put_port_malloc(port);
3352 pos->vcpi = 0;
3353 }
3354
3355 return 0;
3356 }
3357 EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
3358
3359 /**
3360 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
3361 * @mgr: manager for this port
3362 * @port: port to allocate a virtual channel for.
3363 * @pbn: payload bandwidth number to request
3364 * @slots: returned number of slots for this PBN.
3365 */
3366 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
3367 struct drm_dp_mst_port *port, int pbn, int slots)
3368 {
3369 int ret;
3370
3371 if (slots < 0)
3372 return false;
3373
3374 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3375 if (!port)
3376 return false;
3377
3378 if (port->vcpi.vcpi > 0) {
3379 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
3380 port->vcpi.vcpi, port->vcpi.pbn, pbn);
3381 if (pbn == port->vcpi.pbn) {
3382 drm_dp_mst_topology_put_port(port);
3383 return true;
3384 }
3385 }
3386
3387 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
3388 if (ret) {
3389 DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
3390 DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
3391 drm_dp_mst_topology_put_port(port);
3392 return false;
3393 }
3394
3395 DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
3396 pbn, port->vcpi.num_slots);
3397
3398 /* Keep port allocated until its payload has been removed */
3399 drm_dp_mst_get_port_malloc(port);
3400 drm_dp_mst_topology_put_port(port);
3401 return true;
3402 }
3403 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
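/*
 * Editorial sketch (not part of the original file): the legacy commit-time
 * payload sequence these helpers expect. The function name and the elided
 * hardware steps are assumptions; the drm_dp_* calls are the ones defined in
 * this file.
 */
static void example_mst_enable_stream(struct drm_dp_mst_topology_mgr *mgr,
				      struct drm_dp_mst_port *port,
				      int pbn, int slots)
{
	/* reserve a VCPI and program the local payload table */
	if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots))
		return;
	drm_dp_update_payload_part1(mgr);

	/* ... driver enables the stream and triggers ACT here ... */

	/* wait for ACT to be handled, then send ALLOCATE_PAYLOAD downstream */
	drm_dp_check_act_status(mgr);
	drm_dp_update_payload_part2(mgr);
}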
3404
3405 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3406 {
3407 int slots = 0;
3408 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3409 if (!port)
3410 return slots;
3411
3412 slots = port->vcpi.num_slots;
3413 drm_dp_mst_topology_put_port(port);
3414 return slots;
3415 }
3416 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
3417
3418 /**
3419 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
3420 * @mgr: manager for this port
3421 * @port: unverified pointer to a port.
3422 *
3423 * This just resets the number of slots for the ports VCPI for later manipulation.
3424 */
3425 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3426 {
3427 /*
3428 * A port with VCPI will remain allocated until its VCPI is
3429 * released, no verified ref needed
3430 */
3431
3432 port->vcpi.num_slots = 0;
3433 }
3434 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
3435
3436 /**
3437 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
3438 * @mgr: manager for this port
3439 * @port: port to deallocate vcpi for
3440 *
3441 * This can be called unconditionally, regardless of whether
3442 * drm_dp_mst_allocate_vcpi() succeeded or not.
3443 */
3444 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
3445 struct drm_dp_mst_port *port)
3446 {
3447 if (!port->vcpi.vcpi)
3448 return;
3449
3450 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
3451 port->vcpi.num_slots = 0;
3452 port->vcpi.pbn = 0;
3453 port->vcpi.aligned_pbn = 0;
3454 port->vcpi.vcpi = 0;
3455 drm_dp_mst_put_port_malloc(port);
3456 }
3457 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
3458
3459 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
3460 int id, struct drm_dp_payload *payload)
3461 {
3462 u8 payload_alloc[3], status;
3463 int ret;
3464 int retries = 0;
3465
3466 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
3467 DP_PAYLOAD_TABLE_UPDATED);
3468
3469 payload_alloc[0] = id;
3470 payload_alloc[1] = payload->start_slot;
3471 payload_alloc[2] = payload->num_slots;
3472
3473 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
3474 if (ret != 3) {
3475 DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
3476 goto fail;
3477 }
3478
3479 retry:
3480 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
3481 if (ret < 0) {
3482 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
3483 goto fail;
3484 }
3485
3486 if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
3487 retries++;
3488 if (retries < 20) {
3489 usleep_range(10000, 20000);
3490 goto retry;
3491 }
3492 DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
3493 ret = -EINVAL;
3494 goto fail;
3495 }
3496 ret = 0;
3497 fail:
3498 return ret;
3499 }
3500
3501 /**
3502 * drm_dp_check_act_status() - Check ACT handled status.
3503 * @mgr: manager to use
3504 *
3505 * Check the payload status bits in the DPCD for ACT handled status.
3506 */
3507
3508 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
3509 {
3510 u8 status;
3511 int ret;
3512 int count = 0;
3513
3514 do {
3515 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
3516
3517 if (ret < 0) {
3518 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
3519 goto fail;
3520 }
3521
3522 if (status & DP_PAYLOAD_ACT_HANDLED)
3523 break;
3524 count++;
3525 udelay(100);
3526
3527 } while (count < 30);
3528
3529 if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
3530 DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
3531 ret = -EINVAL;
3532 goto fail;
3533 }
3534 return 0;
3535 fail:
3536 return ret;
3537 }
3538 EXPORT_SYMBOL(drm_dp_check_act_status);
3539
3540 /**
3541 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
3542 * @clock: dot clock for the mode
3543 * @bpp: bpp for the mode.
3544 *
3545 * This uses the formula in the spec to calculate the PBN value for a mode.
3546 */
3547 int drm_dp_calc_pbn_mode(int clock, int bpp)
3548 {
3549 u64 kbps;
3550 s64 peak_kbps;
3551 u32 numerator;
3552 u32 denominator;
3553
3554 kbps = clock * bpp;
3555
3556 /*
3557 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
3558 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
3559 * common multiplier to render an integer PBN for all link rate/lane
3560 * counts combinations
3561 * calculate
3562 * peak_kbps *= (1006/1000)
3563 * peak_kbps *= (64/54)
3564 * peak_kbps *= 8    convert to bytes
3565 */
3566
3567 numerator = 64 * 1006;
3568 denominator = 54 * 8 * 1000 * 1000;
3569
3570 kbps *= numerator;
3571 peak_kbps = drm_fixp_from_fraction(kbps, denominator);
3572
3573 return drm_fixp2int_ceil(peak_kbps);
3574 }
3575 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
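/*
 * Editorial worked example (not in the original source): for a 154 MHz dot
 * clock at 30 bpp, kbps = 154000 * 30 = 4620000, and
 * 4620000 * (64 * 1006) / (54 * 8 * 1000 * 1000) ~= 688.5, which
 * drm_fixp2int_ceil() rounds up to the 689 PBN that test_calc_pbn_mode()
 * below checks for.
 */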
3576
3577 static int test_calc_pbn_mode(void)
3578 {
3579 int ret;
3580 ret = drm_dp_calc_pbn_mode(154000, 30);
3581 if (ret != 689) {
3582 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
3583 154000, 30, 689, ret);
3584 return -EINVAL;
3585 }
3586 ret = drm_dp_calc_pbn_mode(234000, 30);
3587 if (ret != 1047) {
3588 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
3589 234000, 30, 1047, ret);
3590 return -EINVAL;
3591 }
3592 ret = drm_dp_calc_pbn_mode(297000, 24);
3593 if (ret != 1063) {
3594 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
3595 297000, 24, 1063, ret);
3596 return -EINVAL;
3597 }
3598 return 0;
3599 }
3600
3601
3602 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
3603 {
3604 queue_work(system_long_wq, &mgr->tx_work);
3605 }
3606
3607 static void drm_dp_mst_dump_mstb(struct seq_file *m,
3608 struct drm_dp_mst_branch *mstb)
3609 {
3610 struct drm_dp_mst_port *port;
3611 int tabs = min_t(int, mstb->lct, 9); /* keep the tab prefix within prefix[10] */
3612 char prefix[10];
3613 int i;
3614
3615 for (i = 0; i < tabs; i++)
3616 prefix[i] = '\t';
3617 prefix[i] = '\0';
3618
3619 seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
3620 list_for_each_entry(port, &mstb->ports, next) {
3621 seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->input, port->pdt, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector);
3622 if (port->mstb)
3623 drm_dp_mst_dump_mstb(m, port->mstb);
3624 }
3625 }
3626
3627 #define DP_PAYLOAD_TABLE_SIZE 64
3628
3629 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
3630 char *buf)
3631 {
3632 int i;
3633
3634 for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
3635 if (drm_dp_dpcd_read(mgr->aux,
3636 DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
3637 &buf[i], 16) != 16)
3638 return false;
3639 }
3640 return true;
3641 }
3642
3643 static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
3644 struct drm_dp_mst_port *port, char *name,
3645 int namelen)
3646 {
3647 struct edid *mst_edid;
3648
3649 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
3650 drm_edid_get_monitor_name(mst_edid, name, namelen);
3651 }
3652
3653 /**
3654 * drm_dp_mst_dump_topology(): dump topology to seq file.
3655 * @m: seq_file to dump output to
3656 * @mgr: manager to dump current topology for.
3657 *
3658 * helper to dump MST topology to a seq file for debugfs.
3659 */
3660 void drm_dp_mst_dump_topology(struct seq_file *m,
3661 struct drm_dp_mst_topology_mgr *mgr)
3662 {
3663 int i;
3664 struct drm_dp_mst_port *port;
3665
3666 mutex_lock(&mgr->lock);
3667 if (mgr->mst_primary)
3668 drm_dp_mst_dump_mstb(m, mgr->mst_primary);
3669
3670
3671 mutex_unlock(&mgr->lock);
3672 /* dump VCPIs */
3673 mutex_lock(&mgr->payload_lock);
3674 seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
3675 mgr->max_payloads);
3676
3677 for (i = 0; i < mgr->max_payloads; i++) {
3678 if (mgr->proposed_vcpis[i]) {
3679 char name[14];
3680
3681 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
3682 fetch_monitor_name(mgr, port, name, sizeof(name));
3683 seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
3684 port->port_num, port->vcpi.vcpi,
3685 port->vcpi.num_slots,
3686 (*name != 0) ? name : "Unknown");
3687 } else
3688 seq_printf(m, "vcpi %d:unused\n", i);
3689 }
3690 for (i = 0; i < mgr->max_payloads; i++) {
3691 seq_printf(m, "payload %d: %d, %d, %d\n",
3692 i,
3693 mgr->payloads[i].payload_state,
3694 mgr->payloads[i].start_slot,
3695 mgr->payloads[i].num_slots);
3696
3697
3698 }
3699 mutex_unlock(&mgr->payload_lock);
3700
3701 mutex_lock(&mgr->lock);
3702 if (mgr->mst_primary) {
3703 u8 buf[DP_PAYLOAD_TABLE_SIZE];
3704 int ret;
3705
3706 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
3707 seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
3708 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
3709 seq_printf(m, "faux/mst: %*ph\n", 2, buf);
3710 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
3711 seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
3712
3713 /* dump the standard OUI branch header */
3714 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
3715 seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
3716 for (i = 0x3; i < 0x8 && buf[i]; i++)
3717 seq_printf(m, "%c", buf[i]);
3718 seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
3719 buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
3720 if (dump_dp_payload_table(mgr, buf))
3721 seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
3722 }
3723
3724 mutex_unlock(&mgr->lock);
3725
3726 }
3727 EXPORT_SYMBOL(drm_dp_mst_dump_topology);
3728
3729 static void drm_dp_tx_work(struct work_struct *work)
3730 {
3731 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
3732
3733 mutex_lock(&mgr->qlock);
3734 if (!list_empty(&mgr->tx_msg_downq))
3735 process_single_down_tx_qlock(mgr);
3736 mutex_unlock(&mgr->qlock);
3737 }
3738
3739 static void drm_dp_destroy_connector_work(struct work_struct *work)
3740 {
3741 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
3742 struct drm_dp_mst_port *port;
3743 bool send_hotplug = false;
3744
3745 /* Not a regular list traverse as we have to drop the destroy
3746 * connector lock before destroying the connector, to avoid AB->BA
3747 * ordering between this lock and the config mutex.
3748 */
3749 for (;;) {
3750 mutex_lock(&mgr->destroy_connector_lock);
3751 port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
3752 if (!port) {
3753 mutex_unlock(&mgr->destroy_connector_lock);
3754 break;
3755 }
3756 list_del(&port->next);
3757 mutex_unlock(&mgr->destroy_connector_lock);
3758
3759 INIT_LIST_HEAD(&port->next);
3760
3761 mgr->cbs->destroy_connector(mgr, port->connector);
3762
3763 drm_dp_port_teardown_pdt(port, port->pdt);
3764 port->pdt = DP_PEER_DEVICE_NONE;
3765
3766 drm_dp_mst_put_port_malloc(port);
3767 send_hotplug = true;
3768 }
3769 if (send_hotplug)
3770 drm_kms_helper_hotplug_event(mgr->dev);
3771 }
3772
3773 static struct drm_private_state *
3774 drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
3775 {
3776 struct drm_dp_mst_topology_state *state, *old_state =
3777 to_dp_mst_topology_state(obj->state);
3778 struct drm_dp_vcpi_allocation *pos, *vcpi;
3779
3780 state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
3781 if (!state)
3782 return NULL;
3783
3784 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
3785
3786 INIT_LIST_HEAD(&state->vcpis);
3787
3788 list_for_each_entry(pos, &old_state->vcpis, next) {
3789 /* Prune leftover freed VCPI allocations */
3790 if (!pos->vcpi)
3791 continue;
3792
3793 vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
3794 if (!vcpi)
3795 goto fail;
3796
3797 drm_dp_mst_get_port_malloc(vcpi->port);
3798 list_add(&vcpi->next, &state->vcpis);
3799 }
3800
3801 return &state->base;
3802
3803 fail:
3804 list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
3805 drm_dp_mst_put_port_malloc(pos->port);
3806 kfree(pos);
3807 }
3808 kfree(state);
3809
3810 return NULL;
3811 }
3812
3813 static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
3814 struct drm_private_state *state)
3815 {
3816 struct drm_dp_mst_topology_state *mst_state =
3817 to_dp_mst_topology_state(state);
3818 struct drm_dp_vcpi_allocation *pos, *tmp;
3819
3820 list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
3821 /* We only keep references to ports with non-zero VCPIs */
3822 if (pos->vcpi)
3823 drm_dp_mst_put_port_malloc(pos->port);
3824 kfree(pos);
3825 }
3826
3827 kfree(mst_state);
3828 }
3829
3830 static inline int
3831 drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
3832 struct drm_dp_mst_topology_state *mst_state)
3833 {
3834 struct drm_dp_vcpi_allocation *vcpi;
3835 int avail_slots = 63, payload_count = 0;
3836
3837 list_for_each_entry(vcpi, &mst_state->vcpis, next) {
3838 /* Releasing VCPI is always OK-even if the port is gone */
3839 if (!vcpi->vcpi) {
3840 DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
3841 vcpi->port);
3842 continue;
3843 }
3844
3845 DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
3846 vcpi->port, vcpi->vcpi);
3847
3848 avail_slots -= vcpi->vcpi;
3849 if (avail_slots < 0) {
3850 DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
3851 vcpi->port, mst_state,
3852 avail_slots + vcpi->vcpi);
3853 return -ENOSPC;
3854 }
3855
3856 if (++payload_count > mgr->max_payloads) {
3857 DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
3858 mgr, mst_state, mgr->max_payloads);
3859 return -EINVAL;
3860 }
3861 }
3862 DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
3863 mgr, mst_state, avail_slots,
3864 63 - avail_slots);
3865
3866 return 0;
3867 }
3868
3869 /**
3870 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
3871 * atomic update is valid
3872 * @state: Pointer to the new &struct drm_atomic_state
3873 *
3874 * Checks the given topology state for an atomic update to ensure that it's
3875 * valid. This includes checking whether there's enough bandwidth to support
3876 * the new VCPI allocations in the atomic update.
3877 *
3878 * Any atomic drivers supporting DP MST must make sure to call this after
3879 * checking the rest of their state in their
3880 * &drm_mode_config_funcs.atomic_check() callback.
3881 *
3882 * See also:
3883 * drm_dp_atomic_find_vcpi_slots()
3884 * drm_dp_atomic_release_vcpi_slots()
3885 *
3886 * Returns:
3887 * 0 if the new state is valid, negative error code otherwise.
3888 */
3889
3890 int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
3891 {
3892 struct drm_dp_mst_topology_mgr *mgr;
3893 struct drm_dp_mst_topology_state *mst_state;
3894 int i, ret = 0;
3895
3896 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
3897 ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
3898 if (ret)
3899 break;
3900 }
3901
3902 return ret;
3903 }
3904 EXPORT_SYMBOL(drm_dp_mst_atomic_check);
3905
3906 const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
3907 .atomic_duplicate_state = drm_dp_mst_duplicate_state,
3908 .atomic_destroy_state = drm_dp_mst_destroy_state,
3909 };
3910 EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
3911
3912 /**
3913 * drm_atomic_get_mst_topology_state: get MST topology state
3914 *
3915 * @state: global atomic state
3916 * @mgr: MST topology manager, also the private object in this case
3917 *
3918 * This function wraps drm_atomic_get_private_obj_state() passing in the MST
3919 * atomic state vtable so that the private object state returned is that of a
3920 * MST topology object. Also, drm_atomic_get_private_obj_state() expects the
3921 * caller to take care of the locking, so we warn if the connection_mutex is
3922 * not held.
3923 *
3924 * RETURNS:
3925 * The MST topology state or error pointer.
3926 */
3927 struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
3928 struct drm_dp_mst_topology_mgr *mgr)
3929 {
3930 struct drm_device *dev = mgr->dev;
3931
3932 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
3933 return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
3934 }
3935 EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
3936
3937 /**
3938 * drm_dp_mst_topology_mgr_init - initialise a topology manager
3939 * @mgr: manager struct to initialise
3940 * @dev: device providing this structure - for i2c addition.
3941 * @aux: DP helper aux channel to talk to this device
3942 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
3943 * @max_payloads: maximum number of payloads this GPU can source
3944 * @conn_base_id: the connector object ID the MST device is connected to.
3945 *
3946 * Return 0 for success, or negative error code on failure
3947 */
3948 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
3949 struct drm_device *dev, struct drm_dp_aux *aux,
3950 int max_dpcd_transaction_bytes,
3951 int max_payloads, int conn_base_id)
3952 {
3953 struct drm_dp_mst_topology_state *mst_state;
3954
3955 mutex_init(&mgr->lock);
3956 mutex_init(&mgr->qlock);
3957 mutex_init(&mgr->payload_lock);
3958 mutex_init(&mgr->destroy_connector_lock);
3959 INIT_LIST_HEAD(&mgr->tx_msg_downq);
3960 INIT_LIST_HEAD(&mgr->destroy_connector_list);
3961 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
3962 INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
3963 INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
3964 init_waitqueue_head(&mgr->tx_waitq);
3965 mgr->dev = dev;
3966 mgr->aux = aux;
3967 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
3968 mgr->max_payloads = max_payloads;
3969 mgr->conn_base_id = conn_base_id;
3970 if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
3971 max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
3972 return -EINVAL;
3973 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
3974 if (!mgr->payloads)
3975 return -ENOMEM;
3976 mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
3977 if (!mgr->proposed_vcpis)
3978 return -ENOMEM;
3979 set_bit(0, &mgr->payload_mask);
3980 if (test_calc_pbn_mode() < 0)
3981 DRM_ERROR("MST PBN self-test failed\n");
3982
3983 mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
3984 if (mst_state == NULL)
3985 return -ENOMEM;
3986
3987 mst_state->mgr = mgr;
3988 INIT_LIST_HEAD(&mst_state->vcpis);
3989
3990 drm_atomic_private_obj_init(dev, &mgr->base,
3991 &mst_state->base,
3992 &drm_dp_mst_topology_state_funcs);
3993
3994 return 0;
3995 }
3996 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
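/*
 * Editorial sketch (not part of the original file): a minimal manager
 * bring-up, as a driver might do at connector init. The 16-byte transaction
 * limit and 8-payload cap are assumptions; the calls are this file's API.
 */
static int example_mst_bringup(struct drm_dp_mst_topology_mgr *mgr,
			       struct drm_device *dev, struct drm_dp_aux *aux,
			       int conn_base_id)
{
	int ret;

	ret = drm_dp_mst_topology_mgr_init(mgr, dev, aux, 16, 8, conn_base_id);
	if (ret)
		return ret;

	/* once an MST sink is detected, enable MST mode; this kicks off the
	 * link address probe work on system_long_wq */
	return drm_dp_mst_topology_mgr_set_mst(mgr, true);
}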
3997
3998 /**
3999 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
4000 * @mgr: manager to destroy
4001 */
4002 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
4003 {
4004 drm_dp_mst_topology_mgr_set_mst(mgr, false);
4005 flush_work(&mgr->work);
4006 flush_work(&mgr->destroy_connector_work);
4007 mutex_lock(&mgr->payload_lock);
4008 kfree(mgr->payloads);
4009 mgr->payloads = NULL;
4010 kfree(mgr->proposed_vcpis);
4011 mgr->proposed_vcpis = NULL;
4012 mutex_unlock(&mgr->payload_lock);
4013 mgr->dev = NULL;
4014 mgr->aux = NULL;
4015 drm_atomic_private_obj_fini(&mgr->base);
4016 mgr->funcs = NULL;
4017 }
4018 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
4019
4020 static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
4021 {
4022 int i;
4023
4024 if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
4025 return false;
4026
4027 for (i = 0; i < num - 1; i++) {
4028 if (msgs[i].flags & I2C_M_RD ||
4029 msgs[i].len > 0xff)
4030 return false;
4031 }
4032
4033 return msgs[num - 1].flags & I2C_M_RD &&
4034 msgs[num - 1].len <= 0xff;
4035 }
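/*
 * Editorial note (not in the original source): the REMOTE_I2C_READ sideband
 * message can only encode up to DP_REMOTE_I2C_READ_MAX_TRANSACTIONS address
 * writes followed by exactly one read of at most 255 bytes, which is why the
 * transfer hook below rejects anything else.
 */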
4036
4037 /* I2C device */
4038 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
4039 int num)
4040 {
4041 struct drm_dp_aux *aux = adapter->algo_data;
4042 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
4043 struct drm_dp_mst_branch *mstb;
4044 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
4045 unsigned int i;
4046 struct drm_dp_sideband_msg_req_body msg;
4047 struct drm_dp_sideband_msg_tx *txmsg = NULL;
4048 int ret;
4049
4050 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
4051 if (!mstb)
4052 return -EREMOTEIO;
4053
4054 if (!remote_i2c_read_ok(msgs, num)) {
4055 DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
4056 ret = -EIO;
4057 goto out;
4058 }
4059
4060 memset(&msg, 0, sizeof(msg));
4061 msg.req_type = DP_REMOTE_I2C_READ;
4062 msg.u.i2c_read.num_transactions = num - 1;
4063 msg.u.i2c_read.port_number = port->port_num;
4064 for (i = 0; i < num - 1; i++) {
4065 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
4066 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
4067 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
4068 msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
4069 }
4070 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
4071 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
4072
4073 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
4074 if (!txmsg) {
4075 ret = -ENOMEM;
4076 goto out;
4077 }
4078
4079 txmsg->dst = mstb;
4080 drm_dp_encode_sideband_req(&msg, txmsg);
4081
4082 drm_dp_queue_down_tx(mgr, txmsg);
4083
4084 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
4085 if (ret > 0) {
4086
4087 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
4088 ret = -EREMOTEIO;
4089 goto out;
4090 }
4091 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
4092 ret = -EIO;
4093 goto out;
4094 }
4095 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
4096 ret = num;
4097 }
4098 out:
4099 kfree(txmsg);
4100 drm_dp_mst_topology_put_mstb(mstb);
4101 return ret;
4102 }
4103
4104 static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
4105 {
4106 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
4107 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
4108 I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
4109 I2C_FUNC_10BIT_ADDR;
4110 }
4111
4112 static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
4113 .functionality = drm_dp_mst_i2c_functionality,
4114 .master_xfer = drm_dp_mst_i2c_xfer,
4115 };
4116
4117 /**
4118 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
4119 * @aux: DisplayPort AUX channel
4120 *
4121 * Returns 0 on success or a negative error code on failure.
4122 */
4123 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
4124 {
4125 aux->ddc.algo = &drm_dp_mst_i2c_algo;
4126 aux->ddc.algo_data = aux;
4127 aux->ddc.retries = 3;
4128
4129 aux->ddc.class = I2C_CLASS_DDC;
4130 aux->ddc.owner = THIS_MODULE;
4131 aux->ddc.dev.parent = aux->dev;
4132 aux->ddc.dev.of_node = aux->dev->of_node;
4133
4134 strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
4135 sizeof(aux->ddc.name));
4136
4137 return i2c_add_adapter(&aux->ddc);
4138 }
4139
4140 /**
4141 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
4142 * @aux: DisplayPort AUX channel
4143 */
4144 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
4145 {
4146 i2c_del_adapter(&aux->ddc);
4147 }