This source file includes the following definitions.
- drm_dp_cec_adap_enable
- drm_dp_cec_adap_log_addr
- drm_dp_cec_adap_transmit
- drm_dp_cec_adap_monitor_all_enable
- drm_dp_cec_adap_status
- drm_dp_cec_received
- drm_dp_cec_handle_irq
- drm_dp_cec_irq
- drm_dp_cec_cap
- drm_dp_cec_unregister_work
- drm_dp_cec_set_edid
- drm_dp_cec_unset_edid
- drm_dp_cec_register_connector
- drm_dp_cec_unregister_connector
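
/*
 * DisplayPort CEC-Tunneling-over-AUX support.
 */
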
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <drm/drm_dp_helper.h>
#include <media/cec.h>

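/*
 * Some DP-to-HDMI adapters advertise CEC-Tunneling-over-AUX capability
 * without actually wiring up the CEC pin, and the capability bit can also
 * drop out transiently around hotplug events. To avoid needlessly
 * destroying and recreating the CEC device in such cases, unregistering
 * the CEC adapter is delayed by drm_dp_cec_unregister_delay seconds:
 * 0 means unregister immediately, values >= NEVER_UNREG_DELAY mean
 * never unregister.
 */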
#define NEVER_UNREG_DELAY 1000
static unsigned int drm_dp_cec_unregister_delay = 1;
module_param(drm_dp_cec_unregister_delay, uint, 0600);
MODULE_PARM_DESC(drm_dp_cec_unregister_delay,
		 "CEC unregister delay in seconds, 0: no delay, >= 1000: never unregister");

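/*
 * Enable or disable CEC tunneling in the DPCD. Note that a failure to
 * disable is ignored: only errors while enabling are reported back to
 * the CEC core.
 */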
static int drm_dp_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
	struct drm_dp_aux *aux = cec_get_drvdata(adap);
	u32 val = enable ? DP_CEC_TUNNELING_ENABLE : 0;
	ssize_t err;

	err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
	return (enable && err < 0) ? err : 0;
}

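/*
 * Program the DPCD logical address mask. The broadcast address is always
 * part of the mask so that broadcast messages are received.
 */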
static int drm_dp_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
{
	struct drm_dp_aux *aux = cec_get_drvdata(adap);
	u16 la_mask = 1 << CEC_LOG_ADDR_BROADCAST;
	u8 mask[2];
	ssize_t err;

	if (addr != CEC_LOG_ADDR_INVALID)
		la_mask |= adap->log_addrs.log_addr_mask | (1 << addr);
	mask[0] = la_mask & 0xff;
	mask[1] = la_mask >> 8;
	err = drm_dp_dpcd_write(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2);
	return (addr != CEC_LOG_ADDR_INVALID && err < 0) ? err : 0;
}

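/*
 * Queue a CEC message for transmission: the message bytes go into the TX
 * buffer, then the length, the retry count (capped at 5) and the SEND bit
 * are packed into DP_CEC_TX_MESSAGE_INFO to kick off the transmit.
 */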
static int drm_dp_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
				    u32 signal_free_time, struct cec_msg *msg)
{
	struct drm_dp_aux *aux = cec_get_drvdata(adap);
	unsigned int retries = min(5, attempts - 1);
	ssize_t err;

	err = drm_dp_dpcd_write(aux, DP_CEC_TX_MESSAGE_BUFFER,
				msg->msg, msg->len);
	if (err < 0)
		return err;

	err = drm_dp_dpcd_writeb(aux, DP_CEC_TX_MESSAGE_INFO,
				 (msg->len - 1) | (retries << 4) |
				 DP_CEC_TX_MESSAGE_SEND);
	return err < 0 ? err : 0;
}

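/*
 * Toggle snooping (monitor-all) mode for adapters that advertised
 * DP_CEC_SNOOPING_CAPABLE. Failures while disabling are ignored.
 */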
static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap,
					      bool enable)
{
	struct drm_dp_aux *aux = cec_get_drvdata(adap);
	ssize_t err;
	u8 val;

	if (!(adap->capabilities & CEC_CAP_MONITOR_ALL))
		return 0;

	err = drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CONTROL, &val);
	if (err >= 0) {
		if (enable)
			val |= DP_CEC_SNOOPING_ENABLE;
		else
			val &= ~DP_CEC_SNOOPING_ENABLE;
		err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
	}
	return (enable && err < 0) ? err : 0;
}

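/*
 * Dump the DPCD branch device identification (OUI, device ID, HW and
 * FW/SW revisions) to the CEC adapter's status file.
 */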
static void drm_dp_cec_adap_status(struct cec_adapter *adap,
				   struct seq_file *file)
{
	struct drm_dp_aux *aux = cec_get_drvdata(adap);
	struct drm_dp_desc desc;
	struct drm_dp_dpcd_ident *id = &desc.ident;

	if (drm_dp_read_desc(aux, &desc, true))
		return;
	seq_printf(file, "OUI: %*phD\n",
		   (int)sizeof(id->oui), id->oui);
	seq_printf(file, "ID: %*pE\n",
		   (int)strnlen(id->device_id, sizeof(id->device_id)),
		   id->device_id);
	seq_printf(file, "HW Rev: %d.%d\n", id->hw_rev >> 4, id->hw_rev & 0xf);
	/*
	 * Show the FW/SW revision both in decimal and hex: vendors are
	 * not consistent about which encoding they use for this field.
	 */
	seq_printf(file, "FW/SW Rev: %d.%d (0x%02x.0x%02x)\n",
		   id->sw_major_rev, id->sw_minor_rev,
		   id->sw_major_rev, id->sw_minor_rev);
}

static const struct cec_adap_ops drm_dp_cec_adap_ops = {
	.adap_enable = drm_dp_cec_adap_enable,
	.adap_log_addr = drm_dp_cec_adap_log_addr,
	.adap_transmit = drm_dp_cec_adap_transmit,
	.adap_monitor_all_enable = drm_dp_cec_adap_monitor_all_enable,
	.adap_status = drm_dp_cec_adap_status,
};

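/*
 * Read a single received CEC message from the DPCD RX buffer and hand it
 * to the CEC framework.
 */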
static int drm_dp_cec_received(struct drm_dp_aux *aux)
{
	struct cec_adapter *adap = aux->cec.adap;
	struct cec_msg msg;
	u8 rx_msg_info;
	ssize_t err;

	err = drm_dp_dpcd_readb(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info);
	if (err < 0)
		return err;

	if (!(rx_msg_info & DP_CEC_RX_MESSAGE_ENDED))
		return 0;

	msg.len = (rx_msg_info & DP_CEC_RX_MESSAGE_LEN_MASK) + 1;
	err = drm_dp_dpcd_read(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len);
	if (err < 0)
		return err;

	cec_received_msg(adap, &msg);
	return 0;
}

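/*
 * Handle the CEC tunneling IRQ flags: process a pending RX message,
 * report the TX result to the CEC framework, then clear the flags.
 */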
static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux)
{
	struct cec_adapter *adap = aux->cec.adap;
	u8 flags;

	if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0)
		return;

	if (flags & DP_CEC_RX_MESSAGE_INFO_VALID)
		drm_dp_cec_received(aux);

	if (flags & DP_CEC_TX_MESSAGE_SENT)
		cec_transmit_attempt_done(adap, CEC_TX_STATUS_OK);
	else if (flags & DP_CEC_TX_LINE_ERROR)
		cec_transmit_attempt_done(adap, CEC_TX_STATUS_ERROR |
					  CEC_TX_STATUS_MAX_RETRIES);
	else if (flags &
		 (DP_CEC_TX_ADDRESS_NACK_ERROR | DP_CEC_TX_DATA_NACK_ERROR))
		cec_transmit_attempt_done(adap, CEC_TX_STATUS_NACK |
					  CEC_TX_STATUS_MAX_RETRIES);
	drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags);
}

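/**
 * drm_dp_cec_irq() - handle CEC interrupt, if any
 * @aux: DisplayPort AUX channel
 *
 * Should be called when handling an IRQ_HPD request. If CEC-Tunneling-over-AUX
 * is present, this checks for a CEC_IRQ in the device service IRQ vector and
 * handles it accordingly.
 */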
void drm_dp_cec_irq(struct drm_dp_aux *aux)
{
	u8 cec_irq;
	int ret;

	/* No transfer function was set, so not a DP connector */
	if (!aux->transfer)
		return;

	mutex_lock(&aux->cec.lock);
	if (!aux->cec.adap)
		goto unlock;

	ret = drm_dp_dpcd_readb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1,
				&cec_irq);
	if (ret < 0 || !(cec_irq & DP_CEC_IRQ))
		goto unlock;

	drm_dp_cec_handle_irq(aux);
	drm_dp_dpcd_writeb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ);
unlock:
	mutex_unlock(&aux->cec.lock);
}
EXPORT_SYMBOL(drm_dp_cec_irq);

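/*
 * Return true (and optionally the raw capability byte) if the sink
 * advertises DP_CEC_TUNNELING_CAPABLE in its DPCD.
 */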
static bool drm_dp_cec_cap(struct drm_dp_aux *aux, u8 *cec_cap)
{
	u8 cap = 0;

	if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) != 1 ||
	    !(cap & DP_CEC_TUNNELING_CAPABLE))
		return false;
	if (cec_cap)
		*cec_cap = cap;
	return true;
}

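/*
 * Called if the CEC capability was gone for more than
 * drm_dp_cec_unregister_delay seconds: unregister the CEC adapter.
 */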
static void drm_dp_cec_unregister_work(struct work_struct *work)
{
	struct drm_dp_aux *aux = container_of(work, struct drm_dp_aux,
					      cec.unregister_work.work);

	mutex_lock(&aux->cec.lock);
	cec_unregister_adapter(aux->cec.adap);
	aux->cec.adap = NULL;
	mutex_unlock(&aux->cec.lock);
}

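/*
 * A new EDID is set. If there is no CEC adapter, then create one. If
 * there is a CEC adapter, then check if the CEC adapter properties were
 * unchanged and just update the CEC physical address. Otherwise
 * unregister the old CEC adapter and create a new one.
 */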
void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid)
{
	u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD;
	unsigned int num_las = 1;
	u8 cap;

	/* No transfer function was set, so not a DP connector */
	if (!aux->transfer)
		return;

#ifndef CONFIG_MEDIA_CEC_RC
	/*
	 * CEC_CAP_RC is part of CEC_CAP_DEFAULTS, but it is stripped by
	 * cec_allocate_adapter() if CONFIG_MEDIA_CEC_RC is undefined.
	 *
	 * Do this here as well so that the comparison against
	 * aux->cec.adap->capabilities below is correct.
	 */
	cec_caps &= ~CEC_CAP_RC;
#endif
	cancel_delayed_work_sync(&aux->cec.unregister_work);

	mutex_lock(&aux->cec.lock);
	if (!drm_dp_cec_cap(aux, &cap)) {
		/* CEC is not supported, unregister any existing adapter */
		cec_unregister_adapter(aux->cec.adap);
		aux->cec.adap = NULL;
		goto unlock;
	}

	if (cap & DP_CEC_SNOOPING_CAPABLE)
		cec_caps |= CEC_CAP_MONITOR_ALL;
	if (cap & DP_CEC_MULTIPLE_LA_CAPABLE)
		num_las = CEC_MAX_LOG_ADDRS;

	if (aux->cec.adap) {
		if (aux->cec.adap->capabilities == cec_caps &&
		    aux->cec.adap->available_log_addrs == num_las) {
			/* Unchanged, so just set the phys addr */
			cec_s_phys_addr_from_edid(aux->cec.adap, edid);
			goto unlock;
		}
		/*
		 * The capabilities changed, so unregister the old adapter
		 * first, before creating a new one.
		 */
		cec_unregister_adapter(aux->cec.adap);
	}

	/* Create a new adapter */
	aux->cec.adap = cec_allocate_adapter(&drm_dp_cec_adap_ops,
					     aux, aux->cec.name, cec_caps,
					     num_las);
	if (IS_ERR(aux->cec.adap)) {
		aux->cec.adap = NULL;
		goto unlock;
	}
	if (cec_register_adapter(aux->cec.adap, aux->cec.parent)) {
		cec_delete_adapter(aux->cec.adap);
		aux->cec.adap = NULL;
	} else {
		/*
		 * Update the phys addr for the new CEC adapter. When called
		 * from drm_dp_cec_register_connector() edid == NULL, so in
		 * that case the phys addr is just invalidated.
		 */
		cec_s_phys_addr_from_edid(aux->cec.adap, edid);
	}
unlock:
	mutex_unlock(&aux->cec.lock);
}
EXPORT_SYMBOL(drm_dp_cec_set_edid);

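/*
 * The EDID disappeared (likely because of the HPD going down).
 */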
void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
{
	/* No transfer function was set, so not a DP connector */
	if (!aux->transfer)
		return;

	cancel_delayed_work_sync(&aux->cec.unregister_work);

	mutex_lock(&aux->cec.lock);
	if (!aux->cec.adap)
		goto unlock;

	cec_phys_addr_invalidate(aux->cec.adap);
	/*
	 * We're done if the CEC adapter should always be kept around
	 * (drm_dp_cec_unregister_delay >= NEVER_UNREG_DELAY) or if the CEC
	 * tunneling capability is still advertised. Otherwise the capability
	 * disappeared together with the EDID, so unregister after a delay.
	 */
	if (drm_dp_cec_unregister_delay < NEVER_UNREG_DELAY &&
	    !drm_dp_cec_cap(aux, NULL)) {
		/*
		 * Schedule the delayed unregister. If the EDID reappears
		 * before the delay expires, drm_dp_cec_set_edid() cancels
		 * this work.
		 */
		schedule_delayed_work(&aux->cec.unregister_work,
				      drm_dp_cec_unregister_delay * HZ);
	}
unlock:
	mutex_unlock(&aux->cec.lock);
}
EXPORT_SYMBOL(drm_dp_cec_unset_edid);

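/**
 * drm_dp_cec_register_connector() - register a new connector
 * @aux: DisplayPort AUX channel
 * @name: name of the CEC device
 * @parent: parent device
 *
 * A new connector was registered with associated CEC adapter name and CEC
 * adapter parent device. After registering the name and parent,
 * drm_dp_cec_set_edid() is called to create a new CEC adapter and register
 * it.
 */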
void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
				   struct device *parent)
{
	WARN_ON(aux->cec.adap);
	if (WARN_ON(!aux->transfer))
		return;
	aux->cec.name = name;
	aux->cec.parent = parent;
	INIT_DELAYED_WORK(&aux->cec.unregister_work,
			  drm_dp_cec_unregister_work);
}
EXPORT_SYMBOL(drm_dp_cec_register_connector);

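/**
 * drm_dp_cec_unregister_connector() - unregister the CEC adapter, if any
 * @aux: DisplayPort AUX channel
 */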
void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux)
{
	if (!aux->cec.adap)
		return;
	cancel_delayed_work_sync(&aux->cec.unregister_work);
	cec_unregister_adapter(aux->cec.adap);
	aux->cec.adap = NULL;
}
EXPORT_SYMBOL(drm_dp_cec_unregister_connector);