This source file includes the following definitions:
- nvkm_disp_vblank_fini
- nvkm_disp_vblank_init
- nvkm_disp_vblank_ctor
- nvkm_disp_vblank
- nvkm_disp_hpd_ctor
- nvkm_disp_ntfy
- nvkm_disp_class_del
- nvkm_disp_class_new
- nvkm_disp_class_get
- nvkm_disp_intr
- nvkm_disp_fini
- nvkm_disp_init
- nvkm_disp_oneinit
- nvkm_disp_dtor
- nvkm_disp_ctor
- nvkm_disp_new_
#include "priv.h"
#include "conn.h"
#include "dp.h"
#include "head.h"
#include "ior.h"
#include "outp.h"

#include <core/client.h>
#include <core/notify.h>
#include <core/oproxy.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>

#include <nvif/class.h>
#include <nvif/cl0046.h>
#include <nvif/event.h>
#include <nvif/unpack.h>

static void
nvkm_disp_vblank_fini(struct nvkm_event *event, int type, int id)
{
	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
	struct nvkm_head *head = nvkm_head_find(disp, id);
	if (head)
		head->func->vblank_put(head);
}

static void
nvkm_disp_vblank_init(struct nvkm_event *event, int type, int id)
{
	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
	struct nvkm_head *head = nvkm_head_find(disp, id);
	if (head)
		head->func->vblank_get(head);
}

static int
nvkm_disp_vblank_ctor(struct nvkm_object *object, void *data, u32 size,
		      struct nvkm_notify *notify)
{
	struct nvkm_disp *disp =
		container_of(notify->event, typeof(*disp), vblank);
	union {
		struct nvif_notify_head_req_v0 v0;
	} *req = data;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &data, &size, req->v0, 0, 0, false))) {
		notify->size = sizeof(struct nvif_notify_head_rep_v0);
		if (ret = -ENXIO, req->v0.head <= disp->vblank.index_nr) {
			notify->types = 1;
			notify->index = req->v0.head;
			return 0;
		}
	}

	return ret;
}

static const struct nvkm_event_func
nvkm_disp_vblank_func = {
	.ctor = nvkm_disp_vblank_ctor,
	.init = nvkm_disp_vblank_init,
	.fini = nvkm_disp_vblank_fini,
};
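
/* Send a vblank event for the given head to any registered notifiers. */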
void
nvkm_disp_vblank(struct nvkm_disp *disp, int head)
{
	struct nvif_notify_head_rep_v0 rep = {};
	nvkm_event_send(&disp->vblank, 1, head, &rep, sizeof(rep));
}
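
/* Notifier ctor for hotplug events: resolve the requested connector and
 * require that it has an HPD event source before arming the notifier.
 */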
static int
nvkm_disp_hpd_ctor(struct nvkm_object *object, void *data, u32 size,
		   struct nvkm_notify *notify)
{
	struct nvkm_disp *disp =
		container_of(notify->event, typeof(*disp), hpd);
	union {
		struct nvif_notify_conn_req_v0 v0;
	} *req = data;
	struct nvkm_outp *outp;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &data, &size, req->v0, 0, 0, false))) {
		notify->size = sizeof(struct nvif_notify_conn_rep_v0);
		list_for_each_entry(outp, &disp->outp, head) {
			if (ret = -ENXIO, outp->conn->index == req->v0.conn) {
				if (ret = -ENODEV, outp->conn->hpd.event) {
					notify->types = req->v0.mask;
					notify->index = req->v0.conn;
					ret = 0;
				}
				break;
			}
		}
	}

	return ret;
}

static const struct nvkm_event_func
nvkm_disp_hpd_func = {
	.ctor = nvkm_disp_hpd_ctor,
};
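
/* Return the event object backing the requested NV04_DISP_NTFY_* type. */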
int
nvkm_disp_ntfy(struct nvkm_object *object, u32 type, struct nvkm_event **event)
{
	struct nvkm_disp *disp = nvkm_disp(object->engine);
	switch (type) {
	case NV04_DISP_NTFY_VBLANK:
		*event = &disp->vblank;
		return 0;
	case NV04_DISP_NTFY_CONN:
		*event = &disp->hpd;
		return 0;
	default:
		break;
	}
	return -EINVAL;
}
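
/* The display class is handed out through an object proxy so only one client
 * can own it at a time; the proxy destructor releases that ownership.
 */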
static void
nvkm_disp_class_del(struct nvkm_oproxy *oproxy)
{
	struct nvkm_disp *disp = nvkm_disp(oproxy->base.engine);
	mutex_lock(&disp->engine.subdev.mutex);
	if (disp->client == oproxy)
		disp->client = NULL;
	mutex_unlock(&disp->engine.subdev.mutex);
}

static const struct nvkm_oproxy_func
nvkm_disp_class = {
	.dtor[1] = nvkm_disp_class_del,
};

static int
nvkm_disp_class_new(struct nvkm_device *device,
		    const struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	const struct nvkm_disp_oclass *sclass = oclass->engn;
	struct nvkm_disp *disp = nvkm_disp(oclass->engine);
	struct nvkm_oproxy *oproxy;
	int ret;

	ret = nvkm_oproxy_new_(&nvkm_disp_class, oclass, &oproxy);
	if (ret)
		return ret;
	*pobject = &oproxy->base;

	mutex_lock(&disp->engine.subdev.mutex);
	if (disp->client) {
		mutex_unlock(&disp->engine.subdev.mutex);
		return -EBUSY;
	}
	disp->client = oproxy;
	mutex_unlock(&disp->engine.subdev.mutex);

	return sclass->ctor(disp, oclass, data, size, &oproxy->object);
}

static const struct nvkm_device_oclass
nvkm_disp_sclass = {
	.ctor = nvkm_disp_class_new,
};

static int
nvkm_disp_class_get(struct nvkm_oclass *oclass, int index,
		    const struct nvkm_device_oclass **class)
{
	struct nvkm_disp *disp = nvkm_disp(oclass->engine);
	if (index == 0) {
		const struct nvkm_disp_oclass *root = disp->func->root(disp);
		oclass->base = root->base;
		oclass->engn = root;
		*class = &nvkm_disp_sclass;
		return 0;
	}
	return 1;
}

static void
nvkm_disp_intr(struct nvkm_engine *engine)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	disp->func->intr(disp);
}
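
/* Engine fini (also used on suspend): implementation-specific teardown first,
 * then each output path and connector.
 */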
static int
nvkm_disp_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	struct nvkm_conn *conn;
	struct nvkm_outp *outp;

	if (disp->func->fini)
		disp->func->fini(disp);

	list_for_each_entry(outp, &disp->outp, head) {
		nvkm_outp_fini(outp);
	}

	list_for_each_entry(conn, &disp->conn, head) {
		nvkm_conn_fini(conn);
	}

	return 0;
}
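
/* Engine init: bring connectors and output paths up before the
 * implementation-specific init.
 */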
static int
nvkm_disp_init(struct nvkm_engine *engine)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	struct nvkm_conn *conn;
	struct nvkm_outp *outp;
	struct nvkm_ior *ior;

	list_for_each_entry(conn, &disp->conn, head) {
		nvkm_conn_init(conn);
	}

	list_for_each_entry(outp, &disp->outp, head) {
		nvkm_outp_init(outp);
	}

	if (disp->func->init) {
		int ret = disp->func->init(disp);
		if (ret)
			return ret;
	}
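
	/* Set every output resource (IOR) to its fully-enabled power state. */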
	list_for_each_entry(ior, &disp->ior, head) {
		ior->func->power(ior, true, true, true, true, true);
	}

	return 0;
}
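
/* One-time setup: build output path and connector lists from the VBIOS DCB
 * table, then create the hotplug and vblank event objects.
 */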
static int
nvkm_disp_oneinit(struct nvkm_engine *engine)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct nvkm_outp *outp, *outt, *pair;
	struct nvkm_conn *conn;
	struct nvkm_head *head;
	struct nvkm_ior *ior;
	struct nvbios_connE connE;
	struct dcb_output dcbE;
	u8 hpd = 0, ver, hdr;
	u32 data;
	int ret, i;
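
	/* Create an output path object for each display path described by the
	 * VBIOS DCB table.
	 */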
	i = -1;
	while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
		if (ver < 0x40)
			break;
		if (dcbE.type == DCB_OUTPUT_UNUSED)
			continue;
		if (dcbE.type == DCB_OUTPUT_EOL)
			break;
		outp = NULL;

		switch (dcbE.type) {
		case DCB_OUTPUT_ANALOG:
		case DCB_OUTPUT_TV:
		case DCB_OUTPUT_TMDS:
		case DCB_OUTPUT_LVDS:
			ret = nvkm_outp_new(disp, i, &dcbE, &outp);
			break;
		case DCB_OUTPUT_DP:
			ret = nvkm_dp_new(disp, i, &dcbE, &outp);
			break;
		case DCB_OUTPUT_WFD:
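			/* WFD output paths are not supported. */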
			ret = -ENODEV;
			continue;
		default:
			nvkm_warn(subdev, "dcb %d type %d unknown\n",
				  i, dcbE.type);
			continue;
		}

		if (ret) {
			if (outp) {
				if (ret != -ENODEV)
					OUTP_ERR(outp, "ctor failed: %d", ret);
				else
					OUTP_DBG(outp, "not supported");
				nvkm_outp_del(&outp);
				continue;
			}
			nvkm_error(subdev, "failed to create outp %d\n", i);
			continue;
		}

		list_add_tail(&outp->head, &disp->outp);
		hpd = max(hpd, (u8)(dcbE.connector + 1));
	}
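
	/* Create connector objects for the output paths registered above. */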
	list_for_each_entry_safe(outp, outt, &disp->outp, head) {
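		/* Prefer connector information from the VBIOS, when present. */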
		data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr,
				     &connE);
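
		/* No VBIOS connector data for this output path. */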
		if (!data) {
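			/* Heuristic: output paths sharing a CCB (I2C) index are assumed
			 * to share a connector; a path without a CCB entry (0xf) gets a
			 * connector of its own.
			 */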
			int ccb_index = outp->info.i2c_index;
			if (ccb_index != 0xf) {
				list_for_each_entry(pair, &disp->outp, head) {
					if (pair->info.i2c_index == ccb_index) {
						outp->conn = pair->conn;
						break;
					}
				}
			}
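
			/* Connector is shared with another output path. */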
			if (outp->conn)
				continue;

			memset(&connE, 0x00, sizeof(connE));
			connE.type = DCB_CONNECTOR_NONE;
			i = -1;
		} else {
			i = outp->info.connector;
		}
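
		/* Check whether this connector has already been created. */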
		list_for_each_entry(conn, &disp->conn, head) {
			if (conn->index == outp->info.connector) {
				outp->conn = conn;
				break;
			}
		}

		if (outp->conn)
			continue;
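
		/* Otherwise, create a new connector object. */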
		ret = nvkm_conn_new(disp, i, &connE, &outp->conn);
		if (ret) {
			nvkm_error(&disp->engine.subdev,
				   "failed to create outp %d conn: %d\n",
				   outp->index, ret);
			nvkm_conn_del(&outp->conn);
			list_del(&outp->head);
			nvkm_outp_del(&outp);
			continue;
		}

		list_add_tail(&outp->conn->head, &disp->conn);
	}

	ret = nvkm_event_init(&nvkm_disp_hpd_func, 3, hpd, &disp->hpd);
	if (ret)
		return ret;

	if (disp->func->oneinit) {
		ret = disp->func->oneinit(disp);
		if (ret)
			return ret;
	}
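
	/* Mark LVDS/eDP output paths, and the SORs they route through, as
	 * identity-mapped; panel outputs are wired to a specific SOR.
	 */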
	list_for_each_entry(outp, &disp->outp, head) {
		if (outp->conn->info.type == DCB_CONNECTOR_LVDS ||
		    outp->conn->info.type == DCB_CONNECTOR_eDP) {
			ior = nvkm_ior_find(disp, SOR, ffs(outp->info.or) - 1);
			if (!WARN_ON(!ior))
				ior->identity = true;
			outp->identity = true;
		}
	}

	i = 0;
	list_for_each_entry(head, &disp->head, head)
		i = max(i, head->id + 1);

	return nvkm_event_init(&nvkm_disp_vblank_func, 1, i, &disp->vblank);
}

static void *
nvkm_disp_dtor(struct nvkm_engine *engine)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	struct nvkm_conn *conn;
	struct nvkm_outp *outp;
	void *data = disp;

	if (disp->func->dtor)
		data = disp->func->dtor(disp);

	nvkm_event_fini(&disp->vblank);
	nvkm_event_fini(&disp->hpd);

	while (!list_empty(&disp->conn)) {
		conn = list_first_entry(&disp->conn, typeof(*conn), head);
		list_del(&conn->head);
		nvkm_conn_del(&conn);
	}

	while (!list_empty(&disp->outp)) {
		outp = list_first_entry(&disp->outp, typeof(*outp), head);
		list_del(&outp->head);
		nvkm_outp_del(&outp);
	}

	while (!list_empty(&disp->ior)) {
		struct nvkm_ior *ior =
			list_first_entry(&disp->ior, typeof(*ior), head);
		nvkm_ior_del(&ior);
	}

	while (!list_empty(&disp->head)) {
		struct nvkm_head *head =
			list_first_entry(&disp->head, typeof(*head), head);
		nvkm_head_del(&head);
	}

	return data;
}

static const struct nvkm_engine_func
nvkm_disp = {
	.dtor = nvkm_disp_dtor,
	.oneinit = nvkm_disp_oneinit,
	.init = nvkm_disp_init,
	.fini = nvkm_disp_fini,
	.intr = nvkm_disp_intr,
	.base.sclass = nvkm_disp_class_get,
};

int
nvkm_disp_ctor(const struct nvkm_disp_func *func, struct nvkm_device *device,
	       int index, struct nvkm_disp *disp)
{
	disp->func = func;
	INIT_LIST_HEAD(&disp->head);
	INIT_LIST_HEAD(&disp->ior);
	INIT_LIST_HEAD(&disp->outp);
	INIT_LIST_HEAD(&disp->conn);
	return nvkm_engine_ctor(&nvkm_disp, device, index, true, &disp->engine);
}

int
nvkm_disp_new_(const struct nvkm_disp_func *func, struct nvkm_device *device,
	       int index, struct nvkm_disp **pdisp)
{
	if (!(*pdisp = kzalloc(sizeof(**pdisp), GFP_KERNEL)))
		return -ENOMEM;
	return nvkm_disp_ctor(func, device, index, *pdisp);
}